/*******************************************************************************
 * Filename:  target_core_iblock.c
 *
 * This file contains the Storage Engine <-> Linux BlockIO transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_iblock.h"

#define IBLOCK_MAX_BIO_PER_TASK	 32	/* max # of bios to submit at a time */
#define IBLOCK_BIO_POOL_SIZE	128
static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
{
	return container_of(dev, struct iblock_dev, dev);
}
static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
	pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		IBLOCK_VERSION, TARGET_CORE_VERSION);
	return 0;
}

static void iblock_detach_hba(struct se_hba *hba)
{
}
static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
{
	struct iblock_dev *ib_dev = NULL;

	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
	if (!ib_dev) {
		pr_err("Unable to allocate struct iblock_dev\n");
		return NULL;
	}

	pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);

	return &ib_dev->dev;
}
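
/*
 * Configuration claims the backing struct block_device named by the
 * configfs-supplied udev_path and copies the request_queue limits
 * (logical block size, max sectors, discard geometry) into the
 * se_device attributes exposed to initiators.
 */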
static int iblock_configure_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct request_queue *q;
	struct block_device *bd = NULL;
	struct blk_integrity *bi;
	fmode_t mode;
	int ret = -ENOMEM;

	if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
		pr_err("Missing udev_path= parameters for IBLOCK\n");
		return -EINVAL;
	}

	ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0);
	if (!ib_dev->ibd_bio_set) {
		pr_err("IBLOCK: Unable to create bioset\n");
		goto out;
	}

	pr_debug("IBLOCK: Claiming struct block_device: %s\n",
			ib_dev->ibd_udev_path);

	mode = FMODE_READ|FMODE_EXCL;
	if (!ib_dev->ibd_readonly)
		mode |= FMODE_WRITE;

	bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
	if (IS_ERR(bd)) {
		ret = PTR_ERR(bd);
		goto out_free_bioset;
	}
	ib_dev->ibd_bd = bd;

	q = bdev_get_queue(bd);

	dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
	dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
	dev->dev_attrib.hw_queue_depth = q->nr_requests;

	/*
	 * Check if the underlying struct block_device request_queue supports
	 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
	 * in ATA and we need to set TPE=1
	 */
	if (blk_queue_discard(q)) {
		dev->dev_attrib.max_unmap_lba_count =
				q->limits.max_discard_sectors;

		/*
		 * Currently hardcoded to 1 in Linux/SCSI code..
		 */
		dev->dev_attrib.max_unmap_block_desc_count = 1;
		dev->dev_attrib.unmap_granularity =
				q->limits.discard_granularity >> 9;
		dev->dev_attrib.unmap_granularity_alignment =
				q->limits.discard_alignment;

		pr_debug("IBLOCK: BLOCK Discard support available,"
				" disabled by default\n");
	}
	/*
	 * Enable write same emulation for IBLOCK and use 0xFFFF as
	 * the smaller WRITE_SAME(10) only has a two-byte block count.
	 */
	dev->dev_attrib.max_write_same_len = 0xFFFF;

	if (blk_queue_nonrot(q))
		dev->dev_attrib.is_nonrot = 1;

	bi = bdev_get_integrity(bd);
	if (bi) {
		struct bio_set *bs = ib_dev->ibd_bio_set;

		if (!strcmp(bi->name, "T10-DIF-TYPE3-IP") ||
		    !strcmp(bi->name, "T10-DIF-TYPE1-IP")) {
			pr_err("IBLOCK export of blk_integrity: %s not"
			       " supported\n", bi->name);
			ret = -ENOSYS;
			goto out_blkdev_put;
		}

		if (!strcmp(bi->name, "T10-DIF-TYPE3-CRC")) {
			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
		} else if (!strcmp(bi->name, "T10-DIF-TYPE1-CRC")) {
			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
		}

		if (dev->dev_attrib.pi_prot_type) {
			if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
				pr_err("Unable to allocate bioset for PI\n");
				ret = -ENOMEM;
				goto out_blkdev_put;
			}
			pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
				 bs->bio_integrity_pool);
		}
		dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
	}

	return 0;

out_blkdev_put:
	blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
out_free_bioset:
	bioset_free(ib_dev->ibd_bio_set);
	ib_dev->ibd_bio_set = NULL;
out:
	return ret;
}
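
/*
 * Device release is two-step: iblock_free_device() drops the claimed
 * block_device and bioset immediately, while the iblock_dev allocation
 * itself is freed via call_rcu() once readers of the se_device are done.
 */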
static void iblock_dev_call_rcu(struct rcu_head *p)
{
	struct se_device *dev = container_of(p, struct se_device, rcu_head);
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

	kfree(ib_dev);
}

static void iblock_free_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

	if (ib_dev->ibd_bd != NULL)
		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
	if (ib_dev->ibd_bio_set != NULL)
		bioset_free(ib_dev->ibd_bio_set);

	call_rcu(&dev->rcu_head, iblock_dev_call_rcu);
}
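
/*
 * READ CAPACITY emulation: the backing device capacity is known in units
 * of its logical block size, so when the exported block_size differs the
 * last-LBA value is rescaled by shifting between the two sizes.
 */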
static unsigned long long iblock_emulate_read_cap_with_block_size(
	struct se_device *dev,
	struct block_device *bd,
	struct request_queue *q)
{
	unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
					bdev_logical_block_size(bd)) - 1);
	u32 block_size = bdev_logical_block_size(bd);

	if (block_size == dev->dev_attrib.block_size)
		return blocks_long;

	switch (block_size) {
	case 4096:
		switch (dev->dev_attrib.block_size) {
		case 2048:
			blocks_long <<= 1;
			break;
		case 1024:
			blocks_long <<= 2;
			break;
		case 512:
			blocks_long <<= 3;
			break;
		default:
			break;
		}
		break;
	case 2048:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 1;
			break;
		case 1024:
			blocks_long <<= 1;
			break;
		case 512:
			blocks_long <<= 2;
			break;
		default:
			break;
		}
		break;
	case 1024:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 2;
			break;
		case 2048:
			blocks_long >>= 1;
			break;
		case 512:
			blocks_long <<= 1;
			break;
		default:
			break;
		}
		break;
	case 512:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 3;
			break;
		case 2048:
			blocks_long >>= 2;
			break;
		case 1024:
			blocks_long >>= 1;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	return blocks_long;
}
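
/*
 * Per-command completion tracking: ibr->pending holds one reference per
 * in-flight bio (the read/write path also keeps one for the submitter),
 * and status is reported to the target core only when the count drops
 * to zero.
 */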
static void iblock_complete_cmd(struct se_cmd *cmd)
{
	struct iblock_req *ibr = cmd->priv;
	u8 status;

	if (!atomic_dec_and_test(&ibr->pending))
		return;

	if (atomic_read(&ibr->ib_bio_err_cnt))
		status = SAM_STAT_CHECK_CONDITION;
	else
		status = SAM_STAT_GOOD;

	target_complete_cmd(cmd, status);
	kfree(ibr);
}
static void iblock_bio_done(struct bio *bio)
{
	struct se_cmd *cmd = bio->bi_private;
	struct iblock_req *ibr = cmd->priv;

	if (bio->bi_error) {
		pr_err("bio error: %p, err: %d\n", bio, bio->bi_error);
		/*
		 * Bump the ib_bio_err_cnt and release bio.
		 */
		atomic_inc(&ibr->ib_bio_err_cnt);
		smp_mb__after_atomic();
	}

	bio_put(bio);

	iblock_complete_cmd(cmd);
}
static struct bio *
iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	struct bio *bio;

	/*
	 * Only allocate as many vector entries as the bio code allows us to,
	 * we'll loop later on until we have handled the whole request.
	 */
	if (sg_num > BIO_MAX_PAGES)
		sg_num = BIO_MAX_PAGES;

	bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
	if (!bio) {
		pr_err("Unable to allocate memory for bio\n");
		return NULL;
	}

	bio->bi_bdev = ib_dev->ibd_bd;
	bio->bi_private = cmd;
	bio->bi_end_io = &iblock_bio_done;
	bio->bi_iter.bi_sector = lba;

	return bio;
}
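
/*
 * Submit all queued bios under a blk_plug so the block layer gets a
 * chance to merge adjacent requests before they are dispatched.
 */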
static void iblock_submit_bios(struct bio_list *list, int rw)
{
	struct blk_plug plug;
	struct bio *bio;

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(list)))
		submit_bio(rw, bio);
	blk_finish_plug(&plug);
}
static void iblock_end_io_flush(struct bio *bio)
{
	struct se_cmd *cmd = bio->bi_private;

	if (bio->bi_error)
		pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_error);

	if (cmd) {
		if (bio->bi_error)
			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
		else
			target_complete_cmd(cmd, SAM_STAT_GOOD);
	}

	bio_put(bio);
}
/*
 * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
 * always flush the whole cache.
 */
static sense_reason_t
iblock_execute_sync_cache(struct se_cmd *cmd)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	int immed = (cmd->t_task_cdb[1] & 0x2);
	struct bio *bio;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op.
	 */
	if (immed)
		target_complete_cmd(cmd, SAM_STAT_GOOD);

	bio = bio_alloc(GFP_KERNEL, 0);
	bio->bi_end_io = iblock_end_io_flush;
	bio->bi_bdev = ib_dev->ibd_bd;
	if (!immed)
		bio->bi_private = cmd;
	submit_bio(WRITE_FLUSH, bio);
	return 0;
}
static sense_reason_t
iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
{
	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
	int ret;

	ret = blkdev_issue_discard(bdev, lba, nolb, GFP_KERNEL, 0);
	if (ret < 0) {
		pr_err("blkdev_issue_discard() failed: %d\n", ret);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	return 0;
}
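
/*
 * WRITE SAME is emulated by repeatedly mapping the single-block payload
 * page into bios until the requested number of sectors is covered.
 */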
static sense_reason_t
iblock_execute_write_same(struct se_cmd *cmd)
{
	struct iblock_req *ibr;
	struct scatterlist *sg;
	struct bio *bio;
	struct bio_list list;
	sector_t block_lba = cmd->t_task_lba;
	sector_t sectors = sbc_get_write_same_sectors(cmd);

	if (cmd->prot_op) {
		pr_err("WRITE_SAME: Protection information with IBLOCK"
		       " backends not supported\n");
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	sg = &cmd->t_data_sg[0];

	if (cmd->t_data_nents > 1 ||
	    sg->length != cmd->se_dev->dev_attrib.block_size) {
		pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
			" block_size: %u\n", cmd->t_data_nents, sg->length,
			cmd->se_dev->dev_attrib.block_size);
		return TCM_INVALID_CDB_FIELD;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	bio = iblock_get_bio(cmd, block_lba, 1);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	atomic_set(&ibr->pending, 1);

	while (sectors) {
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {

			bio = iblock_get_bio(cmd, block_lba, 1);
			if (!bio)
				goto fail_put_bios;

			atomic_inc(&ibr->pending);
			bio_list_add(&list, bio);
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
		sectors -= 1;
	}

	iblock_submit_bios(&list, WRITE);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
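
/*
 * configfs device parameters, parsed from a comma/newline separated
 * string, e.g. "udev_path=/dev/sdb,readonly=1" (example values).
 */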
enum {
	Opt_udev_path, Opt_readonly, Opt_force, Opt_err
};

static match_table_t tokens = {
	{Opt_udev_path, "udev_path=%s"},
	{Opt_readonly, "readonly=%d"},
	{Opt_force, "force=%d"},
	{Opt_err, NULL}
};
static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;
	unsigned long tmp_readonly;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_udev_path:
			if (ib_dev->ibd_bd) {
				pr_err("Unable to set udev_path= while"
					" ib_dev->ibd_bd exists\n");
				ret = -EEXIST;
				goto out;
			}
			if (match_strlcpy(ib_dev->ibd_udev_path, &args[0],
				SE_UDEV_PATH_LEN) == 0) {
				ret = -EINVAL;
				break;
			}
			pr_debug("IBLOCK: Referencing UDEV path: %s\n",
					ib_dev->ibd_udev_path);
			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
			break;
		case Opt_readonly:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = kstrtoul(arg_p, 0, &tmp_readonly);
			kfree(arg_p);
			if (ret < 0) {
				pr_err("kstrtoul() failed for"
						" readonly=\n");
				goto out;
			}
			ib_dev->ibd_readonly = tmp_readonly;
			pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
			break;
		case Opt_force:
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}
static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	char buf[BDEVNAME_SIZE];
	ssize_t bl = 0;

	if (bd)
		bl += sprintf(b + bl, "iBlock device: %s",
				bdevname(bd, buf));
	if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
		bl += sprintf(b + bl, " UDEV PATH: %s",
				ib_dev->ibd_udev_path);
	bl += sprintf(b + bl, " readonly: %d\n", ib_dev->ibd_readonly);

	bl += sprintf(b + bl, " ");
	if (bd) {
		bl += sprintf(b + bl, "Major: %d Minor: %d %s\n",
			MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
			"" : (bd->bd_holder == ib_dev) ?
			"CLAIMED: IBLOCK" : "CLAIMED: OS");
	} else {
		bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
	}

	return bl;
}
static int
iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio)
{
	struct se_device *dev = cmd->se_dev;
	struct blk_integrity *bi;
	struct bio_integrity_payload *bip;
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct scatterlist *sg;
	int i, rc;

	bi = bdev_get_integrity(ib_dev->ibd_bd);
	if (!bi) {
		pr_err("Unable to locate bio_integrity\n");
		return -ENODEV;
	}

	bip = bio_integrity_alloc(bio, GFP_NOIO, cmd->t_prot_nents);
	if (IS_ERR(bip)) {
		pr_err("Unable to allocate bio_integrity_payload\n");
		return PTR_ERR(bip);
	}

	bip->bip_iter.bi_size = (cmd->data_length / dev->dev_attrib.block_size) *
			 dev->prot_length;
	bip->bip_iter.bi_sector = bio->bi_iter.bi_sector;

	pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
		 (unsigned long long)bip->bip_iter.bi_sector);

	for_each_sg(cmd->t_prot_sg, sg, cmd->t_prot_nents, i) {

		rc = bio_integrity_add_page(bio, sg_page(sg), sg->length,
					    sg->offset);
		if (rc != sg->length) {
			pr_err("bio_integrity_add_page() failed; %d\n", rc);
			return -ENOMEM;
		}

		pr_debug("Added bio integrity page: %p length: %d offset; %d\n",
			 sg_page(sg), sg->length, sg->offset);
	}

	return 0;
}
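
/*
 * Main READ/WRITE path: map the data scatterlist onto one or more bios,
 * submitting in batches of IBLOCK_MAX_BIO_PER_TASK to bound memory use,
 * and select WRITE_FUA writethrough when the queue or initiator asks
 * for it.
 */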
static sense_reason_t
iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
		  enum dma_data_direction data_direction)
{
	struct se_device *dev = cmd->se_dev;
	struct iblock_req *ibr;
	struct bio *bio, *bio_start;
	struct bio_list list;
	struct scatterlist *sg;
	u32 sg_num = sgl_nents;
	sector_t block_lba;
	unsigned bio_cnt;
	int rw = 0;
	int i;

	if (data_direction == DMA_TO_DEVICE) {
		struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
		struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
		/*
		 * Force writethrough using WRITE_FUA if a volatile write cache
		 * is not enabled, or if initiator set the Force Unit Access bit.
		 */
		if (q->flush_flags & REQ_FUA) {
			if (cmd->se_cmd_flags & SCF_FUA)
				rw = WRITE_FUA;
			else if (!(q->flush_flags & REQ_FLUSH))
				rw = WRITE_FUA;
			else
				rw = WRITE;
		} else {
			rw = WRITE;
		}
	} else {
		rw = READ;
	}

	/*
	 * Convert the blocksize advertised to the initiator to the 512 byte
	 * units unconditionally used by the Linux block layer.
	 */
	if (dev->dev_attrib.block_size == 4096)
		block_lba = (cmd->t_task_lba << 3);
	else if (dev->dev_attrib.block_size == 2048)
		block_lba = (cmd->t_task_lba << 2);
	else if (dev->dev_attrib.block_size == 1024)
		block_lba = (cmd->t_task_lba << 1);
	else if (dev->dev_attrib.block_size == 512)
		block_lba = cmd->t_task_lba;
	else {
		pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
				" %u\n", dev->dev_attrib.block_size);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	if (!sgl_nents) {
		atomic_set(&ibr->pending, 1);
		iblock_complete_cmd(cmd);
		return 0;
	}

	bio = iblock_get_bio(cmd, block_lba, sgl_nents);
	if (!bio)
		goto fail_free_ibr;

	bio_start = bio;
	bio_list_init(&list);
	bio_list_add(&list, bio);

	atomic_set(&ibr->pending, 2);
	bio_cnt = 1;

	for_each_sg(sgl, sg, sgl_nents, i) {
		/*
		 * XXX: if the length the device accepts is shorter than the
		 *	length of the S/G list entry this will cause an
		 *	endless loop.  Better hope no driver uses huge pages.
		 */
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
				iblock_submit_bios(&list, rw);
				bio_cnt = 0;
			}

			bio = iblock_get_bio(cmd, block_lba, sg_num);
			if (!bio)
				goto fail_put_bios;

			atomic_inc(&ibr->pending);
			bio_list_add(&list, bio);
			bio_cnt++;
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
		sg_num--;
	}

	if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
		int rc = iblock_alloc_bip(cmd, bio_start);
		if (rc)
			goto fail_put_bios;
	}

	iblock_submit_bios(&list, rw);
	iblock_complete_cmd(cmd);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
static sector_t iblock_get_blocks(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}
static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	int ret;

	ret = bdev_alignment_offset(bd);
	if (ret == -1)
		return 0;

	/* convert offset-bytes to offset-lbas */
	return ret / bdev_logical_block_size(bd);
}
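
/*
 * LBPPBE: logical blocks per physical block exponent, reported in
 * READ CAPACITY(16) as log2(physical block size / logical block size).
 */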
static unsigned int iblock_get_lbppbe(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	int logs_per_phys = bdev_physical_block_size(bd) / bdev_logical_block_size(bd);

	return ilog2(logs_per_phys);
}
static unsigned int iblock_get_io_min(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;

	return bdev_io_min(bd);
}

static unsigned int iblock_get_io_opt(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;

	return bdev_io_opt(bd);
}
static struct sbc_ops iblock_sbc_ops = {
	.execute_rw		= iblock_execute_rw,
	.execute_sync_cache	= iblock_execute_sync_cache,
	.execute_write_same	= iblock_execute_write_same,
	.execute_unmap		= iblock_execute_unmap,
};

static sense_reason_t
iblock_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &iblock_sbc_ops);
}
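
/*
 * A volatile write cache is signalled by REQ_FLUSH in the queue's flush
 * flags; the target core uses this to report the WCE bit to initiators.
 */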
static bool iblock_get_write_cache(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return q->flush_flags & REQ_FLUSH;
}
static const struct target_backend_ops iblock_ops = {
	.name			= "iblock",
	.inquiry_prod		= "IBLOCK",
	.inquiry_rev		= IBLOCK_VERSION,
	.owner			= THIS_MODULE,
	.attach_hba		= iblock_attach_hba,
	.detach_hba		= iblock_detach_hba,
	.alloc_device		= iblock_alloc_device,
	.configure_device	= iblock_configure_device,
	.free_device		= iblock_free_device,
	.parse_cdb		= iblock_parse_cdb,
	.set_configfs_dev_params = iblock_set_configfs_dev_params,
	.show_configfs_dev_params = iblock_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= iblock_get_blocks,
	.get_alignment_offset_lbas = iblock_get_alignment_offset_lbas,
	.get_lbppbe		= iblock_get_lbppbe,
	.get_io_min		= iblock_get_io_min,
	.get_io_opt		= iblock_get_io_opt,
	.get_write_cache	= iblock_get_write_cache,
	.tb_dev_attrib_attrs	= sbc_attrib_attrs,
};
static int __init iblock_module_init(void)
{
	return transport_backend_register(&iblock_ops);
}

static void __exit iblock_module_exit(void)
{
	target_backend_unregister(&iblock_ops);
}

MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iblock_module_init);
module_exit(iblock_module_exit);