// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_iblock.c
 *
 * This file contains the Storage Engine <-> Linux BlockIO transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_iblock.h"

#define IBLOCK_MAX_BIO_PER_TASK	 32	/* max # of bios to submit at a time */
#define IBLOCK_BIO_POOL_SIZE	128
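/*
 * Note: IBLOCK_BIO_POOL_SIZE is the number of bios pre-allocated in each
 * device's bioset, reserved so that I/O submission can keep making forward
 * progress under memory pressure.
 */
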
static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
{
        return container_of(dev, struct iblock_dev, dev);
}

static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
        pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
                " Generic Target Core Stack %s\n", hba->hba_id,
                IBLOCK_VERSION, TARGET_CORE_VERSION);
        return 0;
}

static void iblock_detach_hba(struct se_hba *hba)
{
}

static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
{
        struct iblock_dev *ib_dev = NULL;

        ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
        if (!ib_dev) {
                pr_err("Unable to allocate struct iblock_dev\n");
                return NULL;
        }

        ib_dev->ibd_plug = kcalloc(nr_cpu_ids, sizeof(*ib_dev->ibd_plug),
                                   GFP_KERNEL);
        if (!ib_dev->ibd_plug)
                goto free_dev;

        pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);

        return &ib_dev->dev;

free_dev:
        kfree(ib_dev);
        return NULL;
}

static int iblock_configure_device(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct request_queue *q;
        struct block_device *bd = NULL;
        struct blk_integrity *bi;
        fmode_t mode;
        unsigned int max_write_zeroes_sectors;
        int ret;

        if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
                pr_err("Missing udev_path= parameters for IBLOCK\n");
                return -EINVAL;
        }

        ret = bioset_init(&ib_dev->ibd_bio_set, IBLOCK_BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
        if (ret) {
                pr_err("IBLOCK: Unable to create bioset\n");
                goto out;
        }

        pr_debug("IBLOCK: Claiming struct block_device: %s\n",
                        ib_dev->ibd_udev_path);

        mode = FMODE_READ|FMODE_EXCL;
        if (!ib_dev->ibd_readonly)
                mode |= FMODE_WRITE;
        else
                dev->dev_flags |= DF_READ_ONLY;

        bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
        if (IS_ERR(bd)) {
                ret = PTR_ERR(bd);
                goto out_free_bioset;
        }
        ib_dev->ibd_bd = bd;

        q = bdev_get_queue(bd);

        dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
        dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
        dev->dev_attrib.hw_queue_depth = q->nr_requests;

        if (target_configure_unmap_from_queue(&dev->dev_attrib, bd))
                pr_debug("IBLOCK: BLOCK Discard support available,"
                         " disabled by default\n");

        /*
         * Enable write same emulation for IBLOCK and use 0xFFFF as
         * the smaller WRITE_SAME(10) only has a two-byte block count.
         */
        max_write_zeroes_sectors = bdev_write_zeroes_sectors(bd);
        if (max_write_zeroes_sectors)
                dev->dev_attrib.max_write_same_len = max_write_zeroes_sectors;
        else
                dev->dev_attrib.max_write_same_len = 0xFFFF;

        if (bdev_nonrot(bd))
                dev->dev_attrib.is_nonrot = 1;

        bi = bdev_get_integrity(bd);
        if (bi) {
                struct bio_set *bs = &ib_dev->ibd_bio_set;

                if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-IP") ||
                    !strcmp(bi->profile->name, "T10-DIF-TYPE1-IP")) {
                        pr_err("IBLOCK export of blk_integrity: %s not"
                               " supported\n", bi->profile->name);
                        ret = -ENOSYS;
                        goto out_blkdev_put;
                }

                if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-CRC")) {
                        dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
                } else if (!strcmp(bi->profile->name, "T10-DIF-TYPE1-CRC")) {
                        dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
                }

                if (dev->dev_attrib.pi_prot_type) {
                        if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
                                pr_err("Unable to allocate bioset for PI\n");
                                ret = -ENOMEM;
                                goto out_blkdev_put;
                        }
                        pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
                                 &bs->bio_integrity_pool);
                }
                dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
        }

        return 0;

out_blkdev_put:
        blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
out_free_bioset:
        bioset_exit(&ib_dev->ibd_bio_set);
out:
        return ret;
}

static void iblock_dev_call_rcu(struct rcu_head *p)
{
        struct se_device *dev = container_of(p, struct se_device, rcu_head);
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

        kfree(ib_dev->ibd_plug);
        kfree(ib_dev);
}

static void iblock_free_device(struct se_device *dev)
{
        call_rcu(&dev->rcu_head, iblock_dev_call_rcu);
}

static void iblock_destroy_device(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

        if (ib_dev->ibd_bd != NULL)
                blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
        bioset_exit(&ib_dev->ibd_bio_set);
}

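/*
 * Batched submission support: the target core can plug/unplug around a run
 * of queued commands. Each CPU gets its own iblock_dev_plug, so submitters
 * on different CPUs batch bios independently without extra locking; the
 * PLUGGED bit guards against re-plugging from the same context.
 */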
static struct se_dev_plug *iblock_plug_device(struct se_device *se_dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(se_dev);
        struct iblock_dev_plug *ib_dev_plug;

        /*
         * Each se_device has a per cpu work this can be run from. We
         * shouldn't have multiple threads on the same cpu calling this
         * at the same time.
         */
        ib_dev_plug = &ib_dev->ibd_plug[raw_smp_processor_id()];
        if (test_and_set_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags))
                return NULL;

        blk_start_plug(&ib_dev_plug->blk_plug);
        return &ib_dev_plug->se_plug;
}

static void iblock_unplug_device(struct se_dev_plug *se_plug)
{
        struct iblock_dev_plug *ib_dev_plug = container_of(se_plug,
                                        struct iblock_dev_plug, se_plug);

        blk_finish_plug(&ib_dev_plug->blk_plug);
        clear_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags);
}

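/*
 * Backs READ CAPACITY: report the last addressable LBA (total block count
 * minus one), rescaled when the block size exported to the initiator
 * differs from the backing device's logical block size.
 */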
static unsigned long long iblock_emulate_read_cap_with_block_size(
        struct se_device *dev,
        struct block_device *bd,
        struct request_queue *q)
{
        u32 block_size = bdev_logical_block_size(bd);
        unsigned long long blocks_long =
                div_u64(bdev_nr_bytes(bd), block_size) - 1;

        if (block_size == dev->dev_attrib.block_size)
                return blocks_long;

        /*
         * The exported block size differs from the device's logical block
         * size, so rescale the last-LBA count. Upstream open-codes this as
         * nested switches over the 512/1024/2048/4096 combinations; since
         * both sizes are powers of two, a shift by the log2 ratio is
         * equivalent.
         */
        if (block_size > dev->dev_attrib.block_size)
                blocks_long <<= ilog2(block_size / dev->dev_attrib.block_size);
        else
                blocks_long >>= ilog2(dev->dev_attrib.block_size / block_size);

        return blocks_long;
}

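/*
 * ibr->pending holds one reference per in-flight bio plus one for the
 * submission path itself, so the command is completed exactly once, and
 * only after submission has finished and every bio has signalled done.
 */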
static void iblock_complete_cmd(struct se_cmd *cmd)
{
        struct iblock_req *ibr = cmd->priv;
        u8 status;

        if (!refcount_dec_and_test(&ibr->pending))
                return;

        if (atomic_read(&ibr->ib_bio_err_cnt))
                status = SAM_STAT_CHECK_CONDITION;
        else
                status = SAM_STAT_GOOD;

        target_complete_cmd(cmd, status);
        kfree(ibr);
}

static void iblock_bio_done(struct bio *bio)
{
        struct se_cmd *cmd = bio->bi_private;
        struct iblock_req *ibr = cmd->priv;

        if (bio->bi_status) {
                pr_err("bio error: %p, err: %d\n", bio, bio->bi_status);
                /*
                 * Bump the ib_bio_err_cnt and release bio.
                 */
                atomic_inc(&ibr->ib_bio_err_cnt);
                smp_mb__after_atomic();
        }

        bio_put(bio);

        iblock_complete_cmd(cmd);
}

static struct bio *iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num,
                                  blk_opf_t opf)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
        struct bio *bio;

        /*
         * Only allocate as many vector entries as the bio code allows us to,
         * we'll loop later on until we have handled the whole request.
         */
        bio = bio_alloc_bioset(ib_dev->ibd_bd, bio_max_segs(sg_num), opf,
                               GFP_NOIO, &ib_dev->ibd_bio_set);
        if (!bio) {
                pr_err("Unable to allocate memory for bio\n");
                return NULL;
        }

        bio->bi_private = cmd;
        bio->bi_end_io = &iblock_bio_done;
        bio->bi_iter.bi_sector = lba;

        return bio;
}

static void iblock_submit_bios(struct bio_list *list)
{
        struct blk_plug plug;
        struct bio *bio;

        /*
         * The block layer handles nested plugs, so just plug/unplug to handle
         * fabric drivers that didn't support batching and multi bio cmds.
         */
        blk_start_plug(&plug);
        while ((bio = bio_list_pop(list)))
                submit_bio(bio);
        blk_finish_plug(&plug);
}

static void iblock_end_io_flush(struct bio *bio)
{
        struct se_cmd *cmd = bio->bi_private;

        if (bio->bi_status)
                pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_status);

        if (cmd) {
                if (bio->bi_status)
                        target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
                else
                        target_complete_cmd(cmd, SAM_STAT_GOOD);
        }

        bio_put(bio);
}

/*
 * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
 * always flush the whole cache.
 */
static sense_reason_t
iblock_execute_sync_cache(struct se_cmd *cmd)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
        int immed = (cmd->t_task_cdb[1] & 0x2);
        struct bio *bio;

        /*
         * If the Immediate bit is set, queue up the GOOD response
         * for this SYNCHRONIZE_CACHE op.
         */
        if (immed)
                target_complete_cmd(cmd, SAM_STAT_GOOD);

        bio = bio_alloc(ib_dev->ibd_bd, 0, REQ_OP_WRITE | REQ_PREFLUSH,
                        GFP_KERNEL);
        bio->bi_end_io = iblock_end_io_flush;
        if (!immed)
                bio->bi_private = cmd;
        submit_bio(bio);
        return 0;
}

static sense_reason_t
iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
{
        struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
        struct se_device *dev = cmd->se_dev;
        int ret;

        ret = blkdev_issue_discard(bdev,
                                   target_to_linux_sector(dev, lba),
                                   target_to_linux_sector(dev, nolb),
                                   GFP_KERNEL);
        if (ret < 0) {
                pr_err("blkdev_issue_discard() failed: %d\n", ret);
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }

        return 0;
}

static sense_reason_t
iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        struct scatterlist *sg = &cmd->t_data_sg[0];
        unsigned char *buf, *not_zero;
        int ret;

        buf = kmap(sg_page(sg)) + sg->offset;
        if (!buf)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        /*
         * Fall back to block_execute_write_same() slow-path if
         * incoming WRITE_SAME payload does not contain zeros.
         */
        not_zero = memchr_inv(buf, 0x00, cmd->data_length);
        kunmap(sg_page(sg));

        if (not_zero)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        ret = blkdev_issue_zeroout(bdev,
                                target_to_linux_sector(dev, cmd->t_task_lba),
                                target_to_linux_sector(dev,
                                        sbc_get_write_same_sectors(cmd)),
                                GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
        if (ret)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        target_complete_cmd(cmd, SAM_STAT_GOOD);
        return 0;
}

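/*
 * WRITE SAME strategy: when the payload is all zeros and the device
 * supports WRITE ZEROES, offload the whole range via
 * iblock_execute_zero_out(); otherwise replicate the single-block payload
 * across the LBA range, bio by bio.
 */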
static sense_reason_t
iblock_execute_write_same(struct se_cmd *cmd)
{
        struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
        struct iblock_req *ibr;
        struct scatterlist *sg;
        struct bio *bio;
        struct bio_list list;
        struct se_device *dev = cmd->se_dev;
        sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
        sector_t sectors = target_to_linux_sector(dev,
                                        sbc_get_write_same_sectors(cmd));

        if (cmd->prot_op) {
                pr_err("WRITE_SAME: Protection information with IBLOCK"
                       " backends not supported\n");
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }

        if (!cmd->t_data_nents)
                return TCM_INVALID_CDB_FIELD;

        sg = &cmd->t_data_sg[0];

        if (cmd->t_data_nents > 1 ||
            sg->length != cmd->se_dev->dev_attrib.block_size) {
                pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
                        " block_size: %u\n", cmd->t_data_nents, sg->length,
                        cmd->se_dev->dev_attrib.block_size);
                return TCM_INVALID_CDB_FIELD;
        }

        if (bdev_write_zeroes_sectors(bdev)) {
                if (!iblock_execute_zero_out(bdev, cmd))
                        return 0;
        }

        ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
        if (!ibr)
                goto fail;
        cmd->priv = ibr;

        bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE);
        if (!bio)
                goto fail_free_ibr;

        bio_list_init(&list);
        bio_list_add(&list, bio);

        refcount_set(&ibr->pending, 1);

        while (sectors) {
                while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
                                != sg->length) {

                        bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE);
                        if (!bio)
                                goto fail_put_bios;

                        refcount_inc(&ibr->pending);
                        bio_list_add(&list, bio);
                }

                /* Always in 512 byte units for Linux/Block */
                block_lba += sg->length >> SECTOR_SHIFT;
                sectors -= sg->length >> SECTOR_SHIFT;
        }

        iblock_submit_bios(&list);
        return 0;

fail_put_bios:
        while ((bio = bio_list_pop(&list)))
                bio_put(bio);
fail_free_ibr:
        kfree(ibr);
fail:
        return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

enum {
        Opt_udev_path, Opt_readonly, Opt_force, Opt_err
};

static match_table_t tokens = {
        {Opt_udev_path, "udev_path=%s"},
        {Opt_readonly, "readonly=%d"},
        {Opt_force, "force=%d"},
        {Opt_err, NULL}
};

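/*
 * Control-plane options parsed from configfs: udev_path= names the backing
 * block device, readonly= toggles read-only export, and force= is accepted
 * for compatibility but has no effect here.
 */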
static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
                const char *page, ssize_t count)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        char *orig, *ptr, *arg_p, *opts;
        substring_t args[MAX_OPT_ARGS];
        int ret = 0, token;
        unsigned long tmp_readonly;

        opts = kstrdup(page, GFP_KERNEL);
        if (!opts)
                return -ENOMEM;

        orig = opts;

        while ((ptr = strsep(&opts, ",\n")) != NULL) {
                if (!*ptr)
                        continue;

                token = match_token(ptr, tokens, args);
                switch (token) {
                case Opt_udev_path:
                        if (ib_dev->ibd_bd) {
                                pr_err("Unable to set udev_path= while"
                                        " ib_dev->ibd_bd exists\n");
                                ret = -EEXIST;
                                goto out;
                        }
                        if (match_strlcpy(ib_dev->ibd_udev_path, &args[0],
                                SE_UDEV_PATH_LEN) == 0) {
                                ret = -EINVAL;
                                break;
                        }
                        pr_debug("IBLOCK: Referencing UDEV path: %s\n",
                                        ib_dev->ibd_udev_path);
                        ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
                        break;
                case Opt_readonly:
                        arg_p = match_strdup(&args[0]);
                        if (!arg_p) {
                                ret = -ENOMEM;
                                break;
                        }
                        ret = kstrtoul(arg_p, 0, &tmp_readonly);
                        kfree(arg_p);
                        if (ret < 0) {
                                pr_err("kstrtoul() failed for"
                                                " readonly=\n");
                                goto out;
                        }
                        ib_dev->ibd_readonly = tmp_readonly;
                        pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
                        break;
                case Opt_force:
                        break;
                default:
                        break;
                }
        }

out:
        kfree(orig);
        return (!ret) ? count : ret;
}

static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct block_device *bd = ib_dev->ibd_bd;
        ssize_t bl = 0;

        if (bd)
                bl += sprintf(b + bl, "iBlock device: %pg", bd);
        if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
                bl += sprintf(b + bl, "  UDEV PATH: %s",
                              ib_dev->ibd_udev_path);
        bl += sprintf(b + bl, "  readonly: %d\n", ib_dev->ibd_readonly);

        bl += sprintf(b + bl, "        ");
        if (bd) {
                bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
                        MAJOR(bd->bd_dev), MINOR(bd->bd_dev),
                        "CLAIMED: IBLOCK");
        } else {
                bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
        }
        return bl;
}

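/*
 * Attach a bio_integrity_payload to @bio and populate it with T10 PI data
 * from the command's protection scatterlist, so initiator-supplied PI is
 * passed through the block layer to the backing device.
 */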
static int
iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio,
                 struct sg_mapping_iter *miter)
{
        struct se_device *dev = cmd->se_dev;
        struct blk_integrity *bi;
        struct bio_integrity_payload *bip;
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        int rc;
        size_t resid, len;

        bi = bdev_get_integrity(ib_dev->ibd_bd);
        if (!bi) {
                pr_err("Unable to locate bio_integrity\n");
                return -ENODEV;
        }

        bip = bio_integrity_alloc(bio, GFP_NOIO, bio_max_segs(cmd->t_prot_nents));
        if (IS_ERR(bip)) {
                pr_err("Unable to allocate bio_integrity_payload\n");
                return PTR_ERR(bip);
        }

        bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
        /* virtual start sector must be in integrity interval units */
        bip_set_seed(bip, bio->bi_iter.bi_sector >>
                                  (bi->interval_exp - SECTOR_SHIFT));

        pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
                 (unsigned long long)bip->bip_iter.bi_sector);

        resid = bip->bip_iter.bi_size;
        while (resid > 0 && sg_miter_next(miter)) {

                len = min_t(size_t, miter->length, resid);
                rc = bio_integrity_add_page(bio, miter->page, len,
                                            offset_in_page(miter->addr));
                if (rc != len) {
                        pr_err("bio_integrity_add_page() failed; %d\n", rc);
                        sg_miter_stop(miter);
                        return -ENOMEM;
                }

                pr_debug("Added bio integrity page: %p length: %zu offset: %lu\n",
                          miter->page, len, offset_in_page(miter->addr));

                resid -= len;
                if (len < miter->length)
                        miter->consumed -= miter->length - len;

                sg_miter_stop(miter);
        }

        return 0;
}

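/*
 * Normal READ/WRITE path: map the data scatterlist onto as few bios as
 * possible and submit them in batches of up to IBLOCK_MAX_BIO_PER_TASK,
 * falling back to REQ_FUA writethrough when the write cache setup
 * requires it.
 */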
static sense_reason_t
iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
                  enum dma_data_direction data_direction)
{
        struct se_device *dev = cmd->se_dev;
        sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
        struct iblock_req *ibr;
        struct bio *bio;
        struct bio_list list;
        struct scatterlist *sg;
        u32 sg_num = sgl_nents;
        blk_opf_t opf;
        unsigned int bio_cnt;
        int i, rc;
        struct sg_mapping_iter prot_miter;
        unsigned int miter_dir;

        if (data_direction == DMA_TO_DEVICE) {
                struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

                /*
                 * Force writethrough using REQ_FUA if a volatile write cache
                 * is not enabled, or if initiator set the Force Unit Access bit.
                 */
                opf = REQ_OP_WRITE;
                miter_dir = SG_MITER_TO_SG;
                if (bdev_fua(ib_dev->ibd_bd)) {
                        if (cmd->se_cmd_flags & SCF_FUA)
                                opf |= REQ_FUA;
                        else if (!bdev_write_cache(ib_dev->ibd_bd))
                                opf |= REQ_FUA;
                }
        } else {
                opf = REQ_OP_READ;
                miter_dir = SG_MITER_FROM_SG;
        }

        ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
        if (!ibr)
                goto fail;
        cmd->priv = ibr;

        if (!sgl_nents) {
                refcount_set(&ibr->pending, 1);
                iblock_complete_cmd(cmd);
                return 0;
        }

        bio = iblock_get_bio(cmd, block_lba, sgl_nents, opf);
        if (!bio)
                goto fail_free_ibr;

        bio_list_init(&list);
        bio_list_add(&list, bio);

        refcount_set(&ibr->pending, 2);
        bio_cnt = 1;

        if (cmd->prot_type && dev->dev_attrib.pi_prot_type)
                sg_miter_start(&prot_miter, cmd->t_prot_sg, cmd->t_prot_nents,
                               miter_dir);

        for_each_sg(sgl, sg, sgl_nents, i) {
                /*
                 * XXX: if the length the device accepts is shorter than the
                 *      length of the S/G list entry this will cause an
                 *      endless loop.  Better hope no driver uses huge pages.
                 */
                while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
                                != sg->length) {
                        if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
                                rc = iblock_alloc_bip(cmd, bio, &prot_miter);
                                if (rc)
                                        goto fail_put_bios;
                        }

                        if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
                                iblock_submit_bios(&list);
                                bio_cnt = 0;
                        }

                        bio = iblock_get_bio(cmd, block_lba, sg_num, opf);
                        if (!bio)
                                goto fail_put_bios;

                        refcount_inc(&ibr->pending);
                        bio_list_add(&list, bio);
                        bio_cnt++;
                }

                /* Always in 512 byte units for Linux/Block */
                block_lba += sg->length >> SECTOR_SHIFT;
                sg_num--;
        }

        if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
                rc = iblock_alloc_bip(cmd, bio, &prot_miter);
                if (rc)
                        goto fail_put_bios;
        }

        iblock_submit_bios(&list);
        iblock_complete_cmd(cmd);
        return 0;

fail_put_bios:
        while ((bio = bio_list_pop(&list)))
                bio_put(bio);
fail_free_ibr:
        kfree(ibr);
fail:
        return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

static sector_t iblock_get_blocks(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct block_device *bd = ib_dev->ibd_bd;
        struct request_queue *q = bdev_get_queue(bd);

        return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}

static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct block_device *bd = ib_dev->ibd_bd;
        int ret;

        ret = bdev_alignment_offset(bd);
        if (ret == -1)
                return 0;

        /* convert offset-bytes to offset-lbas */
        return ret / bdev_logical_block_size(bd);
}

static unsigned int iblock_get_lbppbe(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct block_device *bd = ib_dev->ibd_bd;
        unsigned int logs_per_phys =
                bdev_physical_block_size(bd) / bdev_logical_block_size(bd);

        return ilog2(logs_per_phys);
}

static unsigned int iblock_get_io_min(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct block_device *bd = ib_dev->ibd_bd;

        return bdev_io_min(bd);
}

static unsigned int iblock_get_io_opt(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct block_device *bd = ib_dev->ibd_bd;

        return bdev_io_opt(bd);
}

static struct sbc_ops iblock_sbc_ops = {
        .execute_rw		= iblock_execute_rw,
        .execute_sync_cache	= iblock_execute_sync_cache,
        .execute_write_same	= iblock_execute_write_same,
        .execute_unmap		= iblock_execute_unmap,
};

static sense_reason_t
iblock_parse_cdb(struct se_cmd *cmd)
{
        return sbc_parse_cdb(cmd, &iblock_sbc_ops);
}

static bool iblock_get_write_cache(struct se_device *dev)
{
        return bdev_write_cache(IBLOCK_DEV(dev)->ibd_bd);
}

static const struct target_backend_ops iblock_ops = {
        .name			= "iblock",
        .inquiry_prod		= "IBLOCK",
        .inquiry_rev		= IBLOCK_VERSION,
        .owner			= THIS_MODULE,
        .attach_hba		= iblock_attach_hba,
        .detach_hba		= iblock_detach_hba,
        .alloc_device		= iblock_alloc_device,
        .configure_device	= iblock_configure_device,
        .destroy_device		= iblock_destroy_device,
        .free_device		= iblock_free_device,
        .plug_device		= iblock_plug_device,
        .unplug_device		= iblock_unplug_device,
        .parse_cdb		= iblock_parse_cdb,
        .set_configfs_dev_params = iblock_set_configfs_dev_params,
        .show_configfs_dev_params = iblock_show_configfs_dev_params,
        .get_device_type	= sbc_get_device_type,
        .get_blocks		= iblock_get_blocks,
        .get_alignment_offset_lbas = iblock_get_alignment_offset_lbas,
        .get_lbppbe		= iblock_get_lbppbe,
        .get_io_min		= iblock_get_io_min,
        .get_io_opt		= iblock_get_io_opt,
        .get_write_cache	= iblock_get_write_cache,
        .tb_dev_attrib_attrs	= sbc_attrib_attrs,
};

static int __init iblock_module_init(void)
{
        return transport_backend_register(&iblock_ops);
}

static void __exit iblock_module_exit(void)
{
        target_backend_unregister(&iblock_ops);
}

MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iblock_module_init);
module_exit(iblock_module_exit);