/*******************************************************************************
 * Filename:  target_core_iblock.c
 *
 * This file contains the Storage Engine <-> Linux BlockIO transport
 * specific functions.
 *
 * (c) Copyright 2003-2012 RisingTide Systems LLC.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_iblock.h"

#define IBLOCK_MAX_BIO_PER_TASK	32	/* max # of bios to submit at a time */
#define IBLOCK_BIO_POOL_SIZE	128

static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
{
	return container_of(dev, struct iblock_dev, dev);
}

static struct se_subsystem_api iblock_template;
/* iblock_attach_hba(): (Part of se_subsystem_api_t template) */
static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
	pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
	return 0;
}

static void iblock_detach_hba(struct se_hba *hba)
{
}

static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
{
	struct iblock_dev *ib_dev = NULL;

	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
	if (!ib_dev) {
		pr_err("Unable to allocate struct iblock_dev\n");
		return NULL;
	}

	pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);

	return &ib_dev->dev;
}

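/*
 * iblock_configure_device(): claim the backing struct block_device named by
 * the udev_path= parameter, set up the per-device bioset used for I/O
 * dispatch, and copy the request_queue limits (logical block size, queue
 * depth, discard capabilities) into the se_device attributes.
 */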
static int iblock_configure_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct request_queue *q;
	struct block_device *bd = NULL;
	fmode_t mode;
	int ret = -ENOMEM;

	if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
		pr_err("Missing udev_path= parameter for IBLOCK\n");
		return -EINVAL;
	}

	ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0);
	if (!ib_dev->ibd_bio_set) {
		pr_err("IBLOCK: Unable to create bioset\n");
		goto out;
	}

	pr_debug("IBLOCK: Claiming struct block_device: %s\n",
			ib_dev->ibd_udev_path);

	mode = FMODE_READ|FMODE_EXCL;
	if (!ib_dev->ibd_readonly)
		mode |= FMODE_WRITE;

	bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
	if (IS_ERR(bd)) {
		ret = PTR_ERR(bd);
		goto out_free_bioset;
	}
	ib_dev->ibd_bd = bd;

	q = bdev_get_queue(bd);

	dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
	dev->dev_attrib.hw_max_sectors = UINT_MAX;
	dev->dev_attrib.hw_queue_depth = q->nr_requests;

	/*
	 * Check if the underlying struct block_device request_queue supports
	 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
	 * in ATA, in which case we need to report TPE=1.
	 */
	if (blk_queue_discard(q)) {
		dev->dev_attrib.max_unmap_lba_count =
				q->limits.max_discard_sectors;

		/*
		 * Currently hardcoded to 1 in Linux/SCSI code.
		 */
		dev->dev_attrib.max_unmap_block_desc_count = 1;
		dev->dev_attrib.unmap_granularity =
				q->limits.discard_granularity >> 9;
		dev->dev_attrib.unmap_granularity_alignment =
				q->limits.discard_alignment;

		pr_debug("IBLOCK: BLOCK Discard support available,"
				" disabled by default\n");
	}
	/*
	 * Enable write same emulation for IBLOCK and use 0xFFFF, since the
	 * smaller WRITE_SAME(10) only has a two-byte block count.
	 */
	dev->dev_attrib.max_write_same_len = 0xFFFF;

	if (blk_queue_nonrot(q))
		dev->dev_attrib.is_nonrot = 1;

	return 0;

out_free_bioset:
	bioset_free(ib_dev->ibd_bio_set);
	ib_dev->ibd_bio_set = NULL;
out:
	return ret;
}

static void iblock_free_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

	if (ib_dev->ibd_bd != NULL)
		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
	if (ib_dev->ibd_bio_set != NULL)
		bioset_free(ib_dev->ibd_bio_set);
	kfree(ib_dev);
}

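/*
 * Rescale the raw block count of the backing device to the block size
 * advertised to the initiator.  The backing device reports its size in
 * units of its logical block size; when the exported dev_attrib.block_size
 * differs, shift the count by the ratio of the two power-of-two sizes
 * (e.g. a 512-byte backing device exported at 4096 bytes holds 1/8 as
 * many logical blocks, hence the >>= 3 below).
 */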
static unsigned long long iblock_emulate_read_cap_with_block_size(
	struct se_device *dev,
	struct block_device *bd,
	struct request_queue *q)
{
	unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
					bdev_logical_block_size(bd)) - 1);
	u32 block_size = bdev_logical_block_size(bd);

	if (block_size == dev->dev_attrib.block_size)
		return blocks_long;

	switch (block_size) {
	case 4096:
		switch (dev->dev_attrib.block_size) {
		case 2048:
			blocks_long <<= 1;
			break;
		case 1024:
			blocks_long <<= 2;
			break;
		case 512:
			blocks_long <<= 3;
			break;
		default:
			break;
		}
		break;
	case 2048:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 1;
			break;
		case 1024:
			blocks_long <<= 1;
			break;
		case 512:
			blocks_long <<= 2;
			break;
		default:
			break;
		}
		break;
	case 1024:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 2;
			break;
		case 2048:
			blocks_long >>= 1;
			break;
		case 512:
			blocks_long <<= 1;
			break;
		default:
			break;
		}
		break;
	case 512:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 3;
			break;
		case 2048:
			blocks_long >>= 2;
			break;
		case 1024:
			blocks_long >>= 1;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	return blocks_long;
}

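/*
 * Drop one reference from ibr->pending; the submitter and every completed
 * bio each hold one.  Whoever drops the last reference reports the final
 * SAM status to the target core and frees the request.
 */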
static void iblock_complete_cmd(struct se_cmd *cmd)
{
	struct iblock_req *ibr = cmd->priv;
	u8 status;

	if (!atomic_dec_and_test(&ibr->pending))
		return;

	if (atomic_read(&ibr->ib_bio_err_cnt))
		status = SAM_STAT_CHECK_CONDITION;
	else
		status = SAM_STAT_GOOD;

	target_complete_cmd(cmd, status);
	kfree(ibr);
}

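/*
 * Per-bio completion callback: record any I/O error in ib_bio_err_cnt,
 * release the bio, and drop its pending reference via iblock_complete_cmd().
 */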
static void iblock_bio_done(struct bio *bio, int err)
{
	struct se_cmd *cmd = bio->bi_private;
	struct iblock_req *ibr = cmd->priv;

	/*
	 * Set -EIO if !BIO_UPTODATE and the passed err is still 0.
	 */
	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
		err = -EIO;

	if (err != 0) {
		pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
			" err: %d\n", bio, err);
		/*
		 * Bump the ib_bio_err_cnt and release bio.
		 */
		atomic_inc(&ibr->ib_bio_err_cnt);
		smp_mb__after_atomic_inc();
	}

	bio_put(bio);

	iblock_complete_cmd(cmd);
}

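/*
 * Allocate a bio from the per-device bioset, aimed at the backing block
 * device starting at the given 512-byte sector.  Allocating from a private
 * bioset rather than the global bio pool helps guarantee forward progress
 * under memory pressure.
 */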
static struct bio *
iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	struct bio *bio;

	/*
	 * Only allocate as many vector entries as the bio code allows us to,
	 * we'll loop later on until we have handled the whole request.
	 */
	if (sg_num > BIO_MAX_PAGES)
		sg_num = BIO_MAX_PAGES;

	bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
	if (!bio) {
		pr_err("Unable to allocate memory for bio\n");
		return NULL;
	}

	bio->bi_bdev = ib_dev->ibd_bd;
	bio->bi_private = cmd;
	bio->bi_end_io = &iblock_bio_done;
	bio->bi_sector = lba;

	return bio;
}

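/*
 * Submit all queued bios inside a single blk plug section so the block
 * layer gets a chance to merge and batch them before they reach the driver.
 */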
static void iblock_submit_bios(struct bio_list *list, int rw)
{
	struct blk_plug plug;
	struct bio *bio;

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(list)))
		submit_bio(rw, bio);
	blk_finish_plug(&plug);
}

static void iblock_end_io_flush(struct bio *bio, int err)
{
	struct se_cmd *cmd = bio->bi_private;

	if (err)
		pr_err("IBLOCK: cache flush failed: %d\n", err);

	if (cmd) {
		if (err)
			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
		else
			target_complete_cmd(cmd, SAM_STAT_GOOD);
	}

	bio_put(bio);
}

/*
 * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
 * always flush the whole cache.
 */
static sense_reason_t
iblock_execute_sync_cache(struct se_cmd *cmd)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	int immed = (cmd->t_task_cdb[1] & 0x2);
	struct bio *bio;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op.
	 */
	if (immed)
		target_complete_cmd(cmd, SAM_STAT_GOOD);

	bio = bio_alloc(GFP_KERNEL, 0);
	bio->bi_end_io = iblock_end_io_flush;
	bio->bi_bdev = ib_dev->ibd_bd;
	if (!immed)
		bio->bi_private = cmd;
	submit_bio(WRITE_FLUSH, bio);
	return 0;
}

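/*
 * Common UNMAP backend: translate a (lba, nolb) range straight into a
 * block layer discard on the backing device.
 */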
static sense_reason_t
iblock_do_unmap(struct se_cmd *cmd, void *priv,
		sector_t lba, sector_t nolb)
{
	struct block_device *bdev = priv;
	int ret;

	ret = blkdev_issue_discard(bdev, lba, nolb, GFP_KERNEL, 0);
	if (ret < 0) {
		pr_err("blkdev_issue_discard() failed: %d\n", ret);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	return 0;
}

static sense_reason_t
iblock_execute_unmap(struct se_cmd *cmd)
{
	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;

	return sbc_execute_unmap(cmd, iblock_do_unmap, bdev);
}

static sense_reason_t
iblock_execute_write_same_unmap(struct se_cmd *cmd)
{
	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
	sector_t lba = cmd->t_task_lba;
	sector_t nolb = sbc_get_write_same_sectors(cmd);
	int ret;

	ret = iblock_do_unmap(cmd, bdev, lba, nolb);
	if (ret)
		return ret;

	target_complete_cmd(cmd, GOOD);
	return 0;
}

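/*
 * WRITE_SAME without UNMAP: replay the single-block payload once per
 * logical block.  Only a one-entry scatterlist of exactly block_size bytes
 * is accepted; the payload page is added to bios repeatedly until the
 * requested number of sectors has been covered.
 */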
static sense_reason_t
iblock_execute_write_same(struct se_cmd *cmd)
{
	struct iblock_req *ibr;
	struct scatterlist *sg;
	struct bio *bio;
	struct bio_list list;
	sector_t block_lba = cmd->t_task_lba;
	sector_t sectors = sbc_get_write_same_sectors(cmd);

	sg = &cmd->t_data_sg[0];

	if (cmd->t_data_nents > 1 ||
	    sg->length != cmd->se_dev->dev_attrib.block_size) {
		pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
			" block_size: %u\n", cmd->t_data_nents, sg->length,
			cmd->se_dev->dev_attrib.block_size);
		return TCM_INVALID_CDB_FIELD;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	bio = iblock_get_bio(cmd, block_lba, 1);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	atomic_set(&ibr->pending, 1);

	while (sectors) {
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {

			bio = iblock_get_bio(cmd, block_lba, 1);
			if (!bio)
				goto fail_put_bios;

			atomic_inc(&ibr->pending);
			bio_list_add(&list, bio);
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
		sectors -= 1;
	}

	iblock_submit_bios(&list, WRITE);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

enum {
	Opt_udev_path, Opt_readonly, Opt_force, Opt_err
};

static match_table_t tokens = {
	{Opt_udev_path, "udev_path=%s"},
	{Opt_readonly, "readonly=%d"},
	{Opt_force, "force=%d"},
	{Opt_err, NULL}
};

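/*
 * Parse the comma/newline separated configfs control string against the
 * token table above, e.g. a string of the form "udev_path=/dev/sdb,readonly=1"
 * (device path shown only as an illustration).  udev_path= may not be
 * changed once the block device has been claimed.
 */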
static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;
	unsigned long tmp_readonly;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_udev_path:
			if (ib_dev->ibd_bd) {
				pr_err("Unable to set udev_path= while"
					" ib_dev->ibd_bd exists\n");
				ret = -EEXIST;
				goto out;
			}
			if (match_strlcpy(ib_dev->ibd_udev_path, &args[0],
				SE_UDEV_PATH_LEN) == 0) {
				ret = -EINVAL;
				break;
			}
			pr_debug("IBLOCK: Referencing UDEV path: %s\n",
					ib_dev->ibd_udev_path);
			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
			break;
		case Opt_readonly:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = strict_strtoul(arg_p, 0, &tmp_readonly);
			kfree(arg_p);
			if (ret < 0) {
				pr_err("strict_strtoul() failed for"
						" readonly=\n");
				goto out;
			}
			ib_dev->ibd_readonly = tmp_readonly;
			pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
			break;
		case Opt_force:
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	char buf[BDEVNAME_SIZE];
	ssize_t bl = 0;

	if (bd)
		bl += sprintf(b + bl, "iBlock device: %s",
				bdevname(bd, buf));
	if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
		bl += sprintf(b + bl, "  UDEV PATH: %s",
				ib_dev->ibd_udev_path);
	bl += sprintf(b + bl, "  readonly: %d\n", ib_dev->ibd_readonly);

	bl += sprintf(b + bl, "        ");
	if (bd) {
		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
			MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
			"" : (bd->bd_holder == ib_dev) ?
			"CLAIMED: IBLOCK" : "CLAIMED: OS");
	} else {
		bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
	}

	return bl;
}

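/*
 * Main READ/WRITE path.  For writes, pick WRITE_FUA when the queue
 * advertises FUA and either the initiator set the FUA bit or the queue
 * lacks a separate FLUSH; convert the initiator-visible LBA into the
 * 512-byte sectors the block layer expects; then map the scatterlist into
 * as many bios as needed and submit them in batches of up to
 * IBLOCK_MAX_BIO_PER_TASK.
 */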
static sense_reason_t
iblock_execute_rw(struct se_cmd *cmd)
{
	struct scatterlist *sgl = cmd->t_data_sg;
	u32 sgl_nents = cmd->t_data_nents;
	enum dma_data_direction data_direction = cmd->data_direction;
	struct se_device *dev = cmd->se_dev;
	struct iblock_req *ibr;
	struct bio *bio;
	struct bio_list list;
	struct scatterlist *sg;
	u32 sg_num = sgl_nents;
	sector_t block_lba;
	unsigned bio_cnt;
	int rw = 0;
	int i;

	if (data_direction == DMA_TO_DEVICE) {
		struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
		struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
		/*
		 * Force writethrough using WRITE_FUA if a volatile write cache
		 * is not enabled, or if the initiator set the Force Unit Access bit.
		 */
		if (q->flush_flags & REQ_FUA) {
			if (cmd->se_cmd_flags & SCF_FUA)
				rw = WRITE_FUA;
			else if (!(q->flush_flags & REQ_FLUSH))
				rw = WRITE_FUA;
			else
				rw = WRITE;
		} else {
			rw = WRITE;
		}
	} else {
		rw = READ;
	}

	/*
	 * Convert the blocksize advertised to the initiator to the 512 byte
	 * units unconditionally used by the Linux block layer.
	 */
	if (dev->dev_attrib.block_size == 4096)
		block_lba = (cmd->t_task_lba << 3);
	else if (dev->dev_attrib.block_size == 2048)
		block_lba = (cmd->t_task_lba << 2);
	else if (dev->dev_attrib.block_size == 1024)
		block_lba = (cmd->t_task_lba << 1);
	else if (dev->dev_attrib.block_size == 512)
		block_lba = cmd->t_task_lba;
	else {
		pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
				" %u\n", dev->dev_attrib.block_size);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	if (!sgl_nents) {
		atomic_set(&ibr->pending, 1);
		iblock_complete_cmd(cmd);
		return 0;
	}

	bio = iblock_get_bio(cmd, block_lba, sgl_nents);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	atomic_set(&ibr->pending, 2);
	bio_cnt = 1;

	for_each_sg(sgl, sg, sgl_nents, i) {
		/*
		 * XXX: if the length the device accepts is shorter than the
		 * length of the S/G list entry this will cause an
		 * endless loop.  Better hope no driver uses huge pages.
		 */
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
				iblock_submit_bios(&list, rw);
				bio_cnt = 0;
			}

			bio = iblock_get_bio(cmd, block_lba, sg_num);
			if (!bio)
				goto fail_put_bios;

			atomic_inc(&ibr->pending);
			bio_list_add(&list, bio);
			bio_cnt++;
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
		sg_num--;
	}

	iblock_submit_bios(&list, rw);
	iblock_complete_cmd(cmd);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

static sector_t iblock_get_blocks(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}

static struct sbc_ops iblock_sbc_ops = {
	.execute_rw		= iblock_execute_rw,
	.execute_sync_cache	= iblock_execute_sync_cache,
	.execute_write_same	= iblock_execute_write_same,
	.execute_write_same_unmap = iblock_execute_write_same_unmap,
	.execute_unmap		= iblock_execute_unmap,
};

static sense_reason_t
iblock_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &iblock_sbc_ops);
}

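/*
 * Report whether the backing request_queue advertises a volatile write
 * cache (REQ_FLUSH), so the target core can expose WCE accordingly.
 */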
bool iblock_get_write_cache(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return q->flush_flags & REQ_FLUSH;
}

static struct se_subsystem_api iblock_template = {
	.name			= "iblock",
	.inquiry_prod		= "IBLOCK",
	.inquiry_rev		= IBLOCK_VERSION,
	.owner			= THIS_MODULE,
	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
	.attach_hba		= iblock_attach_hba,
	.detach_hba		= iblock_detach_hba,
	.alloc_device		= iblock_alloc_device,
	.configure_device	= iblock_configure_device,
	.free_device		= iblock_free_device,
	.parse_cdb		= iblock_parse_cdb,
	.set_configfs_dev_params = iblock_set_configfs_dev_params,
	.show_configfs_dev_params = iblock_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= iblock_get_blocks,
	.get_write_cache	= iblock_get_write_cache,
};

static int __init iblock_module_init(void)
{
	return transport_subsystem_register(&iblock_template);
}

static void __exit iblock_module_exit(void)
{
	transport_subsystem_release(&iblock_template);
}

MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iblock_module_init);
module_exit(iblock_module_exit);