Commit | Line | Data |
---|---|---|
c66ac9db NB |
1 | /******************************************************************************* |
2 | * Filename: target_core_iblock.c | |
3 | * | |
4 | * This file contains the Storage Engine <-> Linux BlockIO transport | |
5 | * specific functions. | |
6 | * | |
fd9a11d7 | 7 | * (c) Copyright 2003-2012 RisingTide Systems LLC. |
c66ac9db NB |
8 | * |
9 | * Nicholas A. Bellinger <nab@kernel.org> | |
10 | * | |
11 | * This program is free software; you can redistribute it and/or modify | |
12 | * it under the terms of the GNU General Public License as published by | |
13 | * the Free Software Foundation; either version 2 of the License, or | |
14 | * (at your option) any later version. | |
15 | * | |
16 | * This program is distributed in the hope that it will be useful, | |
17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
19 | * GNU General Public License for more details. | |
20 | * | |
21 | * You should have received a copy of the GNU General Public License | |
22 | * along with this program; if not, write to the Free Software | |
23 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | |
24 | * | |
25 | ******************************************************************************/ | |
26 | ||
c66ac9db NB |
27 | #include <linux/string.h> |
28 | #include <linux/parser.h> | |
29 | #include <linux/timer.h> | |
30 | #include <linux/fs.h> | |
31 | #include <linux/blkdev.h> | |
32 | #include <linux/slab.h> | |
33 | #include <linux/spinlock.h> | |
c66ac9db NB |
34 | #include <linux/bio.h> |
35 | #include <linux/genhd.h> | |
36 | #include <linux/file.h> | |
827509e3 | 37 | #include <linux/module.h> |
c66ac9db NB |
38 | #include <scsi/scsi.h> |
39 | #include <scsi/scsi_host.h> | |
14150a6b | 40 | #include <asm/unaligned.h> |
c66ac9db NB |
41 | |
42 | #include <target/target_core_base.h> | |
c4795fb2 | 43 | #include <target/target_core_backend.h> |
c66ac9db NB |
44 | |
45 | #include "target_core_iblock.h" | |
46 | ||
d5b4a21b CH |
47 | #define IBLOCK_MAX_BIO_PER_TASK 32 /* max # of bios to submit at a time */ |
48 | #define IBLOCK_BIO_POOL_SIZE 128 | |
49 | ||
0fd97ccf CH |
/* Convert a generic se_device back to its containing iblock_dev. */
static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
{
	return container_of(dev, struct iblock_dev, dev);
}
54 | ||
55 | ||
c66ac9db NB |
56 | static struct se_subsystem_api iblock_template; |
57 | ||
58 | static void iblock_bio_done(struct bio *, int); | |
59 | ||
/* iblock_attach_hba(): (Part of se_subsystem_api_t template)
 *
 * No per-HBA state is required for iblock; just log the attach.
 */
static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
	pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
	return 0;
}
71 | ||
/* Nothing to release at HBA level for iblock. */
static void iblock_detach_hba(struct se_hba *hba)
{
}
75 | ||
0fd97ccf | 76 | static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name) |
c66ac9db NB |
77 | { |
78 | struct iblock_dev *ib_dev = NULL; | |
c66ac9db NB |
79 | |
80 | ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL); | |
6708bb27 AG |
81 | if (!ib_dev) { |
82 | pr_err("Unable to allocate struct iblock_dev\n"); | |
c66ac9db NB |
83 | return NULL; |
84 | } | |
c66ac9db | 85 | |
6708bb27 | 86 | pr_debug( "IBLOCK: Allocated ib_dev for %s\n", name); |
c66ac9db | 87 | |
0fd97ccf | 88 | return &ib_dev->dev; |
c66ac9db NB |
89 | } |
90 | ||
/*
 * Claim the struct block_device named by the configfs udev_path= parameter
 * and copy its request-queue limits into the se_device attributes.
 *
 * Returns 0 on success or a negative errno; on failure the bioset created
 * here is torn down via the goto-cleanup labels.
 */
static int iblock_configure_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct request_queue *q;
	struct block_device *bd = NULL;
	fmode_t mode;
	int ret = -ENOMEM;

	if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
		pr_err("Missing udev_path= parameters for IBLOCK\n");
		return -EINVAL;
	}

	ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0);
	if (!ib_dev->ibd_bio_set) {
		pr_err("IBLOCK: Unable to create bioset\n");
		goto out;
	}

	pr_debug( "IBLOCK: Claiming struct block_device: %s\n",
			ib_dev->ibd_udev_path);

	/* Only request write access when not configured readonly. */
	mode = FMODE_READ|FMODE_EXCL;
	if (!ib_dev->ibd_readonly)
		mode |= FMODE_WRITE;

	bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
	if (IS_ERR(bd)) {
		ret = PTR_ERR(bd);
		goto out_free_bioset;
	}
	ib_dev->ibd_bd = bd;

	q = bdev_get_queue(bd);

	dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
	dev->dev_attrib.hw_max_sectors = UINT_MAX;
	dev->dev_attrib.hw_queue_depth = q->nr_requests;

	/*
	 * Check if the underlying struct block_device request_queue supports
	 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
	 * in ATA and we need to set TPE=1
	 */
	if (blk_queue_discard(q)) {
		dev->dev_attrib.max_unmap_lba_count =
				q->limits.max_discard_sectors;

		/*
		 * Currently hardcoded to 1 in Linux/SCSI code..
		 */
		dev->dev_attrib.max_unmap_block_desc_count = 1;
		/* discard_granularity is in bytes; attribute is in 512b sectors */
		dev->dev_attrib.unmap_granularity =
				q->limits.discard_granularity >> 9;
		dev->dev_attrib.unmap_granularity_alignment =
				q->limits.discard_alignment;

		pr_debug("IBLOCK: BLOCK Discard support available,"
				" disabled by default\n");
	}
	/*
	 * Enable write same emulation for IBLOCK and use 0xFFFF as
	 * the smaller WRITE_SAME(10) only has a two-byte block count.
	 */
	dev->dev_attrib.max_write_same_len = 0xFFFF;

	if (blk_queue_nonrot(q))
		dev->dev_attrib.is_nonrot = 1;
	return 0;

out_free_bioset:
	bioset_free(ib_dev->ibd_bio_set);
	ib_dev->ibd_bio_set = NULL;
out:
	return ret;
}
167 | ||
0fd97ccf | 168 | static void iblock_free_device(struct se_device *dev) |
c66ac9db | 169 | { |
0fd97ccf | 170 | struct iblock_dev *ib_dev = IBLOCK_DEV(dev); |
c66ac9db | 171 | |
bc665524 NB |
172 | if (ib_dev->ibd_bd != NULL) |
173 | blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); | |
174 | if (ib_dev->ibd_bio_set != NULL) | |
175 | bioset_free(ib_dev->ibd_bio_set); | |
c66ac9db NB |
176 | kfree(ib_dev); |
177 | } | |
178 | ||
c66ac9db NB |
179 | static unsigned long long iblock_emulate_read_cap_with_block_size( |
180 | struct se_device *dev, | |
181 | struct block_device *bd, | |
182 | struct request_queue *q) | |
183 | { | |
184 | unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode), | |
185 | bdev_logical_block_size(bd)) - 1); | |
186 | u32 block_size = bdev_logical_block_size(bd); | |
187 | ||
0fd97ccf | 188 | if (block_size == dev->dev_attrib.block_size) |
c66ac9db NB |
189 | return blocks_long; |
190 | ||
191 | switch (block_size) { | |
192 | case 4096: | |
0fd97ccf | 193 | switch (dev->dev_attrib.block_size) { |
c66ac9db NB |
194 | case 2048: |
195 | blocks_long <<= 1; | |
196 | break; | |
197 | case 1024: | |
198 | blocks_long <<= 2; | |
199 | break; | |
200 | case 512: | |
201 | blocks_long <<= 3; | |
202 | default: | |
203 | break; | |
204 | } | |
205 | break; | |
206 | case 2048: | |
0fd97ccf | 207 | switch (dev->dev_attrib.block_size) { |
c66ac9db NB |
208 | case 4096: |
209 | blocks_long >>= 1; | |
210 | break; | |
211 | case 1024: | |
212 | blocks_long <<= 1; | |
213 | break; | |
214 | case 512: | |
215 | blocks_long <<= 2; | |
216 | break; | |
217 | default: | |
218 | break; | |
219 | } | |
220 | break; | |
221 | case 1024: | |
0fd97ccf | 222 | switch (dev->dev_attrib.block_size) { |
c66ac9db NB |
223 | case 4096: |
224 | blocks_long >>= 2; | |
225 | break; | |
226 | case 2048: | |
227 | blocks_long >>= 1; | |
228 | break; | |
229 | case 512: | |
230 | blocks_long <<= 1; | |
231 | break; | |
232 | default: | |
233 | break; | |
234 | } | |
235 | break; | |
236 | case 512: | |
0fd97ccf | 237 | switch (dev->dev_attrib.block_size) { |
c66ac9db NB |
238 | case 4096: |
239 | blocks_long >>= 3; | |
240 | break; | |
241 | case 2048: | |
242 | blocks_long >>= 2; | |
243 | break; | |
244 | case 1024: | |
245 | blocks_long >>= 1; | |
246 | break; | |
247 | default: | |
248 | break; | |
249 | } | |
250 | break; | |
251 | default: | |
252 | break; | |
253 | } | |
254 | ||
255 | return blocks_long; | |
256 | } | |
257 | ||
df5fa691 CH |
258 | static void iblock_end_io_flush(struct bio *bio, int err) |
259 | { | |
260 | struct se_cmd *cmd = bio->bi_private; | |
261 | ||
262 | if (err) | |
263 | pr_err("IBLOCK: cache flush failed: %d\n", err); | |
264 | ||
5787cacd | 265 | if (cmd) { |
de103c93 | 266 | if (err) |
5787cacd | 267 | target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION); |
de103c93 | 268 | else |
5787cacd | 269 | target_complete_cmd(cmd, SAM_STAT_GOOD); |
5787cacd CH |
270 | } |
271 | ||
df5fa691 CH |
272 | bio_put(bio); |
273 | } | |
274 | ||
c66ac9db | 275 | /* |
df5fa691 CH |
276 | * Implement SYCHRONIZE CACHE. Note that we can't handle lba ranges and must |
277 | * always flush the whole cache. | |
c66ac9db | 278 | */ |
de103c93 CH |
279 | static sense_reason_t |
280 | iblock_execute_sync_cache(struct se_cmd *cmd) | |
c66ac9db | 281 | { |
0fd97ccf | 282 | struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev); |
a1d8b49a | 283 | int immed = (cmd->t_task_cdb[1] & 0x2); |
df5fa691 | 284 | struct bio *bio; |
c66ac9db NB |
285 | |
286 | /* | |
287 | * If the Immediate bit is set, queue up the GOOD response | |
df5fa691 | 288 | * for this SYNCHRONIZE_CACHE op. |
c66ac9db NB |
289 | */ |
290 | if (immed) | |
5787cacd | 291 | target_complete_cmd(cmd, SAM_STAT_GOOD); |
c66ac9db | 292 | |
df5fa691 CH |
293 | bio = bio_alloc(GFP_KERNEL, 0); |
294 | bio->bi_end_io = iblock_end_io_flush; | |
295 | bio->bi_bdev = ib_dev->ibd_bd; | |
c66ac9db | 296 | if (!immed) |
df5fa691 CH |
297 | bio->bi_private = cmd; |
298 | submit_bio(WRITE_FLUSH, bio); | |
ad67f0d9 | 299 | return 0; |
c66ac9db NB |
300 | } |
301 | ||
de103c93 CH |
/*
 * Implement the SCSI UNMAP command: parse the parameter list of block
 * descriptors from the data-out buffer and issue one discard per
 * descriptor against the backing block device.
 *
 * Returns 0 and completes the command with GOOD status on success, or a
 * sense_reason_t code on validation/discard failure.
 */
static sense_reason_t
iblock_execute_unmap(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	unsigned char *buf, *ptr = NULL;
	sector_t lba;
	int size;
	u32 range;
	sense_reason_t ret = 0;
	int dl, bd_dl, err;

	/* Parameter list header alone is 8 bytes */
	if (cmd->data_length < 8) {
		pr_warn("UNMAP parameter list length %u too small\n",
			cmd->data_length);
		return TCM_INVALID_PARAMETER_LIST;
	}

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	/* data length and block-descriptor data length from the header */
	dl = get_unaligned_be16(&buf[0]);
	bd_dl = get_unaligned_be16(&buf[2]);

	/* Never trust bd_dl beyond what the initiator actually sent */
	size = cmd->data_length - 8;
	if (bd_dl > size)
		pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
			cmd->data_length, bd_dl);
	else
		size = bd_dl;

	if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
		ret = TCM_INVALID_PARAMETER_LIST;
		goto err;
	}

	/* First UNMAP block descriptor starts at 8 byte offset */
	ptr = &buf[8];
	pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
		" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);

	/* Each block descriptor is 16 bytes: 8-byte LBA + 4-byte count + pad */
	while (size >= 16) {
		lba = get_unaligned_be64(&ptr[0]);
		range = get_unaligned_be32(&ptr[8]);
		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
			 (unsigned long long)lba, range);

		if (range > dev->dev_attrib.max_unmap_lba_count) {
			ret = TCM_INVALID_PARAMETER_LIST;
			goto err;
		}

		if (lba + range > dev->transport->get_blocks(dev) + 1) {
			ret = TCM_ADDRESS_OUT_OF_RANGE;
			goto err;
		}

		err = blkdev_issue_discard(ib_dev->ibd_bd, lba, range,
					   GFP_KERNEL, 0);
		if (err < 0) {
			pr_err("blkdev_issue_discard() failed: %d\n",
					err);
			ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
			goto err;
		}

		ptr += 16;
		size -= 16;
	}

err:
	transport_kunmap_data_sg(cmd);
	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}
379 | ||
f6970ad3 NB |
380 | static struct bio *iblock_get_bio(struct se_cmd *, sector_t, u32); |
381 | static void iblock_submit_bios(struct bio_list *, int); | |
382 | static void iblock_complete_cmd(struct se_cmd *); | |
383 | ||
de103c93 | 384 | static sense_reason_t |
f6970ad3 | 385 | iblock_execute_write_same_unmap(struct se_cmd *cmd) |
6f974e8c | 386 | { |
0fd97ccf | 387 | struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev); |
f6970ad3 | 388 | int rc; |
6f974e8c | 389 | |
f6970ad3 NB |
390 | rc = blkdev_issue_discard(ib_dev->ibd_bd, cmd->t_task_lba, |
391 | spc_get_write_same_sectors(cmd), GFP_KERNEL, 0); | |
392 | if (rc < 0) { | |
393 | pr_warn("blkdev_issue_discard() failed: %d\n", rc); | |
de103c93 | 394 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
6f974e8c CH |
395 | } |
396 | ||
397 | target_complete_cmd(cmd, GOOD); | |
398 | return 0; | |
399 | } | |
400 | ||
f6970ad3 NB |
/*
 * WRITE_SAME without UNMAP: replicate the single-block payload across the
 * requested LBA range by adding the same page to bios until all logical
 * blocks are covered.  Requires exactly one SGL entry of block_size bytes.
 */
static sense_reason_t
iblock_execute_write_same(struct se_cmd *cmd)
{
	struct iblock_req *ibr;
	struct scatterlist *sg;
	struct bio *bio;
	struct bio_list list;
	sector_t block_lba = cmd->t_task_lba;
	unsigned int sectors = spc_get_write_same_sectors(cmd);

	sg = &cmd->t_data_sg[0];

	if (cmd->t_data_nents > 1 ||
	    sg->length != cmd->se_dev->dev_attrib.block_size) {
		pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
			" block_size: %u\n", cmd->t_data_nents, sg->length,
			cmd->se_dev->dev_attrib.block_size);
		return TCM_INVALID_CDB_FIELD;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	bio = iblock_get_bio(cmd, block_lba, 1);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	/* One pending reference for the bio allocated above */
	atomic_set(&ibr->pending, 1);

	while (sectors) {
		/* When the current bio is full, start a new one */
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {

			bio = iblock_get_bio(cmd, block_lba, 1);
			if (!bio)
				goto fail_put_bios;

			atomic_inc(&ibr->pending);
			bio_list_add(&list, bio);
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
		sectors -= 1;
	}

	iblock_submit_bios(&list, WRITE);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
463 | ||
/* Option tokens accepted by iblock_set_configfs_dev_params() */
enum {
	Opt_udev_path, Opt_readonly, Opt_force, Opt_err
};

static match_table_t tokens = {
	{Opt_udev_path, "udev_path=%s"},
	{Opt_readonly, "readonly=%d"},
	{Opt_force, "force=%d"},
	{Opt_err, NULL}
};
474 | ||
0fd97ccf CH |
475 | static ssize_t iblock_set_configfs_dev_params(struct se_device *dev, |
476 | const char *page, ssize_t count) | |
c66ac9db | 477 | { |
0fd97ccf | 478 | struct iblock_dev *ib_dev = IBLOCK_DEV(dev); |
6d180253 | 479 | char *orig, *ptr, *arg_p, *opts; |
c66ac9db | 480 | substring_t args[MAX_OPT_ARGS]; |
21bca31c | 481 | int ret = 0, token; |
44bfd018 | 482 | unsigned long tmp_readonly; |
c66ac9db NB |
483 | |
484 | opts = kstrdup(page, GFP_KERNEL); | |
485 | if (!opts) | |
486 | return -ENOMEM; | |
487 | ||
488 | orig = opts; | |
489 | ||
90c161b6 | 490 | while ((ptr = strsep(&opts, ",\n")) != NULL) { |
c66ac9db NB |
491 | if (!*ptr) |
492 | continue; | |
493 | ||
494 | token = match_token(ptr, tokens, args); | |
495 | switch (token) { | |
496 | case Opt_udev_path: | |
497 | if (ib_dev->ibd_bd) { | |
6708bb27 | 498 | pr_err("Unable to set udev_path= while" |
c66ac9db NB |
499 | " ib_dev->ibd_bd exists\n"); |
500 | ret = -EEXIST; | |
501 | goto out; | |
502 | } | |
852b6ed1 NB |
503 | if (match_strlcpy(ib_dev->ibd_udev_path, &args[0], |
504 | SE_UDEV_PATH_LEN) == 0) { | |
505 | ret = -EINVAL; | |
6d180253 JJ |
506 | break; |
507 | } | |
6708bb27 | 508 | pr_debug("IBLOCK: Referencing UDEV path: %s\n", |
c66ac9db NB |
509 | ib_dev->ibd_udev_path); |
510 | ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH; | |
511 | break; | |
44bfd018 AG |
512 | case Opt_readonly: |
513 | arg_p = match_strdup(&args[0]); | |
514 | if (!arg_p) { | |
515 | ret = -ENOMEM; | |
516 | break; | |
517 | } | |
518 | ret = strict_strtoul(arg_p, 0, &tmp_readonly); | |
519 | kfree(arg_p); | |
520 | if (ret < 0) { | |
521 | pr_err("strict_strtoul() failed for" | |
522 | " readonly=\n"); | |
523 | goto out; | |
524 | } | |
525 | ib_dev->ibd_readonly = tmp_readonly; | |
526 | pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly); | |
527 | break; | |
c66ac9db | 528 | case Opt_force: |
c66ac9db NB |
529 | break; |
530 | default: | |
531 | break; | |
532 | } | |
533 | } | |
534 | ||
535 | out: | |
536 | kfree(orig); | |
537 | return (!ret) ? count : ret; | |
538 | } | |
539 | ||
/*
 * Format the current iblock device configuration (bdev name, udev path,
 * readonly flag, major:minor and claim status) into @b for configfs.
 * Returns the number of bytes written.
 */
static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	char buf[BDEVNAME_SIZE];
	ssize_t bl = 0;

	if (bd)
		bl += sprintf(b + bl, "iBlock device: %s",
				bdevname(bd, buf));
	if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
		bl += sprintf(b + bl, "  UDEV PATH: %s",
				ib_dev->ibd_udev_path);
	bl += sprintf(b + bl, "  readonly: %d\n", ib_dev->ibd_readonly);

	bl += sprintf(b + bl, "        ");
	if (bd) {
		/* bd_holder == ib_dev means this backend holds the claim */
		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
			MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
			"" : (bd->bd_holder == ib_dev) ?
			"CLAIMED: IBLOCK" : "CLAIMED: OS");
	} else {
		bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
	}

	return bl;
}
567 | ||
5787cacd CH |
568 | static void iblock_complete_cmd(struct se_cmd *cmd) |
569 | { | |
570 | struct iblock_req *ibr = cmd->priv; | |
571 | u8 status; | |
572 | ||
573 | if (!atomic_dec_and_test(&ibr->pending)) | |
574 | return; | |
575 | ||
576 | if (atomic_read(&ibr->ib_bio_err_cnt)) | |
577 | status = SAM_STAT_CHECK_CONDITION; | |
578 | else | |
579 | status = SAM_STAT_GOOD; | |
580 | ||
581 | target_complete_cmd(cmd, status); | |
582 | kfree(ibr); | |
583 | } | |
584 | ||
dbbf3e94 | 585 | static struct bio * |
5787cacd | 586 | iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num) |
c66ac9db | 587 | { |
0fd97ccf | 588 | struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev); |
c66ac9db NB |
589 | struct bio *bio; |
590 | ||
5c55125f CH |
591 | /* |
592 | * Only allocate as many vector entries as the bio code allows us to, | |
593 | * we'll loop later on until we have handled the whole request. | |
594 | */ | |
595 | if (sg_num > BIO_MAX_PAGES) | |
596 | sg_num = BIO_MAX_PAGES; | |
597 | ||
c66ac9db | 598 | bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set); |
6708bb27 AG |
599 | if (!bio) { |
600 | pr_err("Unable to allocate memory for bio\n"); | |
c66ac9db NB |
601 | return NULL; |
602 | } | |
603 | ||
c66ac9db | 604 | bio->bi_bdev = ib_dev->ibd_bd; |
5787cacd | 605 | bio->bi_private = cmd; |
c66ac9db NB |
606 | bio->bi_end_io = &iblock_bio_done; |
607 | bio->bi_sector = lba; | |
c66ac9db NB |
608 | return bio; |
609 | } | |
610 | ||
d5b4a21b CH |
611 | static void iblock_submit_bios(struct bio_list *list, int rw) |
612 | { | |
613 | struct blk_plug plug; | |
614 | struct bio *bio; | |
615 | ||
616 | blk_start_plug(&plug); | |
617 | while ((bio = bio_list_pop(list))) | |
618 | submit_bio(rw, bio); | |
619 | blk_finish_plug(&plug); | |
620 | } | |
621 | ||
de103c93 CH |
/*
 * Main READ/WRITE path: map the command's scatterlist onto one or more
 * bios and submit them, tracking outstanding bios in ibr->pending so the
 * command completes exactly once when the last bio finishes.
 */
static sense_reason_t
iblock_execute_rw(struct se_cmd *cmd)
{
	struct scatterlist *sgl = cmd->t_data_sg;
	u32 sgl_nents = cmd->t_data_nents;
	enum dma_data_direction data_direction = cmd->data_direction;
	struct se_device *dev = cmd->se_dev;
	struct iblock_req *ibr;
	struct bio *bio;
	struct bio_list list;
	struct scatterlist *sg;
	u32 sg_num = sgl_nents;
	sector_t block_lba;
	unsigned bio_cnt;
	int rw;
	int i;

	if (data_direction == DMA_TO_DEVICE) {
		/*
		 * Force data to disk if we pretend to not have a volatile
		 * write cache, or the initiator set the Force Unit Access bit.
		 */
		if (dev->dev_attrib.emulate_write_cache == 0 ||
		    (dev->dev_attrib.emulate_fua_write > 0 &&
		     (cmd->se_cmd_flags & SCF_FUA)))
			rw = WRITE_FUA;
		else
			rw = WRITE;
	} else {
		rw = READ;
	}

	/*
	 * Convert the blocksize advertised to the initiator to the 512 byte
	 * units unconditionally used by the Linux block layer.
	 */
	if (dev->dev_attrib.block_size == 4096)
		block_lba = (cmd->t_task_lba << 3);
	else if (dev->dev_attrib.block_size == 2048)
		block_lba = (cmd->t_task_lba << 2);
	else if (dev->dev_attrib.block_size == 1024)
		block_lba = (cmd->t_task_lba << 1);
	else if (dev->dev_attrib.block_size == 512)
		block_lba = cmd->t_task_lba;
	else {
		pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
				" %u\n", dev->dev_attrib.block_size);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	/* Zero-length I/O: nothing to submit, complete immediately */
	if (!sgl_nents) {
		atomic_set(&ibr->pending, 1);
		iblock_complete_cmd(cmd);
		return 0;
	}

	bio = iblock_get_bio(cmd, block_lba, sgl_nents);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	/*
	 * pending starts at 2: one for the first bio, one held by this
	 * function and dropped by the iblock_complete_cmd() call below,
	 * so the command cannot complete before submission is finished.
	 */
	atomic_set(&ibr->pending, 2);
	bio_cnt = 1;

	for_each_sg(sgl, sg, sgl_nents, i) {
		/*
		 * XXX: if the length the device accepts is shorter than the
		 *	length of the S/G list entry this will cause and
		 *	endless loop.  Better hope no driver uses huge pages.
		 */
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			/* Bound queued bios; flush a batch before continuing */
			if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
				iblock_submit_bios(&list, rw);
				bio_cnt = 0;
			}

			bio = iblock_get_bio(cmd, block_lba, sg_num);
			if (!bio)
				goto fail_put_bios;

			atomic_inc(&ibr->pending);
			bio_list_add(&list, bio);
			bio_cnt++;
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
		sg_num--;
	}

	iblock_submit_bios(&list, rw);
	iblock_complete_cmd(cmd);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
732 | ||
c66ac9db NB |
733 | static sector_t iblock_get_blocks(struct se_device *dev) |
734 | { | |
0fd97ccf CH |
735 | struct iblock_dev *ib_dev = IBLOCK_DEV(dev); |
736 | struct block_device *bd = ib_dev->ibd_bd; | |
c66ac9db NB |
737 | struct request_queue *q = bdev_get_queue(bd); |
738 | ||
739 | return iblock_emulate_read_cap_with_block_size(dev, bd, q); | |
740 | } | |
741 | ||
/*
 * Per-bio completion: record any error in the shared iblock_req, release
 * the bio, and drop one pending reference via iblock_complete_cmd().
 */
static void iblock_bio_done(struct bio *bio, int err)
{
	struct se_cmd *cmd = bio->bi_private;
	struct iblock_req *ibr = cmd->priv;

	/*
	 * Set -EIO if !BIO_UPTODATE and the passed is still err=0
	 */
	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
		err = -EIO;

	if (err != 0) {
		pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
			" err: %d\n", bio, err);
		/*
		 * Bump the ib_bio_err_cnt and release bio.
		 */
		atomic_inc(&ibr->ib_bio_err_cnt);
		/* make the error count visible before pending is dropped */
		smp_mb__after_atomic_inc();
	}

	bio_put(bio);

	iblock_complete_cmd(cmd);
}
767 | ||
/* SBC operation dispatch table, wired up through sbc_parse_cdb() */
static struct sbc_ops iblock_sbc_ops = {
	.execute_rw		= iblock_execute_rw,
	.execute_sync_cache	= iblock_execute_sync_cache,
	.execute_write_same	= iblock_execute_write_same,
	.execute_write_same_unmap = iblock_execute_write_same_unmap,
	.execute_unmap		= iblock_execute_unmap,
};
775 | ||
de103c93 CH |
/* Delegate CDB parsing to the generic SBC emulation with our ops table. */
static sense_reason_t
iblock_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &iblock_sbc_ops);
}
781 | ||
c66ac9db NB |
/* Subsystem template registered with the target core at module init */
static struct se_subsystem_api iblock_template = {
	.name			= "iblock",
	.inquiry_prod		= "IBLOCK",
	.inquiry_rev		= IBLOCK_VERSION,
	.owner			= THIS_MODULE,
	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
	.attach_hba		= iblock_attach_hba,
	.detach_hba		= iblock_detach_hba,
	.alloc_device		= iblock_alloc_device,
	.configure_device	= iblock_configure_device,
	.free_device		= iblock_free_device,
	.parse_cdb		= iblock_parse_cdb,
	.set_configfs_dev_params = iblock_set_configfs_dev_params,
	.show_configfs_dev_params = iblock_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= iblock_get_blocks,
};
799 | ||
/* Register the iblock backend with the target core at module load. */
static int __init iblock_module_init(void)
{
	return transport_subsystem_register(&iblock_template);
}
804 | ||
/* Unregister the iblock backend at module unload. */
static void iblock_module_exit(void)
{
	transport_subsystem_release(&iblock_template);
}
809 | ||
810 | MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin"); | |
811 | MODULE_AUTHOR("nab@Linux-iSCSI.org"); | |
812 | MODULE_LICENSE("GPL"); | |
813 | ||
814 | module_init(iblock_module_init); | |
815 | module_exit(iblock_module_exit); |