/*******************************************************************************
 * Filename:  target_core_rd.c
 *
 * This file contains the Storage Engine <-> Ramdisk transport
 * specific functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/version.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>

#include "target_core_rd.h"

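/*
 * Backing store layout: each struct rd_dev owns an array of
 * struct rd_dev_sg_table, and each table owns a scatterlist whose
 * entries each point at one independently allocated PAGE_SIZE page.
 * The tables cover contiguous, non-overlapping page ranges
 * [page_start_offset, page_end_offset], so any ramdisk page number
 * can be mapped back to its scatterlist entry.
 */
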
static struct se_subsystem_api rd_mcp_template;

/* rd_attach_hba(): (Part of se_subsystem_api_t template)
 *
 * Allocate a struct rd_host to represent this virtual HBA and hang
 * it off the generic se_hba.
 */
static int rd_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct rd_host *rd_host;

	rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
	if (!rd_host) {
		pr_err("Unable to allocate memory for struct rd_host\n");
		return -ENOMEM;
	}

	rd_host->rd_host_id = host_id;

	hba->hba_ptr = rd_host;

	pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);
	pr_debug("CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic"
		" MaxSectors: %u\n", hba->hba_id,
		rd_host->rd_host_id, RD_MAX_SECTORS);

	return 0;
}

static void rd_detach_hba(struct se_hba *hba)
{
	struct rd_host *rd_host = hba->hba_ptr;

	pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
		" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);

	kfree(rd_host);
	hba->hba_ptr = NULL;
}

/* rd_release_device_space():
 *
 * Free every backing page referenced by the device's scatterlist
 * tables, then the scatterlists and the table array itself.
 */
static void rd_release_device_space(struct rd_dev *rd_dev)
{
	u32 i, j, page_count = 0, sg_per_table;
	struct rd_dev_sg_table *sg_table;
	struct page *pg;
	struct scatterlist *sg;

	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
		return;

	sg_table = rd_dev->sg_table_array;

	for (i = 0; i < rd_dev->sg_table_count; i++) {
		sg = sg_table[i].sg_table;
		sg_per_table = sg_table[i].rd_sg_count;

		for (j = 0; j < sg_per_table; j++) {
			pg = sg_page(&sg[j]);
			if (pg) {
				__free_page(pg);
				page_count++;
			}
		}

		kfree(sg);
	}

	pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);

	kfree(sg_table);
	rd_dev->sg_table_array = NULL;
	rd_dev->sg_table_count = 0;
}

/* rd_build_device_space():
 *
 * Allocate the scatterlist tables and the PAGE_SIZE pages backing
 * rd_dev->rd_page_count pages of ramdisk space.
 */
static int rd_build_device_space(struct rd_dev *rd_dev)
{
	u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	struct rd_dev_sg_table *sg_table;
	struct page *pg;
	struct scatterlist *sg;

	if (!rd_dev->rd_page_count) {
		pr_err("Illegal page count: %u for Ramdisk device\n",
			rd_dev->rd_page_count);
		return -EINVAL;
	}
	total_sg_needed = rd_dev->rd_page_count;

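	/*
	 * One extra table is allocated when the page count divides evenly;
	 * its sg_table stays NULL and is skipped harmlessly on release.
	 * Illustrative arithmetic: with RD_MAX_ALLOCATION_SIZE of 64 KiB
	 * and a 32-byte struct scatterlist, max_sg_per_table is 2048, so
	 * a 65536-page (256 MiB with 4 KiB pages) device needs 33 tables.
	 */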
	sg_tables = (total_sg_needed / max_sg_per_table) + 1;

	sg_table = kcalloc(sg_tables, sizeof(struct rd_dev_sg_table),
				GFP_KERNEL);
	if (!sg_table) {
		pr_err("Unable to allocate memory for Ramdisk"
			" scatterlist tables\n");
		return -ENOMEM;
	}

	rd_dev->sg_table_array = sg_table;
	rd_dev->sg_table_count = sg_tables;

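	/*
	 * Carve the request into per-table chunks: each pass fills one
	 * rd_dev_sg_table with up to max_sg_per_table entries, backing
	 * every entry with a freshly allocated zero-order page.
	 */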
	while (total_sg_needed) {
		sg_per_table = (total_sg_needed > max_sg_per_table) ?
			max_sg_per_table : total_sg_needed;

		sg = kcalloc(sg_per_table, sizeof(struct scatterlist),
				GFP_KERNEL);
		if (!sg) {
			pr_err("Unable to allocate scatterlist array"
				" for struct rd_dev\n");
			return -ENOMEM;
		}

		sg_init_table(sg, sg_per_table);

		sg_table[i].sg_table = sg;
		sg_table[i].rd_sg_count = sg_per_table;
		sg_table[i].page_start_offset = page_offset;
		sg_table[i++].page_end_offset = (page_offset + sg_per_table)
						- 1;

		for (j = 0; j < sg_per_table; j++) {
			pg = alloc_pages(GFP_KERNEL, 0);
			if (!pg) {
				pr_err("Unable to allocate scatterlist"
					" pages for struct rd_dev_sg_table\n");
				return -ENOMEM;
			}
			sg_assign_page(&sg[j], pg);
			sg[j].length = PAGE_SIZE;
		}

		page_offset += sg_per_table;
		total_sg_needed -= sg_per_table;
	}

	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
		" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		rd_dev->rd_dev_id, rd_dev->rd_page_count,
		rd_dev->sg_table_count);

	return 0;
}

static void *rd_allocate_virtdevice(
	struct se_hba *hba,
	const char *name,
	int rd_direct)
{
	struct rd_dev *rd_dev;
	struct rd_host *rd_host = hba->hba_ptr;

	rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
	if (!rd_dev) {
		pr_err("Unable to allocate memory for struct rd_dev\n");
		return NULL;
	}

	rd_dev->rd_host = rd_host;
	rd_dev->rd_direct = rd_direct;

	return rd_dev;
}

static void *rd_MEMCPY_allocate_virtdevice(struct se_hba *hba, const char *name)
{
	return rd_allocate_virtdevice(hba, name, 0);
}

/* rd_create_virtdevice():
 *
 * Allocate the backing store and register the new ramdisk device
 * with the generic target core.
 */
static struct se_device *rd_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p,
	int rd_direct)
{
	struct se_device *dev;
	struct se_dev_limits dev_limits;
	struct rd_dev *rd_dev = p;
	struct rd_host *rd_host = hba->hba_ptr;
	int dev_flags = 0, ret;
	char prod[16], rev[4];

	memset(&dev_limits, 0, sizeof(struct se_dev_limits));

	ret = rd_build_device_space(rd_dev);
	if (ret < 0)
		goto fail;

	snprintf(prod, 16, "RAMDISK-%s", (rd_dev->rd_direct) ? "DR" : "MCP");
	snprintf(rev, 4, "%s", (rd_dev->rd_direct) ? RD_DR_VERSION :
						RD_MCP_VERSION);

	dev_limits.limits.logical_block_size = RD_BLOCKSIZE;
	dev_limits.limits.max_hw_sectors = RD_MAX_SECTORS;
	dev_limits.limits.max_sectors = RD_MAX_SECTORS;
	dev_limits.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
	dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH;

	dev = transport_add_device_to_core_hba(hba,
			&rd_mcp_template, se_dev, dev_flags, rd_dev,
			&dev_limits, prod, rev);
	if (!dev) {
		/* ret is still zero here; don't return ERR_PTR(0). */
		ret = -ENOMEM;
		goto fail;
	}

	rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
	rd_dev->rd_queue_depth = dev->queue_depth;

	pr_debug("CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of"
		" %u pages in %u tables, %lu total bytes\n",
		rd_host->rd_host_id, (!rd_dev->rd_direct) ? "MEMCPY" :
		"DIRECT", rd_dev->rd_dev_id, rd_dev->rd_page_count,
		rd_dev->sg_table_count,
		(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));

	return dev;

fail:
	rd_release_device_space(rd_dev);
	return ERR_PTR(ret);
}

static struct se_device *rd_MEMCPY_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p)
{
	return rd_create_virtdevice(hba, se_dev, p, 0);
}

/* rd_free_device(): (Part of se_subsystem_api_t template)
 *
 * Release the backing store and the struct rd_dev itself.
 */
static void rd_free_device(void *p)
{
	struct rd_dev *rd_dev = p;

	rd_release_device_space(rd_dev);
	kfree(rd_dev);
}

static inline struct rd_request *RD_REQ(struct se_task *task)
{
	return container_of(task, struct rd_request, rd_task);
}

static struct se_task *
rd_alloc_task(unsigned char *cdb)
{
	struct rd_request *rd_req;

	rd_req = kzalloc(sizeof(struct rd_request), GFP_KERNEL);
	if (!rd_req) {
		pr_err("Unable to allocate struct rd_request\n");
		return NULL;
	}

	return &rd_req->rd_task;
}

/* rd_get_sg_table():
 *
 * Linear search for the scatterlist table whose page range covers
 * the given ramdisk page number.
 */
static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
{
	u32 i;
	struct rd_dev_sg_table *sg_table;

	for (i = 0; i < rd_dev->sg_table_count; i++) {
		sg_table = &rd_dev->sg_table_array[i];
		if ((sg_table->page_start_offset <= page) &&
		    (sg_table->page_end_offset >= page))
			return sg_table;
	}

	pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
			page);

	return NULL;
}

/* rd_MEMCPY_read():
 *
 * Copy from the ramdisk's backing pages into the task's scatterlist.
 */
static int rd_MEMCPY_read(struct rd_request *req)
{
	struct se_task *task = &req->rd_task;
	struct rd_dev *dev = req->rd_task.se_dev->dev_ptr;
	struct rd_dev_sg_table *table;
	struct scatterlist *sg_d, *sg_s;
	void *dst, *src;
	u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
	u32 length, page_end = 0, table_sg_end;
	u32 rd_offset = req->rd_offset;

	table = rd_get_sg_table(dev, req->rd_page);
	if (!table)
		return -EINVAL;

	table_sg_end = (table->page_end_offset - req->rd_page);
	sg_d = task->task_sg;
	sg_s = &table->sg_table[req->rd_page - table->page_start_offset];

	pr_debug("RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:"
		" %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
		req->rd_page, req->rd_offset);

	src_offset = rd_offset;

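	/*
	 * Walk both scatterlists at once, copying the smaller of the two
	 * remaining segment lengths per pass.  Step 1 exhausts the
	 * destination segment, Step 2 exhausts the source page; only the
	 * latter can cross into the next page table.
	 */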
	while (req->rd_size) {
		if ((sg_d[i].length - dst_offset) <
		    (sg_s[j].length - src_offset)) {
			length = (sg_d[i].length - dst_offset);

			pr_debug("Step 1 - sg_d[%d]: %p length: %d"
				" offset: %u sg_s[%d].length: %u\n", i,
				&sg_d[i], sg_d[i].length, sg_d[i].offset, j,
				sg_s[j].length);
			pr_debug("Step 1 - length: %u dst_offset: %u"
				" src_offset: %u\n", length, dst_offset,
				src_offset);

			if (length > req->rd_size)
				length = req->rd_size;

			dst = sg_virt(&sg_d[i++]) + dst_offset;
			if (!dst)
				BUG();

			src = sg_virt(&sg_s[j]) + src_offset;
			if (!src)
				BUG();

			dst_offset = 0;
			/* Accumulate, the source segment is not exhausted. */
			src_offset += length;
			page_end = 0;
		} else {
			length = (sg_s[j].length - src_offset);

			pr_debug("Step 2 - sg_d[%d]: %p length: %d"
				" offset: %u sg_s[%d].length: %u\n", i,
				&sg_d[i], sg_d[i].length, sg_d[i].offset,
				j, sg_s[j].length);
			pr_debug("Step 2 - length: %u dst_offset: %u"
				" src_offset: %u\n", length, dst_offset,
				src_offset);

			if (length > req->rd_size)
				length = req->rd_size;

			dst = sg_virt(&sg_d[i]) + dst_offset;
			if (!dst)
				BUG();

			if (sg_d[i].length == (dst_offset + length)) {
				i++;
				dst_offset = 0;
			} else
				dst_offset += length;

			src = sg_virt(&sg_s[j++]) + src_offset;
			if (!src)
				BUG();

			src_offset = 0;
			page_end = 1;
		}

		memcpy(dst, src, length);

		pr_debug("page: %u, remaining size: %u, length: %u,"
			" i: %u, j: %u\n", req->rd_page,
			(req->rd_size - length), length, i, j);

		req->rd_size -= length;
		if (!req->rd_size)
			return 0;

		if (!page_end)
			continue;

		if (++req->rd_page <= table->page_end_offset) {
			pr_debug("page: %u in same page table\n",
				req->rd_page);
			continue;
		}

		pr_debug("getting new page table for page: %u\n",
			req->rd_page);

		table = rd_get_sg_table(dev, req->rd_page);
		if (!table)
			return -EINVAL;

		sg_s = &table->sg_table[j = 0];
	}

	return 0;
}

/* rd_MEMCPY_write():
 *
 * Copy from the task's scatterlist into the ramdisk's backing pages;
 * the mirror image of rd_MEMCPY_read() with source and destination
 * roles swapped.
 */
static int rd_MEMCPY_write(struct rd_request *req)
{
	struct se_task *task = &req->rd_task;
	struct rd_dev *dev = req->rd_task.se_dev->dev_ptr;
	struct rd_dev_sg_table *table;
	struct scatterlist *sg_d, *sg_s;
	void *dst, *src;
	u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
	u32 length, page_end = 0, table_sg_end;
	u32 rd_offset = req->rd_offset;

	table = rd_get_sg_table(dev, req->rd_page);
	if (!table)
		return -EINVAL;

	table_sg_end = (table->page_end_offset - req->rd_page);
	sg_d = &table->sg_table[req->rd_page - table->page_start_offset];
	sg_s = task->task_sg;

	pr_debug("RD[%d] Write LBA: %llu, Size: %u, Page: %u,"
		" Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
		req->rd_page, req->rd_offset);

	dst_offset = rd_offset;

	while (req->rd_size) {
		if ((sg_s[i].length - src_offset) <
		    (sg_d[j].length - dst_offset)) {
			length = (sg_s[i].length - src_offset);

			pr_debug("Step 1 - sg_s[%d]: %p length: %d"
				" offset: %d sg_d[%d].length: %u\n", i,
				&sg_s[i], sg_s[i].length, sg_s[i].offset,
				j, sg_d[j].length);
			pr_debug("Step 1 - length: %u src_offset: %u"
				" dst_offset: %u\n", length, src_offset,
				dst_offset);

			if (length > req->rd_size)
				length = req->rd_size;

			src = sg_virt(&sg_s[i++]) + src_offset;
			if (!src)
				BUG();

			dst = sg_virt(&sg_d[j]) + dst_offset;
			if (!dst)
				BUG();

			src_offset = 0;
			/* Accumulate, the destination page is not exhausted. */
			dst_offset += length;
			page_end = 0;
		} else {
			length = (sg_d[j].length - dst_offset);

			pr_debug("Step 2 - sg_s[%d]: %p length: %d"
				" offset: %d sg_d[%d].length: %u\n", i,
				&sg_s[i], sg_s[i].length, sg_s[i].offset,
				j, sg_d[j].length);
			pr_debug("Step 2 - length: %u src_offset: %u"
				" dst_offset: %u\n", length, src_offset,
				dst_offset);

			if (length > req->rd_size)
				length = req->rd_size;

			src = sg_virt(&sg_s[i]) + src_offset;
			if (!src)
				BUG();

			if (sg_s[i].length == (src_offset + length)) {
				i++;
				src_offset = 0;
			} else
				src_offset += length;

			dst = sg_virt(&sg_d[j++]) + dst_offset;
			if (!dst)
				BUG();

			dst_offset = 0;
			page_end = 1;
		}

		memcpy(dst, src, length);

		pr_debug("page: %u, remaining size: %u, length: %u,"
			" i: %u, j: %u\n", req->rd_page,
			(req->rd_size - length), length, i, j);

		req->rd_size -= length;
		if (!req->rd_size)
			return 0;

		if (!page_end)
			continue;

		if (++req->rd_page <= table->page_end_offset) {
			pr_debug("page: %u in same page table\n",
				req->rd_page);
			continue;
		}

		pr_debug("getting new page table for page: %u\n",
			req->rd_page);

		table = rd_get_sg_table(dev, req->rd_page);
		if (!table)
			return -EINVAL;

		sg_d = &table->sg_table[j = 0];
	}

	return 0;
}

/* rd_MEMCPY_do_task(): (Part of se_subsystem_api_t template)
 *
 * Translate the task's LBA into a ramdisk page/offset pair and
 * dispatch the memcpy in the direction the fabric requested.
 */
static int rd_MEMCPY_do_task(struct se_task *task)
{
	struct se_device *dev = task->se_dev;
	struct rd_request *req = RD_REQ(task);
	unsigned long long lba;
	int ret;

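	/*
	 * Map the LBA onto the flat page array.  Illustrative arithmetic,
	 * assuming a 512-byte block_size and 4 KiB PAGE_SIZE: LBA 10 lands
	 * in page (10 * 512) / 4096 = 1, at byte offset (10 % 8) * 512 =
	 * 1024 within that page.
	 */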
	req->rd_page = (task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size) / PAGE_SIZE;
	lba = task->task_lba;
	req->rd_offset = (do_div(lba,
			(PAGE_SIZE / dev->se_sub_dev->se_dev_attrib.block_size))) *
			dev->se_sub_dev->se_dev_attrib.block_size;
	req->rd_size = task->task_size;

	if (task->task_data_direction == DMA_FROM_DEVICE)
		ret = rd_MEMCPY_read(req);
	else
		ret = rd_MEMCPY_write(req);

	if (ret != 0)
		return ret;

	task->task_scsi_status = GOOD;
	transport_complete_task(task, 1);

	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}

/* rd_free_task(): (Part of se_subsystem_api_t template)
 *
 * Free the containing struct rd_request allocated by rd_alloc_task().
 */
static void rd_free_task(struct se_task *task)
{
	kfree(RD_REQ(task));
}

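/*
 * configfs device parameters.  Only "rd_pages" is recognized; it sets the
 * number of PAGE_SIZE pages backing the ramdisk, e.g. (illustrative path)
 * "echo rd_pages=32768 > /sys/kernel/config/target/core/rd_mcp_0/rdev/control".
 */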
enum {
	Opt_rd_pages, Opt_err
};

static match_table_t tokens = {
	{Opt_rd_pages, "rd_pages=%d"},
	{Opt_err, NULL}
};

static ssize_t rd_set_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	const char *page,
	ssize_t count)
{
	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
	char *orig, *ptr, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_rd_pages:
			match_int(args, &arg);
			rd_dev->rd_page_count = arg;
			pr_debug("RAMDISK: Referencing Page"
				" Count: %u\n", rd_dev->rd_page_count);
			rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
			break;
		default:
			break;
		}
	}

	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
{
	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;

	if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
		pr_debug("Missing rd_pages= parameter\n");
		return -EINVAL;
	}

	return 0;
}

static ssize_t rd_show_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	char *b)
{
	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
	ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: %s\n",
			rd_dev->rd_dev_id, (rd_dev->rd_direct) ?
			"rd_direct" : "rd_mcp");
	bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu"
			" SG_table_count: %u\n", rd_dev->rd_page_count,
			PAGE_SIZE, rd_dev->sg_table_count);
	return bl;
}

/* rd_get_cdb(): (Part of se_subsystem_api_t template)
 *
 * Return the CDB buffer embedded in the containing struct rd_request.
 */
static unsigned char *rd_get_cdb(struct se_task *task)
{
	struct rd_request *req = RD_REQ(task);

	return req->rd_scsi_cdb;
}

static u32 rd_get_device_rev(struct se_device *dev)
{
	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
}

static u32 rd_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}

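/*
 * READ CAPACITY expects the last addressable LBA rather than the block
 * count, hence the trailing "- 1".  E.g. (illustrative) 65536 pages of
 * 4 KiB over a 512-byte block_size report 524287 as the last LBA.
 */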
static sector_t rd_get_blocks(struct se_device *dev)
{
	struct rd_dev *rd_dev = dev->dev_ptr;
	unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
			dev->se_sub_dev->se_dev_attrib.block_size) - 1;

	return blocks_long;
}

static struct se_subsystem_api rd_mcp_template = {
	.name			= "rd_mcp",
	.transport_type		= TRANSPORT_PLUGIN_VHBA_VDEV,
	.attach_hba		= rd_attach_hba,
	.detach_hba		= rd_detach_hba,
	.allocate_virtdevice	= rd_MEMCPY_allocate_virtdevice,
	.create_virtdevice	= rd_MEMCPY_create_virtdevice,
	.free_device		= rd_free_device,
	.alloc_task		= rd_alloc_task,
	.do_task		= rd_MEMCPY_do_task,
	.free_task		= rd_free_task,
	.check_configfs_dev_params = rd_check_configfs_dev_params,
	.set_configfs_dev_params = rd_set_configfs_dev_params,
	.show_configfs_dev_params = rd_show_configfs_dev_params,
	.get_cdb		= rd_get_cdb,
	.get_device_rev		= rd_get_device_rev,
	.get_device_type	= rd_get_device_type,
	.get_blocks		= rd_get_blocks,
};

int __init rd_module_init(void)
{
	int ret;

	ret = transport_subsystem_register(&rd_mcp_template);
	if (ret < 0)
		return ret;

	return 0;
}

void rd_module_exit(void)
{
	transport_subsystem_release(&rd_mcp_template);
}