target: Add TMR_ABORT_TASK task management support
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / include / target / target_core_base.h
1 #ifndef TARGET_CORE_BASE_H
2 #define TARGET_CORE_BASE_H
3
4 #include <linux/in.h>
5 #include <linux/configfs.h>
6 #include <linux/dma-mapping.h>
7 #include <linux/blkdev.h>
8 #include <scsi/scsi_cmnd.h>
9 #include <net/sock.h>
10 #include <net/tcp.h>
11
12 #define TARGET_CORE_MOD_VERSION "v4.1.0-rc1-ml"
13 #define TARGET_CORE_VERSION TARGET_CORE_MOD_VERSION
14
15 /* Maximum Number of LUNs per Target Portal Group */
16 /* Don't raise above 511 or REPORT_LUNS needs to handle >1 page */
17 #define TRANSPORT_MAX_LUNS_PER_TPG 256
18 /*
19 * By default we use 32-byte CDBs in TCM Core and subsystem plugin code.
20 *
21 * Note that both include/scsi/scsi_cmnd.h:MAX_COMMAND_SIZE and
22 * include/linux/blkdev.h:BLOCK_MAX_CDB as of v2.6.36-rc4 still use
23 * 16-byte CDBs by default and require an extra allocation for
24 * 32-byte CDBs because of legacy issues.
25 *
26 * Within TCM Core there are no such legacy limitations, so we go ahead
27 * and use 32-byte CDBs by default and use include/scsi/scsi.h:scsi_command_size()
28 * within all TCM Core and subsystem plugin code.
29 */
30 #define TCM_MAX_COMMAND_SIZE 32
31 /*
32 * From include/scsi/scsi_cmnd.h:SCSI_SENSE_BUFFERSIZE, currently
33 * defined 96, but the real limit is 252 (or 260 including the header)
34 */
35 #define TRANSPORT_SENSE_BUFFER SCSI_SENSE_BUFFERSIZE
36 /* Used by transport_send_check_condition_and_sense() */
37 /* Byte offsets into the fixed-format sense buffer (see SPC) */
38 #define SPC_SENSE_KEY_OFFSET 2
39 #define SPC_ADD_SENSE_LEN_OFFSET 7
40 #define SPC_ASC_KEY_OFFSET 12
41 #define SPC_ASCQ_KEY_OFFSET 13
42 /* Maximum length of an initiator/target WWN or IQN string, including NUL */
43 #define TRANSPORT_IQN_LEN 224
44 /* Used by target_core_store_alua_lu_gp() and target_core_alua_lu_gp_show_attr_members() */
45 #define LU_GROUP_NAME_BUF 256
46 /* Used by core_alua_store_tg_pt_gp_info() and target_core_alua_tg_pt_gp_show_attr_members() */
47 #define TG_PT_GROUP_NAME_BUF 256
48 /* Used to parse VPD into struct t10_vpd */
49 #define VPD_TMP_BUF_SIZE 128
50 /* Used by transport_generic_cmd_sequencer() */
51 /* Fixed allocation/response lengths for common SCSI commands */
52 #define READ_BLOCK_LEN 6
53 #define READ_CAP_LEN 8
54 #define READ_POSITION_LEN 20
55 #define INQUIRY_LEN 36
56 /* Used by transport_get_inquiry_vpd_serial() */
57 #define INQUIRY_VPD_SERIAL_LEN 254
58 /* Used by transport_get_inquiry_vpd_device_ident() */
59 #define INQUIRY_VPD_DEVICE_IDENTIFIER_LEN 254
60
61 /* Attempts before moving from SHORT to LONG */
62 #define PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD 3
63 #define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT 3 /* In milliseconds */
64 #define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG 10 /* In milliseconds */
65
66 #define PYX_TRANSPORT_STATUS_INTERVAL 5 /* In seconds */
67
68 /*
69 * struct se_subsystem_dev->su_dev_flags
70 */
71 #define SDF_FIRMWARE_VPD_UNIT_SERIAL 0x00000001
72 #define SDF_EMULATED_VPD_UNIT_SERIAL 0x00000002
73 #define SDF_USING_UDEV_PATH 0x00000004
74 #define SDF_USING_ALIAS 0x00000008
75
76 /*
77 * struct se_device->dev_flags
78 */
79 #define DF_READ_ONLY 0x00000001
80 #define DF_SPC2_RESERVATIONS 0x00000002
81 #define DF_SPC2_RESERVATIONS_WITH_ISID 0x00000004
82
83 /* struct se_dev_attrib sanity values */
84 /* Default max_unmap_lba_count */
85 #define DA_MAX_UNMAP_LBA_COUNT 0
86 /* Default max_unmap_block_desc_count */
87 #define DA_MAX_UNMAP_BLOCK_DESC_COUNT 0
88 /* Default unmap_granularity */
89 #define DA_UNMAP_GRANULARITY_DEFAULT 0
90 /* Default unmap_granularity_alignment */
91 #define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT 0
92 /* Emulation for Direct Page Out */
93 #define DA_EMULATE_DPO 0
94 /* Emulation for Forced Unit Access WRITEs */
95 #define DA_EMULATE_FUA_WRITE 1
96 /* Emulation for Forced Unit Access READs */
97 #define DA_EMULATE_FUA_READ 0
98 /* Emulation for WriteCache and SYNCHRONIZE_CACHE */
99 #define DA_EMULATE_WRITE_CACHE 0
100 /* Emulation for UNIT ATTENTION Interlock Control */
101 #define DA_EMULATE_UA_INTLLCK_CTRL 0
102 /* Emulation for TASK_ABORTED status (TAS) by default */
103 #define DA_EMULATE_TAS 1
104 /* Emulation for Thin Provisioning UNMAP using block/blk-lib.c:blkdev_issue_discard() */
105 #define DA_EMULATE_TPU 0
106 /*
107 * Emulation for Thin Provisioning WRITE_SAME w/ UNMAP=1 bit using
108 * block/blk-lib.c:blkdev_issue_discard()
109 */
110 #define DA_EMULATE_TPWS 0
111 /* No Emulation for PSCSI by default */
112 #define DA_EMULATE_RESERVATIONS 0
113 /* No Emulation for PSCSI by default */
114 #define DA_EMULATE_ALUA 0
115 /* Enforce SCSI Initiator Port TransportID with 'ISID' for PR */
116 #define DA_ENFORCE_PR_ISIDS 1
117 /* Sanity bounds for the configurable max_sectors attribute */
118 #define DA_STATUS_MAX_SECTORS_MIN 16
119 #define DA_STATUS_MAX_SECTORS_MAX 8192
120 /* By default don't report non-rotating (solid state) medium */
121 #define DA_IS_NONROT 0
122 /* Queue Algorithm Modifier default for restricted reordering in control mode page */
123 #define DA_EMULATE_REST_REORD 0
124
125 /* Scratch buffer size for MODE SENSE/MODE SELECT page handling */
126 #define SE_MODE_PAGE_BUF 512
127
123
124 /* struct se_hba->hba_flags */
125 /* Single-bit flags OR'ed into struct se_hba->hba_flags */
125 enum hba_flags_table {
126 HBA_FLAGS_INTERNAL_USE = 0x01, /* HBA is reserved for TCM-internal use */
127 HBA_FLAGS_PSCSI_MODE = 0x02, /* HBA operates in pSCSI (passthrough) mode */
128 };
129
130 /* struct se_lun->lun_status */
131 /* Lifecycle state of a LUN; stored in struct se_lun->lun_status */
131 enum transport_lun_status_table {
132 TRANSPORT_LUN_STATUS_FREE = 0, /* LUN slot is unallocated */
133 TRANSPORT_LUN_STATUS_ACTIVE = 1, /* LUN is configured and exported */
134 };
135
136 /* struct se_portal_group->se_tpg_type */
137 /* Kind of target portal group; stored in struct se_portal_group->se_tpg_type */
137 enum transport_tpg_type_table {
138 TRANSPORT_TPG_TYPE_NORMAL = 0, /* regular TPG exporting LUNs */
139 TRANSPORT_TPG_TYPE_DISCOVERY = 1, /* discovery-only TPG (e.g. iSCSI SendTargets) */
140 };
141
142 /* struct se_task->task_flags */
143 /* Single-bit flags stored in struct se_task->task_flags (u16) */
143 enum se_task_flags {
144 TF_ACTIVE = (1 << 0), /* task is queued/being processed */
145 TF_SENT = (1 << 1), /* task dispatched to the backend */
146 TF_REQUEST_STOP = (1 << 2), /* stop has been requested for this task */
147 TF_HAS_SENSE = (1 << 3), /* backend supplied sense data for this task */
148 };
149
150 /* Special transport agnostic struct se_cmd->t_states */
151 /*
151 * Processing states for struct se_cmd->t_state. Numbering is sparse;
151 * the gaps are values no longer used (NOTE(review): presumably removed
151 * states — kept to preserve on-the-wire/debug value stability).
151 */
151 enum transport_state_table {
152 TRANSPORT_NO_STATE = 0,
153 TRANSPORT_NEW_CMD = 1,
154 TRANSPORT_WRITE_PENDING = 3,
155 TRANSPORT_PROCESS_WRITE = 4,
156 TRANSPORT_PROCESSING = 5,
157 TRANSPORT_COMPLETE = 6,
158 TRANSPORT_PROCESS_TMR = 9,
159 TRANSPORT_ISTATE_PROCESSING = 11,
160 TRANSPORT_NEW_CMD_MAP = 16,
161 TRANSPORT_COMPLETE_QF_WP = 18,
162 TRANSPORT_COMPLETE_QF_OK = 19,
163 };
164
165 /* Used for struct se_cmd->se_cmd_flags */
166 /* Single-bit flags OR'ed into struct se_cmd->se_cmd_flags */
166 enum se_cmd_flags_table {
167 SCF_SUPPORTED_SAM_OPCODE = 0x00000001,
168 SCF_TRANSPORT_TASK_SENSE = 0x00000002,
169 SCF_EMULATED_TASK_SENSE = 0x00000004,
170 SCF_SCSI_DATA_SG_IO_CDB = 0x00000008, /* CDB carries a data payload via SGLs */
171 SCF_SCSI_CONTROL_SG_IO_CDB = 0x00000010,
172 SCF_SCSI_NON_DATA_CDB = 0x00000020, /* CDB with no data phase */
173 SCF_SCSI_TMR_CDB = 0x00000040, /* command is a task management request */
174 SCF_SCSI_CDB_EXCEPTION = 0x00000080,
175 SCF_SCSI_RESERVATION_CONFLICT = 0x00000100,
176 SCF_FUA = 0x00000200, /* Forced Unit Access bit set in CDB */
177 SCF_SE_LUN_CMD = 0x00000800,
178 SCF_SE_ALLOW_EOO = 0x00001000,
179 SCF_BIDI = 0x00002000, /* bidirectional command */
180 SCF_SENT_CHECK_CONDITION = 0x00004000,
181 SCF_OVERFLOW_BIT = 0x00008000, /* residual overflow detected */
182 SCF_UNDERFLOW_BIT = 0x00010000, /* residual underflow detected */
183 SCF_SENT_DELAYED_TAS = 0x00020000,
184 SCF_ALUA_NON_OPTIMIZED = 0x00040000,
185 SCF_DELAYED_CMD_FROM_SAM_ATTR = 0x00080000,
186 SCF_UNUSED = 0x00100000, /* free bit; value kept so others stay stable */
187 SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00200000,
188 SCF_ACK_KREF = 0x00400000, /* fabric holds an extra cmd_kref reference */
189 };
190
191 /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
192 /* Access bits for struct se_dev_entry->lun_flags and struct se_lun->lun_access */
192 enum transport_lunflags_table {
193 TRANSPORT_LUNFLAGS_NO_ACCESS = 0x00, /* no access granted */
194 TRANSPORT_LUNFLAGS_INITIATOR_ACCESS = 0x01,
195 TRANSPORT_LUNFLAGS_READ_ONLY = 0x02,
196 TRANSPORT_LUNFLAGS_READ_WRITE = 0x04,
197 };
198
199 /* struct se_device->dev_status */
200 /* Single-bit status flags OR'ed into struct se_device->dev_status */
200 enum transport_device_status_table {
201 TRANSPORT_DEVICE_ACTIVATED = 0x01,
202 TRANSPORT_DEVICE_DEACTIVATED = 0x02,
203 TRANSPORT_DEVICE_QUEUE_FULL = 0x04,
204 TRANSPORT_DEVICE_SHUTDOWN = 0x08,
205 TRANSPORT_DEVICE_OFFLINE_ACTIVATED = 0x10,
206 TRANSPORT_DEVICE_OFFLINE_DEACTIVATED = 0x20,
207 };
208
209 /*
210 * Used by transport_send_check_condition_and_sense() and se_cmd->scsi_sense_reason
211 * to signal which ASC/ASCQ sense payload should be built.
212 */
213 /*
213 * Internal failure reasons stored in se_cmd->scsi_sense_reason.
213 * transport_send_check_condition_and_sense() maps each value to a
213 * fixed SPC sense key + ASC/ASCQ combination in the sense payload.
213 */
213 enum tcm_sense_reason_table {
214 TCM_NON_EXISTENT_LUN = 0x01,
215 TCM_UNSUPPORTED_SCSI_OPCODE = 0x02,
216 TCM_INCORRECT_AMOUNT_OF_DATA = 0x03,
217 TCM_UNEXPECTED_UNSOLICITED_DATA = 0x04,
218 TCM_SERVICE_CRC_ERROR = 0x05,
219 TCM_SNACK_REJECTED = 0x06,
220 TCM_SECTOR_COUNT_TOO_MANY = 0x07,
221 TCM_INVALID_CDB_FIELD = 0x08,
222 TCM_INVALID_PARAMETER_LIST = 0x09,
223 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE = 0x0a,
224 TCM_UNKNOWN_MODE_PAGE = 0x0b,
225 TCM_WRITE_PROTECTED = 0x0c,
226 TCM_CHECK_CONDITION_ABORT_CMD = 0x0d,
227 TCM_CHECK_CONDITION_UNIT_ATTENTION = 0x0e,
228 TCM_CHECK_CONDITION_NOT_READY = 0x0f,
229 TCM_RESERVATION_CONFLICT = 0x10,
230 };
231
232 /* Flags passed by fabric modules when submitting a command into TCM core */
232 enum target_sc_flags_table {
233 TARGET_SCF_BIDI_OP = 0x01, /* command is bidirectional */
234 TARGET_SCF_ACK_KREF = 0x02, /* caller takes an extra cmd_kref reference */
235 };
236
237 /* fabric independent task management function values */
238 /*
238 * fabric independent task management function values
238 * (modeled on the SAM task management functions; TMR_ABORT_TASK is the
238 * function added by this patch, TMR_FABRIC_TMR is a fabric-private escape)
238 */
238 enum tcm_tmreq_table {
239 TMR_ABORT_TASK = 1,
240 TMR_ABORT_TASK_SET = 2,
241 TMR_CLEAR_ACA = 3,
242 TMR_CLEAR_TASK_SET = 4,
243 TMR_LUN_RESET = 5,
244 TMR_TARGET_WARM_RESET = 6,
245 TMR_TARGET_COLD_RESET = 7,
246 TMR_FABRIC_TMR = 255,
247 };
248
249 /* fabric independent task management response values */
250 /*
250 * fabric independent task management response values,
250 * stored in struct se_tmr_req->response
250 */
250 enum tcm_tmrsp_table {
251 TMR_FUNCTION_COMPLETE = 0,
252 TMR_TASK_DOES_NOT_EXIST = 1,
253 TMR_LUN_DOES_NOT_EXIST = 2,
254 TMR_TASK_STILL_ALLEGIANT = 3,
255 TMR_TASK_FAILOVER_NOT_SUPPORTED = 4,
256 TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED = 5,
257 TMR_FUNCTION_AUTHORIZATION_FAILED = 6,
258 TMR_FUNCTION_REJECTED = 255,
259 };
260
261 /* Reference-counting wrapper embedded in se_device (dev/access/export counts) */
261 struct se_obj {
262 atomic_t obj_access_count;
263 };
264
265 /*
266 * Used by TCM Core internally to signal if ALUA emulation is enabled or
267 * disabled, or running in TCM/pSCSI passthrough mode
268 */
269 typedef enum {
270 SPC_ALUA_PASSTHROUGH, /* ALUA left to the backend device (pSCSI) */
271 SPC2_ALUA_DISABLED, /* no ALUA emulation */
272 SPC3_ALUA_EMULATED /* full SPC-3 ALUA emulation by TCM core */
273 } t10_alua_index_t;
274
275 /*
276 * Used by TCM Core internally to signal if SAM Task Attribute emulation
277 * is enabled or disabled, or running in TCM/pSCSI passthrough mode
278 */
279 typedef enum {
280 SAM_TASK_ATTR_PASSTHROUGH, /* attribute handling left to backend (pSCSI) */
281 SAM_TASK_ATTR_UNTAGGED, /* no tagged-attribute support */
282 SAM_TASK_ATTR_EMULATED /* SAM task attribute ordering emulated by TCM */
283 } t10_task_attr_index_t;
284
285 /*
286 * Used for target SCSI statistics
287 */
288 /* Index namespaces used when assigning statistics indexes */
288 typedef enum {
289 SCSI_INST_INDEX,
290 SCSI_DEVICE_INDEX,
291 SCSI_AUTH_INTR_INDEX,
292 SCSI_INDEX_TYPE_MAX /* number of index types; keep last */
293 } scsi_index_t;
294
295 struct se_cmd;
296
297 /* Per-device ALUA state: target port group bookkeeping and access checks */
297 struct t10_alua {
298 t10_alua_index_t alua_type; /* passthrough / disabled / SPC-3 emulated */
299 /* ALUA Target Port Group ID */
300 u16 alua_tg_pt_gps_counter; /* counter used when allocating new group IDs */
301 u32 alua_tg_pt_gps_count; /* number of groups on tg_pt_gps_list */
302 spinlock_t tg_pt_gps_lock; /* protects tg_pt_gps_list and counters */
303 struct se_subsystem_dev *t10_sub_dev; /* back-pointer to owning subsystem dev */
304 /* Used for default ALUA Target Port Group */
305 struct t10_alua_tg_pt_gp *default_tg_pt_gp;
306 /* Used for default ALUA Target Port Group ConfigFS group */
307 struct config_group alua_tg_pt_gps_group;
308 int (*alua_state_check)(struct se_cmd *, unsigned char *, u8 *); /* per-type access-state check hook */
309 struct list_head tg_pt_gps_list; /* all target port groups for this device */
310 };
311
312 /* ALUA Logical Unit Group: a named set of LUs sharing ALUA membership */
312 struct t10_alua_lu_gp {
313 u16 lu_gp_id; /* LU group identifier */
314 int lu_gp_valid_id; /* non-zero once lu_gp_id has been assigned */
315 u32 lu_gp_members; /* number of members on lu_gp_mem_list */
316 atomic_t lu_gp_ref_cnt;
317 spinlock_t lu_gp_lock; /* protects lu_gp_mem_list */
318 struct config_group lu_gp_group; /* configfs representation */
319 struct list_head lu_gp_node; /* link in global LU group list */
320 struct list_head lu_gp_mem_list; /* member devices (t10_alua_lu_gp_member) */
321 };
322
323 /* Membership link between one se_device and one ALUA LU group */
323 struct t10_alua_lu_gp_member {
324 bool lu_gp_assoc; /* true while associated with a LU group */
325 atomic_t lu_gp_mem_ref_cnt;
326 spinlock_t lu_gp_mem_lock;
327 struct t10_alua_lu_gp *lu_gp; /* group this member belongs to */
328 struct se_device *lu_gp_mem_dev; /* the member device */
329 struct list_head lu_gp_mem_list; /* link in lu_gp->lu_gp_mem_list */
330 };
331
332 /* ALUA Target Port Group: access state shared by a set of target ports */
332 struct t10_alua_tg_pt_gp {
333 u16 tg_pt_gp_id; /* target port group identifier */
334 int tg_pt_gp_valid_id; /* non-zero once tg_pt_gp_id has been assigned */
335 int tg_pt_gp_alua_access_status;
336 int tg_pt_gp_alua_access_type;
337 int tg_pt_gp_nonop_delay_msecs; /* delay injected in Active/NonOptimized state */
338 int tg_pt_gp_trans_delay_msecs; /* delay for state transitions */
339 int tg_pt_gp_pref; /* PREF (preferred) bit */
340 int tg_pt_gp_write_metadata; /* persist ALUA state to metadata when set */
341 /* Used by struct t10_alua_tg_pt_gp->tg_pt_gp_md_buf_len */
342 #define ALUA_MD_BUF_LEN 1024
343 u32 tg_pt_gp_md_buf_len;
344 u32 tg_pt_gp_members; /* number of ports on tg_pt_gp_mem_list */
345 atomic_t tg_pt_gp_alua_access_state; /* current ALUA access state */
346 atomic_t tg_pt_gp_ref_cnt;
347 spinlock_t tg_pt_gp_lock; /* protects tg_pt_gp_mem_list */
348 struct mutex tg_pt_gp_md_mutex; /* serializes metadata writes */
349 struct se_subsystem_dev *tg_pt_gp_su_dev; /* owning subsystem device */
350 struct config_group tg_pt_gp_group; /* configfs representation */
351 struct list_head tg_pt_gp_list; /* link in t10_alua->tg_pt_gps_list */
352 struct list_head tg_pt_gp_mem_list; /* member ports (t10_alua_tg_pt_gp_member) */
353 };
354
355 /* Membership link between one se_port and one ALUA target port group */
355 struct t10_alua_tg_pt_gp_member {
356 bool tg_pt_gp_assoc; /* true while associated with a group */
357 atomic_t tg_pt_gp_mem_ref_cnt;
358 spinlock_t tg_pt_gp_mem_lock;
359 struct t10_alua_tg_pt_gp *tg_pt_gp; /* group this port belongs to */
360 struct se_port *tg_pt; /* the member target port */
361 struct list_head tg_pt_gp_mem_list; /* link in tg_pt_gp->tg_pt_gp_mem_list */
362 };
363
364 /* One parsed INQUIRY VPD page 0x83 designation descriptor */
364 struct t10_vpd {
365 unsigned char device_identifier[INQUIRY_VPD_DEVICE_IDENTIFIER_LEN];
366 int protocol_identifier_set; /* non-zero if protocol_identifier is valid */
367 u32 protocol_identifier;
368 u32 device_identifier_code_set;
369 u32 association;
370 u32 device_identifier_type;
371 struct list_head vpd_list; /* link in t10_wwn->t10_vpd_list */
372 };
373
374 /* T10 standard INQUIRY identity and collected VPD data for a device */
374 struct t10_wwn {
375 char vendor[8]; /* INQUIRY T10 VENDOR IDENTIFICATION */
376 char model[16]; /* INQUIRY PRODUCT IDENTIFICATION */
377 char revision[4]; /* INQUIRY PRODUCT REVISION LEVEL */
378 char unit_serial[INQUIRY_VPD_SERIAL_LEN]; /* VPD page 0x80 unit serial */
379 spinlock_t t10_vpd_lock; /* protects t10_vpd_list */
380 struct se_subsystem_dev *t10_sub_dev; /* back-pointer to owning subsystem dev */
381 struct config_group t10_wwn_group; /* configfs representation */
382 struct list_head t10_vpd_list; /* parsed struct t10_vpd entries */
383 };
384
385
386 /*
387 * Used by TCM Core internally to signal if >= SPC-3 persistent reservations
388 * emulation is enabled or disabled, or running in TCM/pSCSI passthrough
389 * mode
390 */
391 typedef enum {
392 SPC_PASSTHROUGH, /* reservations handled by backend device (pSCSI) */
393 SPC2_RESERVATIONS, /* legacy SPC-2 RESERVE/RELEASE emulation */
394 SPC3_PERSISTENT_RESERVATIONS /* SPC-3 persistent reservations emulation */
395 } t10_reservations_index_t;
396
397 /*
397 * One SPC-3 Persistent Reservation registration for an I_T nexus,
397 * including the APTPL metadata needed to restore it after power loss.
397 */
397 struct t10_pr_registration {
398 /* Used for fabrics that contain WWN+ISID */
399 #define PR_REG_ISID_LEN 16
400 /* PR_REG_ISID_LEN + ',i,0x' */
401 #define PR_REG_ISID_ID_LEN (PR_REG_ISID_LEN + 5)
402 char pr_reg_isid[PR_REG_ISID_LEN];
403 /* Used during APTPL metadata reading */
404 #define PR_APTPL_MAX_IPORT_LEN 256
405 unsigned char pr_iport[PR_APTPL_MAX_IPORT_LEN];
406 /* Used during APTPL metadata reading */
407 #define PR_APTPL_MAX_TPORT_LEN 256
408 unsigned char pr_tport[PR_APTPL_MAX_TPORT_LEN];
409 /* For writing out live meta data */
410 unsigned char *pr_aptpl_buf;
411 u16 pr_aptpl_rpti; /* relative target port id from APTPL metadata */
412 u16 pr_reg_tpgt; /* target portal group tag of the registration */
413 /* Reservation effects all target ports */
414 int pr_reg_all_tg_pt;
415 /* Activate Persistence across Target Power Loss */
416 int pr_reg_aptpl;
417 int pr_res_holder; /* non-zero if this registration holds the reservation */
418 int pr_res_type; /* PR TYPE (scope of access restriction) */
419 int pr_res_scope; /* PR SCOPE */
420 /* Used for fabric initiator WWPNs using a ISID */
421 bool isid_present_at_reg;
422 u32 pr_res_mapped_lun;
423 u32 pr_aptpl_target_lun;
424 u32 pr_res_generation; /* PR generation at registration time */
425 u64 pr_reg_bin_isid; /* binary-encoded ISID */
426 u64 pr_res_key; /* 8-byte reservation key */
427 atomic_t pr_res_holders;
428 struct se_node_acl *pr_reg_nacl; /* registered initiator node ACL */
429 struct se_dev_entry *pr_reg_deve; /* mapped LUN entry for the registration */
430 struct se_lun *pr_reg_tg_pt_lun; /* target port LUN of the registration */
431 struct list_head pr_reg_list;
432 struct list_head pr_reg_abort_list;
433 struct list_head pr_reg_aptpl_list;
434 struct list_head pr_reg_atp_list;
435 struct list_head pr_reg_atp_mem_list;
436 };
437
438 /*
439 * This set of function pointer ops is set based upon SPC3_PERSISTENT_RESERVATIONS,
440 * SPC2_RESERVATIONS or SPC_PASSTHROUGH in drivers/target/target_core_pr.c:
441 * core_setup_reservations()
442 */
443 /* Reservation-model dispatch table; see core_setup_reservations() */
443 struct t10_reservation_ops {
444 int (*t10_reservation_check)(struct se_cmd *, u32 *); /* conflict check per incoming cmd */
445 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32); /* allow/deny for non-holder nexus */
446 int (*t10_pr_register)(struct se_cmd *);
447 int (*t10_pr_clear)(struct se_cmd *);
448 };
449
450 /* Per-device reservation state (SPC-2 and SPC-3 PR emulation) */
450 struct t10_reservation {
451 /* Reservation effects all target ports */
452 int pr_all_tg_pt;
453 /* Activate Persistence across Target Power Loss enabled
454 * for SCSI device */
455 int pr_aptpl_active;
456 /* Used by struct t10_reservation->pr_aptpl_buf_len */
457 #define PR_APTPL_BUF_LEN 8192
458 u32 pr_aptpl_buf_len;
459 u32 pr_generation; /* PR GENERATION counter (PERSISTENT RESERVE IN) */
460 t10_reservations_index_t res_type; /* active reservation model */
461 spinlock_t registration_lock; /* protects registration_list */
462 spinlock_t aptpl_reg_lock; /* protects aptpl_reg_list */
463 /*
464 * This will always be set by one individual I_T Nexus.
465 * However with all_tg_pt=1, other I_T Nexus from the
466 * same initiator can access PR reg/res info on a different
467 * target port.
468 *
469 * There is also the 'All Registrants' case, where there is
470 * a single *pr_res_holder of the reservation, but all
471 * registrations are considered reservation holders.
472 */
473 struct se_node_acl *pr_res_holder;
474 struct list_head registration_list; /* live t10_pr_registration entries */
475 struct list_head aptpl_reg_list; /* registrations restored from APTPL metadata */
476 struct t10_reservation_ops pr_ops; /* model-specific hooks */
477 };
478
479 /* Simple work queue: list of queued items plus a waitqueue for the consumer */
479 struct se_queue_obj {
480 atomic_t queue_cnt; /* number of entries on qobj_list */
481 spinlock_t cmd_queue_lock; /* protects qobj_list */
482 struct list_head qobj_list;
483 wait_queue_head_t thread_wq; /* processing thread sleeps here when empty */
484 };
485
486 /* One backend unit of work carved out of a struct se_cmd */
486 struct se_task {
487 unsigned long long task_lba; /* starting LBA for this task */
488 u32 task_sectors; /* number of sectors covered */
489 u32 task_size; /* payload size in bytes */
490 struct se_cmd *task_se_cmd; /* parent command */
491 struct scatterlist *task_sg; /* data SGL for this task */
492 u32 task_sg_nents;
493 u16 task_flags; /* see enum se_task_flags */
494 u8 task_scsi_status; /* SCSI status returned by the backend */
495 enum dma_data_direction task_data_direction;
496 struct list_head t_list; /* link in se_cmd->t_task_list */
497 struct list_head t_execute_list; /* link in device execute queue */
498 struct list_head t_state_list; /* link in device state list */
499 bool t_state_active; /* true while on the device state list */
500 struct completion task_stop_comp; /* signaled when a stop request completes */
501 };
502
503 /* State for one task management request (see enum tcm_tmreq_table) */
503 struct se_tmr_req {
504 /* Task Management function to be performed */
505 u8 function; /* value from enum tcm_tmreq_table */
506 /* Task Management response to send */
507 u8 response; /* value from enum tcm_tmrsp_table */
508 int call_transport;
509 /* Reference to ITT that Task Mgmt should be performed */
510 u32 ref_task_tag; /* used by TMR_ABORT_TASK to locate the referenced cmd */
511 /* 64-bit encoded SAM LUN from $FABRIC_MOD TMR header */
512 u64 ref_task_lun;
513 void *fabric_tmr_ptr; /* opaque fabric-module private data */
514 struct se_cmd *task_cmd; /* the se_cmd carrying this TMR */
515 struct se_cmd *ref_cmd; /* the command being referenced/aborted */
516 struct se_device *tmr_dev; /* device the TMR operates on */
517 struct se_lun *tmr_lun;
518 struct list_head tmr_list; /* link in se_device->dev_tmr_list */
519 };
520
521 /*
521 * Core descriptor for one SCSI command (or TMR) flowing through TCM,
521 * shared between the fabric module front-end and the backend plugins.
521 */
521 struct se_cmd {
522 /* SAM response code being sent to initiator */
523 u8 scsi_status;
524 u8 scsi_asc; /* additional sense code */
525 u8 scsi_ascq; /* additional sense code qualifier */
526 u8 scsi_sense_reason; /* see enum tcm_sense_reason_table */
527 u16 scsi_sense_length;
528 /* Delay for ALUA Active/NonOptimized state access in milliseconds */
529 int alua_nonop_delay;
530 /* See include/linux/dma-mapping.h */
531 enum dma_data_direction data_direction;
532 /* For SAM Task Attribute */
533 int sam_task_attr;
534 /* Transport protocol dependent state, see transport_state_table */
535 enum transport_state_table t_state;
536 /* Used to signal cmd->se_tfo->check_release_cmd() usage per cmd */
537 unsigned check_release:1;
538 unsigned cmd_wait_set:1;
539 /* See se_cmd_flags_table */
540 u32 se_cmd_flags;
541 u32 se_ordered_id;
542 /* Total size in bytes associated with command */
543 u32 data_length;
544 /* SCSI Presented Data Transfer Length */
545 u32 cmd_spdtl;
546 u32 residual_count; /* over/underflow residual (see SCF_*FLOW_BIT) */
547 u32 orig_fe_lun; /* unpacked LUN as presented by the fabric */
548 /* Persistent Reservation key */
549 u64 pr_res_key;
550 /* Used for sense data */
551 void *sense_buffer;
552 struct list_head se_delayed_node; /* link in device delayed_cmd_list */
553 struct list_head se_lun_node; /* link in se_lun->lun_cmd_list */
554 struct list_head se_qf_node; /* link in device qf_cmd_list (queue-full retry) */
555 struct se_device *se_dev;
556 struct se_dev_entry *se_deve;
557 struct se_lun *se_lun;
558 /* Only used for internal passthrough and legacy TCM fabric modules */
559 struct se_session *se_sess;
560 struct se_tmr_req *se_tmr_req; /* non-NULL only for task management requests */
561 struct list_head se_queue_node;
562 struct list_head se_cmd_list; /* link in se_session->sess_cmd_list */
563 struct completion cmd_wait_comp;
564 struct kref cmd_kref;
565 struct target_core_fabric_ops *se_tfo; /* fabric-module callback template */
566 int (*execute_task)(struct se_task *); /* backend execution hook */
567 void (*transport_complete_callback)(struct se_cmd *);
568
569 unsigned char *t_task_cdb; /* points at __t_task_cdb or a larger allocation */
570 unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
571 unsigned long long t_task_lba;
572 u32 t_tasks_sg_chained_no;
573 atomic_t t_fe_count;
574 atomic_t t_se_count;
575 atomic_t t_task_cdbs_left;
576 atomic_t t_task_cdbs_ex_left;
577 atomic_t t_task_cdbs_sent;
578 /* CMD_T_* bit flags below; protected by t_state_lock */
579 unsigned int transport_state;
580 #define CMD_T_ABORTED (1 << 0)
581 #define CMD_T_ACTIVE (1 << 1)
582 #define CMD_T_COMPLETE (1 << 2)
583 #define CMD_T_QUEUED (1 << 3)
584 #define CMD_T_SENT (1 << 4)
585 #define CMD_T_STOP (1 << 5)
586 #define CMD_T_FAILED (1 << 6)
587 #define CMD_T_LUN_STOP (1 << 7)
588 #define CMD_T_LUN_FE_STOP (1 << 8)
589 #define CMD_T_DEV_ACTIVE (1 << 9)
590 spinlock_t t_state_lock;
591 struct completion t_transport_stop_comp;
592 struct completion transport_lun_fe_stop_comp;
593 struct completion transport_lun_stop_comp;
594 struct scatterlist *t_tasks_sg_chained;
595
596 struct work_struct work;
597
598 struct scatterlist *t_data_sg; /* data-out/data-in SGL */
599 unsigned int t_data_nents;
600 void *t_data_vmap; /* vmap of t_data_sg when a linear view is needed */
601 struct scatterlist *t_bidi_data_sg;
602 unsigned int t_bidi_data_nents;
603
604 /* Used for BIDI READ */
605 struct list_head t_task_list; /* se_task list for this command */
606 u32 t_task_list_num;
607
608 };
608
609 /* One pending UNIT ATTENTION condition for an initiator node */
609 struct se_ua {
610 u8 ua_asc; /* additional sense code */
611 u8 ua_ascq; /* additional sense code qualifier */
612 struct se_node_acl *ua_nacl; /* node the UA will be reported to */
613 struct list_head ua_dev_list;
614 struct list_head ua_nacl_list; /* link in se_dev_entry->ua_list */
615 };
616
617 /* Per-initiator-node ACL within a target portal group */
617 struct se_node_acl {
618 char initiatorname[TRANSPORT_IQN_LEN]; /* initiator WWN/IQN string */
619 /* Used to signal demo mode created ACL, disabled by default */
620 bool dynamic_node_acl;
621 u32 queue_depth; /* per-node command queue depth */
622 u32 acl_index;
623 u64 num_cmds; /* statistics, guarded by stats_lock */
624 u64 read_bytes;
625 u64 write_bytes;
626 spinlock_t stats_lock;
627 /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
628 atomic_t acl_pr_ref_count;
629 struct se_dev_entry *device_list; /* mapped LUN table for this node */
630 struct se_session *nacl_sess; /* active session, guarded by nacl_sess_lock */
631 struct se_portal_group *se_tpg; /* owning TPG */
632 spinlock_t device_list_lock;
633 spinlock_t nacl_sess_lock;
634 struct config_group acl_group; /* configfs groups below the node ACL */
635 struct config_group acl_attrib_group;
636 struct config_group acl_auth_group;
637 struct config_group acl_param_group;
638 struct config_group acl_fabric_stat_group;
639 struct config_group *acl_default_groups[5];
640 struct list_head acl_list; /* link in tpg->acl_node_list */
641 struct list_head acl_sess_list;
642 };
643
644 /* One active I_T nexus (login) between an initiator and a TPG */
644 struct se_session {
645 unsigned sess_tearing_down:1; /* set once session shutdown has started */
646 u64 sess_bin_isid; /* binary-encoded ISID of the nexus */
647 struct se_node_acl *se_node_acl; /* ACL the session authenticated as */
648 struct se_portal_group *se_tpg; /* owning TPG */
649 void *fabric_sess_ptr; /* opaque fabric-module session data */
650 struct list_head sess_list; /* link in tpg->tpg_sess_list */
651 struct list_head sess_acl_list;
652 struct list_head sess_cmd_list; /* outstanding commands, under sess_cmd_lock */
653 struct list_head sess_wait_list; /* commands being waited on at teardown */
654 spinlock_t sess_cmd_lock;
655 };
656
657 struct se_device;
658 struct se_transform_info;
659 struct scatterlist;
660
661 /* configfs statistics groups for a mapped LUN (target_core_stat.c) */
661 struct se_ml_stat_grps {
662 struct config_group stat_group;
663 struct config_group scsi_auth_intr_group;
664 struct config_group scsi_att_intr_port_group;
665 };
666
667 /* Mapped-LUN ACL: grants one initiator node access to one LUN */
667 struct se_lun_acl {
668 char initiatorname[TRANSPORT_IQN_LEN];
669 u32 mapped_lun; /* LUN number as seen by this initiator */
670 struct se_node_acl *se_lun_nacl; /* owning node ACL */
671 struct se_lun *se_lun; /* backing TPG LUN */
672 struct list_head lacl_list; /* link in se_lun->lun_acl_list */
673 struct config_group se_lun_group; /* configfs representation */
674 struct se_ml_stat_grps ml_stat_grps; /* statistics groups */
675 };
676
677 /* Per-node mapped LUN table entry (indexed by mapped_lun) */
677 struct se_dev_entry {
678 bool def_pr_registered; /* a PR registration exists for this entry */
679 /* See transport_lunflags_table */
680 u32 lun_flags;
681 u32 deve_cmds; /* commands currently outstanding via this entry */
682 u32 mapped_lun; /* LUN number presented to the initiator */
683 u32 average_bytes; /* statistics */
684 u32 last_byte_count;
685 u32 total_cmds;
686 u32 total_bytes;
687 u64 pr_res_key; /* PR reservation key registered via this entry */
688 u64 creation_time;
689 u32 attach_count;
690 u64 read_bytes;
691 u64 write_bytes;
692 atomic_t ua_count; /* pending UNIT ATTENTIONs on ua_list */
693 /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
694 atomic_t pr_ref_count;
695 struct se_lun_acl *se_lun_acl; /* explicit ACL backing this entry, if any */
696 spinlock_t ua_lock; /* protects ua_list */
697 struct se_lun *se_lun; /* backing TPG LUN */
698 struct list_head alua_port_list;
699 struct list_head ua_list; /* pending struct se_ua entries */
700 };
701
702 /* Backend-reported limits used to seed se_dev_attrib at device creation */
702 struct se_dev_limits {
703 /* Max supported HW queue depth */
704 u32 hw_queue_depth;
705 /* Max supported virtual queue depth */
706 u32 queue_depth;
707 /* From include/linux/blkdev.h for the other HW/SW limits. */
708 struct queue_limits limits;
709 };
710
711 /*
711 * Tunable per-device attributes exposed via configfs; defaults come
711 * from the DA_* constants defined at the top of this header.
711 */
711 struct se_dev_attrib {
712 int emulate_dpo;
713 int emulate_fua_write;
714 int emulate_fua_read;
715 int emulate_write_cache;
716 int emulate_ua_intlck_ctrl;
717 int emulate_tas;
718 int emulate_tpu;
719 int emulate_tpws;
720 int emulate_reservations;
721 int emulate_alua;
722 int enforce_pr_isids;
723 int is_nonrot;
724 int emulate_rest_reord;
725 u32 hw_block_size; /* backend hardware block size */
726 u32 block_size; /* exported logical block size */
727 u32 hw_max_sectors;
728 u32 max_sectors;
729 u32 optimal_sectors;
730 u32 hw_queue_depth;
731 u32 queue_depth;
732 u32 max_unmap_lba_count;
733 u32 max_unmap_block_desc_count;
734 u32 unmap_granularity;
735 u32 unmap_granularity_alignment;
736 struct se_subsystem_dev *da_sub_dev; /* back-pointer to owning subsystem dev */
737 struct config_group da_group; /* configfs representation */
738 };
739
740 /* configfs statistics groups for a device (target_core_stat.c) */
740 struct se_dev_stat_grps {
741 struct config_group stat_group;
742 struct config_group scsi_dev_group;
743 struct config_group scsi_tgt_dev_group;
744 struct config_group scsi_lu_group;
745 };
746
747 /* configfs-side container that owns one backend se_device and its T10 state */
747 struct se_subsystem_dev {
748 /* Used for struct se_subsystem_dev->se_dev_alias, must be less than PAGE_SIZE */
749 #define SE_DEV_ALIAS_LEN 512
750 unsigned char se_dev_alias[SE_DEV_ALIAS_LEN];
751 /* Used for struct se_subsystem_dev->se_dev_udev_path[], must be less than PAGE_SIZE */
752 #define SE_UDEV_PATH_LEN 512
753 unsigned char se_dev_udev_path[SE_UDEV_PATH_LEN];
754 u32 su_dev_flags; /* SDF_* flags defined above */
755 struct se_hba *se_dev_hba; /* HBA this device hangs off */
756 struct se_device *se_dev_ptr; /* the activated backend device, if any */
757 struct se_dev_attrib se_dev_attrib;
758 /* T10 Asymmetric Logical Unit Assignment for Target Ports */
759 struct t10_alua t10_alua;
760 /* T10 Inquiry and VPD WWN Information */
761 struct t10_wwn t10_wwn;
762 /* T10 SPC-2 + SPC-3 Reservations */
763 struct t10_reservation t10_pr;
764 spinlock_t se_dev_lock;
765 void *se_dev_su_ptr; /* opaque subsystem-plugin private data */
766 struct config_group se_dev_group;
767 /* For T10 Reservations */
768 struct config_group se_dev_pr_group;
769 /* For target_core_stat.c groups */
770 struct se_dev_stat_grps dev_stat_grps;
771 };
772
773 /* Runtime state of one activated backend storage object */
773 struct se_device {
774 /* RELATIVE TARGET PORT IDENTIFIER Counter */
775 u16 dev_rpti_counter;
776 /* Used for SAM Task Attribute ordering */
777 u32 dev_cur_ordered_id;
778 u32 dev_flags; /* DF_* flags defined above */
779 u32 dev_port_count; /* number of exported se_port instances */
780 /* See transport_device_status_table */
781 u32 dev_status;
782 /* Physical device queue depth */
783 u32 queue_depth;
784 /* Used for SPC-2 reservations enforce of ISIDs */
785 u64 dev_res_bin_isid;
786 t10_task_attr_index_t dev_task_attr_type; /* SAM task attribute handling mode */
787 /* Pointer to transport specific device structure */
788 void *dev_ptr;
789 u32 dev_index;
790 u64 creation_time;
791 u32 num_resets; /* statistics, guarded by stats_lock */
792 u64 num_cmds;
793 u64 read_bytes;
794 u64 write_bytes;
795 spinlock_t stats_lock;
796 /* Active commands on this virtual SE device */
797 atomic_t simple_cmds;
798 atomic_t dev_ordered_id;
799 atomic_t execute_tasks;
800 atomic_t dev_ordered_sync;
801 atomic_t dev_qf_count; /* commands pending queue-full retry */
802 struct se_obj dev_obj;
803 struct se_obj dev_access_obj;
804 struct se_obj dev_export_obj;
805 struct se_queue_obj dev_queue_obj; /* queue serviced by process_thread */
806 spinlock_t delayed_cmd_lock; /* protects delayed_cmd_list */
807 spinlock_t execute_task_lock; /* protects execute_task_list/state_task_list */
808 spinlock_t dev_reservation_lock;
809 spinlock_t dev_status_lock;
810 spinlock_t se_port_lock; /* protects dev_sep_list */
811 spinlock_t se_tmr_lock; /* protects dev_tmr_list */
812 spinlock_t qf_cmd_lock; /* protects qf_cmd_list */
813 /* Used for legacy SPC-2 reservations */
814 struct se_node_acl *dev_reserved_node_acl;
815 /* Used for ALUA Logical Unit Group membership */
816 struct t10_alua_lu_gp_member *dev_alua_lu_gp_mem;
817 /* Used for SPC-3 Persistent Reservations */
818 struct t10_pr_registration *dev_pr_res_holder;
819 struct list_head dev_sep_list; /* exported ports (struct se_port) */
820 struct list_head dev_tmr_list; /* outstanding TMRs (struct se_tmr_req) */
821 /* Pointer to descriptor for processing thread */
822 struct task_struct *process_thread;
823 struct work_struct qf_work_queue; /* requeue work for queue-full commands */
824 struct list_head delayed_cmd_list; /* cmds held back by SAM attribute ordering */
825 struct list_head execute_task_list;
826 struct list_head state_task_list;
827 struct list_head qf_cmd_list;
828 /* Pointer to associated SE HBA */
829 struct se_hba *se_hba;
830 struct se_subsystem_dev *se_sub_dev; /* owning configfs container */
831 /* Pointer to template of function pointers for transport */
832 struct se_subsystem_api *transport;
833 /* Linked list for struct se_hba struct se_device list */
834 struct list_head dev_list;
835 };
836
837 /* One instance of a backend subsystem plugin (virtual HBA) */
837 struct se_hba {
838 u16 hba_tpgt;
839 u32 hba_id;
840 /* See hba_flags_table */
841 u32 hba_flags;
842 /* Virtual iSCSI devices attached. */
843 u32 dev_count;
844 u32 hba_index;
845 /* Pointer to transport specific host structure. */
846 void *hba_ptr;
847 /* Linked list for struct se_device */
848 struct list_head hba_dev_list;
849 struct list_head hba_node; /* link in global HBA list */
850 spinlock_t device_lock; /* protects hba_dev_list */
851 struct config_group hba_group; /* configfs representation */
852 struct mutex hba_access_mutex; /* serializes device add/remove on this HBA */
853 struct se_subsystem_api *transport; /* backend plugin callback template */
854 };
855
856 /* configfs statistics groups for a target port (target_core_stat.c) */
856 struct se_port_stat_grps {
857 struct config_group stat_group;
858 struct config_group scsi_port_group;
859 struct config_group scsi_tgt_port_group;
860 struct config_group scsi_transport_group;
861 };
862
863 /* One LUN exported by a target portal group */
863 struct se_lun {
864 /* See transport_lun_status_table */
865 enum transport_lun_status_table lun_status;
866 u32 lun_access; /* see transport_lunflags_table */
867 u32 lun_flags;
868 u32 unpacked_lun; /* LUN number within the TPG */
869 atomic_t lun_acl_count; /* number of mapped-LUN ACLs on lun_acl_list */
870 spinlock_t lun_acl_lock; /* protects lun_acl_list */
871 spinlock_t lun_cmd_lock; /* protects lun_cmd_list */
872 spinlock_t lun_sep_lock; /* protects lun_sep */
873 struct completion lun_shutdown_comp; /* signaled when LUN shutdown finishes */
874 struct list_head lun_cmd_list; /* commands active on this LUN */
875 struct list_head lun_acl_list; /* struct se_lun_acl entries */
876 struct se_device *lun_se_dev; /* backing device */
877 struct se_port *lun_sep; /* target port exporting this LUN */
878 struct config_group lun_group; /* configfs representation */
879 struct se_port_stat_grps port_stat_grps; /* statistics groups */
880 };
881
882 /* Per-port traffic counters */
882 struct scsi_port_stats {
883 u64 cmd_pdus; /* number of command PDUs received */
884 u64 tx_data_octets; /* bytes sent to the initiator */
885 u64 rx_data_octets; /* bytes received from the initiator */
886 };
887
888 /* Binding of one se_lun to one target portal group (a SCSI target port) */
888 struct se_port {
889 /* RELATIVE TARGET PORT IDENTIFIER */
890 u16 sep_rtpi;
891 int sep_tg_pt_secondary_stat;
892 int sep_tg_pt_secondary_write_md;
893 u32 sep_index;
894 struct scsi_port_stats sep_stats; /* traffic counters */
895 /* Used for ALUA Target Port Groups membership */
896 atomic_t sep_tg_pt_secondary_offline;
897 /* Used for PR ALL_TG_PT=1 */
898 atomic_t sep_tg_pt_ref_cnt;
899 spinlock_t sep_alua_lock;
900 struct mutex sep_tg_pt_md_mutex; /* serializes ALUA metadata writes */
901 struct t10_alua_tg_pt_gp_member *sep_alua_tg_pt_gp_mem; /* ALUA group membership */
902 struct se_lun *sep_lun; /* LUN exported through this port */
903 struct se_portal_group *sep_tpg; /* owning TPG */
904 struct list_head sep_alua_list;
905 struct list_head sep_list; /* link in se_device->dev_sep_list */
906 };
907
908 /* configfs representation of a TPG network portal */
908 struct se_tpg_np {
909 struct se_portal_group *tpg_np_parent; /* TPG this portal belongs to */
910 struct config_group tpg_np_group;
911 };
912
913 /* One target portal group (TPG) within a fabric endpoint (se_wwn) */
913 struct se_portal_group {
914 /* Type of target portal group, see transport_tpg_type_table */
915 enum transport_tpg_type_table se_tpg_type;
916 /* Number of ACLed Initiator Nodes for this TPG */
917 u32 num_node_acls;
918 /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
919 atomic_t tpg_pr_ref_count;
920 /* Spinlock for adding/removing ACLed Nodes */
921 spinlock_t acl_node_lock;
922 /* Spinlock for adding/removing sessions */
923 spinlock_t session_lock;
924 spinlock_t tpg_lun_lock; /* protects tpg_lun_list */
925 /* Pointer to $FABRIC_MOD portal group */
926 void *se_tpg_fabric_ptr;
927 struct list_head se_tpg_node; /* link in global TPG list */
928 /* linked list for initiator ACL list */
929 struct list_head acl_node_list;
930 struct se_lun *tpg_lun_list; /* table of TRANSPORT_MAX_LUNS_PER_TPG LUNs */
931 struct se_lun tpg_virt_lun0; /* built-in virtual LUN 0 */
932 /* List of TCM sessions associated with this TPG */
933 struct list_head tpg_sess_list;
934 /* Pointer to $FABRIC_MOD dependent code */
935 struct target_core_fabric_ops *se_tpg_tfo;
936 struct se_wwn *se_tpg_wwn; /* parent fabric endpoint */
937 struct config_group tpg_group; /* configfs groups below the TPG */
938 struct config_group *tpg_default_groups[6];
939 struct config_group tpg_lun_group;
940 struct config_group tpg_np_group;
941 struct config_group tpg_acl_group;
942 struct config_group tpg_attrib_group;
943 struct config_group tpg_param_group;
944 };
945
946 /* A fabric endpoint (target WWN) under which TPGs are created */
946 struct se_wwn {
947 struct target_fabric_configfs *wwn_tf; /* owning fabric configfs template */
948 struct config_group wwn_group;
949 struct config_group *wwn_default_groups[2];
950 struct config_group fabric_stat_group;
951 };
952
953 #endif /* TARGET_CORE_BASE_H */
952
953 #endif /* TARGET_CORE_BASE_H */