| 1 | #ifndef TARGET_CORE_BASE_H |
| 2 | #define TARGET_CORE_BASE_H |
| 3 | |
| 4 | #include <linux/in.h> |
| 5 | #include <linux/configfs.h> |
| 6 | #include <linux/dma-mapping.h> |
| 7 | #include <linux/blkdev.h> |
| 8 | #include <linux/percpu_ida.h> |
| 9 | #include <linux/t10-pi.h> |
| 10 | #include <net/sock.h> |
| 11 | #include <net/tcp.h> |
| 12 | |
#define TARGET_CORE_VERSION	"v5.0"

/*
 * Maximum size of a CDB that can be stored in se_cmd without allocating
 * memory dynamically for the CDB.
 */
#define TCM_MAX_COMMAND_SIZE			32
/*
 * From include/scsi/scsi_cmnd.h:SCSI_SENSE_BUFFERSIZE, currently
 * defined 96, but the real limit is 252 (or 260 including the header)
 */
#define TRANSPORT_SENSE_BUFFER			96
/*
 * Byte offsets into sense data payloads, used by
 * transport_send_check_condition_and_sense()
 */
#define SPC_SENSE_KEY_OFFSET			2
#define SPC_ADD_SENSE_LEN_OFFSET		7
#define SPC_DESC_TYPE_OFFSET			8
#define SPC_ADDITIONAL_DESC_LEN_OFFSET		9
#define SPC_VALIDITY_OFFSET			10
#define SPC_ASC_KEY_OFFSET			12
#define SPC_ASCQ_KEY_OFFSET			13
/* Maximum length of an iSCSI Qualified Name, including NUL */
#define TRANSPORT_IQN_LEN			224
/* Used by target_core_store_alua_lu_gp() and target_core_alua_lu_gp_show_attr_members() */
#define LU_GROUP_NAME_BUF			256
/* Used by core_alua_store_tg_pt_gp_info() and target_core_alua_tg_pt_gp_show_attr_members() */
#define TG_PT_GROUP_NAME_BUF			256
/* Used to parse VPD into struct t10_vpd */
#define VPD_TMP_BUF_SIZE			254
/* Used by transport_generic_cmd_sequencer() */
#define READ_BLOCK_LEN				6
#define READ_CAP_LEN				8
#define READ_POSITION_LEN			20
#define INQUIRY_LEN				36
/* Used by transport_get_inquiry_vpd_serial() */
#define INQUIRY_VPD_SERIAL_LEN			254
/* Used by transport_get_inquiry_vpd_device_ident() */
#define INQUIRY_VPD_DEVICE_IDENTIFIER_LEN	254

/* Attempts before moving from SHORT to LONG */
#define PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD	3
#define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT	3  /* In milliseconds */
#define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG	10 /* In milliseconds */

#define PYX_TRANSPORT_STATUS_INTERVAL		5 /* In seconds */

/*
 * struct se_dev_attrib sanity values -- defaults applied when a backend
 * device is created; tunable afterwards via configfs.
 */
/* Default max_unmap_lba_count */
#define DA_MAX_UNMAP_LBA_COUNT			0
/* Default max_unmap_block_desc_count */
#define DA_MAX_UNMAP_BLOCK_DESC_COUNT		0
/* Default unmap_granularity */
#define DA_UNMAP_GRANULARITY_DEFAULT		0
/* Default unmap_granularity_alignment */
#define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT	0
/* Default unmap_zeroes_data */
#define DA_UNMAP_ZEROES_DATA_DEFAULT		0
/* Default max_write_same_len, disabled by default */
#define DA_MAX_WRITE_SAME_LEN			0
/* Use a model alias based on the configfs backend device name */
#define DA_EMULATE_MODEL_ALIAS			0
/* Emulation for WriteCache and SYNCHRONIZE_CACHE */
#define DA_EMULATE_WRITE_CACHE			0
/* Emulation for UNIT ATTENTION Interlock Control */
#define DA_EMULATE_UA_INTLLCK_CTRL		0
/* Emulation for TASK_ABORTED status (TAS) by default */
#define DA_EMULATE_TAS				1
/* Emulation for Thin Provisioning UNMAP using block/blk-lib.c:blkdev_issue_discard() */
#define DA_EMULATE_TPU				0
/*
 * Emulation for Thin Provisioning WRITE_SAME w/ UNMAP=1 bit using
 * block/blk-lib.c:blkdev_issue_discard()
 */
#define DA_EMULATE_TPWS				0
/* Emulation for CompareAndWrite (AtomicTestandSet) by default */
#define DA_EMULATE_CAW				1
/* Emulation for 3rd Party Copy (ExtendedCopy) by default */
#define DA_EMULATE_3PC				1
/* No Emulation for PSCSI by default */
#define DA_EMULATE_ALUA				0
/* Enforce SCSI Initiator Port TransportID with 'ISID' for PR */
#define DA_ENFORCE_PR_ISIDS			1
/* Force SPC-3 PR Activate Persistence across Target Power Loss */
#define DA_FORCE_PR_APTPL			0
#define DA_STATUS_MAX_SECTORS_MIN		16
#define DA_STATUS_MAX_SECTORS_MAX		8192
/* By default don't report non-rotating (solid state) medium */
#define DA_IS_NONROT				0
/* Queue Algorithm Modifier default for restricted reordering in control mode page */
#define DA_EMULATE_REST_REORD			0

/* Scratch buffer sizes for building INQUIRY / MODE SENSE / sense payloads */
#define SE_INQUIRY_BUF				1024
#define SE_MODE_PAGE_BUF			512
#define SE_SENSE_BUF				96
| 105 | |
/* Flag bits stored in struct se_hba->hba_flags */
enum hba_flags_table {
	HBA_FLAGS_INTERNAL_USE	= 1 << 0,
	HBA_FLAGS_PSCSI_MODE	= 1 << 1,
};
| 111 | |
/*
 * Special transport agnostic struct se_cmd->t_states.
 *
 * NOTE(review): the numbering gaps look historical (states removed over
 * time); the remaining values appear externally visible via tracing, so
 * keep them stable -- confirm before renumbering.
 */
enum transport_state_table {
	TRANSPORT_NO_STATE = 0,
	TRANSPORT_NEW_CMD = 1,
	TRANSPORT_WRITE_PENDING = 3,
	TRANSPORT_PROCESSING = 5,
	TRANSPORT_COMPLETE = 6,
	TRANSPORT_ISTATE_PROCESSING = 11,
	/* Queue-full retry states: waiting for WRITE_PENDING / OK completion */
	TRANSPORT_COMPLETE_QF_WP = 18,
	TRANSPORT_COMPLETE_QF_OK = 19,
};
| 123 | |
/*
 * Used for struct se_cmd->se_cmd_flags.
 *
 * NOTE(review): gaps in the bit positions are historical (retired flags);
 * keep existing values stable.
 */
enum se_cmd_flags_table {
	SCF_SUPPORTED_SAM_OPCODE = 0x00000001,
	SCF_TRANSPORT_TASK_SENSE = 0x00000002,
	SCF_EMULATED_TASK_SENSE = 0x00000004,
	SCF_SCSI_DATA_CDB = 0x00000008,
	SCF_SCSI_TMR_CDB = 0x00000010,
	SCF_FUA = 0x00000080,
	SCF_SE_LUN_CMD = 0x00000100,
	SCF_BIDI = 0x00000400,
	SCF_SENT_CHECK_CONDITION = 0x00000800,
	/* Residual over/underflow for the Data-Out/Data-In length */
	SCF_OVERFLOW_BIT = 0x00001000,
	SCF_UNDERFLOW_BIT = 0x00002000,
	SCF_SEND_DELAYED_TAS = 0x00004000,
	SCF_ALUA_NON_OPTIMIZED = 0x00008000,
	SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000,
	SCF_COMPARE_AND_WRITE = 0x00080000,
	SCF_COMPARE_AND_WRITE_POST = 0x00100000,
	SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC = 0x00200000,
	SCF_ACK_KREF = 0x00400000,
	SCF_USE_CPUID = 0x00800000,
	SCF_TASK_ATTR_SET = 0x01000000,
};
| 147 | |
| 148 | /* |
| 149 | * Used by transport_send_check_condition_and_sense() |
| 150 | * to signal which ASC/ASCQ sense payload should be built. |
| 151 | */ |
| 152 | typedef unsigned __bitwise__ sense_reason_t; |
| 153 | |
| 154 | enum tcm_sense_reason_table { |
| 155 | #define R(x) (__force sense_reason_t )(x) |
| 156 | TCM_NO_SENSE = R(0x00), |
| 157 | TCM_NON_EXISTENT_LUN = R(0x01), |
| 158 | TCM_UNSUPPORTED_SCSI_OPCODE = R(0x02), |
| 159 | TCM_INCORRECT_AMOUNT_OF_DATA = R(0x03), |
| 160 | TCM_UNEXPECTED_UNSOLICITED_DATA = R(0x04), |
| 161 | TCM_SERVICE_CRC_ERROR = R(0x05), |
| 162 | TCM_SNACK_REJECTED = R(0x06), |
| 163 | TCM_SECTOR_COUNT_TOO_MANY = R(0x07), |
| 164 | TCM_INVALID_CDB_FIELD = R(0x08), |
| 165 | TCM_INVALID_PARAMETER_LIST = R(0x09), |
| 166 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE = R(0x0a), |
| 167 | TCM_UNKNOWN_MODE_PAGE = R(0x0b), |
| 168 | TCM_WRITE_PROTECTED = R(0x0c), |
| 169 | TCM_CHECK_CONDITION_ABORT_CMD = R(0x0d), |
| 170 | TCM_CHECK_CONDITION_UNIT_ATTENTION = R(0x0e), |
| 171 | TCM_CHECK_CONDITION_NOT_READY = R(0x0f), |
| 172 | TCM_RESERVATION_CONFLICT = R(0x10), |
| 173 | TCM_ADDRESS_OUT_OF_RANGE = R(0x11), |
| 174 | TCM_OUT_OF_RESOURCES = R(0x12), |
| 175 | TCM_PARAMETER_LIST_LENGTH_ERROR = R(0x13), |
| 176 | TCM_MISCOMPARE_VERIFY = R(0x14), |
| 177 | TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED = R(0x15), |
| 178 | TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED = R(0x16), |
| 179 | TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED = R(0x17), |
| 180 | #undef R |
| 181 | }; |
| 182 | |
/* Flags passed by fabric drivers when submitting a command to target core */
enum target_sc_flags_table {
	TARGET_SCF_BIDI_OP	= 1 << 0,
	TARGET_SCF_ACK_KREF	= 1 << 1,
	TARGET_SCF_UNKNOWN_SIZE	= 1 << 2,
	TARGET_SCF_USE_CPUID	= 1 << 3,
};
| 189 | |
/* fabric independent task management function values */
enum tcm_tmreq_table {
	TMR_ABORT_TASK		= 1,
	TMR_ABORT_TASK_SET	= 2,
	TMR_CLEAR_ACA		= 3,
	TMR_CLEAR_TASK_SET	= 4,
	TMR_LUN_RESET		= 5,
	TMR_TARGET_WARM_RESET	= 6,
	TMR_TARGET_COLD_RESET	= 7,
};
| 200 | |
/* fabric independent task management response values */
enum tcm_tmrsp_table {
	TMR_FUNCTION_FAILED		= 0,
	TMR_FUNCTION_COMPLETE		= 1,
	TMR_TASK_DOES_NOT_EXIST		= 2,
	TMR_LUN_DOES_NOT_EXIST		= 3,
	TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED	= 4,
	TMR_FUNCTION_REJECTED		= 5,
};
| 210 | |
| 211 | /* |
| 212 | * Used for target SCSI statistics |
| 213 | */ |
| 214 | typedef enum { |
| 215 | SCSI_INST_INDEX, |
| 216 | SCSI_DEVICE_INDEX, |
| 217 | SCSI_AUTH_INTR_INDEX, |
| 218 | SCSI_INDEX_TYPE_MAX |
| 219 | } scsi_index_t; |
| 220 | |
struct se_cmd;

/* One target port group entry within a referrals LBA map segment */
struct t10_alua_lba_map_member {
	struct list_head lba_map_mem_list;
	int lba_map_mem_alua_state;	/* ALUA access state for this pg */
	int lba_map_mem_alua_pg_id;	/* target port group ID */
};
| 228 | |
/* One LBA range of an ALUA referrals map (SPC-4 REPORT REFERRALS) */
struct t10_alua_lba_map {
	u64 lba_map_first_lba;		/* inclusive start LBA */
	u64 lba_map_last_lba;		/* inclusive end LBA */
	struct list_head lba_map_list;	/* linkage on t10_alua->lba_map_list */
	struct list_head lba_map_mem_list; /* list of t10_alua_lba_map_member */
};
| 235 | |
/* Per-device ALUA (Asymmetric Logical Unit Access) state */
struct t10_alua {
	/* ALUA Target Port Group ID */
	u16	alua_tg_pt_gps_counter;
	u32	alua_tg_pt_gps_count;
	/* Referrals support */
	spinlock_t lba_map_lock;
	u32     lba_map_segment_size;
	u32     lba_map_segment_multiplier;
	struct list_head lba_map_list;
	spinlock_t tg_pt_gps_lock;
	struct se_device *t10_dev;
	/* Used for default ALUA Target Port Group */
	struct t10_alua_tg_pt_gp *default_tg_pt_gp;
	/* Used for default ALUA Target Port Group ConfigFS group */
	struct config_group alua_tg_pt_gps_group;
	struct list_head tg_pt_gps_list;
};
| 253 | |
/* ALUA Logical Unit Group */
struct t10_alua_lu_gp {
	u16	lu_gp_id;
	int	lu_gp_valid_id;		/* non-zero once lu_gp_id assigned */
	u32	lu_gp_members;		/* number of attached members */
	atomic_t lu_gp_ref_cnt;
	spinlock_t lu_gp_lock;
	struct config_group lu_gp_group;
	struct list_head lu_gp_node;	/* linkage on global LU group list */
	struct list_head lu_gp_mem_list;
};
| 264 | |
/* Membership of one se_device in an ALUA Logical Unit Group */
struct t10_alua_lu_gp_member {
	bool lu_gp_assoc;		/* true while linked into a LU group */
	atomic_t lu_gp_mem_ref_cnt;
	spinlock_t lu_gp_mem_lock;
	struct t10_alua_lu_gp *lu_gp;
	struct se_device *lu_gp_mem_dev;
	struct list_head lu_gp_mem_list;
};
| 273 | |
/* ALUA Target Port Group: access state shared by a set of target ports */
struct t10_alua_tg_pt_gp {
	u16	tg_pt_gp_id;
	int	tg_pt_gp_valid_id;	/* non-zero once tg_pt_gp_id assigned */
	int	tg_pt_gp_alua_supported_states;
	int	tg_pt_gp_alua_pending_state;
	int	tg_pt_gp_alua_previous_state;
	int	tg_pt_gp_alua_access_status;
	int	tg_pt_gp_alua_access_type;
	int	tg_pt_gp_nonop_delay_msecs;
	int	tg_pt_gp_trans_delay_msecs;
	int	tg_pt_gp_implicit_trans_secs;
	int	tg_pt_gp_pref;		/* PREF bit for this group */
	int	tg_pt_gp_write_metadata;
	u32	tg_pt_gp_members;	/* number of attached target ports */
	atomic_t tg_pt_gp_alua_access_state;
	atomic_t tg_pt_gp_ref_cnt;
	spinlock_t tg_pt_gp_lock;
	struct mutex tg_pt_gp_md_mutex;	/* serializes ALUA metadata writes */
	struct se_device *tg_pt_gp_dev;
	struct config_group tg_pt_gp_group;
	struct list_head tg_pt_gp_list;
	struct list_head tg_pt_gp_lun_list;
	struct se_lun *tg_pt_gp_alua_lun;
	struct se_node_acl *tg_pt_gp_alua_nacl;
	struct delayed_work tg_pt_gp_transition_work;
	struct completion *tg_pt_gp_transition_complete;
};
| 301 | |
/* One parsed INQUIRY VPD 0x83 (Device Identification) descriptor */
struct t10_vpd {
	unsigned char device_identifier[INQUIRY_VPD_DEVICE_IDENTIFIER_LEN];
	int protocol_identifier_set;	/* non-zero if PIV bit was set */
	u32 protocol_identifier;
	u32 device_identifier_code_set;
	u32 association;
	u32 device_identifier_type;
	struct list_head vpd_list;	/* linkage on t10_wwn->t10_vpd_list */
};
| 311 | |
/* T10 Standard INQUIRY and VPD WWN information for a device */
struct t10_wwn {
	char vendor[8];		/* INQUIRY VENDOR IDENTIFICATION */
	char model[16];		/* INQUIRY PRODUCT IDENTIFICATION */
	char revision[4];	/* INQUIRY PRODUCT REVISION LEVEL */
	char unit_serial[INQUIRY_VPD_SERIAL_LEN];
	spinlock_t t10_vpd_lock;	/* protects t10_vpd_list */
	struct se_device *t10_dev;
	struct config_group t10_wwn_group;
	struct list_head t10_vpd_list;	/* list of struct t10_vpd */
};
| 322 | |
/* One SPC-3 Persistent Reservation registration for an I_T nexus */
struct t10_pr_registration {
	/* Used for fabrics that contain WWN+ISID */
#define PR_REG_ISID_LEN				16
	/* PR_REG_ISID_LEN + ',i,0x' */
#define PR_REG_ISID_ID_LEN			(PR_REG_ISID_LEN + 5)
	char pr_reg_isid[PR_REG_ISID_LEN];
	/* Used during APTPL metadata reading */
#define PR_APTPL_MAX_IPORT_LEN			256
	unsigned char pr_iport[PR_APTPL_MAX_IPORT_LEN];
	/* Used during APTPL metadata reading */
#define PR_APTPL_MAX_TPORT_LEN			256
	unsigned char pr_tport[PR_APTPL_MAX_TPORT_LEN];
	u16 pr_aptpl_rpti;	/* relative target port id from APTPL metadata */
	u16 pr_reg_tpgt;
	/* Reservation effects all target ports */
	int pr_reg_all_tg_pt;
	/* Activate Persistence across Target Power Loss */
	int pr_reg_aptpl;
	int pr_res_holder;	/* non-zero if this registration holds the reservation */
	int pr_res_type;
	int pr_res_scope;
	/* Used for fabric initiator WWPNs using a ISID */
	bool isid_present_at_reg;
	u64 pr_res_mapped_lun;
	u64 pr_aptpl_target_lun;
	u16 tg_pt_sep_rtpi;
	u32 pr_res_generation;
	u64 pr_reg_bin_isid;
	u64 pr_res_key;		/* PR reservation key for this registration */
	atomic_t pr_res_holders;
	struct se_node_acl *pr_reg_nacl;
	/* Used by ALL_TG_PT=1 registration with deve->pr_ref taken */
	struct se_dev_entry *pr_reg_deve;
	struct list_head pr_reg_list;
	struct list_head pr_reg_abort_list;
	struct list_head pr_reg_aptpl_list;
	struct list_head pr_reg_atp_list;
	struct list_head pr_reg_atp_mem_list;
};
| 362 | |
/* Per-device SPC-3 Persistent Reservation state */
struct t10_reservation {
	/* Reservation effects all target ports */
	int pr_all_tg_pt;
	/* Activate Persistence across Target Power Loss enabled
	 * for SCSI device */
	int pr_aptpl_active;
#define PR_APTPL_BUF_LEN			262144
	u32 pr_generation;
	spinlock_t registration_lock;	/* protects registration_list */
	spinlock_t aptpl_reg_lock;	/* protects aptpl_reg_list */
	/*
	 * This will always be set by one individual I_T Nexus.
	 * However with all_tg_pt=1, other I_T Nexus from the
	 * same initiator can access PR reg/res info on a different
	 * target port.
	 *
	 * There is also the 'All Registrants' case, where there is
	 * a single *pr_res_holder of the reservation, but all
	 * registrations are considered reservation holders.
	 */
	struct se_node_acl *pr_res_holder;
	struct list_head registration_list;
	struct list_head aptpl_reg_list;
};
| 387 | |
/* Task Management Request state attached to a struct se_cmd */
struct se_tmr_req {
	/* Task Management function to be performed */
	u8			function;	/* see enum tcm_tmreq_table */
	/* Task Management response to send */
	u8			response;	/* see enum tcm_tmrsp_table */
	int			call_transport;
	/* Reference to ITT that Task Mgmt should be performed */
	u64			ref_task_tag;
	void 			*fabric_tmr_ptr;	/* fabric-private data */
	struct se_cmd		*task_cmd;
	struct se_device	*tmr_dev;
	struct se_lun		*tmr_lun;
	struct list_head	tmr_list;	/* linkage on se_device->dev_tmr_list */
};
| 402 | |
/*
 * T10 Protection Information (DIF) operations target core may perform
 * on a command's data; TARGET_PROT_NORMAL means no protection handling.
 */
enum target_prot_op {
	TARGET_PROT_NORMAL	= 0,
	TARGET_PROT_DIN_INSERT	= (1 << 0),	/* insert PI on Data-In */
	TARGET_PROT_DOUT_INSERT	= (1 << 1),	/* insert PI on Data-Out */
	TARGET_PROT_DIN_STRIP	= (1 << 2),	/* strip PI on Data-In */
	TARGET_PROT_DOUT_STRIP	= (1 << 3),	/* strip PI on Data-Out */
	TARGET_PROT_DIN_PASS	= (1 << 4),	/* pass PI through on Data-In */
	TARGET_PROT_DOUT_PASS	= (1 << 5),	/* pass PI through on Data-Out */
};

/*
 * Mask of all protection operations.  The expansion must be fully
 * parenthesized: '&' binds tighter than '|', so an unparenthesized
 * "x & TARGET_PROT_ALL" would group as "(x & DIN_INSERT) | ..." and
 * yield the wrong result.
 */
#define TARGET_PROT_ALL	(TARGET_PROT_DIN_INSERT | TARGET_PROT_DOUT_INSERT | \
			 TARGET_PROT_DIN_STRIP | TARGET_PROT_DOUT_STRIP | \
			 TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)
| 416 | |
/* T10 DIF protection types (TYPE0 == protection disabled) */
enum target_prot_type {
	TARGET_DIF_TYPE0_PROT,
	TARGET_DIF_TYPE1_PROT,
	TARGET_DIF_TYPE2_PROT,
	TARGET_DIF_TYPE3_PROT,
};
| 423 | |
/* Which fields of the T10 PI tuple to verify (se_cmd->prot_checks) */
enum target_core_dif_check {
	TARGET_DIF_CHECK_GUARD	= 0x1,
	TARGET_DIF_CHECK_APPTAG	= 0x2,
	TARGET_DIF_CHECK_REFTAG	= 0x4,
};
| 429 | |
/* for sam_task_attr -- SAM task attribute codes (match SPC message codes) */
#define TCM_SIMPLE_TAG	0x20
#define TCM_HEAD_TAG	0x21	/* HEAD OF QUEUE */
#define TCM_ORDERED_TAG	0x22
#define TCM_ACA_TAG	0x24
| 435 | |
/*
 * Core descriptor for one SCSI command (or TMR) flowing through target
 * core: SAM status/sense state, fabric and device linkage, data and
 * protection scatterlists, and transport state-machine flags.
 */
struct se_cmd {
	/* SAM response code being sent to initiator */
	u8			scsi_status;
	u8			scsi_asc;
	u8			scsi_ascq;
	u16			scsi_sense_length;
	unsigned		cmd_wait_set:1;
	unsigned		unknown_data_length:1;
	bool			state_active:1;
	u64			tag; /* SAM command identifier aka task tag */
	/* Delay for ALUA Active/NonOptimized state access in milliseconds */
	int			alua_nonop_delay;
	/* See include/linux/dma-mapping.h */
	enum dma_data_direction	data_direction;
	/* For SAM Task Attribute */
	int			sam_task_attr;
	/* Used for se_sess->sess_tag_pool */
	unsigned int		map_tag;
	/* Transport protocol dependent state, see transport_state_table */
	enum transport_state_table t_state;
	/* See se_cmd_flags_table */
	u32			se_cmd_flags;
	/* Total size in bytes associated with command */
	u32			data_length;
	u32			residual_count;
	u64			orig_fe_lun;
	/* Persistent Reservation key */
	u64			pr_res_key;
	/* Used for sense data */
	void			*sense_buffer;
	struct list_head	se_delayed_node;
	struct list_head	se_qf_node;
	struct se_device	*se_dev;
	struct se_lun		*se_lun;
	/* Only used for internal passthrough and legacy TCM fabric modules */
	struct se_session	*se_sess;
	struct se_tmr_req	*se_tmr_req;
	struct list_head	se_cmd_list;
	struct completion	cmd_wait_comp;
	const struct target_core_fabric_ops *se_tfo;
	sense_reason_t		(*execute_cmd)(struct se_cmd *);
	sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool, int *);
	void			*protocol_data;

	unsigned char		*t_task_cdb;
	/* Inline CDB storage; t_task_cdb points here when the CDB fits */
	unsigned char		__t_task_cdb[TCM_MAX_COMMAND_SIZE];
	unsigned long long	t_task_lba;
	unsigned int		t_task_nolb;
	/* Atomic-style state bits, protected by t_state_lock */
	unsigned int		transport_state;
#define CMD_T_ABORTED		(1 << 0)
#define CMD_T_ACTIVE		(1 << 1)
#define CMD_T_COMPLETE		(1 << 2)
#define CMD_T_SENT		(1 << 4)
#define CMD_T_STOP		(1 << 5)
#define CMD_T_DEV_ACTIVE	(1 << 7)
#define CMD_T_BUSY		(1 << 9)
#define CMD_T_TAS		(1 << 10)
#define CMD_T_FABRIC_STOP	(1 << 11)
	spinlock_t		t_state_lock;
	struct kref		cmd_kref;
	struct completion	t_transport_stop_comp;

	struct work_struct	work;

	/* Data-phase scatterlists (and originals saved around COMPARE_AND_WRITE) */
	struct scatterlist	*t_data_sg;
	struct scatterlist	*t_data_sg_orig;
	unsigned int		t_data_nents;
	unsigned int		t_data_nents_orig;
	void			*t_data_vmap;
	struct scatterlist	*t_bidi_data_sg;
	unsigned int		t_bidi_data_nents;

	/* Used for lun->lun_ref counting */
	int			lun_ref_active;

	struct list_head	state_list;

	/* backend private data */
	void			*priv;

	/* DIF related members */
	enum target_prot_op	prot_op;
	enum target_prot_type	prot_type;
	u8			prot_checks;	/* see target_core_dif_check */
	bool			prot_pto;
	u32			prot_length;
	u32			reftag_seed;
	struct scatterlist	*t_prot_sg;
	unsigned int		t_prot_nents;
	sense_reason_t		pi_err;
	sector_t		bad_sector;	/* first LBA failing a PI check */
	int			cpuid;
};
| 529 | |
/* One pending UNIT ATTENTION condition (ASC/ASCQ pair) */
struct se_ua {
	u8			ua_asc;
	u8			ua_ascq;
	struct list_head	ua_nacl_list;
};
| 535 | |
/* Per-initiator ACL within a target portal group */
struct se_node_acl {
	char			initiatorname[TRANSPORT_IQN_LEN];
	/* Used to signal demo mode created ACL, disabled by default */
	bool			dynamic_node_acl;
	u32			queue_depth;
	u32			acl_index;
	enum target_prot_type	saved_prot_type;
#define MAX_ACL_TAG_SIZE 64
	char			acl_tag[MAX_ACL_TAG_SIZE];
	/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
	atomic_t		acl_pr_ref_count;
	struct hlist_head	lun_entry_hlist;	/* mapped LUN entries */
	struct se_session	*nacl_sess;
	struct se_portal_group *se_tpg;
	struct mutex		lun_entry_mutex;	/* protects lun_entry_hlist */
	spinlock_t		nacl_sess_lock;
	struct config_group	acl_group;
	struct config_group	acl_attrib_group;
	struct config_group	acl_auth_group;
	struct config_group	acl_param_group;
	struct config_group	acl_fabric_stat_group;
	struct list_head	acl_list;
	struct list_head	acl_sess_list;
	struct completion	acl_free_comp;
	struct kref		acl_kref;
};
| 562 | |
| 563 | static inline struct se_node_acl *acl_to_nacl(struct config_item *item) |
| 564 | { |
| 565 | return container_of(to_config_group(item), struct se_node_acl, |
| 566 | acl_group); |
| 567 | } |
| 568 | |
| 569 | static inline struct se_node_acl *attrib_to_nacl(struct config_item *item) |
| 570 | { |
| 571 | return container_of(to_config_group(item), struct se_node_acl, |
| 572 | acl_attrib_group); |
| 573 | } |
| 574 | |
| 575 | static inline struct se_node_acl *auth_to_nacl(struct config_item *item) |
| 576 | { |
| 577 | return container_of(to_config_group(item), struct se_node_acl, |
| 578 | acl_auth_group); |
| 579 | } |
| 580 | |
| 581 | static inline struct se_node_acl *param_to_nacl(struct config_item *item) |
| 582 | { |
| 583 | return container_of(to_config_group(item), struct se_node_acl, |
| 584 | acl_param_group); |
| 585 | } |
| 586 | |
| 587 | static inline struct se_node_acl *fabric_stat_to_nacl(struct config_item *item) |
| 588 | { |
| 589 | return container_of(to_config_group(item), struct se_node_acl, |
| 590 | acl_fabric_stat_group); |
| 591 | } |
| 592 | |
/* One fabric login (I_T nexus) to a target portal group */
struct se_session {
	unsigned		sess_tearing_down:1;
	u64			sess_bin_isid;
	enum target_prot_op	sup_prot_ops;	/* PI ops the fabric supports */
	enum target_prot_type	sess_prot_type;
	struct se_node_acl	*se_node_acl;
	struct se_portal_group *se_tpg;
	void			*fabric_sess_ptr;	/* fabric-private data */
	struct list_head	sess_list;
	struct list_head	sess_acl_list;
	struct list_head	sess_cmd_list;	/* outstanding se_cmds, under sess_cmd_lock */
	struct list_head	sess_wait_list;
	spinlock_t		sess_cmd_lock;
	void			*sess_cmd_map;	/* pre-allocated se_cmd storage */
	struct percpu_ida	sess_tag_pool;	/* allocates map_tag indices */
};
| 609 | |
struct se_device;
struct se_transform_info;
struct scatterlist;

/* configfs statistics groups for a mapped LUN (se_lun_acl) */
struct se_ml_stat_grps {
	struct config_group	stat_group;
	struct config_group	scsi_auth_intr_group;
	struct config_group	scsi_att_intr_port_group;
};
| 619 | |
/* Per-initiator mapped LUN ACL */
struct se_lun_acl {
	u64			mapped_lun;	/* LUN as seen by the initiator */
	struct se_node_acl	*se_lun_nacl;
	struct se_lun		*se_lun;
	struct config_group	se_lun_group;
	struct se_ml_stat_grps	ml_stat_grps;
};
| 627 | |
/* RCU-managed per-ACL device entry: one initiator's view of one LUN */
struct se_dev_entry {
	u64			mapped_lun;
	u64			pr_res_key;
	u64			creation_time;
	bool			lun_access_ro;	/* true for read-only mapping */
	u32			attach_count;
	atomic_long_t		total_cmds;
	atomic_long_t		read_bytes;
	atomic_long_t		write_bytes;
	atomic_t		ua_count;	/* pending UNIT ATTENTIONs */
	/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
	struct kref		pr_kref;
	struct completion	pr_comp;
	struct se_lun_acl __rcu	*se_lun_acl;
	spinlock_t		ua_lock;	/* protects ua_list */
	struct se_lun __rcu	*se_lun;
#define DEF_PR_REG_ACTIVE		1
	unsigned long		deve_flags;
	struct list_head	alua_port_list;
	struct list_head	lun_link;
	struct list_head	ua_list;	/* list of struct se_ua */
	struct hlist_node	link;		/* on nacl->lun_entry_hlist */
	struct rcu_head		rcu_head;
};
| 652 | |
/* Per-device tunable attributes, exposed via configfs (see DA_* defaults) */
struct se_dev_attrib {
	int		emulate_model_alias;
	int		emulate_dpo;
	int		emulate_fua_write;
	int		emulate_fua_read;
	int		emulate_write_cache;
	int		emulate_ua_intlck_ctrl;
	int		emulate_tas;
	int		emulate_tpu;
	int		emulate_tpws;
	int		emulate_caw;
	int		emulate_3pc;
	int		pi_prot_format;
	enum target_prot_type pi_prot_type;	/* configured DIF type */
	enum target_prot_type hw_pi_prot_type;	/* DIF type backend hw supports */
	int		enforce_pr_isids;
	int		force_pr_aptpl;
	int		is_nonrot;	/* non-rotational (SSD) medium */
	int		emulate_rest_reord;
	int		unmap_zeroes_data;
	u32		hw_block_size;
	u32		block_size;
	u32		hw_max_sectors;
	u32		optimal_sectors;
	u32		hw_queue_depth;
	u32		queue_depth;
	u32		max_unmap_lba_count;
	u32		max_unmap_block_desc_count;
	u32		unmap_granularity;
	u32		unmap_granularity_alignment;
	u32		max_write_same_len;
	u32		max_bytes_per_io;
	struct se_device *da_dev;
	struct config_group da_group;
};
| 688 | |
/* configfs statistics groups for a target port (se_lun) */
struct se_port_stat_grps {
	struct config_group stat_group;
	struct config_group scsi_port_group;
	struct config_group scsi_tgt_port_group;
	struct config_group scsi_transport_group;
};
| 695 | |
/* Per-port traffic counters */
struct scsi_port_stats {
	atomic_long_t	cmd_pdus;	/* commands received */
	atomic_long_t	tx_data_octets;	/* bytes sent to initiator */
	atomic_long_t	rx_data_octets;	/* bytes received from initiator */
};
| 701 | |
/* One exported LUN within a target portal group */
struct se_lun {
	u64			unpacked_lun;
#define SE_LUN_LINK_MAGIC			0xffff7771
	u32			lun_link_magic;	/* sanity-check cookie */
	bool			lun_access_ro;
	u32			lun_index;

	/* RELATIVE TARGET PORT IDENTIFIER */
	u16			lun_rtpi;
	atomic_t		lun_acl_count;
	struct se_device __rcu	*lun_se_dev;

	struct list_head	lun_deve_list;	/* attached se_dev_entry's */
	spinlock_t		lun_deve_lock;

	/* ALUA state */
	int			lun_tg_pt_secondary_stat;
	int			lun_tg_pt_secondary_write_md;
	atomic_t		lun_tg_pt_secondary_offline;
	struct mutex		lun_tg_pt_md_mutex;

	/* ALUA target port group linkage */
	struct list_head	lun_tg_pt_gp_link;
	struct t10_alua_tg_pt_gp *lun_tg_pt_gp;
	spinlock_t		lun_tg_pt_gp_lock;

	struct se_portal_group	*lun_tpg;
	struct scsi_port_stats	lun_stats;
	struct config_group	lun_group;
	struct se_port_stat_grps port_stat_grps;
	struct completion	lun_ref_comp;	/* signalled when lun_ref drains */
	struct percpu_ref	lun_ref;
	struct list_head	lun_dev_link;
	struct hlist_node	link;
	struct rcu_head		rcu_head;
};
| 738 | |
/* configfs statistics groups for a backend device (se_device) */
struct se_dev_stat_grps {
	struct config_group stat_group;
	struct config_group scsi_dev_group;
	struct config_group scsi_tgt_dev_group;
	struct config_group scsi_lu_group;
};
| 745 | |
/* One backend device (IBLOCK, FILEIO, PSCSI, ...) exported by target core */
struct se_device {
#define SE_DEV_LINK_MAGIC			0xfeeddeef
	u32			dev_link_magic;	/* sanity-check cookie */
	/* RELATIVE TARGET PORT IDENTIFIER Counter */
	u16			dev_rpti_counter;
	/* Used for SAM Task Attribute ordering */
	u32			dev_cur_ordered_id;
	u32			dev_flags;
#define DF_CONFIGURED				0x00000001
#define DF_FIRMWARE_VPD_UNIT_SERIAL		0x00000002
#define DF_EMULATED_VPD_UNIT_SERIAL		0x00000004
#define DF_USING_UDEV_PATH			0x00000008
#define DF_USING_ALIAS				0x00000010
#define DF_READ_ONLY				0x00000020
	/* Physical device queue depth */
	u32			queue_depth;
	/* Used for SPC-2 reservations enforce of ISIDs */
	u64			dev_res_bin_isid;
	/* Pointer to transport specific device structure */
	u32			dev_index;
	u64			creation_time;
	atomic_long_t		num_resets;
	atomic_long_t		num_cmds;
	atomic_long_t		read_bytes;
	atomic_long_t		write_bytes;
	/* Active commands on this virtual SE device */
	atomic_t		simple_cmds;
	atomic_t		dev_ordered_sync;
	atomic_t		dev_qf_count;	/* queue-full retries pending */
	u32			export_count;
	spinlock_t		delayed_cmd_lock;
	spinlock_t		execute_task_lock;
	spinlock_t		dev_reservation_lock;
	unsigned int		dev_reservation_flags;
#define DRF_SPC2_RESERVATIONS			0x00000001
#define DRF_SPC2_RESERVATIONS_WITH_ISID		0x00000002
	spinlock_t		se_port_lock;
	spinlock_t		se_tmr_lock;
	spinlock_t		qf_cmd_lock;
	struct semaphore	caw_sem;	/* serializes COMPARE_AND_WRITE */
	/* Used for legacy SPC-2 reservations */
	struct se_node_acl	*dev_reserved_node_acl;
	/* Used for ALUA Logical Unit Group membership */
	struct t10_alua_lu_gp_member *dev_alua_lu_gp_mem;
	/* Used for SPC-3 Persistent Reservations */
	struct t10_pr_registration *dev_pr_res_holder;
	struct list_head	dev_sep_list;
	struct list_head	dev_tmr_list;
	struct workqueue_struct *tmr_wq;
	struct work_struct	qf_work_queue;
	struct list_head	delayed_cmd_list;
	struct list_head	state_list;
	struct list_head	qf_cmd_list;
	struct list_head	g_dev_node;
	/* Pointer to associated SE HBA */
	struct se_hba		*se_hba;
	/* T10 Inquiry and VPD WWN Information */
	struct t10_wwn		t10_wwn;
	/* T10 Asymmetric Logical Unit Assignment for Target Ports */
	struct t10_alua		t10_alua;
	/* T10 SPC-2 + SPC-3 Reservations */
	struct t10_reservation	t10_pr;
	struct se_dev_attrib	dev_attrib;
	struct config_group	dev_group;
	struct config_group	dev_pr_group;
	struct se_dev_stat_grps dev_stat_grps;
#define SE_DEV_ALIAS_LEN		512	/* must be less than PAGE_SIZE */
	unsigned char		dev_alias[SE_DEV_ALIAS_LEN];
#define SE_UDEV_PATH_LEN		512	/* must be less than PAGE_SIZE */
	unsigned char		udev_path[SE_UDEV_PATH_LEN];
	/* Pointer to template of function pointers for transport */
	const struct target_backend_ops *transport;
	/* Linked list for struct se_hba struct se_device list */
	struct list_head	dev_list;
	struct se_lun		xcopy_lun;	/* internal LUN for EXTENDED COPY */
	/* Protection Information */
	int			prot_length;	/* PI bytes per logical block */
	/* For se_lun->lun_se_dev RCU read-side critical access */
	u32			hba_index;
	struct rcu_head		rcu_head;
};
| 827 | |
/* One backend HBA instance, parent container for se_devices */
struct se_hba {
	u16			hba_tpgt;
	u32			hba_id;
	/* See hba_flags_table */
	u32			hba_flags;
	/* Virtual iSCSI devices attached. */
	u32			dev_count;
	u32			hba_index;
	/* Pointer to transport specific host structure. */
	void			*hba_ptr;
	struct list_head	hba_node;	/* on the global HBA list */
	spinlock_t		device_lock;
	struct config_group	hba_group;
	struct mutex		hba_access_mutex;
	struct target_backend	*backend;
};
| 844 | |
/* configfs representation of a TPG network portal */
struct se_tpg_np {
	struct se_portal_group *tpg_np_parent;
	struct config_group	tpg_np_group;
};
| 849 | |
| 850 | static inline struct se_tpg_np *to_tpg_np(struct config_item *item) |
| 851 | { |
| 852 | return container_of(to_config_group(item), struct se_tpg_np, |
| 853 | tpg_np_group); |
| 854 | } |
| 855 | |
/* One target portal group (TPG) of a fabric WWN endpoint */
struct se_portal_group {
	/*
	 * PROTOCOL IDENTIFIER value per SPC4, 7.5.1.
	 *
	 * Negative values can be used by fabric drivers for internal use TPGs.
	 */
	int			proto_id;
	/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
	atomic_t		tpg_pr_ref_count;
	/* Mutex for adding/removing ACLed Nodes */
	struct mutex		acl_node_mutex;
	/* Spinlock for adding/removing sessions */
	spinlock_t		session_lock;
	struct mutex		tpg_lun_mutex;	/* protects tpg_lun_hlist */
	struct list_head	se_tpg_node;
	/* linked list for initiator ACL list */
	struct list_head	acl_node_list;
	struct hlist_head	tpg_lun_hlist;
	struct se_lun		*tpg_virt_lun0;	/* built-in virtual LUN 0 */
	/* List of TCM sessions associated with this TPG */
	struct list_head	tpg_sess_list;
	/* Pointer to $FABRIC_MOD dependent code */
	const struct target_core_fabric_ops *se_tpg_tfo;
	struct se_wwn		*se_tpg_wwn;
	struct config_group	tpg_group;
	struct config_group	tpg_lun_group;
	struct config_group	tpg_np_group;
	struct config_group	tpg_acl_group;
	struct config_group	tpg_attrib_group;
	struct config_group	tpg_auth_group;
	struct config_group	tpg_param_group;
};
| 888 | |
| 889 | static inline struct se_portal_group *to_tpg(struct config_item *item) |
| 890 | { |
| 891 | return container_of(to_config_group(item), struct se_portal_group, |
| 892 | tpg_group); |
| 893 | } |
| 894 | |
| 895 | static inline struct se_portal_group *attrib_to_tpg(struct config_item *item) |
| 896 | { |
| 897 | return container_of(to_config_group(item), struct se_portal_group, |
| 898 | tpg_attrib_group); |
| 899 | } |
| 900 | |
| 901 | static inline struct se_portal_group *auth_to_tpg(struct config_item *item) |
| 902 | { |
| 903 | return container_of(to_config_group(item), struct se_portal_group, |
| 904 | tpg_auth_group); |
| 905 | } |
| 906 | |
| 907 | static inline struct se_portal_group *param_to_tpg(struct config_item *item) |
| 908 | { |
| 909 | return container_of(to_config_group(item), struct se_portal_group, |
| 910 | tpg_param_group); |
| 911 | } |
| 912 | |
/* Fabric WWN endpoint, parent of its portal groups in configfs */
struct se_wwn {
	struct target_fabric_configfs *wwn_tf;
	struct config_group	wwn_group;
	struct config_group	fabric_stat_group;
};
| 918 | |
/*
 * atomic_inc() bracketed by full memory barriers, so the increment is
 * ordered against surrounding loads and stores.  The barrier/increment
 * ordering is intentional -- do not reorder these statements.
 */
static inline void atomic_inc_mb(atomic_t *v)
{
	smp_mb__before_atomic();
	atomic_inc(v);
	smp_mb__after_atomic();
}
| 925 | |
/*
 * atomic_dec() bracketed by full memory barriers, so the decrement is
 * ordered against surrounding loads and stores.  The barrier/decrement
 * ordering is intentional -- do not reorder these statements.
 */
static inline void atomic_dec_mb(atomic_t *v)
{
	smp_mb__before_atomic();
	atomic_dec(v);
	smp_mb__after_atomic();
}
| 932 | |
| 933 | #endif /* TARGET_CORE_BASE_H */ |