1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
2c4795fb2SChristoph Hellwig #ifndef TARGET_CORE_FABRIC_H
3c4795fb2SChristoph Hellwig #define TARGET_CORE_FABRIC_H
4c4795fb2SChristoph Hellwig
58dcf07beSBart Van Assche #include <linux/configfs.h>
68dcf07beSBart Van Assche #include <linux/types.h>
78dcf07beSBart Van Assche #include <target/target_core_base.h>
88dcf07beSBart Van Assche
/*
 * Ops table a fabric driver (iSCSI, FC, vhost-scsi, loopback, ...) fills in
 * and hands to target-core via target_register_template().  target-core calls
 * back through these pointers for ConfigFS layout, session management, and
 * per-command I/O completion.  Callbacks not documented as optional are
 * expected to be non-NULL at registration time.
 */
struct target_core_fabric_ops {
	/* Owning module; pinned while the template is registered */
	struct module *module;
	/*
	 * XXX: Special case for iscsi/iSCSI...
	 * If non-null, fabric_alias is used for matching target/$fabric
	 * ConfigFS paths. If null, fabric_name is used for this (see below).
	 */
	const char *fabric_alias;
	/*
	 * fabric_name is used for matching target/$fabric ConfigFS paths
	 * without a fabric_alias (see above). It's also used for the ALUA state
	 * path and is stored on disk with PR state.
	 */
	const char *fabric_name;
	/* Extra bytes allocated past struct se_node_acl for fabric use */
	size_t node_acl_size;
	/*
	 * Limits number of scatterlist entries per SCF_SCSI_DATA_CDB payload.
	 * Setting this value tells target-core to enforce this limit, and
	 * report as INQUIRY EVPD=b0 MAXIMUM TRANSFER LENGTH.
	 *
	 * target-core will currently reset se_cmd->data_length to this
	 * maximum size, and set UNDERFLOW residual count if length exceeds
	 * this limit.
	 *
	 * XXX: Not all initiator hosts honor this block-limit EVPD
	 * XXX: Currently assumes single PAGE_SIZE per scatterlist entry
	 */
	u32 max_data_sg_nents;
	/* Per-TPG attribute accessors and policy checks */
	char *(*tpg_get_wwn)(struct se_portal_group *);
	u16 (*tpg_get_tag)(struct se_portal_group *);
	u32 (*tpg_get_default_depth)(struct se_portal_group *);
	int (*tpg_check_demo_mode)(struct se_portal_group *);
	int (*tpg_check_demo_mode_cache)(struct se_portal_group *);
	int (*tpg_check_demo_mode_write_protect)(struct se_portal_group *);
	int (*tpg_check_prod_mode_write_protect)(struct se_portal_group *);
	/*
	 * Optionally used by fabrics to allow demo-mode login, but not
	 * expose any TPG LUNs, and return 'not connected' in standard
	 * inquiry response
	 */
	int (*tpg_check_demo_mode_login_only)(struct se_portal_group *);
	/*
	 * Optionally used as a configfs tunable to determine when
	 * target-core should signal the PROTECT=1 feature bit for
	 * backends that don't support T10-PI, so that either fabric
	 * HW offload or target-core emulation performs the associated
	 * WRITE_STRIP and READ_INSERT operations.
	 */
	int (*tpg_check_prot_fabric_only)(struct se_portal_group *);
	u32 (*tpg_get_inst_index)(struct se_portal_group *);
	/*
	 * Optional to release struct se_cmd and fabric dependent allocated
	 * I/O descriptor after command execution has finished.
	 *
	 * Returning 1 will signal a descriptor has been released.
	 * Returning 0 will signal a descriptor has not been released.
	 */
	int (*check_stop_free)(struct se_cmd *);
	void (*release_cmd)(struct se_cmd *);
	void (*close_session)(struct se_session *);
	u32 (*sess_get_index)(struct se_session *);
	/*
	 * Used only for SCSI fabrics that contain multi-value TransportIDs
	 * (like iSCSI). All other SCSI fabrics should set this to NULL.
	 */
	u32 (*sess_get_initiator_sid)(struct se_session *,
				      unsigned char *, u32);
	/* Per-command data-transfer and status-delivery callbacks */
	int (*write_pending)(struct se_cmd *);
	void (*set_default_node_attributes)(struct se_node_acl *);
	int (*get_cmd_state)(struct se_cmd *);
	int (*queue_data_in)(struct se_cmd *);
	int (*queue_status)(struct se_cmd *);
	void (*queue_tm_rsp)(struct se_cmd *);
	void (*aborted_task)(struct se_cmd *);
	/*
	 * fabric module calls for target_core_fabric_configfs.c
	 */
	struct se_wwn *(*fabric_make_wwn)(struct target_fabric_configfs *,
				struct config_group *, const char *);
	void (*fabric_drop_wwn)(struct se_wwn *);
	void (*add_wwn_groups)(struct se_wwn *);
	struct se_portal_group *(*fabric_make_tpg)(struct se_wwn *,
						   const char *);
	int (*fabric_enable_tpg)(struct se_portal_group *se_tpg, bool enable);
	void (*fabric_drop_tpg)(struct se_portal_group *);
	int (*fabric_post_link)(struct se_portal_group *,
				struct se_lun *);
	void (*fabric_pre_unlink)(struct se_portal_group *,
				  struct se_lun *);
	struct se_tpg_np *(*fabric_make_np)(struct se_portal_group *,
				struct config_group *, const char *);
	void (*fabric_drop_np)(struct se_tpg_np *);
	int (*fabric_init_nodeacl)(struct se_node_acl *, const char *);

	/* NULL-terminated configfs attribute arrays per ConfigFS level */
	struct configfs_attribute **tfc_discovery_attrs;
	struct configfs_attribute **tfc_wwn_attrs;
	struct configfs_attribute **tfc_tpg_base_attrs;
	struct configfs_attribute **tfc_tpg_np_base_attrs;
	struct configfs_attribute **tfc_tpg_attrib_attrs;
	struct configfs_attribute **tfc_tpg_auth_attrs;
	struct configfs_attribute **tfc_tpg_param_attrs;
	struct configfs_attribute **tfc_tpg_nacl_base_attrs;
	struct configfs_attribute **tfc_tpg_nacl_attrib_attrs;
	struct configfs_attribute **tfc_tpg_nacl_auth_attrs;
	struct configfs_attribute **tfc_tpg_nacl_param_attrs;

	/*
	 * Set this member variable if the SCSI transport protocol
	 * (e.g. iSCSI) requires that the Data-Out buffer is transferred in
	 * its entirety before a command is aborted.
	 */
	unsigned int write_pending_must_be_called:1;
	/*
	 * Set this if the driver supports submitting commands to the backend
	 * from target_submit/target_submit_cmd.
	 */
	unsigned int direct_submit_supp:1;
	/*
	 * Set this to a target_submit_type value.
	 */
	u8 default_submit_type;
};
131c4795fb2SChristoph Hellwig
/* Fabric template registration / teardown */
int target_register_template(const struct target_core_fabric_ops *fo);
void target_unregister_template(const struct target_core_fabric_ops *fo);

/* ConfigFS item dependency pinning (blocks rmdir while in use) */
int target_depend_item(struct config_item *item);
void target_undepend_item(struct config_item *item);

/* Session setup/teardown for fabric drivers */
struct se_session *target_setup_session(struct se_portal_group *,
		unsigned int, unsigned int, enum target_prot_op prot_op,
		const char *, void *,
		int (*callback)(struct se_portal_group *,
				struct se_session *, void *));
void target_remove_session(struct se_session *);

/* Outstanding-command counters used for session/device quiesce */
void target_stop_cmd_counter(struct target_cmd_counter *cmd_cnt);
void target_wait_for_cmds(struct target_cmd_counter *cmd_cnt);
struct target_cmd_counter *target_alloc_cmd_counter(void);
void target_free_cmd_counter(struct target_cmd_counter *cmd_cnt);

/* Lower-level session helpers (most fabrics use target_setup_session()) */
void transport_init_session(struct se_session *se_sess);
struct se_session *transport_alloc_session(enum target_prot_op);
int transport_alloc_session_tags(struct se_session *, unsigned int,
		unsigned int);
void __transport_register_session(struct se_portal_group *,
		struct se_node_acl *, struct se_session *, void *);
void transport_register_session(struct se_portal_group *,
		struct se_node_acl *, struct se_session *, void *);
ssize_t target_show_dynamic_sessions(struct se_portal_group *, char *);
void transport_free_session(struct se_session *);
void target_spc2_release(struct se_node_acl *nacl);
void target_put_nacl(struct se_node_acl *);
void transport_deregister_session_configfs(struct se_session *);
void transport_deregister_session(struct se_session *);


/* SCSI command initialization and submission into target-core */
void __target_init_cmd(struct se_cmd *cmd,
		const struct target_core_fabric_ops *tfo,
		struct se_session *sess, u32 data_length, int data_direction,
		int task_attr, unsigned char *sense_buffer, u64 unpacked_lun,
		struct target_cmd_counter *cmd_cnt);
int target_init_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *sense, u64 unpacked_lun, u32 data_length,
		int task_attr, int data_dir, int flags);
int target_submit_prep(struct se_cmd *se_cmd, unsigned char *cdb,
		struct scatterlist *sgl, u32 sgl_count,
		struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
		struct scatterlist *sgl_prot, u32 sgl_prot_count, gfp_t gfp);
int target_submit(struct se_cmd *se_cmd);
sense_reason_t transport_lookup_cmd_lun(struct se_cmd *);
sense_reason_t target_cmd_init_cdb(struct se_cmd *se_cmd, unsigned char *cdb,
				   gfp_t gfp);
sense_reason_t target_cmd_parse_cdb(struct se_cmd *);
void target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *,
		unsigned char *, u64, u32, int, int, int);

/* Task management request submission */
int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *sense, u64 unpacked_lun,
		void *fabric_tmr_ptr, unsigned char tm_type,
		gfp_t, u64, int);
sense_reason_t transport_generic_new_cmd(struct se_cmd *);

void target_put_cmd_and_wait(struct se_cmd *cmd);
void target_execute_cmd(struct se_cmd *cmd);

int transport_generic_free_cmd(struct se_cmd *, int);

/* Command completion, status delivery, and reference management */
bool transport_wait_for_tasks(struct se_cmd *);
int transport_send_check_condition_and_sense(struct se_cmd *,
		sense_reason_t, int);
int target_send_busy(struct se_cmd *cmd);
int target_get_sess_cmd(struct se_cmd *, bool);
int target_put_sess_cmd(struct se_cmd *);
void target_stop_session(struct se_session *se_sess);
void target_wait_for_sess_cmds(struct se_session *);
void target_show_cmd(const char *pfx, struct se_cmd *cmd);

/* TMR request allocation and handling */
int core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t);
void core_tmr_release_req(struct se_tmr_req *);
int transport_generic_handle_tmr(struct se_cmd *);
void transport_generic_request_failure(struct se_cmd *, sense_reason_t);
int transport_lookup_tmr_lun(struct se_cmd *);
void core_allocate_nexus_loss_ua(struct se_node_acl *acl);

/* Target portal group (TPG) and initiator node ACL management */
struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
		unsigned char *);
bool target_tpg_has_node_acl(struct se_portal_group *tpg,
		const char *);
struct se_node_acl *core_tpg_check_initiator_node_acl(struct se_portal_group *,
		unsigned char *);
int core_tpg_set_initiator_node_queue_depth(struct se_node_acl *, u32);
int core_tpg_set_initiator_node_tag(struct se_portal_group *,
		struct se_node_acl *, const char *);
int core_tpg_register(struct se_wwn *, struct se_portal_group *, int);
int core_tpg_deregister(struct se_portal_group *);

/* Scatterlist allocation helpers for fabric drivers */
int target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents,
		u32 length, bool zero_page, bool chainable);
void target_free_sgl(struct scatterlist *sgl, int nents);
229e64aa657SChristoph Hellwig
230b3faa2e8SNicholas Bellinger /*
231b3faa2e8SNicholas Bellinger * The LIO target core uses DMA_TO_DEVICE to mean that data is going
232b3faa2e8SNicholas Bellinger * to the target (eg handling a WRITE) and DMA_FROM_DEVICE to mean
233b3faa2e8SNicholas Bellinger * that data is coming from the target (eg handling a READ). However,
234b3faa2e8SNicholas Bellinger * this is just the opposite of what we have to tell the DMA mapping
235b3faa2e8SNicholas Bellinger * layer -- eg when handling a READ, the HBA will have to DMA the data
236b3faa2e8SNicholas Bellinger * out of memory so it can send it to the initiator, which means we
237b3faa2e8SNicholas Bellinger * need to use DMA_TO_DEVICE when we map the data.
238b3faa2e8SNicholas Bellinger */
239b3faa2e8SNicholas Bellinger static inline enum dma_data_direction
target_reverse_dma_direction(struct se_cmd * se_cmd)240b3faa2e8SNicholas Bellinger target_reverse_dma_direction(struct se_cmd *se_cmd)
241b3faa2e8SNicholas Bellinger {
242b3faa2e8SNicholas Bellinger if (se_cmd->se_cmd_flags & SCF_BIDI)
243b3faa2e8SNicholas Bellinger return DMA_BIDIRECTIONAL;
244b3faa2e8SNicholas Bellinger
245b3faa2e8SNicholas Bellinger switch (se_cmd->data_direction) {
246b3faa2e8SNicholas Bellinger case DMA_TO_DEVICE:
247b3faa2e8SNicholas Bellinger return DMA_FROM_DEVICE;
248b3faa2e8SNicholas Bellinger case DMA_FROM_DEVICE:
249b3faa2e8SNicholas Bellinger return DMA_TO_DEVICE;
250b3faa2e8SNicholas Bellinger case DMA_NONE:
251b3faa2e8SNicholas Bellinger default:
252b3faa2e8SNicholas Bellinger return DMA_NONE;
253b3faa2e8SNicholas Bellinger }
254b3faa2e8SNicholas Bellinger }
255b3faa2e8SNicholas Bellinger
#endif /* TARGET_CORE_FABRIC_H */
257