xref: /linux/drivers/infiniband/ulp/iser/iscsi_iser.h (revision 2f188828)
/*
 * iSER transport for the Open iSCSI Initiator & iSER transport internals
 *
 * Copyright (C) 2004 Dmitry Yusupov
 * Copyright (C) 2004 Alex Aizman
 * Copyright (C) 2005 Mike Christie
 * based on code maintained by open-iscsi@googlegroups.com
 *
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ISCSI_ISER_H__
#define __ISCSI_ISER_H__

#include <linux/types.h>
#include <linux/net.h>
#include <linux/printk.h>
#include <scsi/libiscsi.h>
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/iser.h>

#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/mutex.h>
#include <linux/mempool.h>
#include <linux/uio.h>

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#define DRV_NAME	"iser"
#define PFX		DRV_NAME ": "
#define DRV_VER		"1.6"

#define iser_dbg(fmt, arg...)				 \
	do {						 \
		if (unlikely(iser_debug_level > 2))	 \
			printk(KERN_DEBUG PFX "%s: " fmt,\
				__func__ , ## arg);	 \
	} while (0)

#define iser_warn(fmt, arg...)				\
	do {						\
		if (unlikely(iser_debug_level > 0))	\
			pr_warn(PFX "%s: " fmt,		\
				__func__ , ## arg);	\
	} while (0)

#define iser_info(fmt, arg...)				\
	do {						\
		if (unlikely(iser_debug_level > 1))	\
			pr_info(PFX "%s: " fmt,		\
				__func__ , ## arg);	\
	} while (0)

#define iser_err(fmt, arg...) \
	pr_err(PFX "%s: " fmt, __func__ , ## arg)
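
/*
 * iser_debug_level gates the macros above: iser_err() always prints,
 * iser_warn() requires iser_debug_level >= 1, iser_info() requires >= 2
 * and iser_dbg() requires >= 3.
 */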

/* Default support is 512KB I/O size */
#define ISER_DEF_MAX_SECTORS		1024
#define ISCSI_ISER_DEF_SG_TABLESIZE                                            \
	((ISER_DEF_MAX_SECTORS * SECTOR_SIZE) >> ilog2(SZ_4K))
/* Maximum support is 16MB I/O size */
#define ISCSI_ISER_MAX_SG_TABLESIZE ((32768 * SECTOR_SIZE) >> ilog2(SZ_4K))
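/*
 * With the kernel's 512-byte SECTOR_SIZE and 4K pages these work out to
 * 1024 * 512B = 512KB -> 128 sg entries by default, and
 * 32768 * 512B = 16MB -> 4096 sg entries at most.
 */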

#define ISER_DEF_XMIT_CMDS_DEFAULT		512
#if ISCSI_DEF_XMIT_CMDS_MAX > ISER_DEF_XMIT_CMDS_DEFAULT
	#define ISER_DEF_XMIT_CMDS_MAX		ISCSI_DEF_XMIT_CMDS_MAX
#else
	#define ISER_DEF_XMIT_CMDS_MAX		ISER_DEF_XMIT_CMDS_DEFAULT
#endif
#define ISER_DEF_CMD_PER_LUN		ISER_DEF_XMIT_CMDS_MAX

/* QP settings */
/* Maximal bounds on received asynchronous PDUs */
#define ISER_MAX_RX_MISC_PDUS		4 /* NOOP_IN(2), ASYNC_EVENT(2)    */

#define ISER_MAX_TX_MISC_PDUS		6 /* NOOP_OUT(2), TEXT(1),         *
					   * SCSI_TMFUNC(2), LOGOUT(1)     */

#define ISER_QP_MAX_RECV_DTOS		(ISER_DEF_XMIT_CMDS_MAX)

/* The max TX (send) WR supported by the iSER QP is defined by              *
 * max_send_wr = T * (1 + D) + C; T is the max number of outstanding        *
 * commands, D is how many in-flight dataouts we expect at most per SCSI    *
 * command, and C covers the misc control PDUs. The tx posting & completion *
 * handling code supports an -EAGAIN scheme where tx is suspended till the  *
 * QP has room for more send WRs. D = 8 comes from 64K/8K.                  */

#define ISER_INFLIGHT_DATAOUTS		8

#define ISER_QP_MAX_REQ_DTOS		(ISER_DEF_XMIT_CMDS_MAX *    \
					(1 + ISER_INFLIGHT_DATAOUTS) + \
					ISER_MAX_TX_MISC_PDUS        + \
					ISER_MAX_RX_MISC_PDUS)
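/*
 * With the default ISER_DEF_XMIT_CMDS_MAX of 512 this comes to
 * 512 * (1 + 8) + 6 + 4 = 4618 send WRs on the QP.
 */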

/* Max registration work requests per command */
#define ISER_MAX_REG_WR_PER_CMD		5

/* For Signature we don't support DATAOUTs so no need to make room for them */
#define ISER_QP_SIG_MAX_REQ_DTOS	(ISER_DEF_XMIT_CMDS_MAX	*       \
					(1 + ISER_MAX_REG_WR_PER_CMD) + \
					ISER_MAX_TX_MISC_PDUS         + \
					ISER_MAX_RX_MISC_PDUS)

#define ISER_GET_MAX_XMIT_CMDS(send_wr) ((send_wr			\
					 - ISER_MAX_TX_MISC_PDUS	\
					 - ISER_MAX_RX_MISC_PDUS) /	\
					 (1 + ISER_INFLIGHT_DATAOUTS))
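/*
 * ISER_GET_MAX_XMIT_CMDS() is the inverse of the sizing above: given the
 * send WR budget a device actually grants, it recovers how many commands
 * fit, e.g. ISER_GET_MAX_XMIT_CMDS(4618) = (4618 - 6 - 4) / 9 = 512.
 */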

/* Constant PDU lengths calculations */
#define ISER_HEADERS_LEN	(sizeof(struct iser_ctrl) + sizeof(struct iscsi_hdr))

#define ISER_RECV_DATA_SEG_LEN	128
#define ISER_RX_PAYLOAD_SIZE	(ISER_HEADERS_LEN + ISER_RECV_DATA_SEG_LEN)
#define ISER_RX_LOGIN_SIZE	(ISER_HEADERS_LEN + ISCSI_DEF_MAX_RECV_SEG_LEN)

/* Length of an object name string */
#define ISER_OBJECT_NAME_SIZE		    64

enum iser_conn_state {
	ISER_CONN_INIT,		   /* descriptor allocd, no conn          */
	ISER_CONN_PENDING,	   /* in the process of being established */
	ISER_CONN_UP,		   /* up and running                      */
	ISER_CONN_TERMINATING,	   /* in the process of being terminated  */
	ISER_CONN_DOWN,		   /* shut down                           */
	ISER_CONN_STATES_NUM
};

enum iser_task_status {
	ISER_TASK_STATUS_INIT = 0,
	ISER_TASK_STATUS_STARTED,
	ISER_TASK_STATUS_COMPLETED
};

enum iser_data_dir {
	ISER_DIR_IN = 0,	   /* to initiator */
	ISER_DIR_OUT,		   /* from initiator */
	ISER_DIRS_NUM
};

/**
 * struct iser_data_buf - iSER data buffer
 *
 * @sg:           pointer to the sg list
 * @size:         num entries of this sg
 * @data_len:     total buffer byte len
 * @dma_nents:    returned by dma_map_sg
 */
struct iser_data_buf {
	struct scatterlist *sg;
	int                size;
	unsigned long      data_len;
	int                dma_nents;
};

/* fwd declarations */
struct iser_device;
struct iscsi_iser_task;
struct iscsi_endpoint;
struct iser_reg_resources;

/**
 * struct iser_mem_reg - iSER memory registration info
 *
 * @sge:          memory region sg element
 * @rkey:         memory region remote key
 * @desc:         pointer to fast registration context
 */
struct iser_mem_reg {
	struct ib_sge sge;
	u32 rkey;
	struct iser_fr_desc *desc;
};

enum iser_desc_type {
	ISCSI_TX_CONTROL,
	ISCSI_TX_SCSI_COMMAND,
	ISCSI_TX_DATAOUT
};

/**
 * struct iser_tx_desc - iSER TX descriptor
 *
 * @iser_header:   iser header
 * @iscsi_header:  iscsi header
 * @type:          command/control/dataout
 * @dma_addr:      header buffer dma_address
 * @tx_sg:         sg[0] points to iser/iscsi headers
 *                 sg[1] optionally points to either immediate data,
 *                 unsolicited data-out or control
 * @num_sge:       number of sges used on this TX task
 * @cqe:           completion handler
 * @mapped:        Is the task header mapped
 * @reg_wr:        registration WR
 * @send_wr:       send WR
 * @inv_wr:        invalidate WR
 */
struct iser_tx_desc {
	struct iser_ctrl             iser_header;
	struct iscsi_hdr             iscsi_header;
	enum   iser_desc_type        type;
	u64		             dma_addr;
	struct ib_sge		     tx_sg[2];
	int                          num_sge;
	struct ib_cqe		     cqe;
	bool			     mapped;
	struct ib_reg_wr	     reg_wr;
	struct ib_send_wr	     send_wr;
	struct ib_send_wr	     inv_wr;
};

#define ISER_RX_PAD_SIZE	(256 - (ISER_RX_PAYLOAD_SIZE + \
				 sizeof(u64) + sizeof(struct ib_sge) + \
				 sizeof(struct ib_cqe)))
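/*
 * ISER_RX_PAD_SIZE is chosen so that struct iser_rx_desc below (headers +
 * data segment + dma_addr + rx_sg + cqe + pad) adds up to exactly 256 bytes.
 */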
/**
 * struct iser_rx_desc - iSER RX descriptor
 *
 * @iser_header:   iser header
 * @iscsi_header:  iscsi header
 * @data:          received data segment
 * @dma_addr:      receive buffer dma address
 * @rx_sg:         ib_sge of receive buffer
 * @cqe:           completion handler
 * @pad:           for sense data TODO: Modify to maximum sense length supported
 */
struct iser_rx_desc {
	struct iser_ctrl             iser_header;
	struct iscsi_hdr             iscsi_header;
	char		             data[ISER_RECV_DATA_SEG_LEN];
	u64		             dma_addr;
	struct ib_sge		     rx_sg;
	struct ib_cqe		     cqe;
	char		             pad[ISER_RX_PAD_SIZE];
} __packed;

/**
 * struct iser_login_desc - iSER login descriptor
 *
 * @req:           pointer to login request buffer
 * @rsp:           pointer to login response buffer
 * @req_dma:       DMA address of login request buffer
 * @rsp_dma:       DMA address of login response buffer
 * @sge:           IB sge for login post recv
 * @cqe:           completion handler
 */
struct iser_login_desc {
	void                         *req;
	void                         *rsp;
	u64                          req_dma;
	u64                          rsp_dma;
	struct ib_sge                sge;
	struct ib_cqe		     cqe;
} __packed;

struct iser_conn;
struct ib_conn;

/**
 * struct iser_device - iSER device handle
 *
 * @ib_device:     RDMA device
 * @pd:            Protection Domain for this device
 * @event_handler: IB events handle routine
 * @ig_list:       entry in devices list
 * @refcount:      Reference counter, dominated by open iser connections
 */
struct iser_device {
	struct ib_device             *ib_device;
	struct ib_pd	             *pd;
	struct ib_event_handler      event_handler;
	struct list_head             ig_list;
	int                          refcount;
};

/**
 * struct iser_reg_resources - Fast registration resources
 *
 * @mr:         memory region
 * @sig_mr:     signature memory region
 */
struct iser_reg_resources {
	struct ib_mr                     *mr;
	struct ib_mr                     *sig_mr;
};

/**
 * struct iser_fr_desc - Fast registration descriptor
 *
 * @list:           entry in connection fastreg pool
 * @rsc:            data buffer registration resources
 * @sig_protected:  is region protected indicator
 * @all_list:       first and last list members
 */
struct iser_fr_desc {
	struct list_head		  list;
	struct iser_reg_resources	  rsc;
	bool				  sig_protected;
	struct list_head                  all_list;
};

/**
 * struct iser_fr_pool - connection fast registration pool
 *
 * @list:                list of fastreg descriptors
 * @lock:                protects fastreg pool
 * @size:                size of the pool
 * @all_list:            first and last list members
 */
struct iser_fr_pool {
	struct list_head        list;
	spinlock_t              lock;
	int                     size;
	struct list_head        all_list;
};

/**
 * struct ib_conn - InfiniBand related objects
 *
 * @cma_id:              rdma_cm connection manager handle
 * @qp:                  Connection Queue-pair
 * @cq:                  Connection completion queue
 * @cq_size:             The number of max outstanding completions
 * @device:              reference to iser device
 * @fr_pool:             connection fast registration pool
 * @pi_support:          Indicate device T10-PI support
 * @reg_cqe:             completion handler
 */
struct ib_conn {
	struct rdma_cm_id           *cma_id;
	struct ib_qp	            *qp;
	struct ib_cq		    *cq;
	u32			    cq_size;
	struct iser_device          *device;
	struct iser_fr_pool          fr_pool;
	bool			     pi_support;
	struct ib_cqe		     reg_cqe;
};

/**
 * struct iser_conn - iSER connection context
 *
 * @ib_conn:          connection RDMA resources
 * @iscsi_conn:       link to matching iscsi connection
 * @ep:               transport handle
 * @state:            connection logical state
 * @qp_max_recv_dtos: maximum number of data outs, corresponds
 *                    to max number of post recvs
 * @max_cmds:         maximum cmds allowed for this connection
 * @name:             connection peer portal
 * @release_work:     deferred work for release job
 * @state_mutex:      protects iser connection state
 * @stop_completion:  conn_stop completion
 * @ib_completion:    RDMA cleanup completion
 * @up_completion:    connection establishment completed
 *                    (state is ISER_CONN_UP)
 * @conn_list:        entry in ig conn list
 * @login_desc:       login descriptor
 * @rx_descs:         rx buffers array (cyclic buffer)
 * @num_rx_descs:     number of rx descriptors
 * @scsi_sg_tablesize: scsi host sg_tablesize
 * @pages_per_mr:     maximum pages available for registration
 * @snd_w_inv:        connection uses remote invalidation
 */
struct iser_conn {
	struct ib_conn		     ib_conn;
	struct iscsi_conn	     *iscsi_conn;
	struct iscsi_endpoint	     *ep;
	enum iser_conn_state	     state;
	unsigned		     qp_max_recv_dtos;
	u16                          max_cmds;
	char			     name[ISER_OBJECT_NAME_SIZE];
	struct work_struct	     release_work;
	struct mutex		     state_mutex;
	struct completion	     stop_completion;
	struct completion	     ib_completion;
	struct completion	     up_completion;
	struct list_head	     conn_list;
	struct iser_login_desc       login_desc;
	struct iser_rx_desc	     *rx_descs;
	u32                          num_rx_descs;
	unsigned short               scsi_sg_tablesize;
	unsigned short               pages_per_mr;
	bool			     snd_w_inv;
};

/**
 * struct iscsi_iser_task - iser task context
 *
 * @desc:             TX descriptor
 * @iser_conn:        link to iser connection
 * @status:           current task status
 * @sc:               link to scsi command
 * @command_sent:     indicate if command was sent
 * @dir:              iser data direction
 * @rdma_reg:         task rdma registration desc
 * @data:             iser data buffer desc
 * @prot:             iser protection buffer desc
 */
struct iscsi_iser_task {
	struct iser_tx_desc          desc;
	struct iser_conn	     *iser_conn;
	enum iser_task_status	     status;
	struct scsi_cmnd	     *sc;
	int                          command_sent;
	int                          dir[ISER_DIRS_NUM];
	struct iser_mem_reg          rdma_reg[ISER_DIRS_NUM];
	struct iser_data_buf         data[ISER_DIRS_NUM];
	struct iser_data_buf         prot[ISER_DIRS_NUM];
};
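
/*
 * dir[], rdma_reg[], data[] and prot[] are indexed by enum iser_data_dir,
 * so each task keeps separate buffer/registration state for the IN (read)
 * and OUT (write) directions.
 */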

/**
 * struct iser_global - iSER global context
 *
 * @device_list_mutex:    protects device_list
 * @device_list:          iser devices global list
 * @connlist_mutex:       protects connlist
 * @connlist:             iser connections global list
 * @desc_cache:           kmem cache for tx dataout
 */
struct iser_global {
	struct mutex      device_list_mutex;
	struct list_head  device_list;
	struct mutex      connlist_mutex;
	struct list_head  connlist;
	struct kmem_cache *desc_cache;
};

extern struct iser_global ig;
extern int iser_debug_level;
extern bool iser_pi_enable;
extern unsigned int iser_max_sectors;
extern bool iser_always_reg;

int iser_send_control(struct iscsi_conn *conn,
		      struct iscsi_task *task);

int iser_send_command(struct iscsi_conn *conn,
		      struct iscsi_task *task);

int iser_send_data_out(struct iscsi_conn *conn,
		       struct iscsi_task *task,
		       struct iscsi_data *hdr);

void iscsi_iser_recv(struct iscsi_conn *conn,
		     struct iscsi_hdr *hdr,
		     char *rx_data,
		     int rx_data_len);

void iser_conn_init(struct iser_conn *iser_conn);

void iser_conn_release(struct iser_conn *iser_conn);

int iser_conn_terminate(struct iser_conn *iser_conn);

void iser_release_work(struct work_struct *work);

void iser_err_comp(struct ib_wc *wc, const char *type);
void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc);
void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc);
void iser_cmd_comp(struct ib_cq *cq, struct ib_wc *wc);
void iser_ctrl_comp(struct ib_cq *cq, struct ib_wc *wc);
void iser_dataout_comp(struct ib_cq *cq, struct ib_wc *wc);
void iser_reg_comp(struct ib_cq *cq, struct ib_wc *wc);

void iser_task_rdma_init(struct iscsi_iser_task *task);

void iser_task_rdma_finalize(struct iscsi_iser_task *task);

void iser_free_rx_descriptors(struct iser_conn *iser_conn);

void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
				     struct iser_data_buf *mem,
				     enum iser_data_dir cmd_dir);

int iser_reg_mem_fastreg(struct iscsi_iser_task *task,
			 enum iser_data_dir dir,
			 bool all_imm);
void iser_unreg_mem_fastreg(struct iscsi_iser_task *task,
			    enum iser_data_dir dir);

int  iser_connect(struct iser_conn *iser_conn,
		  struct sockaddr *src_addr,
		  struct sockaddr *dst_addr,
		  int non_blocking);

int  iser_post_recvl(struct iser_conn *iser_conn);
int  iser_post_recvm(struct iser_conn *iser_conn,
		     struct iser_rx_desc *rx_desc);
int  iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc);

int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
			   enum iser_data_dir iser_dir,
			   enum dma_data_direction dma_dir);

void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
			      enum iser_data_dir iser_dir,
			      enum dma_data_direction dma_dir);

int  iser_initialize_task_headers(struct iscsi_task *task,
			struct iser_tx_desc *tx_desc);
int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
			      struct iscsi_session *session);
int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
			    unsigned cmds_max,
			    unsigned int size);
void iser_free_fastreg_pool(struct ib_conn *ib_conn);
u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
			     enum iser_data_dir cmd_dir, sector_t *sector);

static inline struct iser_conn *
to_iser_conn(struct ib_conn *ib_conn)
{
	return container_of(ib_conn, struct iser_conn, ib_conn);
}

static inline struct iser_rx_desc *
iser_rx(struct ib_cqe *cqe)
{
	return container_of(cqe, struct iser_rx_desc, cqe);
}

static inline struct iser_tx_desc *
iser_tx(struct ib_cqe *cqe)
{
	return container_of(cqe, struct iser_tx_desc, cqe);
}

static inline struct iser_login_desc *
iser_login(struct ib_cqe *cqe)
{
	return container_of(cqe, struct iser_login_desc, cqe);
}
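
/*
 * These helpers map a completion's ib_cqe back to its containing
 * descriptor via container_of(). A sketch of how a receive completion
 * handler might use them (body shown for illustration only):
 *
 *	void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc)
 *	{
 *		struct iser_rx_desc *rx_desc = iser_rx(wc->wr_cqe);
 *		...
 *	}
 */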

#endif