xref: /linux/drivers/target/iscsi/iscsi_target.c (revision 84b9b44b)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*******************************************************************************
3  * This file contains main functions related to the iSCSI Target Core Driver.
4  *
5  * (c) Copyright 2007-2013 Datera, Inc.
6  *
7  * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
8  *
9  ******************************************************************************/
10 
11 #include <crypto/hash.h>
12 #include <linux/string.h>
13 #include <linux/kthread.h>
14 #include <linux/completion.h>
15 #include <linux/module.h>
16 #include <linux/vmalloc.h>
17 #include <linux/idr.h>
18 #include <linux/delay.h>
19 #include <linux/sched/signal.h>
20 #include <asm/unaligned.h>
21 #include <linux/inet.h>
22 #include <net/ipv6.h>
23 #include <scsi/scsi_proto.h>
24 #include <scsi/iscsi_proto.h>
25 #include <scsi/scsi_tcq.h>
26 #include <target/target_core_base.h>
27 #include <target/target_core_fabric.h>
28 
29 #include <target/target_core_backend.h>
30 #include <target/iscsi/iscsi_target_core.h>
31 #include "iscsi_target_parameters.h"
32 #include "iscsi_target_seq_pdu_list.h"
33 #include "iscsi_target_datain_values.h"
34 #include "iscsi_target_erl0.h"
35 #include "iscsi_target_erl1.h"
36 #include "iscsi_target_erl2.h"
37 #include "iscsi_target_login.h"
38 #include "iscsi_target_tmr.h"
39 #include "iscsi_target_tpg.h"
40 #include "iscsi_target_util.h"
41 #include "iscsi_target.h"
42 #include "iscsi_target_device.h"
43 #include <target/iscsi/iscsi_target_stat.h>
44 
45 #include <target/iscsi/iscsi_transport.h>
46 
47 static LIST_HEAD(g_tiqn_list);
48 static LIST_HEAD(g_np_list);
49 static DEFINE_SPINLOCK(tiqn_lock);
50 static DEFINE_MUTEX(np_lock);
51 
52 static struct idr tiqn_idr;
53 DEFINE_IDA(sess_ida);
54 struct mutex auth_id_lock;
55 
56 struct iscsit_global *iscsit_global;
57 
58 struct kmem_cache *lio_qr_cache;
59 struct kmem_cache *lio_dr_cache;
60 struct kmem_cache *lio_ooo_cache;
61 struct kmem_cache *lio_r2t_cache;
62 
63 static int iscsit_handle_immediate_data(struct iscsit_cmd *,
64 			struct iscsi_scsi_req *, u32);
65 
66 struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *buf)
67 {
68 	struct iscsi_tiqn *tiqn = NULL;
69 
70 	spin_lock(&tiqn_lock);
71 	list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
72 		if (!strcmp(tiqn->tiqn, buf)) {
73 
74 			spin_lock(&tiqn->tiqn_state_lock);
75 			if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
76 				tiqn->tiqn_access_count++;
77 				spin_unlock(&tiqn->tiqn_state_lock);
78 				spin_unlock(&tiqn_lock);
79 				return tiqn;
80 			}
81 			spin_unlock(&tiqn->tiqn_state_lock);
82 		}
83 	}
84 	spin_unlock(&tiqn_lock);
85 
86 	return NULL;
87 }
88 
89 static int iscsit_set_tiqn_shutdown(struct iscsi_tiqn *tiqn)
90 {
91 	spin_lock(&tiqn->tiqn_state_lock);
92 	if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
93 		tiqn->tiqn_state = TIQN_STATE_SHUTDOWN;
94 		spin_unlock(&tiqn->tiqn_state_lock);
95 		return 0;
96 	}
97 	spin_unlock(&tiqn->tiqn_state_lock);
98 
99 	return -1;
100 }
101 
102 void iscsit_put_tiqn_for_login(struct iscsi_tiqn *tiqn)
103 {
104 	spin_lock(&tiqn->tiqn_state_lock);
105 	tiqn->tiqn_access_count--;
106 	spin_unlock(&tiqn->tiqn_state_lock);
107 }
108 
109 /*
110  * Note that IQN formatting is expected to be done in userspace, and
111  * no explict IQN format checks are done here.
112  */
113 struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *buf)
114 {
115 	struct iscsi_tiqn *tiqn = NULL;
116 	int ret;
117 
118 	if (strlen(buf) >= ISCSI_IQN_LEN) {
119 		pr_err("Target IQN exceeds %d bytes\n",
120 				ISCSI_IQN_LEN);
121 		return ERR_PTR(-EINVAL);
122 	}
123 
124 	tiqn = kzalloc(sizeof(*tiqn), GFP_KERNEL);
125 	if (!tiqn)
126 		return ERR_PTR(-ENOMEM);
127 
128 	sprintf(tiqn->tiqn, "%s", buf);
129 	INIT_LIST_HEAD(&tiqn->tiqn_list);
130 	INIT_LIST_HEAD(&tiqn->tiqn_tpg_list);
131 	spin_lock_init(&tiqn->tiqn_state_lock);
132 	spin_lock_init(&tiqn->tiqn_tpg_lock);
133 	spin_lock_init(&tiqn->sess_err_stats.lock);
134 	spin_lock_init(&tiqn->login_stats.lock);
135 	spin_lock_init(&tiqn->logout_stats.lock);
136 
137 	tiqn->tiqn_state = TIQN_STATE_ACTIVE;
138 
139 	idr_preload(GFP_KERNEL);
140 	spin_lock(&tiqn_lock);
141 
142 	ret = idr_alloc(&tiqn_idr, NULL, 0, 0, GFP_NOWAIT);
143 	if (ret < 0) {
144 		pr_err("idr_alloc() failed for tiqn->tiqn_index\n");
145 		spin_unlock(&tiqn_lock);
146 		idr_preload_end();
147 		kfree(tiqn);
148 		return ERR_PTR(ret);
149 	}
150 	tiqn->tiqn_index = ret;
151 	list_add_tail(&tiqn->tiqn_list, &g_tiqn_list);
152 
153 	spin_unlock(&tiqn_lock);
154 	idr_preload_end();
155 
156 	pr_debug("CORE[0] - Added iSCSI Target IQN: %s\n", tiqn->tiqn);
157 
158 	return tiqn;
159 
160 }
161 
162 static void iscsit_wait_for_tiqn(struct iscsi_tiqn *tiqn)
163 {
164 	/*
165 	 * Wait for accesses to said struct iscsi_tiqn to end.
166 	 */
167 	spin_lock(&tiqn->tiqn_state_lock);
168 	while (tiqn->tiqn_access_count != 0) {
169 		spin_unlock(&tiqn->tiqn_state_lock);
170 		msleep(10);
171 		spin_lock(&tiqn->tiqn_state_lock);
172 	}
173 	spin_unlock(&tiqn->tiqn_state_lock);
174 }
175 
176 void iscsit_del_tiqn(struct iscsi_tiqn *tiqn)
177 {
178 	/*
179 	 * iscsit_set_tiqn_shutdown sets tiqn->tiqn_state = TIQN_STATE_SHUTDOWN
180 	 * while holding tiqn->tiqn_state_lock.  This means that all subsequent
181 	 * attempts to access this struct iscsi_tiqn will fail from both transport
182 	 * fabric and control code paths.
183 	 */
184 	if (iscsit_set_tiqn_shutdown(tiqn) < 0) {
185 		pr_err("iscsit_set_tiqn_shutdown() failed\n");
186 		return;
187 	}
188 
189 	iscsit_wait_for_tiqn(tiqn);
190 
191 	spin_lock(&tiqn_lock);
192 	list_del(&tiqn->tiqn_list);
193 	idr_remove(&tiqn_idr, tiqn->tiqn_index);
194 	spin_unlock(&tiqn_lock);
195 
196 	pr_debug("CORE[0] - Deleted iSCSI Target IQN: %s\n",
197 			tiqn->tiqn);
198 	kfree(tiqn);
199 }
200 
201 int iscsit_access_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
202 {
203 	int ret;
204 	/*
205 	 * Determine if the network portal is accepting storage traffic.
206 	 */
207 	spin_lock_bh(&np->np_thread_lock);
208 	if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
209 		spin_unlock_bh(&np->np_thread_lock);
210 		return -1;
211 	}
212 	spin_unlock_bh(&np->np_thread_lock);
213 	/*
214 	 * Determine if the portal group is accepting storage traffic.
215 	 */
216 	spin_lock_bh(&tpg->tpg_state_lock);
217 	if (tpg->tpg_state != TPG_STATE_ACTIVE) {
218 		spin_unlock_bh(&tpg->tpg_state_lock);
219 		return -1;
220 	}
221 	spin_unlock_bh(&tpg->tpg_state_lock);
222 
223 	/*
224 	 * Here we serialize access across the TIQN+TPG Tuple.
225 	 */
226 	ret = down_interruptible(&tpg->np_login_sem);
227 	if (ret != 0)
228 		return -1;
229 
230 	spin_lock_bh(&tpg->tpg_state_lock);
231 	if (tpg->tpg_state != TPG_STATE_ACTIVE) {
232 		spin_unlock_bh(&tpg->tpg_state_lock);
233 		up(&tpg->np_login_sem);
234 		return -1;
235 	}
236 	spin_unlock_bh(&tpg->tpg_state_lock);
237 
238 	return 0;
239 }
240 
241 void iscsit_login_kref_put(struct kref *kref)
242 {
243 	struct iscsi_tpg_np *tpg_np = container_of(kref,
244 				struct iscsi_tpg_np, tpg_np_kref);
245 
246 	complete(&tpg_np->tpg_np_comp);
247 }
248 
249 int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg,
250 		       struct iscsi_tpg_np *tpg_np)
251 {
252 	struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
253 
254 	up(&tpg->np_login_sem);
255 
256 	if (tpg_np)
257 		kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);
258 
259 	if (tiqn)
260 		iscsit_put_tiqn_for_login(tiqn);
261 
262 	return 0;
263 }
264 
265 bool iscsit_check_np_match(
266 	struct sockaddr_storage *sockaddr,
267 	struct iscsi_np *np,
268 	int network_transport)
269 {
270 	struct sockaddr_in *sock_in, *sock_in_e;
271 	struct sockaddr_in6 *sock_in6, *sock_in6_e;
272 	bool ip_match = false;
273 	u16 port, port_e;
274 
275 	if (sockaddr->ss_family == AF_INET6) {
276 		sock_in6 = (struct sockaddr_in6 *)sockaddr;
277 		sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr;
278 
279 		if (!memcmp(&sock_in6->sin6_addr.in6_u,
280 			    &sock_in6_e->sin6_addr.in6_u,
281 			    sizeof(struct in6_addr)))
282 			ip_match = true;
283 
284 		port = ntohs(sock_in6->sin6_port);
285 		port_e = ntohs(sock_in6_e->sin6_port);
286 	} else {
287 		sock_in = (struct sockaddr_in *)sockaddr;
288 		sock_in_e = (struct sockaddr_in *)&np->np_sockaddr;
289 
290 		if (sock_in->sin_addr.s_addr == sock_in_e->sin_addr.s_addr)
291 			ip_match = true;
292 
293 		port = ntohs(sock_in->sin_port);
294 		port_e = ntohs(sock_in_e->sin_port);
295 	}
296 
297 	if (ip_match && (port_e == port) &&
298 	    (np->np_network_transport == network_transport))
299 		return true;
300 
301 	return false;
302 }
303 
304 static struct iscsi_np *iscsit_get_np(
305 	struct sockaddr_storage *sockaddr,
306 	int network_transport)
307 {
308 	struct iscsi_np *np;
309 	bool match;
310 
311 	lockdep_assert_held(&np_lock);
312 
313 	list_for_each_entry(np, &g_np_list, np_list) {
314 		spin_lock_bh(&np->np_thread_lock);
315 		if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
316 			spin_unlock_bh(&np->np_thread_lock);
317 			continue;
318 		}
319 
320 		match = iscsit_check_np_match(sockaddr, np, network_transport);
321 		if (match) {
322 			/*
323 			 * Increment the np_exports reference count now to
324 			 * prevent iscsit_del_np() below from being called
325 			 * while iscsi_tpg_add_network_portal() is called.
326 			 */
327 			np->np_exports++;
328 			spin_unlock_bh(&np->np_thread_lock);
329 			return np;
330 		}
331 		spin_unlock_bh(&np->np_thread_lock);
332 	}
333 
334 	return NULL;
335 }
336 
337 struct iscsi_np *iscsit_add_np(
338 	struct sockaddr_storage *sockaddr,
339 	int network_transport)
340 {
341 	struct iscsi_np *np;
342 	int ret;
343 
344 	mutex_lock(&np_lock);
345 
346 	/*
347 	 * Locate the existing struct iscsi_np if already active..
348 	 */
349 	np = iscsit_get_np(sockaddr, network_transport);
350 	if (np) {
351 		mutex_unlock(&np_lock);
352 		return np;
353 	}
354 
355 	np = kzalloc(sizeof(*np), GFP_KERNEL);
356 	if (!np) {
357 		mutex_unlock(&np_lock);
358 		return ERR_PTR(-ENOMEM);
359 	}
360 
361 	np->np_flags |= NPF_IP_NETWORK;
362 	np->np_network_transport = network_transport;
363 	spin_lock_init(&np->np_thread_lock);
364 	init_completion(&np->np_restart_comp);
365 	INIT_LIST_HEAD(&np->np_list);
366 
367 	ret = iscsi_target_setup_login_socket(np, sockaddr);
368 	if (ret != 0) {
369 		kfree(np);
370 		mutex_unlock(&np_lock);
371 		return ERR_PTR(ret);
372 	}
373 
374 	np->np_thread = kthread_run(iscsi_target_login_thread, np, "iscsi_np");
375 	if (IS_ERR(np->np_thread)) {
376 		pr_err("Unable to create kthread: iscsi_np\n");
377 		ret = PTR_ERR(np->np_thread);
378 		kfree(np);
379 		mutex_unlock(&np_lock);
380 		return ERR_PTR(ret);
381 	}
382 	/*
383 	 * Increment the np_exports reference count now to prevent
384 	 * iscsit_del_np() below from being run while a new call to
385 	 * iscsi_tpg_add_network_portal() for a matching iscsi_np is
386 	 * active.  We don't need to hold np->np_thread_lock at this
387 	 * point because iscsi_np has not been added to g_np_list yet.
388 	 */
389 	np->np_exports = 1;
390 	np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
391 
392 	list_add_tail(&np->np_list, &g_np_list);
393 	mutex_unlock(&np_lock);
394 
395 	pr_debug("CORE[0] - Added Network Portal: %pISpc on %s\n",
396 		&np->np_sockaddr, np->np_transport->name);
397 
398 	return np;
399 }
400 
401 int iscsit_reset_np_thread(
402 	struct iscsi_np *np,
403 	struct iscsi_tpg_np *tpg_np,
404 	struct iscsi_portal_group *tpg,
405 	bool shutdown)
406 {
407 	spin_lock_bh(&np->np_thread_lock);
408 	if (np->np_thread_state == ISCSI_NP_THREAD_INACTIVE) {
409 		spin_unlock_bh(&np->np_thread_lock);
410 		return 0;
411 	}
412 	np->np_thread_state = ISCSI_NP_THREAD_RESET;
413 	atomic_inc(&np->np_reset_count);
414 
415 	if (np->np_thread) {
416 		spin_unlock_bh(&np->np_thread_lock);
417 		send_sig(SIGINT, np->np_thread, 1);
418 		wait_for_completion(&np->np_restart_comp);
419 		spin_lock_bh(&np->np_thread_lock);
420 	}
421 	spin_unlock_bh(&np->np_thread_lock);
422 
423 	if (tpg_np && shutdown) {
424 		kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);
425 
426 		wait_for_completion(&tpg_np->tpg_np_comp);
427 	}
428 
429 	return 0;
430 }
431 
432 static void iscsit_free_np(struct iscsi_np *np)
433 {
434 	if (np->np_socket)
435 		sock_release(np->np_socket);
436 }
437 
438 int iscsit_del_np(struct iscsi_np *np)
439 {
440 	spin_lock_bh(&np->np_thread_lock);
441 	np->np_exports--;
442 	if (np->np_exports) {
443 		np->enabled = true;
444 		spin_unlock_bh(&np->np_thread_lock);
445 		return 0;
446 	}
447 	np->np_thread_state = ISCSI_NP_THREAD_SHUTDOWN;
448 	spin_unlock_bh(&np->np_thread_lock);
449 
450 	if (np->np_thread) {
451 		/*
452 		 * We need to send the signal to wakeup Linux/Net
453 		 * which may be sleeping in sock_accept()..
454 		 */
455 		send_sig(SIGINT, np->np_thread, 1);
456 		kthread_stop(np->np_thread);
457 		np->np_thread = NULL;
458 	}
459 
460 	np->np_transport->iscsit_free_np(np);
461 
462 	mutex_lock(&np_lock);
463 	list_del(&np->np_list);
464 	mutex_unlock(&np_lock);
465 
466 	pr_debug("CORE[0] - Removed Network Portal: %pISpc on %s\n",
467 		&np->np_sockaddr, np->np_transport->name);
468 
469 	iscsit_put_transport(np->np_transport);
470 	kfree(np);
471 	return 0;
472 }
473 
474 static void iscsit_get_rx_pdu(struct iscsit_conn *);
475 
476 int iscsit_queue_rsp(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
477 {
478 	return iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
479 }
480 EXPORT_SYMBOL(iscsit_queue_rsp);
481 
482 void iscsit_aborted_task(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
483 {
484 	spin_lock_bh(&conn->cmd_lock);
485 	if (!list_empty(&cmd->i_conn_node))
486 		list_del_init(&cmd->i_conn_node);
487 	spin_unlock_bh(&conn->cmd_lock);
488 
489 	__iscsit_free_cmd(cmd, true);
490 }
491 EXPORT_SYMBOL(iscsit_aborted_task);
492 
493 static void iscsit_do_crypto_hash_buf(struct ahash_request *, const void *,
494 				      u32, u32, const void *, void *);
495 static void iscsit_tx_thread_wait_for_tcp(struct iscsit_conn *);
496 
497 static int
498 iscsit_xmit_nondatain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
499 			  const void *data_buf, u32 data_buf_len)
500 {
501 	struct iscsi_hdr *hdr = (struct iscsi_hdr *)cmd->pdu;
502 	struct kvec *iov;
503 	u32 niov = 0, tx_size = ISCSI_HDR_LEN;
504 	int ret;
505 
506 	iov = &cmd->iov_misc[0];
507 	iov[niov].iov_base	= cmd->pdu;
508 	iov[niov++].iov_len	= ISCSI_HDR_LEN;
509 
510 	if (conn->conn_ops->HeaderDigest) {
511 		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
512 
513 		iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
514 					  ISCSI_HDR_LEN, 0, NULL,
515 					  header_digest);
516 
517 		iov[0].iov_len += ISCSI_CRC_LEN;
518 		tx_size += ISCSI_CRC_LEN;
519 		pr_debug("Attaching CRC32C HeaderDigest"
520 			 " to opcode 0x%x 0x%08x\n",
521 			 hdr->opcode, *header_digest);
522 	}
523 
524 	if (data_buf_len) {
525 		u32 padding = ((-data_buf_len) & 3);
526 
527 		iov[niov].iov_base	= (void *)data_buf;
528 		iov[niov++].iov_len	= data_buf_len;
529 		tx_size += data_buf_len;
530 
531 		if (padding != 0) {
532 			iov[niov].iov_base = &cmd->pad_bytes;
533 			iov[niov++].iov_len = padding;
534 			tx_size += padding;
535 			pr_debug("Attaching %u additional"
536 				 " padding bytes.\n", padding);
537 		}
538 
539 		if (conn->conn_ops->DataDigest) {
540 			iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
541 						  data_buf, data_buf_len,
542 						  padding, &cmd->pad_bytes,
543 						  &cmd->data_crc);
544 
545 			iov[niov].iov_base = &cmd->data_crc;
546 			iov[niov++].iov_len = ISCSI_CRC_LEN;
547 			tx_size += ISCSI_CRC_LEN;
548 			pr_debug("Attached DataDigest for %u"
549 				 " bytes opcode 0x%x, CRC 0x%08x\n",
550 				 data_buf_len, hdr->opcode, cmd->data_crc);
551 		}
552 	}
553 
554 	cmd->iov_misc_count = niov;
555 	cmd->tx_size = tx_size;
556 
557 	ret = iscsit_send_tx_data(cmd, conn, 1);
558 	if (ret < 0) {
559 		iscsit_tx_thread_wait_for_tcp(conn);
560 		return ret;
561 	}
562 
563 	return 0;
564 }
565 
566 static int iscsit_map_iovec(struct iscsit_cmd *cmd, struct kvec *iov, int nvec,
567 			    u32 data_offset, u32 data_length);
568 static void iscsit_unmap_iovec(struct iscsit_cmd *);
569 static u32 iscsit_do_crypto_hash_sg(struct ahash_request *, struct iscsit_cmd *,
570 				    u32, u32, u32, u8 *);
571 static int
572 iscsit_xmit_datain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
573 		       const struct iscsi_datain *datain)
574 {
575 	struct kvec *iov;
576 	u32 iov_count = 0, tx_size = 0;
577 	int ret, iov_ret;
578 
579 	iov = &cmd->iov_data[0];
580 	iov[iov_count].iov_base	= cmd->pdu;
581 	iov[iov_count++].iov_len = ISCSI_HDR_LEN;
582 	tx_size += ISCSI_HDR_LEN;
583 
584 	if (conn->conn_ops->HeaderDigest) {
585 		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
586 
587 		iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu,
588 					  ISCSI_HDR_LEN, 0, NULL,
589 					  header_digest);
590 
591 		iov[0].iov_len += ISCSI_CRC_LEN;
592 		tx_size += ISCSI_CRC_LEN;
593 
594 		pr_debug("Attaching CRC32 HeaderDigest for DataIN PDU 0x%08x\n",
595 			 *header_digest);
596 	}
597 
598 	iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[iov_count],
599 				   cmd->orig_iov_data_count - (iov_count + 2),
600 				   datain->offset, datain->length);
601 	if (iov_ret < 0)
602 		return -1;
603 
604 	iov_count += iov_ret;
605 	tx_size += datain->length;
606 
607 	cmd->padding = ((-datain->length) & 3);
608 	if (cmd->padding) {
609 		iov[iov_count].iov_base		= cmd->pad_bytes;
610 		iov[iov_count++].iov_len	= cmd->padding;
611 		tx_size += cmd->padding;
612 
613 		pr_debug("Attaching %u padding bytes\n", cmd->padding);
614 	}
615 
616 	if (conn->conn_ops->DataDigest) {
617 		cmd->data_crc = iscsit_do_crypto_hash_sg(conn->conn_tx_hash,
618 							 cmd, datain->offset,
619 							 datain->length,
620 							 cmd->padding,
621 							 cmd->pad_bytes);
622 
623 		iov[iov_count].iov_base	= &cmd->data_crc;
624 		iov[iov_count++].iov_len = ISCSI_CRC_LEN;
625 		tx_size += ISCSI_CRC_LEN;
626 
627 		pr_debug("Attached CRC32C DataDigest %d bytes, crc 0x%08x\n",
628 			 datain->length + cmd->padding, cmd->data_crc);
629 	}
630 
631 	cmd->iov_data_count = iov_count;
632 	cmd->tx_size = tx_size;
633 
634 	ret = iscsit_fe_sendpage_sg(cmd, conn);
635 
636 	iscsit_unmap_iovec(cmd);
637 
638 	if (ret < 0) {
639 		iscsit_tx_thread_wait_for_tcp(conn);
640 		return ret;
641 	}
642 
643 	return 0;
644 }
645 
646 static int iscsit_xmit_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
647 			   struct iscsi_datain_req *dr, const void *buf,
648 			   u32 buf_len)
649 {
650 	if (dr)
651 		return iscsit_xmit_datain_pdu(conn, cmd, buf);
652 	else
653 		return iscsit_xmit_nondatain_pdu(conn, cmd, buf, buf_len);
654 }
655 
656 static enum target_prot_op iscsit_get_sup_prot_ops(struct iscsit_conn *conn)
657 {
658 	return TARGET_PROT_NORMAL;
659 }
660 
661 static struct iscsit_transport iscsi_target_transport = {
662 	.name			= "iSCSI/TCP",
663 	.transport_type		= ISCSI_TCP,
664 	.rdma_shutdown		= false,
665 	.owner			= NULL,
666 	.iscsit_setup_np	= iscsit_setup_np,
667 	.iscsit_accept_np	= iscsit_accept_np,
668 	.iscsit_free_np		= iscsit_free_np,
669 	.iscsit_get_login_rx	= iscsit_get_login_rx,
670 	.iscsit_put_login_tx	= iscsit_put_login_tx,
671 	.iscsit_get_dataout	= iscsit_build_r2ts_for_cmd,
672 	.iscsit_immediate_queue	= iscsit_immediate_queue,
673 	.iscsit_response_queue	= iscsit_response_queue,
674 	.iscsit_queue_data_in	= iscsit_queue_rsp,
675 	.iscsit_queue_status	= iscsit_queue_rsp,
676 	.iscsit_aborted_task	= iscsit_aborted_task,
677 	.iscsit_xmit_pdu	= iscsit_xmit_pdu,
678 	.iscsit_get_rx_pdu	= iscsit_get_rx_pdu,
679 	.iscsit_get_sup_prot_ops = iscsit_get_sup_prot_ops,
680 };
681 
682 static int __init iscsi_target_init_module(void)
683 {
684 	int ret = 0, size;
685 
686 	pr_debug("iSCSI-Target "ISCSIT_VERSION"\n");
687 	iscsit_global = kzalloc(sizeof(*iscsit_global), GFP_KERNEL);
688 	if (!iscsit_global)
689 		return -1;
690 
691 	spin_lock_init(&iscsit_global->ts_bitmap_lock);
692 	mutex_init(&auth_id_lock);
693 	idr_init(&tiqn_idr);
694 
695 	ret = target_register_template(&iscsi_ops);
696 	if (ret)
697 		goto out;
698 
699 	size = BITS_TO_LONGS(ISCSIT_BITMAP_BITS) * sizeof(long);
700 	iscsit_global->ts_bitmap = vzalloc(size);
701 	if (!iscsit_global->ts_bitmap)
702 		goto configfs_out;
703 
704 	if (!zalloc_cpumask_var(&iscsit_global->allowed_cpumask, GFP_KERNEL)) {
705 		pr_err("Unable to allocate iscsit_global->allowed_cpumask\n");
706 		goto bitmap_out;
707 	}
708 	cpumask_setall(iscsit_global->allowed_cpumask);
709 
710 	lio_qr_cache = kmem_cache_create("lio_qr_cache",
711 			sizeof(struct iscsi_queue_req),
712 			__alignof__(struct iscsi_queue_req), 0, NULL);
713 	if (!lio_qr_cache) {
714 		pr_err("Unable to kmem_cache_create() for"
715 				" lio_qr_cache\n");
716 		goto cpumask_out;
717 	}
718 
719 	lio_dr_cache = kmem_cache_create("lio_dr_cache",
720 			sizeof(struct iscsi_datain_req),
721 			__alignof__(struct iscsi_datain_req), 0, NULL);
722 	if (!lio_dr_cache) {
723 		pr_err("Unable to kmem_cache_create() for"
724 				" lio_dr_cache\n");
725 		goto qr_out;
726 	}
727 
728 	lio_ooo_cache = kmem_cache_create("lio_ooo_cache",
729 			sizeof(struct iscsi_ooo_cmdsn),
730 			__alignof__(struct iscsi_ooo_cmdsn), 0, NULL);
731 	if (!lio_ooo_cache) {
732 		pr_err("Unable to kmem_cache_create() for"
733 				" lio_ooo_cache\n");
734 		goto dr_out;
735 	}
736 
737 	lio_r2t_cache = kmem_cache_create("lio_r2t_cache",
738 			sizeof(struct iscsi_r2t), __alignof__(struct iscsi_r2t),
739 			0, NULL);
740 	if (!lio_r2t_cache) {
741 		pr_err("Unable to kmem_cache_create() for"
742 				" lio_r2t_cache\n");
743 		goto ooo_out;
744 	}
745 
746 	iscsit_register_transport(&iscsi_target_transport);
747 
748 	if (iscsit_load_discovery_tpg() < 0)
749 		goto r2t_out;
750 
751 	return ret;
752 r2t_out:
753 	iscsit_unregister_transport(&iscsi_target_transport);
754 	kmem_cache_destroy(lio_r2t_cache);
755 ooo_out:
756 	kmem_cache_destroy(lio_ooo_cache);
757 dr_out:
758 	kmem_cache_destroy(lio_dr_cache);
759 qr_out:
760 	kmem_cache_destroy(lio_qr_cache);
761 cpumask_out:
762 	free_cpumask_var(iscsit_global->allowed_cpumask);
763 bitmap_out:
764 	vfree(iscsit_global->ts_bitmap);
765 configfs_out:
766 	/* XXX: this probably wants it to be it's own unwind step.. */
767 	if (iscsit_global->discovery_tpg)
768 		iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1);
769 	target_unregister_template(&iscsi_ops);
770 out:
771 	kfree(iscsit_global);
772 	return -ENOMEM;
773 }
774 
775 static void __exit iscsi_target_cleanup_module(void)
776 {
777 	iscsit_release_discovery_tpg();
778 	iscsit_unregister_transport(&iscsi_target_transport);
779 	kmem_cache_destroy(lio_qr_cache);
780 	kmem_cache_destroy(lio_dr_cache);
781 	kmem_cache_destroy(lio_ooo_cache);
782 	kmem_cache_destroy(lio_r2t_cache);
783 
784 	/*
785 	 * Shutdown discovery sessions and disable discovery TPG
786 	 */
787 	if (iscsit_global->discovery_tpg)
788 		iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1);
789 
790 	target_unregister_template(&iscsi_ops);
791 
792 	free_cpumask_var(iscsit_global->allowed_cpumask);
793 	vfree(iscsit_global->ts_bitmap);
794 	kfree(iscsit_global);
795 }
796 
797 int iscsit_add_reject(
798 	struct iscsit_conn *conn,
799 	u8 reason,
800 	unsigned char *buf)
801 {
802 	struct iscsit_cmd *cmd;
803 
804 	cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
805 	if (!cmd)
806 		return -1;
807 
808 	cmd->iscsi_opcode = ISCSI_OP_REJECT;
809 	cmd->reject_reason = reason;
810 
811 	cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
812 	if (!cmd->buf_ptr) {
813 		pr_err("Unable to allocate memory for cmd->buf_ptr\n");
814 		iscsit_free_cmd(cmd, false);
815 		return -1;
816 	}
817 
818 	spin_lock_bh(&conn->cmd_lock);
819 	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
820 	spin_unlock_bh(&conn->cmd_lock);
821 
822 	cmd->i_state = ISTATE_SEND_REJECT;
823 	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
824 
825 	return -1;
826 }
827 EXPORT_SYMBOL(iscsit_add_reject);
828 
829 static int iscsit_add_reject_from_cmd(
830 	struct iscsit_cmd *cmd,
831 	u8 reason,
832 	bool add_to_conn,
833 	unsigned char *buf)
834 {
835 	struct iscsit_conn *conn;
836 	const bool do_put = cmd->se_cmd.se_tfo != NULL;
837 
838 	if (!cmd->conn) {
839 		pr_err("cmd->conn is NULL for ITT: 0x%08x\n",
840 				cmd->init_task_tag);
841 		return -1;
842 	}
843 	conn = cmd->conn;
844 
845 	cmd->iscsi_opcode = ISCSI_OP_REJECT;
846 	cmd->reject_reason = reason;
847 
848 	cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
849 	if (!cmd->buf_ptr) {
850 		pr_err("Unable to allocate memory for cmd->buf_ptr\n");
851 		iscsit_free_cmd(cmd, false);
852 		return -1;
853 	}
854 
855 	if (add_to_conn) {
856 		spin_lock_bh(&conn->cmd_lock);
857 		list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
858 		spin_unlock_bh(&conn->cmd_lock);
859 	}
860 
861 	cmd->i_state = ISTATE_SEND_REJECT;
862 	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
863 	/*
864 	 * Perform the kref_put now if se_cmd has already been setup by
865 	 * scsit_setup_scsi_cmd()
866 	 */
867 	if (do_put) {
868 		pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n");
869 		target_put_sess_cmd(&cmd->se_cmd);
870 	}
871 	return -1;
872 }
873 
874 static int iscsit_add_reject_cmd(struct iscsit_cmd *cmd, u8 reason,
875 				 unsigned char *buf)
876 {
877 	return iscsit_add_reject_from_cmd(cmd, reason, true, buf);
878 }
879 
880 int iscsit_reject_cmd(struct iscsit_cmd *cmd, u8 reason, unsigned char *buf)
881 {
882 	return iscsit_add_reject_from_cmd(cmd, reason, false, buf);
883 }
884 EXPORT_SYMBOL(iscsit_reject_cmd);
885 
886 /*
887  * Map some portion of the allocated scatterlist to an iovec, suitable for
888  * kernel sockets to copy data in/out.
889  */
890 static int iscsit_map_iovec(struct iscsit_cmd *cmd, struct kvec *iov, int nvec,
891 			    u32 data_offset, u32 data_length)
892 {
893 	u32 i = 0, orig_data_length = data_length;
894 	struct scatterlist *sg;
895 	unsigned int page_off;
896 
897 	/*
898 	 * We know each entry in t_data_sg contains a page.
899 	 */
900 	u32 ent = data_offset / PAGE_SIZE;
901 
902 	if (!data_length)
903 		return 0;
904 
905 	if (ent >= cmd->se_cmd.t_data_nents) {
906 		pr_err("Initial page entry out-of-bounds\n");
907 		goto overflow;
908 	}
909 
910 	sg = &cmd->se_cmd.t_data_sg[ent];
911 	page_off = (data_offset % PAGE_SIZE);
912 
913 	cmd->first_data_sg = sg;
914 	cmd->first_data_sg_off = page_off;
915 
916 	while (data_length) {
917 		u32 cur_len;
918 
919 		if (WARN_ON_ONCE(!sg || i >= nvec))
920 			goto overflow;
921 
922 		cur_len = min_t(u32, data_length, sg->length - page_off);
923 
924 		iov[i].iov_base = kmap(sg_page(sg)) + sg->offset + page_off;
925 		iov[i].iov_len = cur_len;
926 
927 		data_length -= cur_len;
928 		page_off = 0;
929 		sg = sg_next(sg);
930 		i++;
931 	}
932 
933 	cmd->kmapped_nents = i;
934 
935 	return i;
936 
937 overflow:
938 	pr_err("offset %d + length %d overflow; %d/%d; sg-list:\n",
939 	       data_offset, orig_data_length, i, nvec);
940 	for_each_sg(cmd->se_cmd.t_data_sg, sg,
941 		    cmd->se_cmd.t_data_nents, i) {
942 		pr_err("[%d] off %d len %d\n",
943 		       i, sg->offset, sg->length);
944 	}
945 	return -1;
946 }
947 
948 static void iscsit_unmap_iovec(struct iscsit_cmd *cmd)
949 {
950 	u32 i;
951 	struct scatterlist *sg;
952 
953 	sg = cmd->first_data_sg;
954 
955 	for (i = 0; i < cmd->kmapped_nents; i++)
956 		kunmap(sg_page(&sg[i]));
957 }
958 
959 static void iscsit_ack_from_expstatsn(struct iscsit_conn *conn, u32 exp_statsn)
960 {
961 	LIST_HEAD(ack_list);
962 	struct iscsit_cmd *cmd, *cmd_p;
963 
964 	conn->exp_statsn = exp_statsn;
965 
966 	if (conn->sess->sess_ops->RDMAExtensions)
967 		return;
968 
969 	spin_lock_bh(&conn->cmd_lock);
970 	list_for_each_entry_safe(cmd, cmd_p, &conn->conn_cmd_list, i_conn_node) {
971 		spin_lock(&cmd->istate_lock);
972 		if ((cmd->i_state == ISTATE_SENT_STATUS) &&
973 		    iscsi_sna_lt(cmd->stat_sn, exp_statsn)) {
974 			cmd->i_state = ISTATE_REMOVE;
975 			spin_unlock(&cmd->istate_lock);
976 			list_move_tail(&cmd->i_conn_node, &ack_list);
977 			continue;
978 		}
979 		spin_unlock(&cmd->istate_lock);
980 	}
981 	spin_unlock_bh(&conn->cmd_lock);
982 
983 	list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) {
984 		list_del_init(&cmd->i_conn_node);
985 		iscsit_free_cmd(cmd, false);
986 	}
987 }
988 
989 static int iscsit_allocate_iovecs(struct iscsit_cmd *cmd)
990 {
991 	u32 iov_count = max(1UL, DIV_ROUND_UP(cmd->se_cmd.data_length, PAGE_SIZE));
992 
993 	iov_count += ISCSI_IOV_DATA_BUFFER;
994 	cmd->iov_data = kcalloc(iov_count, sizeof(*cmd->iov_data), GFP_KERNEL);
995 	if (!cmd->iov_data)
996 		return -ENOMEM;
997 
998 	cmd->orig_iov_data_count = iov_count;
999 	return 0;
1000 }
1001 
1002 int iscsit_setup_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
1003 			  unsigned char *buf)
1004 {
1005 	int data_direction, payload_length;
1006 	struct iscsi_ecdb_ahdr *ecdb_ahdr;
1007 	struct iscsi_scsi_req *hdr;
1008 	int iscsi_task_attr;
1009 	unsigned char *cdb;
1010 	int sam_task_attr;
1011 
1012 	atomic_long_inc(&conn->sess->cmd_pdus);
1013 
1014 	hdr			= (struct iscsi_scsi_req *) buf;
1015 	payload_length		= ntoh24(hdr->dlength);
1016 
1017 	/* FIXME; Add checks for AdditionalHeaderSegment */
1018 
1019 	if (!(hdr->flags & ISCSI_FLAG_CMD_WRITE) &&
1020 	    !(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
1021 		pr_err("ISCSI_FLAG_CMD_WRITE & ISCSI_FLAG_CMD_FINAL"
1022 				" not set. Bad iSCSI Initiator.\n");
1023 		return iscsit_add_reject_cmd(cmd,
1024 					     ISCSI_REASON_BOOKMARK_INVALID, buf);
1025 	}
1026 
1027 	if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
1028 	     (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) {
1029 		/*
1030 		 * From RFC-3720 Section 10.3.1:
1031 		 *
1032 		 * "Either or both of R and W MAY be 1 when either the
1033 		 *  Expected Data Transfer Length and/or Bidirectional Read
1034 		 *  Expected Data Transfer Length are 0"
1035 		 *
1036 		 * For this case, go ahead and clear the unnecssary bits
1037 		 * to avoid any confusion with ->data_direction.
1038 		 */
1039 		hdr->flags &= ~ISCSI_FLAG_CMD_READ;
1040 		hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
1041 
1042 		pr_warn("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
1043 			" set when Expected Data Transfer Length is 0 for"
1044 			" CDB: 0x%02x, Fixing up flags\n", hdr->cdb[0]);
1045 	}
1046 
1047 	if (!(hdr->flags & ISCSI_FLAG_CMD_READ) &&
1048 	    !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) {
1049 		pr_err("ISCSI_FLAG_CMD_READ and/or ISCSI_FLAG_CMD_WRITE"
1050 			" MUST be set if Expected Data Transfer Length is not 0."
1051 			" Bad iSCSI Initiator\n");
1052 		return iscsit_add_reject_cmd(cmd,
1053 					     ISCSI_REASON_BOOKMARK_INVALID, buf);
1054 	}
1055 
1056 	if ((hdr->flags & ISCSI_FLAG_CMD_READ) &&
1057 	    (hdr->flags & ISCSI_FLAG_CMD_WRITE)) {
1058 		pr_err("Bidirectional operations not supported!\n");
1059 		return iscsit_add_reject_cmd(cmd,
1060 					     ISCSI_REASON_BOOKMARK_INVALID, buf);
1061 	}
1062 
1063 	if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
1064 		pr_err("Illegally set Immediate Bit in iSCSI Initiator"
1065 				" Scsi Command PDU.\n");
1066 		return iscsit_add_reject_cmd(cmd,
1067 					     ISCSI_REASON_BOOKMARK_INVALID, buf);
1068 	}
1069 
1070 	if (payload_length && !conn->sess->sess_ops->ImmediateData) {
1071 		pr_err("ImmediateData=No but DataSegmentLength=%u,"
1072 			" protocol error.\n", payload_length);
1073 		return iscsit_add_reject_cmd(cmd,
1074 					     ISCSI_REASON_PROTOCOL_ERROR, buf);
1075 	}
1076 
1077 	if ((be32_to_cpu(hdr->data_length) == payload_length) &&
1078 	    (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))) {
1079 		pr_err("Expected Data Transfer Length and Length of"
1080 			" Immediate Data are the same, but ISCSI_FLAG_CMD_FINAL"
1081 			" bit is not set protocol error\n");
1082 		return iscsit_add_reject_cmd(cmd,
1083 					     ISCSI_REASON_PROTOCOL_ERROR, buf);
1084 	}
1085 
1086 	if (payload_length > be32_to_cpu(hdr->data_length)) {
1087 		pr_err("DataSegmentLength: %u is greater than"
1088 			" EDTL: %u, protocol error.\n", payload_length,
1089 				hdr->data_length);
1090 		return iscsit_add_reject_cmd(cmd,
1091 					     ISCSI_REASON_PROTOCOL_ERROR, buf);
1092 	}
1093 
1094 	if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
1095 		pr_err("DataSegmentLength: %u is greater than"
1096 			" MaxXmitDataSegmentLength: %u, protocol error.\n",
1097 			payload_length, conn->conn_ops->MaxXmitDataSegmentLength);
1098 		return iscsit_add_reject_cmd(cmd,
1099 					     ISCSI_REASON_PROTOCOL_ERROR, buf);
1100 	}
1101 
1102 	if (payload_length > conn->sess->sess_ops->FirstBurstLength) {
1103 		pr_err("DataSegmentLength: %u is greater than"
1104 			" FirstBurstLength: %u, protocol error.\n",
1105 			payload_length, conn->sess->sess_ops->FirstBurstLength);
1106 		return iscsit_add_reject_cmd(cmd,
1107 					     ISCSI_REASON_BOOKMARK_INVALID, buf);
1108 	}
1109 
1110 	cdb = hdr->cdb;
1111 
1112 	if (hdr->hlength) {
1113 		ecdb_ahdr = (struct iscsi_ecdb_ahdr *) (hdr + 1);
1114 		if (ecdb_ahdr->ahstype != ISCSI_AHSTYPE_CDB) {
1115 			pr_err("Additional Header Segment type %d not supported!\n",
1116 			       ecdb_ahdr->ahstype);
1117 			return iscsit_add_reject_cmd(cmd,
1118 				ISCSI_REASON_CMD_NOT_SUPPORTED, buf);
1119 		}
1120 
1121 		cdb = kmalloc(be16_to_cpu(ecdb_ahdr->ahslength) + 15,
1122 			      GFP_KERNEL);
1123 		if (cdb == NULL)
1124 			return iscsit_add_reject_cmd(cmd,
1125 				ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
1126 		memcpy(cdb, hdr->cdb, ISCSI_CDB_SIZE);
1127 		memcpy(cdb + ISCSI_CDB_SIZE, ecdb_ahdr->ecdb,
1128 		       be16_to_cpu(ecdb_ahdr->ahslength) - 1);
1129 	}
1130 
1131 	data_direction = (hdr->flags & ISCSI_FLAG_CMD_WRITE) ? DMA_TO_DEVICE :
1132 			 (hdr->flags & ISCSI_FLAG_CMD_READ) ? DMA_FROM_DEVICE :
1133 			  DMA_NONE;
1134 
1135 	cmd->data_direction = data_direction;
1136 	iscsi_task_attr = hdr->flags & ISCSI_FLAG_CMD_ATTR_MASK;
1137 	/*
1138 	 * Figure out the SAM Task Attribute for the incoming SCSI CDB
1139 	 */
1140 	if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) ||
1141 	    (iscsi_task_attr == ISCSI_ATTR_SIMPLE))
1142 		sam_task_attr = TCM_SIMPLE_TAG;
1143 	else if (iscsi_task_attr == ISCSI_ATTR_ORDERED)
1144 		sam_task_attr = TCM_ORDERED_TAG;
1145 	else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE)
1146 		sam_task_attr = TCM_HEAD_TAG;
1147 	else if (iscsi_task_attr == ISCSI_ATTR_ACA)
1148 		sam_task_attr = TCM_ACA_TAG;
1149 	else {
1150 		pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using"
1151 			" TCM_SIMPLE_TAG\n", iscsi_task_attr);
1152 		sam_task_attr = TCM_SIMPLE_TAG;
1153 	}
1154 
1155 	cmd->iscsi_opcode	= ISCSI_OP_SCSI_CMD;
1156 	cmd->i_state		= ISTATE_NEW_CMD;
1157 	cmd->immediate_cmd	= ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
1158 	cmd->immediate_data	= (payload_length) ? 1 : 0;
1159 	cmd->unsolicited_data	= ((!(hdr->flags & ISCSI_FLAG_CMD_FINAL) &&
1160 				     (hdr->flags & ISCSI_FLAG_CMD_WRITE)) ? 1 : 0);
1161 	if (cmd->unsolicited_data)
1162 		cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA;
1163 
1164 	conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
1165 	if (hdr->flags & ISCSI_FLAG_CMD_READ)
1166 		cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
1167 	else
1168 		cmd->targ_xfer_tag = 0xFFFFFFFF;
1169 	cmd->cmd_sn		= be32_to_cpu(hdr->cmdsn);
1170 	cmd->exp_stat_sn	= be32_to_cpu(hdr->exp_statsn);
1171 	cmd->first_burst_len	= payload_length;
1172 
1173 	if (!conn->sess->sess_ops->RDMAExtensions &&
1174 	     cmd->data_direction == DMA_FROM_DEVICE) {
1175 		struct iscsi_datain_req *dr;
1176 
1177 		dr = iscsit_allocate_datain_req();
1178 		if (!dr) {
1179 			if (cdb != hdr->cdb)
1180 				kfree(cdb);
1181 			return iscsit_add_reject_cmd(cmd,
1182 					ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
1183 		}
1184 
1185 		iscsit_attach_datain_req(cmd, dr);
1186 	}
1187 
1188 	/*
1189 	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
1190 	 */
1191 	__target_init_cmd(&cmd->se_cmd, &iscsi_ops,
1192 			  conn->sess->se_sess, be32_to_cpu(hdr->data_length),
1193 			  cmd->data_direction, sam_task_attr,
1194 			  cmd->sense_buffer + 2, scsilun_to_int(&hdr->lun),
1195 			  conn->cmd_cnt);
1196 
1197 	pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x,"
1198 		" ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt,
1199 		hdr->cmdsn, be32_to_cpu(hdr->data_length), payload_length,
1200 		conn->cid);
1201 
1202 	target_get_sess_cmd(&cmd->se_cmd, true);
1203 
1204 	cmd->se_cmd.tag = (__force u32)cmd->init_task_tag;
1205 	cmd->sense_reason = target_cmd_init_cdb(&cmd->se_cmd, cdb,
1206 						GFP_KERNEL);
1207 
1208 	if (cdb != hdr->cdb)
1209 		kfree(cdb);
1210 
1211 	if (cmd->sense_reason) {
1212 		if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) {
1213 			return iscsit_add_reject_cmd(cmd,
1214 				ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
1215 		}
1216 
1217 		goto attach_cmd;
1218 	}
1219 
1220 	cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd);
1221 	if (cmd->sense_reason)
1222 		goto attach_cmd;
1223 
1224 	cmd->sense_reason = target_cmd_parse_cdb(&cmd->se_cmd);
1225 	if (cmd->sense_reason)
1226 		goto attach_cmd;
1227 
1228 	if (iscsit_build_pdu_and_seq_lists(cmd, payload_length) < 0) {
1229 		return iscsit_add_reject_cmd(cmd,
1230 				ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
1231 	}
1232 
1233 attach_cmd:
1234 	spin_lock_bh(&conn->cmd_lock);
1235 	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
1236 	spin_unlock_bh(&conn->cmd_lock);
1237 	/*
1238 	 * Check if we need to delay processing because of ALUA
1239 	 * Active/NonOptimized primary access state..
1240 	 */
1241 	core_alua_check_nonop_delay(&cmd->se_cmd);
1242 
1243 	return 0;
1244 }
1245 EXPORT_SYMBOL(iscsit_setup_scsi_cmd);
1246 
1247 void iscsit_set_unsolicited_dataout(struct iscsit_cmd *cmd)
1248 {
1249 	iscsit_set_dataout_sequence_values(cmd);
1250 
1251 	spin_lock_bh(&cmd->dataout_timeout_lock);
1252 	iscsit_start_dataout_timer(cmd, cmd->conn);
1253 	spin_unlock_bh(&cmd->dataout_timeout_lock);
1254 }
1255 EXPORT_SYMBOL(iscsit_set_unsolicited_dataout);
1256 
1257 int iscsit_process_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
1258 			    struct iscsi_scsi_req *hdr)
1259 {
1260 	int cmdsn_ret = 0;
1261 	/*
1262 	 * Check the CmdSN against ExpCmdSN/MaxCmdSN here if
1263 	 * the Immediate Bit is not set, and no Immediate
1264 	 * Data is attached.
1265 	 *
1266 	 * A PDU/CmdSN carrying Immediate Data can only
1267 	 * be processed after the DataCRC has passed.
1268 	 * If the DataCRC fails, the CmdSN MUST NOT
1269 	 * be acknowledged. (See below)
1270 	 */
1271 	if (!cmd->immediate_data) {
1272 		cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
1273 					(unsigned char *)hdr, hdr->cmdsn);
1274 		if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
1275 			return -1;
1276 		else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
1277 			target_put_sess_cmd(&cmd->se_cmd);
1278 			return 0;
1279 		}
1280 	}
1281 
1282 	iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
1283 
1284 	/*
1285 	 * If no Immediate Data is attached, it's OK to return now.
1286 	 */
1287 	if (!cmd->immediate_data) {
1288 		if (!cmd->sense_reason && cmd->unsolicited_data)
1289 			iscsit_set_unsolicited_dataout(cmd);
1290 		if (!cmd->sense_reason)
1291 			return 0;
1292 
1293 		target_put_sess_cmd(&cmd->se_cmd);
1294 		return 0;
1295 	}
1296 
1297 	/*
1298 	 * Early CHECK_CONDITIONs with ImmediateData never make it to command
1299 	 * execution.  These exceptions are processed in CmdSN order using
1300 	 * iscsit_check_received_cmdsn() in iscsit_get_immediate_data() below.
1301 	 */
1302 	if (cmd->sense_reason)
1303 		return 1;
1304 	/*
1305 	 * Call directly into transport_generic_new_cmd() to perform
1306 	 * the backend memory allocation.
1307 	 */
1308 	cmd->sense_reason = transport_generic_new_cmd(&cmd->se_cmd);
1309 	if (cmd->sense_reason)
1310 		return 1;
1311 
1312 	return 0;
1313 }
1314 EXPORT_SYMBOL(iscsit_process_scsi_cmd);
1315 
1316 static int
1317 iscsit_get_immediate_data(struct iscsit_cmd *cmd, struct iscsi_scsi_req *hdr,
1318 			  bool dump_payload)
1319 {
1320 	int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
1321 	int rc;
1322 
1323 	/*
1324 	 * Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes.
1325 	 */
1326 	if (dump_payload) {
1327 		u32 length = min(cmd->se_cmd.data_length - cmd->write_data_done,
1328 				 cmd->first_burst_len);
1329 
1330 		pr_debug("Dumping min(%d - %d, %d) = %d bytes of immediate data\n",
1331 			 cmd->se_cmd.data_length, cmd->write_data_done,
1332 			 cmd->first_burst_len, length);
1333 		rc = iscsit_dump_data_payload(cmd->conn, length, 1);
1334 		pr_debug("Finished dumping immediate data\n");
1335 		if (rc < 0)
1336 			immed_ret = IMMEDIATE_DATA_CANNOT_RECOVER;
1337 	} else {
1338 		immed_ret = iscsit_handle_immediate_data(cmd, hdr,
1339 							 cmd->first_burst_len);
1340 	}
1341 
1342 	if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) {
1343 		/*
1344 		 * A PDU/CmdSN carrying Immediate Data passed
1345 		 * DataCRC, check against ExpCmdSN/MaxCmdSN if
1346 		 * Immediate Bit is not set.
1347 		 */
1348 		cmdsn_ret = iscsit_sequence_cmd(cmd->conn, cmd,
1349 					(unsigned char *)hdr, hdr->cmdsn);
1350 		if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
1351 			return -1;
1352 
1353 		if (cmd->sense_reason || cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
1354 			target_put_sess_cmd(&cmd->se_cmd);
1355 
1356 			return 0;
1357 		} else if (cmd->unsolicited_data)
1358 			iscsit_set_unsolicited_dataout(cmd);
1359 
1360 	} else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) {
1361 		/*
1362 		 * Immediate Data failed DataCRC and ERL>=1,
1363 		 * silently drop this PDU and let the initiator
1364 		 * plug the CmdSN gap.
1365 		 *
1366 		 * FIXME: Send Unsolicited NOPIN with reserved
1367 		 * TTT here to help the initiator figure out
1368 		 * the missing CmdSN, although they should be
1369 		 * intelligent enough to determine the missing
1370 		 * CmdSN and issue a retry to plug the sequence.
1371 		 */
1372 		cmd->i_state = ISTATE_REMOVE;
1373 		iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, cmd->i_state);
1374 	} else /* immed_ret == IMMEDIATE_DATA_CANNOT_RECOVER */
1375 		return -1;
1376 
1377 	return 0;
1378 }
1379 
1380 static int
1381 iscsit_handle_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
1382 			   unsigned char *buf)
1383 {
1384 	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
1385 	int rc, immed_data;
1386 	bool dump_payload = false;
1387 
1388 	rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
1389 	if (rc < 0)
1390 		return 0;
1391 	/*
1392 	 * Allocation iovecs needed for struct socket operations for
1393 	 * traditional iSCSI block I/O.
1394 	 */
1395 	if (iscsit_allocate_iovecs(cmd) < 0) {
1396 		return iscsit_reject_cmd(cmd,
1397 				ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
1398 	}
1399 	immed_data = cmd->immediate_data;
1400 
1401 	rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
1402 	if (rc < 0)
1403 		return rc;
1404 	else if (rc > 0)
1405 		dump_payload = true;
1406 
1407 	if (!immed_data)
1408 		return 0;
1409 
1410 	return iscsit_get_immediate_data(cmd, hdr, dump_payload);
1411 }
1412 
1413 static u32 iscsit_do_crypto_hash_sg(
1414 	struct ahash_request *hash,
1415 	struct iscsit_cmd *cmd,
1416 	u32 data_offset,
1417 	u32 data_length,
1418 	u32 padding,
1419 	u8 *pad_bytes)
1420 {
1421 	u32 data_crc;
1422 	struct scatterlist *sg;
1423 	unsigned int page_off;
1424 
1425 	crypto_ahash_init(hash);
1426 
1427 	sg = cmd->first_data_sg;
1428 	page_off = cmd->first_data_sg_off;
1429 
1430 	if (data_length && page_off) {
1431 		struct scatterlist first_sg;
1432 		u32 len = min_t(u32, data_length, sg->length - page_off);
1433 
1434 		sg_init_table(&first_sg, 1);
1435 		sg_set_page(&first_sg, sg_page(sg), len, sg->offset + page_off);
1436 
1437 		ahash_request_set_crypt(hash, &first_sg, NULL, len);
1438 		crypto_ahash_update(hash);
1439 
1440 		data_length -= len;
1441 		sg = sg_next(sg);
1442 	}
1443 
1444 	while (data_length) {
1445 		u32 cur_len = min_t(u32, data_length, sg->length);
1446 
1447 		ahash_request_set_crypt(hash, sg, NULL, cur_len);
1448 		crypto_ahash_update(hash);
1449 
1450 		data_length -= cur_len;
1451 		/* iscsit_map_iovec has already checked for invalid sg pointers */
1452 		sg = sg_next(sg);
1453 	}
1454 
1455 	if (padding) {
1456 		struct scatterlist pad_sg;
1457 
1458 		sg_init_one(&pad_sg, pad_bytes, padding);
1459 		ahash_request_set_crypt(hash, &pad_sg, (u8 *)&data_crc,
1460 					padding);
1461 		crypto_ahash_finup(hash);
1462 	} else {
1463 		ahash_request_set_crypt(hash, NULL, (u8 *)&data_crc, 0);
1464 		crypto_ahash_final(hash);
1465 	}
1466 
1467 	return data_crc;
1468 }
1469 
1470 static void iscsit_do_crypto_hash_buf(struct ahash_request *hash,
1471 	const void *buf, u32 payload_length, u32 padding,
1472 	const void *pad_bytes, void *data_crc)
1473 {
1474 	struct scatterlist sg[2];
1475 
1476 	sg_init_table(sg, ARRAY_SIZE(sg));
1477 	sg_set_buf(sg, buf, payload_length);
1478 	if (padding)
1479 		sg_set_buf(sg + 1, pad_bytes, padding);
1480 
1481 	ahash_request_set_crypt(hash, sg, data_crc, payload_length + padding);
1482 
1483 	crypto_ahash_digest(hash);
1484 }
1485 
1486 int
1487 __iscsit_check_dataout_hdr(struct iscsit_conn *conn, void *buf,
1488 			   struct iscsit_cmd *cmd, u32 payload_length,
1489 			   bool *success)
1490 {
1491 	struct iscsi_data *hdr = buf;
1492 	struct se_cmd *se_cmd;
1493 	int rc;
1494 
1495 	/* iSCSI write */
1496 	atomic_long_add(payload_length, &conn->sess->rx_data_octets);
1497 
1498 	pr_debug("Got DataOut ITT: 0x%08x, TTT: 0x%08x,"
1499 		" DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
1500 		hdr->itt, hdr->ttt, hdr->datasn, ntohl(hdr->offset),
1501 		payload_length, conn->cid);
1502 
1503 	if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
1504 		pr_err("Command ITT: 0x%08x received DataOUT after"
1505 			" last DataOUT received, dumping payload\n",
1506 			cmd->init_task_tag);
1507 		return iscsit_dump_data_payload(conn, payload_length, 1);
1508 	}
1509 
1510 	if (cmd->data_direction != DMA_TO_DEVICE) {
1511 		pr_err("Command ITT: 0x%08x received DataOUT for a"
1512 			" NON-WRITE command.\n", cmd->init_task_tag);
1513 		return iscsit_dump_data_payload(conn, payload_length, 1);
1514 	}
1515 	se_cmd = &cmd->se_cmd;
1516 	iscsit_mod_dataout_timer(cmd);
1517 
1518 	if ((be32_to_cpu(hdr->offset) + payload_length) > cmd->se_cmd.data_length) {
1519 		pr_err("DataOut Offset: %u, Length %u greater than iSCSI Command EDTL %u, protocol error.\n",
1520 		       be32_to_cpu(hdr->offset), payload_length,
1521 		       cmd->se_cmd.data_length);
1522 		return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_INVALID, buf);
1523 	}
1524 
1525 	if (cmd->unsolicited_data) {
1526 		int dump_unsolicited_data = 0;
1527 
1528 		if (conn->sess->sess_ops->InitialR2T) {
1529 			pr_err("Received unexpected unsolicited data"
1530 				" while InitialR2T=Yes, protocol error.\n");
1531 			transport_send_check_condition_and_sense(&cmd->se_cmd,
1532 					TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
1533 			return -1;
1534 		}
1535 		/*
1536 		 * Special case for dealing with Unsolicited DataOUT
1537 		 * and Unsupported SAM WRITE Opcodes and SE resource allocation
1538 		 * failures;
1539 		 */
1540 
1541 		/* Something's amiss if we're not in WRITE_PENDING state... */
1542 		WARN_ON(se_cmd->t_state != TRANSPORT_WRITE_PENDING);
1543 		if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE))
1544 			dump_unsolicited_data = 1;
1545 
1546 		if (dump_unsolicited_data) {
1547 			/*
1548 			 * Check if a delayed TASK_ABORTED status needs to
1549 			 * be sent now if the ISCSI_FLAG_CMD_FINAL has been
1550 			 * received with the unsolicited data out.
1551 			 */
1552 			if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
1553 				iscsit_stop_dataout_timer(cmd);
1554 
1555 			return iscsit_dump_data_payload(conn, payload_length, 1);
1556 		}
1557 	} else {
1558 		/*
1559 		 * For the normal solicited data path:
1560 		 *
1561 		 * Check for a delayed TASK_ABORTED status and dump any
1562 		 * incoming data out payload if one exists.  Also, when the
1563 		 * ISCSI_FLAG_CMD_FINAL is set to denote the end of the current
1564 		 * data out sequence, we decrement outstanding_r2ts.  Once
1565 		 * outstanding_r2ts reaches zero, go ahead and send the delayed
1566 		 * TASK_ABORTED status.
1567 		 */
1568 		if (se_cmd->transport_state & CMD_T_ABORTED) {
1569 			if (hdr->flags & ISCSI_FLAG_CMD_FINAL &&
1570 			    --cmd->outstanding_r2ts < 1)
1571 				iscsit_stop_dataout_timer(cmd);
1572 
1573 			return iscsit_dump_data_payload(conn, payload_length, 1);
1574 		}
1575 	}
1576 	/*
1577 	 * Perform DataSN, DataSequenceInOrder, DataPDUInOrder, and
1578 	 * within-command recovery checks before receiving the payload.
1579 	 */
1580 	rc = iscsit_check_pre_dataout(cmd, buf);
1581 	if (rc == DATAOUT_WITHIN_COMMAND_RECOVERY)
1582 		return 0;
1583 	else if (rc == DATAOUT_CANNOT_RECOVER)
1584 		return -1;
1585 	*success = true;
1586 	return 0;
1587 }
1588 EXPORT_SYMBOL(__iscsit_check_dataout_hdr);
1589 
1590 int
1591 iscsit_check_dataout_hdr(struct iscsit_conn *conn, void *buf,
1592 			 struct iscsit_cmd **out_cmd)
1593 {
1594 	struct iscsi_data *hdr = buf;
1595 	struct iscsit_cmd *cmd;
1596 	u32 payload_length = ntoh24(hdr->dlength);
1597 	int rc;
1598 	bool success = false;
1599 
1600 	if (!payload_length) {
1601 		pr_warn_ratelimited("DataOUT payload is ZERO, ignoring.\n");
1602 		return 0;
1603 	}
1604 
1605 	if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
1606 		pr_err_ratelimited("DataSegmentLength: %u is greater than"
1607 			" MaxXmitDataSegmentLength: %u\n", payload_length,
1608 			conn->conn_ops->MaxXmitDataSegmentLength);
1609 		return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, buf);
1610 	}
1611 
1612 	cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt, payload_length);
1613 	if (!cmd)
1614 		return 0;
1615 
1616 	rc = __iscsit_check_dataout_hdr(conn, buf, cmd, payload_length, &success);
1617 
1618 	if (success)
1619 		*out_cmd = cmd;
1620 
1621 	return rc;
1622 }
1623 EXPORT_SYMBOL(iscsit_check_dataout_hdr);
1624 
1625 static int
1626 iscsit_get_dataout(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
1627 		   struct iscsi_data *hdr)
1628 {
1629 	struct kvec *iov;
1630 	u32 checksum, iov_count = 0, padding = 0, rx_got = 0, rx_size = 0;
1631 	u32 payload_length;
1632 	int iov_ret, data_crc_failed = 0;
1633 
1634 	payload_length = min_t(u32, cmd->se_cmd.data_length,
1635 			       ntoh24(hdr->dlength));
1636 	rx_size += payload_length;
1637 	iov = &cmd->iov_data[0];
1638 
1639 	iov_ret = iscsit_map_iovec(cmd, iov, cmd->orig_iov_data_count - 2,
1640 				   be32_to_cpu(hdr->offset), payload_length);
1641 	if (iov_ret < 0)
1642 		return -1;
1643 
1644 	iov_count += iov_ret;
1645 
1646 	padding = ((-payload_length) & 3);
1647 	if (padding != 0) {
1648 		iov[iov_count].iov_base	= cmd->pad_bytes;
1649 		iov[iov_count++].iov_len = padding;
1650 		rx_size += padding;
1651 		pr_debug("Receiving %u padding bytes.\n", padding);
1652 	}
1653 
1654 	if (conn->conn_ops->DataDigest) {
1655 		iov[iov_count].iov_base = &checksum;
1656 		iov[iov_count++].iov_len = ISCSI_CRC_LEN;
1657 		rx_size += ISCSI_CRC_LEN;
1658 	}
1659 
1660 	WARN_ON_ONCE(iov_count > cmd->orig_iov_data_count);
1661 	rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size);
1662 
1663 	iscsit_unmap_iovec(cmd);
1664 
1665 	if (rx_got != rx_size)
1666 		return -1;
1667 
1668 	if (conn->conn_ops->DataDigest) {
1669 		u32 data_crc;
1670 
1671 		data_crc = iscsit_do_crypto_hash_sg(conn->conn_rx_hash, cmd,
1672 						    be32_to_cpu(hdr->offset),
1673 						    payload_length, padding,
1674 						    cmd->pad_bytes);
1675 
1676 		if (checksum != data_crc) {
1677 			pr_err("ITT: 0x%08x, Offset: %u, Length: %u,"
1678 				" DataSN: 0x%08x, CRC32C DataDigest 0x%08x"
1679 				" does not match computed 0x%08x\n",
1680 				hdr->itt, hdr->offset, payload_length,
1681 				hdr->datasn, checksum, data_crc);
1682 			data_crc_failed = 1;
1683 		} else {
1684 			pr_debug("Got CRC32C DataDigest 0x%08x for"
1685 				" %u bytes of Data Out\n", checksum,
1686 				payload_length);
1687 		}
1688 	}
1689 
1690 	return data_crc_failed;
1691 }
1692 
1693 int
1694 iscsit_check_dataout_payload(struct iscsit_cmd *cmd, struct iscsi_data *hdr,
1695 			     bool data_crc_failed)
1696 {
1697 	struct iscsit_conn *conn = cmd->conn;
1698 	int rc, ooo_cmdsn;
1699 	/*
1700 	 * Increment post receive data and CRC values or perform
1701 	 * within-command recovery.
1702 	 */
1703 	rc = iscsit_check_post_dataout(cmd, (unsigned char *)hdr, data_crc_failed);
1704 	if ((rc == DATAOUT_NORMAL) || (rc == DATAOUT_WITHIN_COMMAND_RECOVERY))
1705 		return 0;
1706 	else if (rc == DATAOUT_SEND_R2T) {
1707 		iscsit_set_dataout_sequence_values(cmd);
1708 		conn->conn_transport->iscsit_get_dataout(conn, cmd, false);
1709 	} else if (rc == DATAOUT_SEND_TO_TRANSPORT) {
1710 		/*
1711 		 * Handle extra special case for out of order
1712 		 * Unsolicited Data Out.
1713 		 */
1714 		spin_lock_bh(&cmd->istate_lock);
1715 		ooo_cmdsn = (cmd->cmd_flags & ICF_OOO_CMDSN);
1716 		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
1717 		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
1718 		spin_unlock_bh(&cmd->istate_lock);
1719 
1720 		iscsit_stop_dataout_timer(cmd);
1721 		if (ooo_cmdsn)
1722 			return 0;
1723 		target_execute_cmd(&cmd->se_cmd);
1724 		return 0;
1725 	} else /* DATAOUT_CANNOT_RECOVER */
1726 		return -1;
1727 
1728 	return 0;
1729 }
1730 EXPORT_SYMBOL(iscsit_check_dataout_payload);
1731 
1732 static int iscsit_handle_data_out(struct iscsit_conn *conn, unsigned char *buf)
1733 {
1734 	struct iscsit_cmd *cmd = NULL;
1735 	struct iscsi_data *hdr = (struct iscsi_data *)buf;
1736 	int rc;
1737 	bool data_crc_failed = false;
1738 
1739 	rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
1740 	if (rc < 0)
1741 		return 0;
1742 	else if (!cmd)
1743 		return 0;
1744 
1745 	rc = iscsit_get_dataout(conn, cmd, hdr);
1746 	if (rc < 0)
1747 		return rc;
1748 	else if (rc > 0)
1749 		data_crc_failed = true;
1750 
1751 	return iscsit_check_dataout_payload(cmd, hdr, data_crc_failed);
1752 }
1753 
1754 int iscsit_setup_nop_out(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
1755 			 struct iscsi_nopout *hdr)
1756 {
1757 	u32 payload_length = ntoh24(hdr->dlength);
1758 
1759 	if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
1760 		pr_err("NopOUT Flag's, Left Most Bit not set, protocol error.\n");
1761 		if (!cmd)
1762 			return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
1763 						 (unsigned char *)hdr);
1764 
1765 		return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
1766 					 (unsigned char *)hdr);
1767 	}
1768 
1769 	if (hdr->itt == RESERVED_ITT && !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
1770 		pr_err("NOPOUT ITT is reserved, but Immediate Bit is"
1771 			" not set, protocol error.\n");
1772 		if (!cmd)
1773 			return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
1774 						 (unsigned char *)hdr);
1775 
1776 		return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
1777 					 (unsigned char *)hdr);
1778 	}
1779 
1780 	if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
1781 		pr_err("NOPOUT Ping Data DataSegmentLength: %u is"
1782 			" greater than MaxXmitDataSegmentLength: %u, protocol"
1783 			" error.\n", payload_length,
1784 			conn->conn_ops->MaxXmitDataSegmentLength);
1785 		if (!cmd)
1786 			return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
1787 						 (unsigned char *)hdr);
1788 
1789 		return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
1790 					 (unsigned char *)hdr);
1791 	}
1792 
1793 	pr_debug("Got NOPOUT Ping %s ITT: 0x%08x, TTT: 0x%08x,"
1794 		" CmdSN: 0x%08x, ExpStatSN: 0x%08x, Length: %u\n",
1795 		hdr->itt == RESERVED_ITT ? "Response" : "Request",
1796 		hdr->itt, hdr->ttt, hdr->cmdsn, hdr->exp_statsn,
1797 		payload_length);
1798 	/*
1799 	 * This is not a response to a Unsolicited NopIN, which means
1800 	 * it can either be a NOPOUT ping request (with a valid ITT),
1801 	 * or a NOPOUT not requesting a NOPIN (with a reserved ITT).
1802 	 * Either way, make sure we allocate an struct iscsit_cmd, as both
1803 	 * can contain ping data.
1804 	 */
1805 	if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
1806 		cmd->iscsi_opcode	= ISCSI_OP_NOOP_OUT;
1807 		cmd->i_state		= ISTATE_SEND_NOPIN;
1808 		cmd->immediate_cmd	= ((hdr->opcode & ISCSI_OP_IMMEDIATE) ?
1809 						1 : 0);
1810 		conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
1811 		cmd->targ_xfer_tag	= 0xFFFFFFFF;
1812 		cmd->cmd_sn		= be32_to_cpu(hdr->cmdsn);
1813 		cmd->exp_stat_sn	= be32_to_cpu(hdr->exp_statsn);
1814 		cmd->data_direction	= DMA_NONE;
1815 	}
1816 
1817 	return 0;
1818 }
1819 EXPORT_SYMBOL(iscsit_setup_nop_out);
1820 
1821 int iscsit_process_nop_out(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
1822 			   struct iscsi_nopout *hdr)
1823 {
1824 	struct iscsit_cmd *cmd_p = NULL;
1825 	int cmdsn_ret = 0;
1826 	/*
1827 	 * Initiator is expecting a NopIN ping reply..
1828 	 */
1829 	if (hdr->itt != RESERVED_ITT) {
1830 		if (!cmd)
1831 			return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
1832 						(unsigned char *)hdr);
1833 
1834 		spin_lock_bh(&conn->cmd_lock);
1835 		list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
1836 		spin_unlock_bh(&conn->cmd_lock);
1837 
1838 		iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
1839 
1840 		if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
1841 			iscsit_add_cmd_to_response_queue(cmd, conn,
1842 							 cmd->i_state);
1843 			return 0;
1844 		}
1845 
1846 		cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
1847 				(unsigned char *)hdr, hdr->cmdsn);
1848                 if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
1849 			return 0;
1850 		if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
1851 			return -1;
1852 
1853 		return 0;
1854 	}
1855 	/*
1856 	 * This was a response to a unsolicited NOPIN ping.
1857 	 */
1858 	if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) {
1859 		cmd_p = iscsit_find_cmd_from_ttt(conn, be32_to_cpu(hdr->ttt));
1860 		if (!cmd_p)
1861 			return -EINVAL;
1862 
1863 		iscsit_stop_nopin_response_timer(conn);
1864 
1865 		cmd_p->i_state = ISTATE_REMOVE;
1866 		iscsit_add_cmd_to_immediate_queue(cmd_p, conn, cmd_p->i_state);
1867 
1868 		iscsit_start_nopin_timer(conn);
1869 		return 0;
1870 	}
1871 	/*
1872 	 * Otherwise, initiator is not expecting a NOPIN is response.
1873 	 * Just ignore for now.
1874 	 */
1875 
1876 	if (cmd)
1877 		iscsit_free_cmd(cmd, false);
1878 
1879         return 0;
1880 }
1881 EXPORT_SYMBOL(iscsit_process_nop_out);
1882 
1883 static int iscsit_handle_nop_out(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
1884 				 unsigned char *buf)
1885 {
1886 	unsigned char *ping_data = NULL;
1887 	struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
1888 	struct kvec *iov = NULL;
1889 	u32 payload_length = ntoh24(hdr->dlength);
1890 	int ret;
1891 
1892 	ret = iscsit_setup_nop_out(conn, cmd, hdr);
1893 	if (ret < 0)
1894 		return 0;
1895 	/*
1896 	 * Handle NOP-OUT payload for traditional iSCSI sockets
1897 	 */
1898 	if (payload_length && hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
1899 		u32 checksum, data_crc, padding = 0;
1900 		int niov = 0, rx_got, rx_size = payload_length;
1901 
1902 		ping_data = kzalloc(payload_length + 1, GFP_KERNEL);
1903 		if (!ping_data) {
1904 			ret = -1;
1905 			goto out;
1906 		}
1907 
1908 		iov = &cmd->iov_misc[0];
1909 		iov[niov].iov_base	= ping_data;
1910 		iov[niov++].iov_len	= payload_length;
1911 
1912 		padding = ((-payload_length) & 3);
1913 		if (padding != 0) {
1914 			pr_debug("Receiving %u additional bytes"
1915 				" for padding.\n", padding);
1916 			iov[niov].iov_base	= &cmd->pad_bytes;
1917 			iov[niov++].iov_len	= padding;
1918 			rx_size += padding;
1919 		}
1920 		if (conn->conn_ops->DataDigest) {
1921 			iov[niov].iov_base	= &checksum;
1922 			iov[niov++].iov_len	= ISCSI_CRC_LEN;
1923 			rx_size += ISCSI_CRC_LEN;
1924 		}
1925 
1926 		WARN_ON_ONCE(niov > ARRAY_SIZE(cmd->iov_misc));
1927 		rx_got = rx_data(conn, &cmd->iov_misc[0], niov, rx_size);
1928 		if (rx_got != rx_size) {
1929 			ret = -1;
1930 			goto out;
1931 		}
1932 
1933 		if (conn->conn_ops->DataDigest) {
1934 			iscsit_do_crypto_hash_buf(conn->conn_rx_hash, ping_data,
1935 						  payload_length, padding,
1936 						  cmd->pad_bytes, &data_crc);
1937 
1938 			if (checksum != data_crc) {
1939 				pr_err("Ping data CRC32C DataDigest"
1940 				" 0x%08x does not match computed 0x%08x\n",
1941 					checksum, data_crc);
1942 				if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
1943 					pr_err("Unable to recover from"
1944 					" NOPOUT Ping DataCRC failure while in"
1945 						" ERL=0.\n");
1946 					ret = -1;
1947 					goto out;
1948 				} else {
1949 					/*
1950 					 * Silently drop this PDU and let the
1951 					 * initiator plug the CmdSN gap.
1952 					 */
1953 					pr_debug("Dropping NOPOUT"
1954 					" Command CmdSN: 0x%08x due to"
1955 					" DataCRC error.\n", hdr->cmdsn);
1956 					ret = 0;
1957 					goto out;
1958 				}
1959 			} else {
1960 				pr_debug("Got CRC32C DataDigest"
1961 				" 0x%08x for %u bytes of ping data.\n",
1962 					checksum, payload_length);
1963 			}
1964 		}
1965 
1966 		ping_data[payload_length] = '\0';
1967 		/*
1968 		 * Attach ping data to struct iscsit_cmd->buf_ptr.
1969 		 */
1970 		cmd->buf_ptr = ping_data;
1971 		cmd->buf_ptr_size = payload_length;
1972 
1973 		pr_debug("Got %u bytes of NOPOUT ping"
1974 			" data.\n", payload_length);
1975 		pr_debug("Ping Data: \"%s\"\n", ping_data);
1976 	}
1977 
1978 	return iscsit_process_nop_out(conn, cmd, hdr);
1979 out:
1980 	if (cmd)
1981 		iscsit_free_cmd(cmd, false);
1982 
1983 	kfree(ping_data);
1984 	return ret;
1985 }
1986 
1987 static enum tcm_tmreq_table iscsit_convert_tmf(u8 iscsi_tmf)
1988 {
1989 	switch (iscsi_tmf) {
1990 	case ISCSI_TM_FUNC_ABORT_TASK:
1991 		return TMR_ABORT_TASK;
1992 	case ISCSI_TM_FUNC_ABORT_TASK_SET:
1993 		return TMR_ABORT_TASK_SET;
1994 	case ISCSI_TM_FUNC_CLEAR_ACA:
1995 		return TMR_CLEAR_ACA;
1996 	case ISCSI_TM_FUNC_CLEAR_TASK_SET:
1997 		return TMR_CLEAR_TASK_SET;
1998 	case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
1999 		return TMR_LUN_RESET;
2000 	case ISCSI_TM_FUNC_TARGET_WARM_RESET:
2001 		return TMR_TARGET_WARM_RESET;
2002 	case ISCSI_TM_FUNC_TARGET_COLD_RESET:
2003 		return TMR_TARGET_COLD_RESET;
2004 	default:
2005 		return TMR_UNKNOWN;
2006 	}
2007 }
2008 
2009 int
2010 iscsit_handle_task_mgt_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
2011 			   unsigned char *buf)
2012 {
2013 	struct se_tmr_req *se_tmr;
2014 	struct iscsi_tmr_req *tmr_req;
2015 	struct iscsi_tm *hdr;
2016 	int out_of_order_cmdsn = 0, ret;
2017 	u8 function, tcm_function = TMR_UNKNOWN;
2018 
2019 	hdr			= (struct iscsi_tm *) buf;
2020 	hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
2021 	function = hdr->flags;
2022 
2023 	pr_debug("Got Task Management Request ITT: 0x%08x, CmdSN:"
2024 		" 0x%08x, Function: 0x%02x, RefTaskTag: 0x%08x, RefCmdSN:"
2025 		" 0x%08x, CID: %hu\n", hdr->itt, hdr->cmdsn, function,
2026 		hdr->rtt, hdr->refcmdsn, conn->cid);
2027 
2028 	if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
2029 	    ((function != ISCSI_TM_FUNC_TASK_REASSIGN) &&
2030 	     hdr->rtt != RESERVED_ITT)) {
2031 		pr_err("RefTaskTag should be set to 0xFFFFFFFF.\n");
2032 		hdr->rtt = RESERVED_ITT;
2033 	}
2034 
2035 	if ((function == ISCSI_TM_FUNC_TASK_REASSIGN) &&
2036 			!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
2037 		pr_err("Task Management Request TASK_REASSIGN not"
2038 			" issued as immediate command, bad iSCSI Initiator"
2039 				"implementation\n");
2040 		return iscsit_add_reject_cmd(cmd,
2041 					     ISCSI_REASON_PROTOCOL_ERROR, buf);
2042 	}
2043 	if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
2044 	    be32_to_cpu(hdr->refcmdsn) != ISCSI_RESERVED_TAG)
2045 		hdr->refcmdsn = cpu_to_be32(ISCSI_RESERVED_TAG);
2046 
2047 	cmd->data_direction = DMA_NONE;
2048 	cmd->tmr_req = kzalloc(sizeof(*cmd->tmr_req), GFP_KERNEL);
2049 	if (!cmd->tmr_req) {
2050 		return iscsit_add_reject_cmd(cmd,
2051 					     ISCSI_REASON_BOOKMARK_NO_RESOURCES,
2052 					     buf);
2053 	}
2054 
2055 	__target_init_cmd(&cmd->se_cmd, &iscsi_ops,
2056 			  conn->sess->se_sess, 0, DMA_NONE,
2057 			  TCM_SIMPLE_TAG, cmd->sense_buffer + 2,
2058 			  scsilun_to_int(&hdr->lun),
2059 			  conn->cmd_cnt);
2060 
2061 	target_get_sess_cmd(&cmd->se_cmd, true);
2062 
2063 	/*
2064 	 * TASK_REASSIGN for ERL=2 / connection stays inside of
2065 	 * LIO-Target $FABRIC_MOD
2066 	 */
2067 	if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
2068 		tcm_function = iscsit_convert_tmf(function);
2069 		if (tcm_function == TMR_UNKNOWN) {
2070 			pr_err("Unknown iSCSI TMR Function:"
2071 			       " 0x%02x\n", function);
2072 			return iscsit_add_reject_cmd(cmd,
2073 				ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
2074 		}
2075 	}
2076 	ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req, tcm_function,
2077 				 GFP_KERNEL);
2078 	if (ret < 0)
2079 		return iscsit_add_reject_cmd(cmd,
2080 				ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
2081 
2082 	cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req;
2083 
2084 	cmd->iscsi_opcode	= ISCSI_OP_SCSI_TMFUNC;
2085 	cmd->i_state		= ISTATE_SEND_TASKMGTRSP;
2086 	cmd->immediate_cmd	= ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
2087 	cmd->init_task_tag	= hdr->itt;
2088 	cmd->targ_xfer_tag	= 0xFFFFFFFF;
2089 	cmd->cmd_sn		= be32_to_cpu(hdr->cmdsn);
2090 	cmd->exp_stat_sn	= be32_to_cpu(hdr->exp_statsn);
2091 	se_tmr			= cmd->se_cmd.se_tmr_req;
2092 	tmr_req			= cmd->tmr_req;
2093 	/*
2094 	 * Locate the struct se_lun for all TMRs not related to ERL=2 TASK_REASSIGN
2095 	 */
2096 	if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
2097 		ret = transport_lookup_tmr_lun(&cmd->se_cmd);
2098 		if (ret < 0) {
2099 			se_tmr->response = ISCSI_TMF_RSP_NO_LUN;
2100 			goto attach;
2101 		}
2102 	}
2103 
2104 	switch (function) {
2105 	case ISCSI_TM_FUNC_ABORT_TASK:
2106 		se_tmr->response = iscsit_tmr_abort_task(cmd, buf);
2107 		if (se_tmr->response)
2108 			goto attach;
2109 		break;
2110 	case ISCSI_TM_FUNC_ABORT_TASK_SET:
2111 	case ISCSI_TM_FUNC_CLEAR_ACA:
2112 	case ISCSI_TM_FUNC_CLEAR_TASK_SET:
2113 	case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
2114 		break;
2115 	case ISCSI_TM_FUNC_TARGET_WARM_RESET:
2116 		if (iscsit_tmr_task_warm_reset(conn, tmr_req, buf) < 0) {
2117 			se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
2118 			goto attach;
2119 		}
2120 		break;
2121 	case ISCSI_TM_FUNC_TARGET_COLD_RESET:
2122 		if (iscsit_tmr_task_cold_reset(conn, tmr_req, buf) < 0) {
2123 			se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
2124 			goto attach;
2125 		}
2126 		break;
2127 	case ISCSI_TM_FUNC_TASK_REASSIGN:
2128 		se_tmr->response = iscsit_tmr_task_reassign(cmd, buf);
2129 		/*
2130 		 * Perform sanity checks on the ExpDataSN only if the
2131 		 * TASK_REASSIGN was successful.
2132 		 */
2133 		if (se_tmr->response)
2134 			break;
2135 
2136 		if (iscsit_check_task_reassign_expdatasn(tmr_req, conn) < 0)
2137 			return iscsit_add_reject_cmd(cmd,
2138 					ISCSI_REASON_BOOKMARK_INVALID, buf);
2139 		break;
2140 	default:
2141 		pr_err("Unknown TMR function: 0x%02x, protocol"
2142 			" error.\n", function);
2143 		se_tmr->response = ISCSI_TMF_RSP_NOT_SUPPORTED;
2144 		goto attach;
2145 	}
2146 
2147 	if ((function != ISCSI_TM_FUNC_TASK_REASSIGN) &&
2148 	    (se_tmr->response == ISCSI_TMF_RSP_COMPLETE))
2149 		se_tmr->call_transport = 1;
2150 attach:
2151 	spin_lock_bh(&conn->cmd_lock);
2152 	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
2153 	spin_unlock_bh(&conn->cmd_lock);
2154 
2155 	if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
2156 		int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
2157 		if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP) {
2158 			out_of_order_cmdsn = 1;
2159 		} else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
2160 			target_put_sess_cmd(&cmd->se_cmd);
2161 			return 0;
2162 		} else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
2163 			return -1;
2164 		}
2165 	}
2166 	iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
2167 
2168 	if (out_of_order_cmdsn || !(hdr->opcode & ISCSI_OP_IMMEDIATE))
2169 		return 0;
2170 	/*
2171 	 * Found the referenced task, send to transport for processing.
2172 	 */
2173 	if (se_tmr->call_transport)
2174 		return transport_generic_handle_tmr(&cmd->se_cmd);
2175 
2176 	/*
2177 	 * Could not find the referenced LUN, task, or Task Management
2178 	 * command not authorized or supported.  Change state and
2179 	 * let the tx_thread send the response.
2180 	 *
2181 	 * For connection recovery, this is also the default action for
2182 	 * TMR TASK_REASSIGN.
2183 	 */
2184 	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2185 	target_put_sess_cmd(&cmd->se_cmd);
2186 	return 0;
2187 }
2188 EXPORT_SYMBOL(iscsit_handle_task_mgt_cmd);
2189 
2190 /* #warning FIXME: Support Text Command parameters besides SendTargets */
2191 int
2192 iscsit_setup_text_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
2193 		      struct iscsi_text *hdr)
2194 {
2195 	u32 payload_length = ntoh24(hdr->dlength);
2196 
2197 	if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
2198 		pr_err("Unable to accept text parameter length: %u"
2199 			"greater than MaxXmitDataSegmentLength %u.\n",
2200 		       payload_length, conn->conn_ops->MaxXmitDataSegmentLength);
2201 		return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
2202 					 (unsigned char *)hdr);
2203 	}
2204 
2205 	if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL) ||
2206 	     (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)) {
2207 		pr_err("Multi sequence text commands currently not supported\n");
2208 		return iscsit_reject_cmd(cmd, ISCSI_REASON_CMD_NOT_SUPPORTED,
2209 					(unsigned char *)hdr);
2210 	}
2211 
2212 	pr_debug("Got Text Request: ITT: 0x%08x, CmdSN: 0x%08x,"
2213 		" ExpStatSN: 0x%08x, Length: %u\n", hdr->itt, hdr->cmdsn,
2214 		hdr->exp_statsn, payload_length);
2215 
2216 	cmd->iscsi_opcode	= ISCSI_OP_TEXT;
2217 	cmd->i_state		= ISTATE_SEND_TEXTRSP;
2218 	cmd->immediate_cmd	= ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
2219 	conn->sess->init_task_tag = cmd->init_task_tag  = hdr->itt;
2220 	cmd->targ_xfer_tag	= 0xFFFFFFFF;
2221 	cmd->cmd_sn		= be32_to_cpu(hdr->cmdsn);
2222 	cmd->exp_stat_sn	= be32_to_cpu(hdr->exp_statsn);
2223 	cmd->data_direction	= DMA_NONE;
2224 	kfree(cmd->text_in_ptr);
2225 	cmd->text_in_ptr	= NULL;
2226 
2227 	return 0;
2228 }
2229 EXPORT_SYMBOL(iscsit_setup_text_cmd);
2230 
2231 int
2232 iscsit_process_text_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
2233 			struct iscsi_text *hdr)
2234 {
2235 	unsigned char *text_in = cmd->text_in_ptr, *text_ptr;
2236 	int cmdsn_ret;
2237 
2238 	if (!text_in) {
2239 		cmd->targ_xfer_tag = be32_to_cpu(hdr->ttt);
2240 		if (cmd->targ_xfer_tag == 0xFFFFFFFF) {
2241 			pr_err("Unable to locate text_in buffer for sendtargets"
2242 			       " discovery\n");
2243 			goto reject;
2244 		}
2245 		goto empty_sendtargets;
2246 	}
2247 	if (strncmp("SendTargets=", text_in, 12) != 0) {
2248 		pr_err("Received Text Data that is not"
2249 			" SendTargets, cannot continue.\n");
2250 		goto reject;
2251 	}
2252 	/* '=' confirmed in strncmp */
2253 	text_ptr = strchr(text_in, '=');
2254 	BUG_ON(!text_ptr);
2255 	if (!strncmp("=All", text_ptr, 5)) {
2256 		cmd->cmd_flags |= ICF_SENDTARGETS_ALL;
2257 	} else if (!strncmp("=iqn.", text_ptr, 5) ||
2258 		   !strncmp("=eui.", text_ptr, 5)) {
2259 		cmd->cmd_flags |= ICF_SENDTARGETS_SINGLE;
2260 	} else {
2261 		pr_err("Unable to locate valid SendTargets%s value\n",
2262 		       text_ptr);
2263 		goto reject;
2264 	}
2265 
2266 	spin_lock_bh(&conn->cmd_lock);
2267 	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
2268 	spin_unlock_bh(&conn->cmd_lock);
2269 
2270 empty_sendtargets:
2271 	iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
2272 
2273 	if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
2274 		cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
2275 				(unsigned char *)hdr, hdr->cmdsn);
2276 		if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
2277 			return -1;
2278 
2279 		return 0;
2280 	}
2281 
2282 	return iscsit_execute_cmd(cmd, 0);
2283 
2284 reject:
2285 	return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
2286 				 (unsigned char *)hdr);
2287 }
2288 EXPORT_SYMBOL(iscsit_process_text_cmd);
2289 
2290 static int
2291 iscsit_handle_text_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
2292 		       unsigned char *buf)
2293 {
2294 	struct iscsi_text *hdr = (struct iscsi_text *)buf;
2295 	char *text_in = NULL;
2296 	u32 payload_length = ntoh24(hdr->dlength);
2297 	int rx_size, rc;
2298 
2299 	rc = iscsit_setup_text_cmd(conn, cmd, hdr);
2300 	if (rc < 0)
2301 		return 0;
2302 
2303 	rx_size = payload_length;
2304 	if (payload_length) {
2305 		u32 checksum = 0, data_crc = 0;
2306 		u32 padding = 0;
2307 		int niov = 0, rx_got;
2308 		struct kvec iov[2];
2309 
2310 		rx_size = ALIGN(payload_length, 4);
2311 		text_in = kzalloc(rx_size, GFP_KERNEL);
2312 		if (!text_in)
2313 			goto reject;
2314 
2315 		cmd->text_in_ptr = text_in;
2316 
2317 		memset(iov, 0, sizeof(iov));
2318 		iov[niov].iov_base	= text_in;
2319 		iov[niov++].iov_len	= rx_size;
2320 
2321 		padding = rx_size - payload_length;
2322 		if (padding)
2323 			pr_debug("Receiving %u additional bytes"
2324 					" for padding.\n", padding);
2325 		if (conn->conn_ops->DataDigest) {
2326 			iov[niov].iov_base	= &checksum;
2327 			iov[niov++].iov_len	= ISCSI_CRC_LEN;
2328 			rx_size += ISCSI_CRC_LEN;
2329 		}
2330 
2331 		WARN_ON_ONCE(niov > ARRAY_SIZE(iov));
2332 		rx_got = rx_data(conn, &iov[0], niov, rx_size);
2333 		if (rx_got != rx_size)
2334 			goto reject;
2335 
2336 		if (conn->conn_ops->DataDigest) {
2337 			iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
2338 						  text_in, rx_size, 0, NULL,
2339 						  &data_crc);
2340 
2341 			if (checksum != data_crc) {
2342 				pr_err("Text data CRC32C DataDigest"
2343 					" 0x%08x does not match computed"
2344 					" 0x%08x\n", checksum, data_crc);
2345 				if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
2346 					pr_err("Unable to recover from"
2347 					" Text Data digest failure while in"
2348 						" ERL=0.\n");
2349 					goto reject;
2350 				} else {
2351 					/*
2352 					 * Silently drop this PDU and let the
2353 					 * initiator plug the CmdSN gap.
2354 					 */
2355 					pr_debug("Dropping Text"
2356 					" Command CmdSN: 0x%08x due to"
2357 					" DataCRC error.\n", hdr->cmdsn);
2358 					kfree(text_in);
2359 					return 0;
2360 				}
2361 			} else {
2362 				pr_debug("Got CRC32C DataDigest"
2363 					" 0x%08x for %u bytes of text data.\n",
2364 						checksum, payload_length);
2365 			}
2366 		}
2367 		text_in[payload_length - 1] = '\0';
2368 		pr_debug("Successfully read %d bytes of text"
2369 				" data.\n", payload_length);
2370 	}
2371 
2372 	return iscsit_process_text_cmd(conn, cmd, hdr);
2373 
2374 reject:
2375 	kfree(cmd->text_in_ptr);
2376 	cmd->text_in_ptr = NULL;
2377 	return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf);
2378 }
2379 
2380 int iscsit_logout_closesession(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
2381 {
2382 	struct iscsit_conn *conn_p;
2383 	struct iscsit_session *sess = conn->sess;
2384 
2385 	pr_debug("Received logout request CLOSESESSION on CID: %hu"
2386 		" for SID: %u.\n", conn->cid, conn->sess->sid);
2387 
2388 	atomic_set(&sess->session_logout, 1);
2389 	atomic_set(&conn->conn_logout_remove, 1);
2390 	conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_SESSION;
2391 
2392 	iscsit_inc_conn_usage_count(conn);
2393 	iscsit_inc_session_usage_count(sess);
2394 
2395 	spin_lock_bh(&sess->conn_lock);
2396 	list_for_each_entry(conn_p, &sess->sess_conn_list, conn_list) {
2397 		if (conn_p->conn_state != TARG_CONN_STATE_LOGGED_IN)
2398 			continue;
2399 
2400 		pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
2401 		conn_p->conn_state = TARG_CONN_STATE_IN_LOGOUT;
2402 	}
2403 	spin_unlock_bh(&sess->conn_lock);
2404 
2405 	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2406 
2407 	return 0;
2408 }
2409 
2410 int iscsit_logout_closeconnection(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
2411 {
2412 	struct iscsit_conn *l_conn;
2413 	struct iscsit_session *sess = conn->sess;
2414 
2415 	pr_debug("Received logout request CLOSECONNECTION for CID:"
2416 		" %hu on CID: %hu.\n", cmd->logout_cid, conn->cid);
2417 
2418 	/*
2419 	 * A Logout Request with a CLOSECONNECTION reason code for a CID
2420 	 * can arrive on a connection with a differing CID.
2421 	 */
2422 	if (conn->cid == cmd->logout_cid) {
2423 		spin_lock_bh(&conn->state_lock);
2424 		pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
2425 		conn->conn_state = TARG_CONN_STATE_IN_LOGOUT;
2426 
2427 		atomic_set(&conn->conn_logout_remove, 1);
2428 		conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_CONNECTION;
2429 		iscsit_inc_conn_usage_count(conn);
2430 
2431 		spin_unlock_bh(&conn->state_lock);
2432 	} else {
2433 		/*
2434 		 * Handle all different cid CLOSECONNECTION requests in
2435 		 * iscsit_logout_post_handler_diffcid() as to give enough
2436 		 * time for any non immediate command's CmdSN to be
2437 		 * acknowledged on the connection in question.
2438 		 *
2439 		 * Here we simply make sure the CID is still around.
2440 		 */
2441 		l_conn = iscsit_get_conn_from_cid(sess,
2442 				cmd->logout_cid);
2443 		if (!l_conn) {
2444 			cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND;
2445 			iscsit_add_cmd_to_response_queue(cmd, conn,
2446 					cmd->i_state);
2447 			return 0;
2448 		}
2449 
2450 		iscsit_dec_conn_usage_count(l_conn);
2451 	}
2452 
2453 	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2454 
2455 	return 0;
2456 }
2457 
2458 int iscsit_logout_removeconnforrecovery(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
2459 {
2460 	struct iscsit_session *sess = conn->sess;
2461 
2462 	pr_debug("Received explicit REMOVECONNFORRECOVERY logout for"
2463 		" CID: %hu on CID: %hu.\n", cmd->logout_cid, conn->cid);
2464 
2465 	if (sess->sess_ops->ErrorRecoveryLevel != 2) {
2466 		pr_err("Received Logout Request REMOVECONNFORRECOVERY"
2467 			" while ERL!=2.\n");
2468 		cmd->logout_response = ISCSI_LOGOUT_RECOVERY_UNSUPPORTED;
2469 		iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2470 		return 0;
2471 	}
2472 
2473 	if (conn->cid == cmd->logout_cid) {
2474 		pr_err("Received Logout Request REMOVECONNFORRECOVERY"
2475 			" with CID: %hu on CID: %hu, implementation error.\n",
2476 				cmd->logout_cid, conn->cid);
2477 		cmd->logout_response = ISCSI_LOGOUT_CLEANUP_FAILED;
2478 		iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2479 		return 0;
2480 	}
2481 
2482 	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2483 
2484 	return 0;
2485 }
2486 
2487 int
2488 iscsit_handle_logout_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
2489 			unsigned char *buf)
2490 {
2491 	int cmdsn_ret, logout_remove = 0;
2492 	u8 reason_code = 0;
2493 	struct iscsi_logout *hdr;
2494 	struct iscsi_tiqn *tiqn = iscsit_snmp_get_tiqn(conn);
2495 
2496 	hdr			= (struct iscsi_logout *) buf;
2497 	reason_code		= (hdr->flags & 0x7f);
2498 
2499 	if (tiqn) {
2500 		spin_lock(&tiqn->logout_stats.lock);
2501 		if (reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION)
2502 			tiqn->logout_stats.normal_logouts++;
2503 		else
2504 			tiqn->logout_stats.abnormal_logouts++;
2505 		spin_unlock(&tiqn->logout_stats.lock);
2506 	}
2507 
2508 	pr_debug("Got Logout Request ITT: 0x%08x CmdSN: 0x%08x"
2509 		" ExpStatSN: 0x%08x Reason: 0x%02x CID: %hu on CID: %hu\n",
2510 		hdr->itt, hdr->cmdsn, hdr->exp_statsn, reason_code,
2511 		hdr->cid, conn->cid);
2512 
2513 	if (conn->conn_state != TARG_CONN_STATE_LOGGED_IN) {
2514 		pr_err("Received logout request on connection that"
2515 			" is not in logged in state, ignoring request.\n");
2516 		iscsit_free_cmd(cmd, false);
2517 		return 0;
2518 	}
2519 
2520 	cmd->iscsi_opcode       = ISCSI_OP_LOGOUT;
2521 	cmd->i_state            = ISTATE_SEND_LOGOUTRSP;
2522 	cmd->immediate_cmd      = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
2523 	conn->sess->init_task_tag = cmd->init_task_tag  = hdr->itt;
2524 	cmd->targ_xfer_tag      = 0xFFFFFFFF;
2525 	cmd->cmd_sn             = be32_to_cpu(hdr->cmdsn);
2526 	cmd->exp_stat_sn        = be32_to_cpu(hdr->exp_statsn);
2527 	cmd->logout_cid         = be16_to_cpu(hdr->cid);
2528 	cmd->logout_reason      = reason_code;
2529 	cmd->data_direction     = DMA_NONE;
2530 
2531 	/*
2532 	 * We need to sleep in these cases (by returning 1) until the Logout
2533 	 * Response gets sent in the tx thread.
2534 	 */
2535 	if ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION) ||
2536 	   ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION) &&
2537 	    be16_to_cpu(hdr->cid) == conn->cid))
2538 		logout_remove = 1;
2539 
2540 	spin_lock_bh(&conn->cmd_lock);
2541 	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
2542 	spin_unlock_bh(&conn->cmd_lock);
2543 
2544 	if (reason_code != ISCSI_LOGOUT_REASON_RECOVERY)
2545 		iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
2546 
2547 	/*
2548 	 * Immediate commands are executed, well, immediately.
2549 	 * Non-Immediate Logout Commands are executed in CmdSN order.
2550 	 */
2551 	if (cmd->immediate_cmd) {
2552 		int ret = iscsit_execute_cmd(cmd, 0);
2553 
2554 		if (ret < 0)
2555 			return ret;
2556 	} else {
2557 		cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
2558 		if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
2559 			logout_remove = 0;
2560 		else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
2561 			return -1;
2562 	}
2563 
2564 	return logout_remove;
2565 }
2566 EXPORT_SYMBOL(iscsit_handle_logout_cmd);
2567 
2568 int iscsit_handle_snack(
2569 	struct iscsit_conn *conn,
2570 	unsigned char *buf)
2571 {
2572 	struct iscsi_snack *hdr;
2573 
2574 	hdr			= (struct iscsi_snack *) buf;
2575 	hdr->flags		&= ~ISCSI_FLAG_CMD_FINAL;
2576 
2577 	pr_debug("Got ISCSI_INIT_SNACK, ITT: 0x%08x, ExpStatSN:"
2578 		" 0x%08x, Type: 0x%02x, BegRun: 0x%08x, RunLength: 0x%08x,"
2579 		" CID: %hu\n", hdr->itt, hdr->exp_statsn, hdr->flags,
2580 			hdr->begrun, hdr->runlength, conn->cid);
2581 
2582 	if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
2583 		pr_err("Initiator sent SNACK request while in"
2584 			" ErrorRecoveryLevel=0.\n");
2585 		return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
2586 					 buf);
2587 	}
2588 	/*
2589 	 * SNACK_DATA and SNACK_R2T are both 0,  so check which function to
2590 	 * call from inside iscsi_send_recovery_datain_or_r2t().
2591 	 */
2592 	switch (hdr->flags & ISCSI_FLAG_SNACK_TYPE_MASK) {
2593 	case 0:
2594 		return iscsit_handle_recovery_datain_or_r2t(conn, buf,
2595 			hdr->itt,
2596 			be32_to_cpu(hdr->ttt),
2597 			be32_to_cpu(hdr->begrun),
2598 			be32_to_cpu(hdr->runlength));
2599 	case ISCSI_FLAG_SNACK_TYPE_STATUS:
2600 		return iscsit_handle_status_snack(conn, hdr->itt,
2601 			be32_to_cpu(hdr->ttt),
2602 			be32_to_cpu(hdr->begrun), be32_to_cpu(hdr->runlength));
2603 	case ISCSI_FLAG_SNACK_TYPE_DATA_ACK:
2604 		return iscsit_handle_data_ack(conn, be32_to_cpu(hdr->ttt),
2605 			be32_to_cpu(hdr->begrun),
2606 			be32_to_cpu(hdr->runlength));
2607 	case ISCSI_FLAG_SNACK_TYPE_RDATA:
2608 		/* FIXME: Support R-Data SNACK */
2609 		pr_err("R-Data SNACK Not Supported.\n");
2610 		return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
2611 					 buf);
2612 	default:
2613 		pr_err("Unknown SNACK type 0x%02x, protocol"
2614 			" error.\n", hdr->flags & 0x0f);
2615 		return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
2616 					 buf);
2617 	}
2618 
2619 	return 0;
2620 }
2621 EXPORT_SYMBOL(iscsit_handle_snack);
2622 
2623 static void iscsit_rx_thread_wait_for_tcp(struct iscsit_conn *conn)
2624 {
2625 	if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
2626 	    (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
2627 		wait_for_completion_interruptible_timeout(
2628 					&conn->rx_half_close_comp,
2629 					ISCSI_RX_THREAD_TCP_TIMEOUT * HZ);
2630 	}
2631 }
2632 
2633 static int iscsit_handle_immediate_data(
2634 	struct iscsit_cmd *cmd,
2635 	struct iscsi_scsi_req *hdr,
2636 	u32 length)
2637 {
2638 	int iov_ret, rx_got = 0, rx_size = 0;
2639 	u32 checksum, iov_count = 0, padding = 0;
2640 	struct iscsit_conn *conn = cmd->conn;
2641 	struct kvec *iov;
2642 	void *overflow_buf = NULL;
2643 
2644 	BUG_ON(cmd->write_data_done > cmd->se_cmd.data_length);
2645 	rx_size = min(cmd->se_cmd.data_length - cmd->write_data_done, length);
2646 	iov_ret = iscsit_map_iovec(cmd, cmd->iov_data,
2647 				   cmd->orig_iov_data_count - 2,
2648 				   cmd->write_data_done, rx_size);
2649 	if (iov_ret < 0)
2650 		return IMMEDIATE_DATA_CANNOT_RECOVER;
2651 
2652 	iov_count = iov_ret;
2653 	iov = &cmd->iov_data[0];
2654 	if (rx_size < length) {
2655 		/*
2656 		 * Special case: length of immediate data exceeds the data
2657 		 * buffer size derived from the CDB.
2658 		 */
2659 		overflow_buf = kmalloc(length - rx_size, GFP_KERNEL);
2660 		if (!overflow_buf) {
2661 			iscsit_unmap_iovec(cmd);
2662 			return IMMEDIATE_DATA_CANNOT_RECOVER;
2663 		}
2664 		cmd->overflow_buf = overflow_buf;
2665 		iov[iov_count].iov_base = overflow_buf;
2666 		iov[iov_count].iov_len = length - rx_size;
2667 		iov_count++;
2668 		rx_size = length;
2669 	}
2670 
2671 	padding = ((-length) & 3);
2672 	if (padding != 0) {
2673 		iov[iov_count].iov_base	= cmd->pad_bytes;
2674 		iov[iov_count++].iov_len = padding;
2675 		rx_size += padding;
2676 	}
2677 
2678 	if (conn->conn_ops->DataDigest) {
2679 		iov[iov_count].iov_base		= &checksum;
2680 		iov[iov_count++].iov_len	= ISCSI_CRC_LEN;
2681 		rx_size += ISCSI_CRC_LEN;
2682 	}
2683 
2684 	WARN_ON_ONCE(iov_count > cmd->orig_iov_data_count);
2685 	rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size);
2686 
2687 	iscsit_unmap_iovec(cmd);
2688 
2689 	if (rx_got != rx_size) {
2690 		iscsit_rx_thread_wait_for_tcp(conn);
2691 		return IMMEDIATE_DATA_CANNOT_RECOVER;
2692 	}
2693 
2694 	if (conn->conn_ops->DataDigest) {
2695 		u32 data_crc;
2696 
2697 		data_crc = iscsit_do_crypto_hash_sg(conn->conn_rx_hash, cmd,
2698 						    cmd->write_data_done, length, padding,
2699 						    cmd->pad_bytes);
2700 
2701 		if (checksum != data_crc) {
2702 			pr_err("ImmediateData CRC32C DataDigest 0x%08x"
2703 				" does not match computed 0x%08x\n", checksum,
2704 				data_crc);
2705 
2706 			if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
2707 				pr_err("Unable to recover from"
2708 					" Immediate Data digest failure while"
2709 					" in ERL=0.\n");
2710 				iscsit_reject_cmd(cmd,
2711 						ISCSI_REASON_DATA_DIGEST_ERROR,
2712 						(unsigned char *)hdr);
2713 				return IMMEDIATE_DATA_CANNOT_RECOVER;
2714 			} else {
2715 				iscsit_reject_cmd(cmd,
2716 						ISCSI_REASON_DATA_DIGEST_ERROR,
2717 						(unsigned char *)hdr);
2718 				return IMMEDIATE_DATA_ERL1_CRC_FAILURE;
2719 			}
2720 		} else {
2721 			pr_debug("Got CRC32C DataDigest 0x%08x for"
2722 				" %u bytes of Immediate Data\n", checksum,
2723 				length);
2724 		}
2725 	}
2726 
2727 	cmd->write_data_done += length;
2728 
2729 	if (cmd->write_data_done == cmd->se_cmd.data_length) {
2730 		spin_lock_bh(&cmd->istate_lock);
2731 		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
2732 		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
2733 		spin_unlock_bh(&cmd->istate_lock);
2734 	}
2735 
2736 	return IMMEDIATE_DATA_NORMAL_OPERATION;
2737 }
2738 
2739 /* #warning iscsi_build_conn_drop_async_message() only sends out on connections
2740 	with active network interface */
2741 static void iscsit_build_conn_drop_async_message(struct iscsit_conn *conn)
2742 {
2743 	struct iscsit_cmd *cmd;
2744 	struct iscsit_conn *conn_p;
2745 	bool found = false;
2746 
2747 	lockdep_assert_held(&conn->sess->conn_lock);
2748 
2749 	/*
2750 	 * Only send a Asynchronous Message on connections whos network
2751 	 * interface is still functional.
2752 	 */
2753 	list_for_each_entry(conn_p, &conn->sess->sess_conn_list, conn_list) {
2754 		if (conn_p->conn_state == TARG_CONN_STATE_LOGGED_IN) {
2755 			iscsit_inc_conn_usage_count(conn_p);
2756 			found = true;
2757 			break;
2758 		}
2759 	}
2760 
2761 	if (!found)
2762 		return;
2763 
2764 	cmd = iscsit_allocate_cmd(conn_p, TASK_RUNNING);
2765 	if (!cmd) {
2766 		iscsit_dec_conn_usage_count(conn_p);
2767 		return;
2768 	}
2769 
2770 	cmd->logout_cid = conn->cid;
2771 	cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT;
2772 	cmd->i_state = ISTATE_SEND_ASYNCMSG;
2773 
2774 	spin_lock_bh(&conn_p->cmd_lock);
2775 	list_add_tail(&cmd->i_conn_node, &conn_p->conn_cmd_list);
2776 	spin_unlock_bh(&conn_p->cmd_lock);
2777 
2778 	iscsit_add_cmd_to_response_queue(cmd, conn_p, cmd->i_state);
2779 	iscsit_dec_conn_usage_count(conn_p);
2780 }
2781 
2782 static int iscsit_send_conn_drop_async_message(
2783 	struct iscsit_cmd *cmd,
2784 	struct iscsit_conn *conn)
2785 {
2786 	struct iscsi_async *hdr;
2787 
2788 	cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT;
2789 
2790 	hdr			= (struct iscsi_async *) cmd->pdu;
2791 	hdr->opcode		= ISCSI_OP_ASYNC_EVENT;
2792 	hdr->flags		= ISCSI_FLAG_CMD_FINAL;
2793 	cmd->init_task_tag	= RESERVED_ITT;
2794 	cmd->targ_xfer_tag	= 0xFFFFFFFF;
2795 	put_unaligned_be64(0xFFFFFFFFFFFFFFFFULL, &hdr->rsvd4[0]);
2796 	cmd->stat_sn		= conn->stat_sn++;
2797 	hdr->statsn		= cpu_to_be32(cmd->stat_sn);
2798 	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
2799 	hdr->max_cmdsn		= cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
2800 	hdr->async_event	= ISCSI_ASYNC_MSG_DROPPING_CONNECTION;
2801 	hdr->param1		= cpu_to_be16(cmd->logout_cid);
2802 	hdr->param2		= cpu_to_be16(conn->sess->sess_ops->DefaultTime2Wait);
2803 	hdr->param3		= cpu_to_be16(conn->sess->sess_ops->DefaultTime2Retain);
2804 
2805 	pr_debug("Sending Connection Dropped Async Message StatSN:"
2806 		" 0x%08x, for CID: %hu on CID: %hu\n", cmd->stat_sn,
2807 			cmd->logout_cid, conn->cid);
2808 
2809 	return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
2810 }
2811 
2812 static void iscsit_tx_thread_wait_for_tcp(struct iscsit_conn *conn)
2813 {
2814 	if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
2815 	    (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
2816 		wait_for_completion_interruptible_timeout(
2817 					&conn->tx_half_close_comp,
2818 					ISCSI_TX_THREAD_TCP_TIMEOUT * HZ);
2819 	}
2820 }
2821 
2822 void
2823 iscsit_build_datain_pdu(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
2824 			struct iscsi_datain *datain, struct iscsi_data_rsp *hdr,
2825 			bool set_statsn)
2826 {
2827 	hdr->opcode		= ISCSI_OP_SCSI_DATA_IN;
2828 	hdr->flags		= datain->flags;
2829 	if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
2830 		if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
2831 			hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW;
2832 			hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
2833 		} else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
2834 			hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW;
2835 			hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
2836 		}
2837 	}
2838 	hton24(hdr->dlength, datain->length);
2839 	if (hdr->flags & ISCSI_FLAG_DATA_ACK)
2840 		int_to_scsilun(cmd->se_cmd.orig_fe_lun,
2841 				(struct scsi_lun *)&hdr->lun);
2842 	else
2843 		put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
2844 
2845 	hdr->itt		= cmd->init_task_tag;
2846 
2847 	if (hdr->flags & ISCSI_FLAG_DATA_ACK)
2848 		hdr->ttt		= cpu_to_be32(cmd->targ_xfer_tag);
2849 	else
2850 		hdr->ttt		= cpu_to_be32(0xFFFFFFFF);
2851 	if (set_statsn)
2852 		hdr->statsn		= cpu_to_be32(cmd->stat_sn);
2853 	else
2854 		hdr->statsn		= cpu_to_be32(0xFFFFFFFF);
2855 
2856 	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
2857 	hdr->max_cmdsn		= cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
2858 	hdr->datasn		= cpu_to_be32(datain->data_sn);
2859 	hdr->offset		= cpu_to_be32(datain->offset);
2860 
2861 	pr_debug("Built DataIN ITT: 0x%08x, StatSN: 0x%08x,"
2862 		" DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
2863 		cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn),
2864 		ntohl(hdr->offset), datain->length, conn->cid);
2865 }
2866 EXPORT_SYMBOL(iscsit_build_datain_pdu);
2867 
2868 static int iscsit_send_datain(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
2869 {
2870 	struct iscsi_data_rsp *hdr = (struct iscsi_data_rsp *)&cmd->pdu[0];
2871 	struct iscsi_datain datain;
2872 	struct iscsi_datain_req *dr;
2873 	int eodr = 0, ret;
2874 	bool set_statsn = false;
2875 
2876 	memset(&datain, 0, sizeof(struct iscsi_datain));
2877 	dr = iscsit_get_datain_values(cmd, &datain);
2878 	if (!dr) {
2879 		pr_err("iscsit_get_datain_values failed for ITT: 0x%08x\n",
2880 				cmd->init_task_tag);
2881 		return -1;
2882 	}
2883 	/*
2884 	 * Be paranoid and double check the logic for now.
2885 	 */
2886 	if ((datain.offset + datain.length) > cmd->se_cmd.data_length) {
2887 		pr_err("Command ITT: 0x%08x, datain.offset: %u and"
2888 			" datain.length: %u exceeds cmd->data_length: %u\n",
2889 			cmd->init_task_tag, datain.offset, datain.length,
2890 			cmd->se_cmd.data_length);
2891 		return -1;
2892 	}
2893 
2894 	atomic_long_add(datain.length, &conn->sess->tx_data_octets);
2895 	/*
2896 	 * Special case for successfully execution w/ both DATAIN
2897 	 * and Sense Data.
2898 	 */
2899 	if ((datain.flags & ISCSI_FLAG_DATA_STATUS) &&
2900 	    (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE))
2901 		datain.flags &= ~ISCSI_FLAG_DATA_STATUS;
2902 	else {
2903 		if ((dr->dr_complete == DATAIN_COMPLETE_NORMAL) ||
2904 		    (dr->dr_complete == DATAIN_COMPLETE_CONNECTION_RECOVERY)) {
2905 			iscsit_increment_maxcmdsn(cmd, conn->sess);
2906 			cmd->stat_sn = conn->stat_sn++;
2907 			set_statsn = true;
2908 		} else if (dr->dr_complete ==
2909 			   DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY)
2910 			set_statsn = true;
2911 	}
2912 
2913 	iscsit_build_datain_pdu(cmd, conn, &datain, hdr, set_statsn);
2914 
2915 	ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, dr, &datain, 0);
2916 	if (ret < 0)
2917 		return ret;
2918 
2919 	if (dr->dr_complete) {
2920 		eodr = (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ?
2921 				2 : 1;
2922 		iscsit_free_datain_req(cmd, dr);
2923 	}
2924 
2925 	return eodr;
2926 }
2927 
2928 int
2929 iscsit_build_logout_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
2930 			struct iscsi_logout_rsp *hdr)
2931 {
2932 	struct iscsit_conn *logout_conn = NULL;
2933 	struct iscsi_conn_recovery *cr = NULL;
2934 	struct iscsit_session *sess = conn->sess;
2935 	/*
2936 	 * The actual shutting down of Sessions and/or Connections
2937 	 * for CLOSESESSION and CLOSECONNECTION Logout Requests
2938 	 * is done in scsi_logout_post_handler().
2939 	 */
2940 	switch (cmd->logout_reason) {
2941 	case ISCSI_LOGOUT_REASON_CLOSE_SESSION:
2942 		pr_debug("iSCSI session logout successful, setting"
2943 			" logout response to ISCSI_LOGOUT_SUCCESS.\n");
2944 		cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
2945 		break;
2946 	case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
2947 		if (cmd->logout_response == ISCSI_LOGOUT_CID_NOT_FOUND)
2948 			break;
2949 		/*
2950 		 * For CLOSECONNECTION logout requests carrying
2951 		 * a matching logout CID -> local CID, the reference
2952 		 * for the local CID will have been incremented in
2953 		 * iscsi_logout_closeconnection().
2954 		 *
2955 		 * For CLOSECONNECTION logout requests carrying
2956 		 * a different CID than the connection it arrived
2957 		 * on, the connection responding to cmd->logout_cid
2958 		 * is stopped in iscsit_logout_post_handler_diffcid().
2959 		 */
2960 
2961 		pr_debug("iSCSI CID: %hu logout on CID: %hu"
2962 			" successful.\n", cmd->logout_cid, conn->cid);
2963 		cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
2964 		break;
2965 	case ISCSI_LOGOUT_REASON_RECOVERY:
2966 		if ((cmd->logout_response == ISCSI_LOGOUT_RECOVERY_UNSUPPORTED) ||
2967 		    (cmd->logout_response == ISCSI_LOGOUT_CLEANUP_FAILED))
2968 			break;
2969 		/*
2970 		 * If the connection is still active from our point of view
2971 		 * force connection recovery to occur.
2972 		 */
2973 		logout_conn = iscsit_get_conn_from_cid_rcfr(sess,
2974 				cmd->logout_cid);
2975 		if (logout_conn) {
2976 			iscsit_connection_reinstatement_rcfr(logout_conn);
2977 			iscsit_dec_conn_usage_count(logout_conn);
2978 		}
2979 
2980 		cr = iscsit_get_inactive_connection_recovery_entry(
2981 				conn->sess, cmd->logout_cid);
2982 		if (!cr) {
2983 			pr_err("Unable to locate CID: %hu for"
2984 			" REMOVECONNFORRECOVERY Logout Request.\n",
2985 				cmd->logout_cid);
2986 			cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND;
2987 			break;
2988 		}
2989 
2990 		iscsit_discard_cr_cmds_by_expstatsn(cr, cmd->exp_stat_sn);
2991 
2992 		pr_debug("iSCSI REMOVECONNFORRECOVERY logout"
2993 			" for recovery for CID: %hu on CID: %hu successful.\n",
2994 				cmd->logout_cid, conn->cid);
2995 		cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
2996 		break;
2997 	default:
2998 		pr_err("Unknown cmd->logout_reason: 0x%02x\n",
2999 				cmd->logout_reason);
3000 		return -1;
3001 	}
3002 
3003 	hdr->opcode		= ISCSI_OP_LOGOUT_RSP;
3004 	hdr->flags		|= ISCSI_FLAG_CMD_FINAL;
3005 	hdr->response		= cmd->logout_response;
3006 	hdr->itt		= cmd->init_task_tag;
3007 	cmd->stat_sn		= conn->stat_sn++;
3008 	hdr->statsn		= cpu_to_be32(cmd->stat_sn);
3009 
3010 	iscsit_increment_maxcmdsn(cmd, conn->sess);
3011 	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
3012 	hdr->max_cmdsn		= cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
3013 
3014 	pr_debug("Built Logout Response ITT: 0x%08x StatSN:"
3015 		" 0x%08x Response: 0x%02x CID: %hu on CID: %hu\n",
3016 		cmd->init_task_tag, cmd->stat_sn, hdr->response,
3017 		cmd->logout_cid, conn->cid);
3018 
3019 	return 0;
3020 }
3021 EXPORT_SYMBOL(iscsit_build_logout_rsp);
3022 
3023 static int
3024 iscsit_send_logout(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
3025 {
3026 	int rc;
3027 
3028 	rc = iscsit_build_logout_rsp(cmd, conn,
3029 			(struct iscsi_logout_rsp *)&cmd->pdu[0]);
3030 	if (rc < 0)
3031 		return rc;
3032 
3033 	return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
3034 }
3035 
3036 void
3037 iscsit_build_nopin_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
3038 		       struct iscsi_nopin *hdr, bool nopout_response)
3039 {
3040 	hdr->opcode		= ISCSI_OP_NOOP_IN;
3041 	hdr->flags		|= ISCSI_FLAG_CMD_FINAL;
3042         hton24(hdr->dlength, cmd->buf_ptr_size);
3043 	if (nopout_response)
3044 		put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
3045 	hdr->itt		= cmd->init_task_tag;
3046 	hdr->ttt		= cpu_to_be32(cmd->targ_xfer_tag);
3047 	cmd->stat_sn		= (nopout_response) ? conn->stat_sn++ :
3048 				  conn->stat_sn;
3049 	hdr->statsn		= cpu_to_be32(cmd->stat_sn);
3050 
3051 	if (nopout_response)
3052 		iscsit_increment_maxcmdsn(cmd, conn->sess);
3053 
3054 	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
3055 	hdr->max_cmdsn		= cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
3056 
3057 	pr_debug("Built NOPIN %s Response ITT: 0x%08x, TTT: 0x%08x,"
3058 		" StatSN: 0x%08x, Length %u\n", (nopout_response) ?
3059 		"Solicited" : "Unsolicited", cmd->init_task_tag,
3060 		cmd->targ_xfer_tag, cmd->stat_sn, cmd->buf_ptr_size);
3061 }
3062 EXPORT_SYMBOL(iscsit_build_nopin_rsp);
3063 
3064 /*
3065  *	Unsolicited NOPIN, either requesting a response or not.
3066  */
3067 static int iscsit_send_unsolicited_nopin(
3068 	struct iscsit_cmd *cmd,
3069 	struct iscsit_conn *conn,
3070 	int want_response)
3071 {
3072 	struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0];
3073 	int ret;
3074 
3075 	iscsit_build_nopin_rsp(cmd, conn, hdr, false);
3076 
3077 	pr_debug("Sending Unsolicited NOPIN TTT: 0x%08x StatSN:"
3078 		" 0x%08x CID: %hu\n", hdr->ttt, cmd->stat_sn, conn->cid);
3079 
3080 	ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
3081 	if (ret < 0)
3082 		return ret;
3083 
3084 	spin_lock_bh(&cmd->istate_lock);
3085 	cmd->i_state = want_response ?
3086 		ISTATE_SENT_NOPIN_WANT_RESPONSE : ISTATE_SENT_STATUS;
3087 	spin_unlock_bh(&cmd->istate_lock);
3088 
3089 	return 0;
3090 }
3091 
3092 static int
3093 iscsit_send_nopin(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
3094 {
3095 	struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0];
3096 
3097 	iscsit_build_nopin_rsp(cmd, conn, hdr, true);
3098 
3099 	/*
3100 	 * NOPOUT Ping Data is attached to struct iscsit_cmd->buf_ptr.
3101 	 * NOPOUT DataSegmentLength is at struct iscsit_cmd->buf_ptr_size.
3102 	 */
3103 	pr_debug("Echoing back %u bytes of ping data.\n", cmd->buf_ptr_size);
3104 
3105 	return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL,
3106 						     cmd->buf_ptr,
3107 						     cmd->buf_ptr_size);
3108 }
3109 
3110 static int iscsit_send_r2t(
3111 	struct iscsit_cmd *cmd,
3112 	struct iscsit_conn *conn)
3113 {
3114 	struct iscsi_r2t *r2t;
3115 	struct iscsi_r2t_rsp *hdr;
3116 	int ret;
3117 
3118 	r2t = iscsit_get_r2t_from_list(cmd);
3119 	if (!r2t)
3120 		return -1;
3121 
3122 	hdr			= (struct iscsi_r2t_rsp *) cmd->pdu;
3123 	memset(hdr, 0, ISCSI_HDR_LEN);
3124 	hdr->opcode		= ISCSI_OP_R2T;
3125 	hdr->flags		|= ISCSI_FLAG_CMD_FINAL;
3126 	int_to_scsilun(cmd->se_cmd.orig_fe_lun,
3127 			(struct scsi_lun *)&hdr->lun);
3128 	hdr->itt		= cmd->init_task_tag;
3129 	if (conn->conn_transport->iscsit_get_r2t_ttt)
3130 		conn->conn_transport->iscsit_get_r2t_ttt(conn, cmd, r2t);
3131 	else
3132 		r2t->targ_xfer_tag = session_get_next_ttt(conn->sess);
3133 	hdr->ttt		= cpu_to_be32(r2t->targ_xfer_tag);
3134 	hdr->statsn		= cpu_to_be32(conn->stat_sn);
3135 	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
3136 	hdr->max_cmdsn		= cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
3137 	hdr->r2tsn		= cpu_to_be32(r2t->r2t_sn);
3138 	hdr->data_offset	= cpu_to_be32(r2t->offset);
3139 	hdr->data_length	= cpu_to_be32(r2t->xfer_len);
3140 
3141 	pr_debug("Built %sR2T, ITT: 0x%08x, TTT: 0x%08x, StatSN:"
3142 		" 0x%08x, R2TSN: 0x%08x, Offset: %u, DDTL: %u, CID: %hu\n",
3143 		(!r2t->recovery_r2t) ? "" : "Recovery ", cmd->init_task_tag,
3144 		r2t->targ_xfer_tag, ntohl(hdr->statsn), r2t->r2t_sn,
3145 			r2t->offset, r2t->xfer_len, conn->cid);
3146 
3147 	spin_lock_bh(&cmd->r2t_lock);
3148 	r2t->sent_r2t = 1;
3149 	spin_unlock_bh(&cmd->r2t_lock);
3150 
3151 	ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
3152 	if (ret < 0) {
3153 		return ret;
3154 	}
3155 
3156 	spin_lock_bh(&cmd->dataout_timeout_lock);
3157 	iscsit_start_dataout_timer(cmd, conn);
3158 	spin_unlock_bh(&cmd->dataout_timeout_lock);
3159 
3160 	return 0;
3161 }
3162 
3163 /*
3164  *	@recovery: If called from iscsi_task_reassign_complete_write() for
3165  *		connection recovery.
3166  */
3167 int iscsit_build_r2ts_for_cmd(
3168 	struct iscsit_conn *conn,
3169 	struct iscsit_cmd *cmd,
3170 	bool recovery)
3171 {
3172 	int first_r2t = 1;
3173 	u32 offset = 0, xfer_len = 0;
3174 
3175 	spin_lock_bh(&cmd->r2t_lock);
3176 	if (cmd->cmd_flags & ICF_SENT_LAST_R2T) {
3177 		spin_unlock_bh(&cmd->r2t_lock);
3178 		return 0;
3179 	}
3180 
3181 	if (conn->sess->sess_ops->DataSequenceInOrder &&
3182 	    !recovery)
3183 		cmd->r2t_offset = max(cmd->r2t_offset, cmd->write_data_done);
3184 
3185 	while (cmd->outstanding_r2ts < conn->sess->sess_ops->MaxOutstandingR2T) {
3186 		if (conn->sess->sess_ops->DataSequenceInOrder) {
3187 			offset = cmd->r2t_offset;
3188 
3189 			if (first_r2t && recovery) {
3190 				int new_data_end = offset +
3191 					conn->sess->sess_ops->MaxBurstLength -
3192 					cmd->next_burst_len;
3193 
3194 				if (new_data_end > cmd->se_cmd.data_length)
3195 					xfer_len = cmd->se_cmd.data_length - offset;
3196 				else
3197 					xfer_len =
3198 						conn->sess->sess_ops->MaxBurstLength -
3199 						cmd->next_burst_len;
3200 			} else {
3201 				int new_data_end = offset +
3202 					conn->sess->sess_ops->MaxBurstLength;
3203 
3204 				if (new_data_end > cmd->se_cmd.data_length)
3205 					xfer_len = cmd->se_cmd.data_length - offset;
3206 				else
3207 					xfer_len = conn->sess->sess_ops->MaxBurstLength;
3208 			}
3209 
3210 			if ((s32)xfer_len < 0) {
3211 				cmd->cmd_flags |= ICF_SENT_LAST_R2T;
3212 				break;
3213 			}
3214 
3215 			cmd->r2t_offset += xfer_len;
3216 
3217 			if (cmd->r2t_offset == cmd->se_cmd.data_length)
3218 				cmd->cmd_flags |= ICF_SENT_LAST_R2T;
3219 		} else {
3220 			struct iscsi_seq *seq;
3221 
3222 			seq = iscsit_get_seq_holder_for_r2t(cmd);
3223 			if (!seq) {
3224 				spin_unlock_bh(&cmd->r2t_lock);
3225 				return -1;
3226 			}
3227 
3228 			offset = seq->offset;
3229 			xfer_len = seq->xfer_len;
3230 
3231 			if (cmd->seq_send_order == cmd->seq_count)
3232 				cmd->cmd_flags |= ICF_SENT_LAST_R2T;
3233 		}
3234 		cmd->outstanding_r2ts++;
3235 		first_r2t = 0;
3236 
3237 		if (iscsit_add_r2t_to_list(cmd, offset, xfer_len, 0, 0) < 0) {
3238 			spin_unlock_bh(&cmd->r2t_lock);
3239 			return -1;
3240 		}
3241 
3242 		if (cmd->cmd_flags & ICF_SENT_LAST_R2T)
3243 			break;
3244 	}
3245 	spin_unlock_bh(&cmd->r2t_lock);
3246 
3247 	return 0;
3248 }
3249 EXPORT_SYMBOL(iscsit_build_r2ts_for_cmd);
3250 
3251 void iscsit_build_rsp_pdu(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
3252 			bool inc_stat_sn, struct iscsi_scsi_rsp *hdr)
3253 {
3254 	if (inc_stat_sn)
3255 		cmd->stat_sn = conn->stat_sn++;
3256 
3257 	atomic_long_inc(&conn->sess->rsp_pdus);
3258 
3259 	memset(hdr, 0, ISCSI_HDR_LEN);
3260 	hdr->opcode		= ISCSI_OP_SCSI_CMD_RSP;
3261 	hdr->flags		|= ISCSI_FLAG_CMD_FINAL;
3262 	if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
3263 		hdr->flags |= ISCSI_FLAG_CMD_OVERFLOW;
3264 		hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
3265 	} else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
3266 		hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
3267 		hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
3268 	}
3269 	hdr->response		= cmd->iscsi_response;
3270 	hdr->cmd_status		= cmd->se_cmd.scsi_status;
3271 	hdr->itt		= cmd->init_task_tag;
3272 	hdr->statsn		= cpu_to_be32(cmd->stat_sn);
3273 
3274 	iscsit_increment_maxcmdsn(cmd, conn->sess);
3275 	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
3276 	hdr->max_cmdsn		= cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
3277 
3278 	pr_debug("Built SCSI Response, ITT: 0x%08x, StatSN: 0x%08x,"
3279 		" Response: 0x%02x, SAM Status: 0x%02x, CID: %hu\n",
3280 		cmd->init_task_tag, cmd->stat_sn, cmd->se_cmd.scsi_status,
3281 		cmd->se_cmd.scsi_status, conn->cid);
3282 }
3283 EXPORT_SYMBOL(iscsit_build_rsp_pdu);
3284 
3285 static int iscsit_send_response(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
3286 {
3287 	struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)&cmd->pdu[0];
3288 	bool inc_stat_sn = (cmd->i_state == ISTATE_SEND_STATUS);
3289 	void *data_buf = NULL;
3290 	u32 padding = 0, data_buf_len = 0;
3291 
3292 	iscsit_build_rsp_pdu(cmd, conn, inc_stat_sn, hdr);
3293 
3294 	/*
3295 	 * Attach SENSE DATA payload to iSCSI Response PDU
3296 	 */
3297 	if (cmd->se_cmd.sense_buffer &&
3298 	   ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
3299 	    (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
3300 		put_unaligned_be16(cmd->se_cmd.scsi_sense_length, cmd->sense_buffer);
3301 		cmd->se_cmd.scsi_sense_length += sizeof (__be16);
3302 
3303 		padding		= -(cmd->se_cmd.scsi_sense_length) & 3;
3304 		hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
3305 		data_buf = cmd->sense_buffer;
3306 		data_buf_len = cmd->se_cmd.scsi_sense_length + padding;
3307 
3308 		if (padding) {
3309 			memset(cmd->sense_buffer +
3310 				cmd->se_cmd.scsi_sense_length, 0, padding);
3311 			pr_debug("Adding %u bytes of padding to"
3312 				" SENSE.\n", padding);
3313 		}
3314 
3315 		pr_debug("Attaching SENSE DATA: %u bytes to iSCSI"
3316 				" Response PDU\n",
3317 				cmd->se_cmd.scsi_sense_length);
3318 	}
3319 
3320 	return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, data_buf,
3321 						     data_buf_len);
3322 }
3323 
3324 static u8 iscsit_convert_tcm_tmr_rsp(struct se_tmr_req *se_tmr)
3325 {
3326 	switch (se_tmr->response) {
3327 	case TMR_FUNCTION_COMPLETE:
3328 		return ISCSI_TMF_RSP_COMPLETE;
3329 	case TMR_TASK_DOES_NOT_EXIST:
3330 		return ISCSI_TMF_RSP_NO_TASK;
3331 	case TMR_LUN_DOES_NOT_EXIST:
3332 		return ISCSI_TMF_RSP_NO_LUN;
3333 	case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
3334 		return ISCSI_TMF_RSP_NOT_SUPPORTED;
3335 	case TMR_FUNCTION_REJECTED:
3336 	default:
3337 		return ISCSI_TMF_RSP_REJECTED;
3338 	}
3339 }
3340 
3341 void
3342 iscsit_build_task_mgt_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
3343 			  struct iscsi_tm_rsp *hdr)
3344 {
3345 	struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
3346 
3347 	hdr->opcode		= ISCSI_OP_SCSI_TMFUNC_RSP;
3348 	hdr->flags		= ISCSI_FLAG_CMD_FINAL;
3349 	hdr->response		= iscsit_convert_tcm_tmr_rsp(se_tmr);
3350 	hdr->itt		= cmd->init_task_tag;
3351 	cmd->stat_sn		= conn->stat_sn++;
3352 	hdr->statsn		= cpu_to_be32(cmd->stat_sn);
3353 
3354 	iscsit_increment_maxcmdsn(cmd, conn->sess);
3355 	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
3356 	hdr->max_cmdsn		= cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
3357 
3358 	pr_debug("Built Task Management Response ITT: 0x%08x,"
3359 		" StatSN: 0x%08x, Response: 0x%02x, CID: %hu\n",
3360 		cmd->init_task_tag, cmd->stat_sn, hdr->response, conn->cid);
3361 }
3362 EXPORT_SYMBOL(iscsit_build_task_mgt_rsp);
3363 
3364 static int
3365 iscsit_send_task_mgt_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
3366 {
3367 	struct iscsi_tm_rsp *hdr = (struct iscsi_tm_rsp *)&cmd->pdu[0];
3368 
3369 	iscsit_build_task_mgt_rsp(cmd, conn, hdr);
3370 
3371 	return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
3372 }
3373 
3374 #define SENDTARGETS_BUF_LIMIT 32768U
3375 
3376 static int
3377 iscsit_build_sendtargets_response(struct iscsit_cmd *cmd,
3378 				  enum iscsit_transport_type network_transport,
3379 				  int skip_bytes, bool *completed)
3380 {
3381 	char *payload = NULL;
3382 	struct iscsit_conn *conn = cmd->conn;
3383 	struct iscsi_portal_group *tpg;
3384 	struct iscsi_tiqn *tiqn;
3385 	struct iscsi_tpg_np *tpg_np;
3386 	int buffer_len, end_of_buf = 0, len = 0, payload_len = 0;
3387 	int target_name_printed;
3388 	unsigned char buf[ISCSI_IQN_LEN+12]; /* iqn + "TargetName=" + \0 */
3389 	unsigned char *text_in = cmd->text_in_ptr, *text_ptr = NULL;
3390 	bool active;
3391 
3392 	buffer_len = min(conn->conn_ops->MaxRecvDataSegmentLength,
3393 			 SENDTARGETS_BUF_LIMIT);
3394 
3395 	payload = kzalloc(buffer_len, GFP_KERNEL);
3396 	if (!payload)
3397 		return -ENOMEM;
3398 
3399 	/*
3400 	 * Locate pointer to iqn./eui. string for ICF_SENDTARGETS_SINGLE
3401 	 * explicit case..
3402 	 */
3403 	if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) {
3404 		text_ptr = strchr(text_in, '=');
3405 		if (!text_ptr) {
3406 			pr_err("Unable to locate '=' string in text_in:"
3407 			       " %s\n", text_in);
3408 			kfree(payload);
3409 			return -EINVAL;
3410 		}
3411 		/*
3412 		 * Skip over '=' character..
3413 		 */
3414 		text_ptr += 1;
3415 	}
3416 
3417 	spin_lock(&tiqn_lock);
3418 	list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
3419 		if ((cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) &&
3420 		     strcmp(tiqn->tiqn, text_ptr)) {
3421 			continue;
3422 		}
3423 
3424 		target_name_printed = 0;
3425 
3426 		spin_lock(&tiqn->tiqn_tpg_lock);
3427 		list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
3428 
3429 			/* If demo_mode_discovery=0 and generate_node_acls=0
3430 			 * (demo mode dislabed) do not return
3431 			 * TargetName+TargetAddress unless a NodeACL exists.
3432 			 */
3433 
3434 			if ((tpg->tpg_attrib.generate_node_acls == 0) &&
3435 			    (tpg->tpg_attrib.demo_mode_discovery == 0) &&
3436 			    (!target_tpg_has_node_acl(&tpg->tpg_se_tpg,
3437 				cmd->conn->sess->sess_ops->InitiatorName))) {
3438 				continue;
3439 			}
3440 
3441 			spin_lock(&tpg->tpg_state_lock);
3442 			active = (tpg->tpg_state == TPG_STATE_ACTIVE);
3443 			spin_unlock(&tpg->tpg_state_lock);
3444 
3445 			if (!active && tpg->tpg_attrib.tpg_enabled_sendtargets)
3446 				continue;
3447 
3448 			spin_lock(&tpg->tpg_np_lock);
3449 			list_for_each_entry(tpg_np, &tpg->tpg_gnp_list,
3450 						tpg_np_list) {
3451 				struct iscsi_np *np = tpg_np->tpg_np;
3452 				struct sockaddr_storage *sockaddr;
3453 
3454 				if (np->np_network_transport != network_transport)
3455 					continue;
3456 
3457 				if (!target_name_printed) {
3458 					len = sprintf(buf, "TargetName=%s",
3459 						      tiqn->tiqn);
3460 					len += 1;
3461 
3462 					if ((len + payload_len) > buffer_len) {
3463 						spin_unlock(&tpg->tpg_np_lock);
3464 						spin_unlock(&tiqn->tiqn_tpg_lock);
3465 						end_of_buf = 1;
3466 						goto eob;
3467 					}
3468 
3469 					if (skip_bytes && len <= skip_bytes) {
3470 						skip_bytes -= len;
3471 					} else {
3472 						memcpy(payload + payload_len, buf, len);
3473 						payload_len += len;
3474 						target_name_printed = 1;
3475 						if (len > skip_bytes)
3476 							skip_bytes = 0;
3477 					}
3478 				}
3479 
3480 				if (inet_addr_is_any((struct sockaddr *)&np->np_sockaddr))
3481 					sockaddr = &conn->local_sockaddr;
3482 				else
3483 					sockaddr = &np->np_sockaddr;
3484 
3485 				len = sprintf(buf, "TargetAddress="
3486 					      "%pISpc,%hu",
3487 					      sockaddr,
3488 					      tpg->tpgt);
3489 				len += 1;
3490 
3491 				if ((len + payload_len) > buffer_len) {
3492 					spin_unlock(&tpg->tpg_np_lock);
3493 					spin_unlock(&tiqn->tiqn_tpg_lock);
3494 					end_of_buf = 1;
3495 					goto eob;
3496 				}
3497 
3498 				if (skip_bytes && len <= skip_bytes) {
3499 					skip_bytes -= len;
3500 				} else {
3501 					memcpy(payload + payload_len, buf, len);
3502 					payload_len += len;
3503 					if (len > skip_bytes)
3504 						skip_bytes = 0;
3505 				}
3506 			}
3507 			spin_unlock(&tpg->tpg_np_lock);
3508 		}
3509 		spin_unlock(&tiqn->tiqn_tpg_lock);
3510 eob:
3511 		if (end_of_buf) {
3512 			*completed = false;
3513 			break;
3514 		}
3515 
3516 		if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE)
3517 			break;
3518 	}
3519 	spin_unlock(&tiqn_lock);
3520 
3521 	cmd->buf_ptr = payload;
3522 
3523 	return payload_len;
3524 }
3525 
3526 int
3527 iscsit_build_text_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
3528 		      struct iscsi_text_rsp *hdr,
3529 		      enum iscsit_transport_type network_transport)
3530 {
3531 	int text_length, padding;
3532 	bool completed = true;
3533 
3534 	text_length = iscsit_build_sendtargets_response(cmd, network_transport,
3535 							cmd->read_data_done,
3536 							&completed);
3537 	if (text_length < 0)
3538 		return text_length;
3539 
3540 	if (completed) {
3541 		hdr->flags = ISCSI_FLAG_CMD_FINAL;
3542 	} else {
3543 		hdr->flags = ISCSI_FLAG_TEXT_CONTINUE;
3544 		cmd->read_data_done += text_length;
3545 		if (cmd->targ_xfer_tag == 0xFFFFFFFF)
3546 			cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
3547 	}
3548 	hdr->opcode = ISCSI_OP_TEXT_RSP;
3549 	padding = ((-text_length) & 3);
3550 	hton24(hdr->dlength, text_length);
3551 	hdr->itt = cmd->init_task_tag;
3552 	hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
3553 	cmd->stat_sn = conn->stat_sn++;
3554 	hdr->statsn = cpu_to_be32(cmd->stat_sn);
3555 
3556 	iscsit_increment_maxcmdsn(cmd, conn->sess);
3557 	/*
3558 	 * Reset maxcmdsn_inc in multi-part text payload exchanges to
3559 	 * correctly increment MaxCmdSN for each response answering a
3560 	 * non immediate text request with a valid CmdSN.
3561 	 */
3562 	cmd->maxcmdsn_inc = 0;
3563 	hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3564 	hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
3565 
3566 	pr_debug("Built Text Response: ITT: 0x%08x, TTT: 0x%08x, StatSN: 0x%08x,"
3567 		" Length: %u, CID: %hu F: %d C: %d\n", cmd->init_task_tag,
3568 		cmd->targ_xfer_tag, cmd->stat_sn, text_length, conn->cid,
3569 		!!(hdr->flags & ISCSI_FLAG_CMD_FINAL),
3570 		!!(hdr->flags & ISCSI_FLAG_TEXT_CONTINUE));
3571 
3572 	return text_length + padding;
3573 }
3574 EXPORT_SYMBOL(iscsit_build_text_rsp);
3575 
3576 static int iscsit_send_text_rsp(
3577 	struct iscsit_cmd *cmd,
3578 	struct iscsit_conn *conn)
3579 {
3580 	struct iscsi_text_rsp *hdr = (struct iscsi_text_rsp *)cmd->pdu;
3581 	int text_length;
3582 
3583 	text_length = iscsit_build_text_rsp(cmd, conn, hdr,
3584 				conn->conn_transport->transport_type);
3585 	if (text_length < 0)
3586 		return text_length;
3587 
3588 	return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL,
3589 						     cmd->buf_ptr,
3590 						     text_length);
3591 }
3592 
3593 void
3594 iscsit_build_reject(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
3595 		    struct iscsi_reject *hdr)
3596 {
3597 	hdr->opcode		= ISCSI_OP_REJECT;
3598 	hdr->reason		= cmd->reject_reason;
3599 	hdr->flags		|= ISCSI_FLAG_CMD_FINAL;
3600 	hton24(hdr->dlength, ISCSI_HDR_LEN);
3601 	hdr->ffffffff		= cpu_to_be32(0xffffffff);
3602 	cmd->stat_sn		= conn->stat_sn++;
3603 	hdr->statsn		= cpu_to_be32(cmd->stat_sn);
3604 	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
3605 	hdr->max_cmdsn		= cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
3606 
3607 }
3608 EXPORT_SYMBOL(iscsit_build_reject);
3609 
3610 static int iscsit_send_reject(
3611 	struct iscsit_cmd *cmd,
3612 	struct iscsit_conn *conn)
3613 {
3614 	struct iscsi_reject *hdr = (struct iscsi_reject *)&cmd->pdu[0];
3615 
3616 	iscsit_build_reject(cmd, conn, hdr);
3617 
3618 	pr_debug("Built Reject PDU StatSN: 0x%08x, Reason: 0x%02x,"
3619 		" CID: %hu\n", ntohl(hdr->statsn), hdr->reason, conn->cid);
3620 
3621 	return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL,
3622 						     cmd->buf_ptr,
3623 						     ISCSI_HDR_LEN);
3624 }
3625 
3626 void iscsit_thread_get_cpumask(struct iscsit_conn *conn)
3627 {
3628 	int ord, cpu;
3629 	cpumask_var_t conn_allowed_cpumask;
3630 
3631 	/*
3632 	 * bitmap_id is assigned from iscsit_global->ts_bitmap from
3633 	 * within iscsit_start_kthreads()
3634 	 *
3635 	 * Here we use bitmap_id to determine which CPU that this
3636 	 * iSCSI connection's RX/TX threads will be scheduled to
3637 	 * execute upon.
3638 	 */
3639 	if (!zalloc_cpumask_var(&conn_allowed_cpumask, GFP_KERNEL)) {
3640 		ord = conn->bitmap_id % cpumask_weight(cpu_online_mask);
3641 		for_each_online_cpu(cpu) {
3642 			if (ord-- == 0) {
3643 				cpumask_set_cpu(cpu, conn->conn_cpumask);
3644 				return;
3645 			}
3646 		}
3647 	} else {
3648 		cpumask_and(conn_allowed_cpumask, iscsit_global->allowed_cpumask,
3649 			cpu_online_mask);
3650 
3651 		cpumask_clear(conn->conn_cpumask);
3652 		ord = conn->bitmap_id % cpumask_weight(conn_allowed_cpumask);
3653 		for_each_cpu(cpu, conn_allowed_cpumask) {
3654 			if (ord-- == 0) {
3655 				cpumask_set_cpu(cpu, conn->conn_cpumask);
3656 				free_cpumask_var(conn_allowed_cpumask);
3657 				return;
3658 			}
3659 		}
3660 		free_cpumask_var(conn_allowed_cpumask);
3661 	}
3662 	/*
3663 	 * This should never be reached..
3664 	 */
3665 	dump_stack();
3666 	cpumask_setall(conn->conn_cpumask);
3667 }
3668 
3669 static void iscsit_thread_reschedule(struct iscsit_conn *conn)
3670 {
3671 	/*
3672 	 * If iscsit_global->allowed_cpumask modified, reschedule iSCSI
3673 	 * connection's RX/TX threads update conn->allowed_cpumask.
3674 	 */
3675 	if (!cpumask_equal(iscsit_global->allowed_cpumask,
3676 			   conn->allowed_cpumask)) {
3677 		iscsit_thread_get_cpumask(conn);
3678 		conn->conn_tx_reset_cpumask = 1;
3679 		conn->conn_rx_reset_cpumask = 1;
3680 		cpumask_copy(conn->allowed_cpumask,
3681 			     iscsit_global->allowed_cpumask);
3682 	}
3683 }
3684 
3685 void iscsit_thread_check_cpumask(
3686 	struct iscsit_conn *conn,
3687 	struct task_struct *p,
3688 	int mode)
3689 {
3690 	/*
3691 	 * The TX and RX threads maybe call iscsit_thread_check_cpumask()
3692 	 * at the same time. The RX thread might be faster and return from
3693 	 * iscsit_thread_reschedule() with conn_rx_reset_cpumask set to 0.
3694 	 * Then the TX thread sets it back to 1.
3695 	 * The next time the RX thread loops, it sees conn_rx_reset_cpumask
3696 	 * set to 1 and calls set_cpus_allowed_ptr() again and set it to 0.
3697 	 */
3698 	iscsit_thread_reschedule(conn);
3699 
3700 	/*
3701 	 * mode == 1 signals iscsi_target_tx_thread() usage.
3702 	 * mode == 0 signals iscsi_target_rx_thread() usage.
3703 	 */
3704 	if (mode == 1) {
3705 		if (!conn->conn_tx_reset_cpumask)
3706 			return;
3707 	} else {
3708 		if (!conn->conn_rx_reset_cpumask)
3709 			return;
3710 	}
3711 
3712 	/*
3713 	 * Update the CPU mask for this single kthread so that
3714 	 * both TX and RX kthreads are scheduled to run on the
3715 	 * same CPU.
3716 	 */
3717 	set_cpus_allowed_ptr(p, conn->conn_cpumask);
3718 	if (mode == 1)
3719 		conn->conn_tx_reset_cpumask = 0;
3720 	else
3721 		conn->conn_rx_reset_cpumask = 0;
3722 }
3723 EXPORT_SYMBOL(iscsit_thread_check_cpumask);
3724 
3725 int
3726 iscsit_immediate_queue(struct iscsit_conn *conn, struct iscsit_cmd *cmd, int state)
3727 {
3728 	int ret;
3729 
3730 	switch (state) {
3731 	case ISTATE_SEND_R2T:
3732 		ret = iscsit_send_r2t(cmd, conn);
3733 		if (ret < 0)
3734 			goto err;
3735 		break;
3736 	case ISTATE_REMOVE:
3737 		spin_lock_bh(&conn->cmd_lock);
3738 		list_del_init(&cmd->i_conn_node);
3739 		spin_unlock_bh(&conn->cmd_lock);
3740 
3741 		iscsit_free_cmd(cmd, false);
3742 		break;
3743 	case ISTATE_SEND_NOPIN_WANT_RESPONSE:
3744 		iscsit_mod_nopin_response_timer(conn);
3745 		ret = iscsit_send_unsolicited_nopin(cmd, conn, 1);
3746 		if (ret < 0)
3747 			goto err;
3748 		break;
3749 	case ISTATE_SEND_NOPIN_NO_RESPONSE:
3750 		ret = iscsit_send_unsolicited_nopin(cmd, conn, 0);
3751 		if (ret < 0)
3752 			goto err;
3753 		break;
3754 	default:
3755 		pr_err("Unknown Opcode: 0x%02x ITT:"
3756 		       " 0x%08x, i_state: %d on CID: %hu\n",
3757 		       cmd->iscsi_opcode, cmd->init_task_tag, state,
3758 		       conn->cid);
3759 		goto err;
3760 	}
3761 
3762 	return 0;
3763 
3764 err:
3765 	return -1;
3766 }
3767 EXPORT_SYMBOL(iscsit_immediate_queue);
3768 
3769 static int
3770 iscsit_handle_immediate_queue(struct iscsit_conn *conn)
3771 {
3772 	struct iscsit_transport *t = conn->conn_transport;
3773 	struct iscsi_queue_req *qr;
3774 	struct iscsit_cmd *cmd;
3775 	u8 state;
3776 	int ret;
3777 
3778 	while ((qr = iscsit_get_cmd_from_immediate_queue(conn))) {
3779 		atomic_set(&conn->check_immediate_queue, 0);
3780 		cmd = qr->cmd;
3781 		state = qr->state;
3782 		kmem_cache_free(lio_qr_cache, qr);
3783 
3784 		ret = t->iscsit_immediate_queue(conn, cmd, state);
3785 		if (ret < 0)
3786 			return ret;
3787 	}
3788 
3789 	return 0;
3790 }
3791 
3792 int
3793 iscsit_response_queue(struct iscsit_conn *conn, struct iscsit_cmd *cmd, int state)
3794 {
3795 	int ret;
3796 
3797 check_rsp_state:
3798 	switch (state) {
3799 	case ISTATE_SEND_DATAIN:
3800 		ret = iscsit_send_datain(cmd, conn);
3801 		if (ret < 0)
3802 			goto err;
3803 		else if (!ret)
3804 			/* more drs */
3805 			goto check_rsp_state;
3806 		else if (ret == 1) {
3807 			/* all done */
3808 			spin_lock_bh(&cmd->istate_lock);
3809 			cmd->i_state = ISTATE_SENT_STATUS;
3810 			spin_unlock_bh(&cmd->istate_lock);
3811 
3812 			if (atomic_read(&conn->check_immediate_queue))
3813 				return 1;
3814 
3815 			return 0;
3816 		} else if (ret == 2) {
3817 			/* Still must send status,
3818 			   SCF_TRANSPORT_TASK_SENSE was set */
3819 			spin_lock_bh(&cmd->istate_lock);
3820 			cmd->i_state = ISTATE_SEND_STATUS;
3821 			spin_unlock_bh(&cmd->istate_lock);
3822 			state = ISTATE_SEND_STATUS;
3823 			goto check_rsp_state;
3824 		}
3825 
3826 		break;
3827 	case ISTATE_SEND_STATUS:
3828 	case ISTATE_SEND_STATUS_RECOVERY:
3829 		ret = iscsit_send_response(cmd, conn);
3830 		break;
3831 	case ISTATE_SEND_LOGOUTRSP:
3832 		ret = iscsit_send_logout(cmd, conn);
3833 		break;
3834 	case ISTATE_SEND_ASYNCMSG:
3835 		ret = iscsit_send_conn_drop_async_message(
3836 			cmd, conn);
3837 		break;
3838 	case ISTATE_SEND_NOPIN:
3839 		ret = iscsit_send_nopin(cmd, conn);
3840 		break;
3841 	case ISTATE_SEND_REJECT:
3842 		ret = iscsit_send_reject(cmd, conn);
3843 		break;
3844 	case ISTATE_SEND_TASKMGTRSP:
3845 		ret = iscsit_send_task_mgt_rsp(cmd, conn);
3846 		if (ret != 0)
3847 			break;
3848 		ret = iscsit_tmr_post_handler(cmd, conn);
3849 		if (ret != 0)
3850 			iscsit_fall_back_to_erl0(conn->sess);
3851 		break;
3852 	case ISTATE_SEND_TEXTRSP:
3853 		ret = iscsit_send_text_rsp(cmd, conn);
3854 		break;
3855 	default:
3856 		pr_err("Unknown Opcode: 0x%02x ITT:"
3857 		       " 0x%08x, i_state: %d on CID: %hu\n",
3858 		       cmd->iscsi_opcode, cmd->init_task_tag,
3859 		       state, conn->cid);
3860 		goto err;
3861 	}
3862 	if (ret < 0)
3863 		goto err;
3864 
3865 	switch (state) {
3866 	case ISTATE_SEND_LOGOUTRSP:
3867 		if (!iscsit_logout_post_handler(cmd, conn))
3868 			return -ECONNRESET;
3869 		fallthrough;
3870 	case ISTATE_SEND_STATUS:
3871 	case ISTATE_SEND_ASYNCMSG:
3872 	case ISTATE_SEND_NOPIN:
3873 	case ISTATE_SEND_STATUS_RECOVERY:
3874 	case ISTATE_SEND_TEXTRSP:
3875 	case ISTATE_SEND_TASKMGTRSP:
3876 	case ISTATE_SEND_REJECT:
3877 		spin_lock_bh(&cmd->istate_lock);
3878 		cmd->i_state = ISTATE_SENT_STATUS;
3879 		spin_unlock_bh(&cmd->istate_lock);
3880 		break;
3881 	default:
3882 		pr_err("Unknown Opcode: 0x%02x ITT:"
3883 		       " 0x%08x, i_state: %d on CID: %hu\n",
3884 		       cmd->iscsi_opcode, cmd->init_task_tag,
3885 		       cmd->i_state, conn->cid);
3886 		goto err;
3887 	}
3888 
3889 	if (atomic_read(&conn->check_immediate_queue))
3890 		return 1;
3891 
3892 	return 0;
3893 
3894 err:
3895 	return -1;
3896 }
3897 EXPORT_SYMBOL(iscsit_response_queue);
3898 
3899 static int iscsit_handle_response_queue(struct iscsit_conn *conn)
3900 {
3901 	struct iscsit_transport *t = conn->conn_transport;
3902 	struct iscsi_queue_req *qr;
3903 	struct iscsit_cmd *cmd;
3904 	u8 state;
3905 	int ret;
3906 
3907 	while ((qr = iscsit_get_cmd_from_response_queue(conn))) {
3908 		cmd = qr->cmd;
3909 		state = qr->state;
3910 		kmem_cache_free(lio_qr_cache, qr);
3911 
3912 		ret = t->iscsit_response_queue(conn, cmd, state);
3913 		if (ret == 1 || ret < 0)
3914 			return ret;
3915 	}
3916 
3917 	return 0;
3918 }
3919 
3920 int iscsi_target_tx_thread(void *arg)
3921 {
3922 	int ret = 0;
3923 	struct iscsit_conn *conn = arg;
3924 	bool conn_freed = false;
3925 
3926 	/*
3927 	 * Allow ourselves to be interrupted by SIGINT so that a
3928 	 * connection recovery / failure event can be triggered externally.
3929 	 */
3930 	allow_signal(SIGINT);
3931 
3932 	while (!kthread_should_stop()) {
3933 		/*
3934 		 * Ensure that both TX and RX per connection kthreads
3935 		 * are scheduled to run on the same CPU.
3936 		 */
3937 		iscsit_thread_check_cpumask(conn, current, 1);
3938 
3939 		wait_event_interruptible(conn->queues_wq,
3940 					 !iscsit_conn_all_queues_empty(conn));
3941 
3942 		if (signal_pending(current))
3943 			goto transport_err;
3944 
3945 get_immediate:
3946 		ret = iscsit_handle_immediate_queue(conn);
3947 		if (ret < 0)
3948 			goto transport_err;
3949 
3950 		ret = iscsit_handle_response_queue(conn);
3951 		if (ret == 1) {
3952 			goto get_immediate;
3953 		} else if (ret == -ECONNRESET) {
3954 			conn_freed = true;
3955 			goto out;
3956 		} else if (ret < 0) {
3957 			goto transport_err;
3958 		}
3959 	}
3960 
3961 transport_err:
3962 	/*
3963 	 * Avoid the normal connection failure code-path if this connection
3964 	 * is still within LOGIN mode, and iscsi_np process context is
3965 	 * responsible for cleaning up the early connection failure.
3966 	 */
3967 	if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN)
3968 		iscsit_take_action_for_connection_exit(conn, &conn_freed);
3969 out:
3970 	if (!conn_freed) {
3971 		while (!kthread_should_stop()) {
3972 			msleep(100);
3973 		}
3974 	}
3975 	return 0;
3976 }
3977 
3978 static int iscsi_target_rx_opcode(struct iscsit_conn *conn, unsigned char *buf)
3979 {
3980 	struct iscsi_hdr *hdr = (struct iscsi_hdr *)buf;
3981 	struct iscsit_cmd *cmd;
3982 	int ret = 0;
3983 
3984 	switch (hdr->opcode & ISCSI_OPCODE_MASK) {
3985 	case ISCSI_OP_SCSI_CMD:
3986 		cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
3987 		if (!cmd)
3988 			goto reject;
3989 
3990 		ret = iscsit_handle_scsi_cmd(conn, cmd, buf);
3991 		break;
3992 	case ISCSI_OP_SCSI_DATA_OUT:
3993 		ret = iscsit_handle_data_out(conn, buf);
3994 		break;
3995 	case ISCSI_OP_NOOP_OUT:
3996 		cmd = NULL;
3997 		if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
3998 			cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
3999 			if (!cmd)
4000 				goto reject;
4001 		}
4002 		ret = iscsit_handle_nop_out(conn, cmd, buf);
4003 		break;
4004 	case ISCSI_OP_SCSI_TMFUNC:
4005 		cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
4006 		if (!cmd)
4007 			goto reject;
4008 
4009 		ret = iscsit_handle_task_mgt_cmd(conn, cmd, buf);
4010 		break;
4011 	case ISCSI_OP_TEXT:
4012 		if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) {
4013 			cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
4014 			if (!cmd)
4015 				goto reject;
4016 		} else {
4017 			cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
4018 			if (!cmd)
4019 				goto reject;
4020 		}
4021 
4022 		ret = iscsit_handle_text_cmd(conn, cmd, buf);
4023 		break;
4024 	case ISCSI_OP_LOGOUT:
4025 		cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
4026 		if (!cmd)
4027 			goto reject;
4028 
4029 		ret = iscsit_handle_logout_cmd(conn, cmd, buf);
4030 		if (ret > 0)
4031 			wait_for_completion_timeout(&conn->conn_logout_comp,
4032 					SECONDS_FOR_LOGOUT_COMP * HZ);
4033 		break;
4034 	case ISCSI_OP_SNACK:
4035 		ret = iscsit_handle_snack(conn, buf);
4036 		break;
4037 	default:
4038 		pr_err("Got unknown iSCSI OpCode: 0x%02x\n", hdr->opcode);
4039 		if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
4040 			pr_err("Cannot recover from unknown"
4041 			" opcode while ERL=0, closing iSCSI connection.\n");
4042 			return -1;
4043 		}
4044 		pr_err("Unable to recover from unknown opcode while OFMarker=No,"
4045 		       " closing iSCSI connection.\n");
4046 		ret = -1;
4047 		break;
4048 	}
4049 
4050 	return ret;
4051 reject:
4052 	return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
4053 }
4054 
4055 static bool iscsi_target_check_conn_state(struct iscsit_conn *conn)
4056 {
4057 	bool ret;
4058 
4059 	spin_lock_bh(&conn->state_lock);
4060 	ret = (conn->conn_state != TARG_CONN_STATE_LOGGED_IN);
4061 	spin_unlock_bh(&conn->state_lock);
4062 
4063 	return ret;
4064 }
4065 
4066 static void iscsit_get_rx_pdu(struct iscsit_conn *conn)
4067 {
4068 	int ret;
4069 	u8 *buffer, *tmp_buf, opcode;
4070 	u32 checksum = 0, digest = 0;
4071 	struct iscsi_hdr *hdr;
4072 	struct kvec iov;
4073 
4074 	buffer = kcalloc(ISCSI_HDR_LEN, sizeof(*buffer), GFP_KERNEL);
4075 	if (!buffer)
4076 		return;
4077 
4078 	while (!kthread_should_stop()) {
4079 		/*
4080 		 * Ensure that both TX and RX per connection kthreads
4081 		 * are scheduled to run on the same CPU.
4082 		 */
4083 		iscsit_thread_check_cpumask(conn, current, 0);
4084 
4085 		memset(&iov, 0, sizeof(struct kvec));
4086 
4087 		iov.iov_base	= buffer;
4088 		iov.iov_len	= ISCSI_HDR_LEN;
4089 
4090 		ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN);
4091 		if (ret != ISCSI_HDR_LEN) {
4092 			iscsit_rx_thread_wait_for_tcp(conn);
4093 			break;
4094 		}
4095 
4096 		hdr = (struct iscsi_hdr *) buffer;
4097 		if (hdr->hlength) {
4098 			iov.iov_len = hdr->hlength * 4;
4099 			tmp_buf = krealloc(buffer,
4100 					  ISCSI_HDR_LEN + iov.iov_len,
4101 					  GFP_KERNEL);
4102 			if (!tmp_buf)
4103 				break;
4104 
4105 			buffer = tmp_buf;
4106 			iov.iov_base = &buffer[ISCSI_HDR_LEN];
4107 
4108 			ret = rx_data(conn, &iov, 1, iov.iov_len);
4109 			if (ret != iov.iov_len) {
4110 				iscsit_rx_thread_wait_for_tcp(conn);
4111 				break;
4112 			}
4113 		}
4114 
4115 		if (conn->conn_ops->HeaderDigest) {
4116 			iov.iov_base	= &digest;
4117 			iov.iov_len	= ISCSI_CRC_LEN;
4118 
4119 			ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
4120 			if (ret != ISCSI_CRC_LEN) {
4121 				iscsit_rx_thread_wait_for_tcp(conn);
4122 				break;
4123 			}
4124 
4125 			iscsit_do_crypto_hash_buf(conn->conn_rx_hash, buffer,
4126 						  ISCSI_HDR_LEN, 0, NULL,
4127 						  &checksum);
4128 
4129 			if (digest != checksum) {
4130 				pr_err("HeaderDigest CRC32C failed,"
4131 					" received 0x%08x, computed 0x%08x\n",
4132 					digest, checksum);
4133 				/*
4134 				 * Set the PDU to 0xff so it will intentionally
4135 				 * hit default in the switch below.
4136 				 */
4137 				memset(buffer, 0xff, ISCSI_HDR_LEN);
4138 				atomic_long_inc(&conn->sess->conn_digest_errors);
4139 			} else {
4140 				pr_debug("Got HeaderDigest CRC32C"
4141 						" 0x%08x\n", checksum);
4142 			}
4143 		}
4144 
4145 		if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
4146 			break;
4147 
4148 		opcode = buffer[0] & ISCSI_OPCODE_MASK;
4149 
4150 		if (conn->sess->sess_ops->SessionType &&
4151 		   ((!(opcode & ISCSI_OP_TEXT)) ||
4152 		    (!(opcode & ISCSI_OP_LOGOUT)))) {
4153 			pr_err("Received illegal iSCSI Opcode: 0x%02x"
4154 			" while in Discovery Session, rejecting.\n", opcode);
4155 			iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
4156 					  buffer);
4157 			break;
4158 		}
4159 
4160 		ret = iscsi_target_rx_opcode(conn, buffer);
4161 		if (ret < 0)
4162 			break;
4163 	}
4164 
4165 	kfree(buffer);
4166 }
4167 
4168 int iscsi_target_rx_thread(void *arg)
4169 {
4170 	int rc;
4171 	struct iscsit_conn *conn = arg;
4172 	bool conn_freed = false;
4173 
4174 	/*
4175 	 * Allow ourselves to be interrupted by SIGINT so that a
4176 	 * connection recovery / failure event can be triggered externally.
4177 	 */
4178 	allow_signal(SIGINT);
4179 	/*
4180 	 * Wait for iscsi_post_login_handler() to complete before allowing
4181 	 * incoming iscsi/tcp socket I/O, and/or failing the connection.
4182 	 */
4183 	rc = wait_for_completion_interruptible(&conn->rx_login_comp);
4184 	if (rc < 0 || iscsi_target_check_conn_state(conn))
4185 		goto out;
4186 
4187 	if (!conn->conn_transport->iscsit_get_rx_pdu)
4188 		return 0;
4189 
4190 	conn->conn_transport->iscsit_get_rx_pdu(conn);
4191 
4192 	if (!signal_pending(current))
4193 		atomic_set(&conn->transport_failed, 1);
4194 	iscsit_take_action_for_connection_exit(conn, &conn_freed);
4195 
4196 out:
4197 	if (!conn_freed) {
4198 		while (!kthread_should_stop()) {
4199 			msleep(100);
4200 		}
4201 	}
4202 
4203 	return 0;
4204 }
4205 
4206 static void iscsit_release_commands_from_conn(struct iscsit_conn *conn)
4207 {
4208 	LIST_HEAD(tmp_list);
4209 	struct iscsit_cmd *cmd = NULL, *cmd_tmp = NULL;
4210 	struct iscsit_session *sess = conn->sess;
4211 	/*
4212 	 * We expect this function to only ever be called from either RX or TX
4213 	 * thread context via iscsit_close_connection() once the other context
4214 	 * has been reset -> returned sleeping pre-handler state.
4215 	 */
4216 	spin_lock_bh(&conn->cmd_lock);
4217 	list_splice_init(&conn->conn_cmd_list, &tmp_list);
4218 
4219 	list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) {
4220 		struct se_cmd *se_cmd = &cmd->se_cmd;
4221 
4222 		if (!se_cmd->se_tfo)
4223 			continue;
4224 
4225 		spin_lock_irq(&se_cmd->t_state_lock);
4226 		if (se_cmd->transport_state & CMD_T_ABORTED) {
4227 			if (!(se_cmd->transport_state & CMD_T_TAS))
4228 				/*
4229 				 * LIO's abort path owns the cleanup for this,
4230 				 * so put it back on the list and let
4231 				 * aborted_task handle it.
4232 				 */
4233 				list_move_tail(&cmd->i_conn_node,
4234 					       &conn->conn_cmd_list);
4235 		} else {
4236 			se_cmd->transport_state |= CMD_T_FABRIC_STOP;
4237 		}
4238 
4239 		if (cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
4240 			/*
4241 			 * We never submitted the cmd to LIO core, so we have
4242 			 * to tell LIO to perform the completion process.
4243 			 */
4244 			spin_unlock_irq(&se_cmd->t_state_lock);
4245 			target_complete_cmd(&cmd->se_cmd, SAM_STAT_TASK_ABORTED);
4246 			continue;
4247 		}
4248 		spin_unlock_irq(&se_cmd->t_state_lock);
4249 	}
4250 	spin_unlock_bh(&conn->cmd_lock);
4251 
4252 	list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) {
4253 		list_del_init(&cmd->i_conn_node);
4254 
4255 		iscsit_increment_maxcmdsn(cmd, sess);
4256 		iscsit_free_cmd(cmd, true);
4257 
4258 	}
4259 
4260 	/*
4261 	 * Wait on commands that were cleaned up via the aborted_task path.
4262 	 * LLDs that implement iscsit_wait_conn will already have waited for
4263 	 * commands.
4264 	 */
4265 	if (!conn->conn_transport->iscsit_wait_conn) {
4266 		target_stop_cmd_counter(conn->cmd_cnt);
4267 		target_wait_for_cmds(conn->cmd_cnt);
4268 	}
4269 }
4270 
4271 static void iscsit_stop_timers_for_cmds(
4272 	struct iscsit_conn *conn)
4273 {
4274 	struct iscsit_cmd *cmd;
4275 
4276 	spin_lock_bh(&conn->cmd_lock);
4277 	list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
4278 		if (cmd->data_direction == DMA_TO_DEVICE)
4279 			iscsit_stop_dataout_timer(cmd);
4280 	}
4281 	spin_unlock_bh(&conn->cmd_lock);
4282 }
4283 
4284 int iscsit_close_connection(
4285 	struct iscsit_conn *conn)
4286 {
4287 	int conn_logout = (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT);
4288 	struct iscsit_session	*sess = conn->sess;
4289 
4290 	pr_debug("Closing iSCSI connection CID %hu on SID:"
4291 		" %u\n", conn->cid, sess->sid);
4292 	/*
4293 	 * Always up conn_logout_comp for the traditional TCP and HW_OFFLOAD
4294 	 * case just in case the RX Thread in iscsi_target_rx_opcode() is
4295 	 * sleeping and the logout response never got sent because the
4296 	 * connection failed.
4297 	 *
4298 	 * However for iser-target, isert_wait4logout() is using conn_logout_comp
4299 	 * to signal logout response TX interrupt completion.  Go ahead and skip
4300 	 * this for iser since isert_rx_opcode() does not wait on logout failure,
4301 	 * and to avoid iscsit_conn pointer dereference in iser-target code.
4302 	 */
4303 	if (!conn->conn_transport->rdma_shutdown)
4304 		complete(&conn->conn_logout_comp);
4305 
4306 	if (!strcmp(current->comm, ISCSI_RX_THREAD_NAME)) {
4307 		if (conn->tx_thread &&
4308 		    cmpxchg(&conn->tx_thread_active, true, false)) {
4309 			send_sig(SIGINT, conn->tx_thread, 1);
4310 			kthread_stop(conn->tx_thread);
4311 		}
4312 	} else if (!strcmp(current->comm, ISCSI_TX_THREAD_NAME)) {
4313 		if (conn->rx_thread &&
4314 		    cmpxchg(&conn->rx_thread_active, true, false)) {
4315 			send_sig(SIGINT, conn->rx_thread, 1);
4316 			kthread_stop(conn->rx_thread);
4317 		}
4318 	}
4319 
4320 	spin_lock(&iscsit_global->ts_bitmap_lock);
4321 	bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
4322 			      get_order(1));
4323 	spin_unlock(&iscsit_global->ts_bitmap_lock);
4324 
4325 	iscsit_stop_timers_for_cmds(conn);
4326 	iscsit_stop_nopin_response_timer(conn);
4327 	iscsit_stop_nopin_timer(conn);
4328 
4329 	if (conn->conn_transport->iscsit_wait_conn)
4330 		conn->conn_transport->iscsit_wait_conn(conn);
4331 
4332 	/*
4333 	 * During Connection recovery drop unacknowledged out of order
4334 	 * commands for this connection, and prepare the other commands
4335 	 * for reallegiance.
4336 	 *
4337 	 * During normal operation clear the out of order commands (but
4338 	 * do not free the struct iscsi_ooo_cmdsn's) and release all
4339 	 * struct iscsit_cmds.
4340 	 */
4341 	if (atomic_read(&conn->connection_recovery)) {
4342 		iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(conn);
4343 		iscsit_prepare_cmds_for_reallegiance(conn);
4344 	} else {
4345 		iscsit_clear_ooo_cmdsns_for_conn(conn);
4346 		iscsit_release_commands_from_conn(conn);
4347 	}
4348 	iscsit_free_queue_reqs_for_conn(conn);
4349 
4350 	/*
4351 	 * Handle decrementing session or connection usage count if
4352 	 * a logout response was not able to be sent because the
4353 	 * connection failed.  Fall back to Session Recovery here.
4354 	 */
4355 	if (atomic_read(&conn->conn_logout_remove)) {
4356 		if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_SESSION) {
4357 			iscsit_dec_conn_usage_count(conn);
4358 			iscsit_dec_session_usage_count(sess);
4359 		}
4360 		if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION)
4361 			iscsit_dec_conn_usage_count(conn);
4362 
4363 		atomic_set(&conn->conn_logout_remove, 0);
4364 		atomic_set(&sess->session_reinstatement, 0);
4365 		atomic_set(&sess->session_fall_back_to_erl0, 1);
4366 	}
4367 
4368 	spin_lock_bh(&sess->conn_lock);
4369 	list_del(&conn->conn_list);
4370 
4371 	/*
4372 	 * Attempt to let the Initiator know this connection failed by
4373 	 * sending an Connection Dropped Async Message on another
4374 	 * active connection.
4375 	 */
4376 	if (atomic_read(&conn->connection_recovery))
4377 		iscsit_build_conn_drop_async_message(conn);
4378 
4379 	spin_unlock_bh(&sess->conn_lock);
4380 
4381 	/*
4382 	 * If connection reinstatement is being performed on this connection,
4383 	 * up the connection reinstatement semaphore that is being blocked on
4384 	 * in iscsit_cause_connection_reinstatement().
4385 	 */
4386 	spin_lock_bh(&conn->state_lock);
4387 	if (atomic_read(&conn->sleep_on_conn_wait_comp)) {
4388 		spin_unlock_bh(&conn->state_lock);
4389 		complete(&conn->conn_wait_comp);
4390 		wait_for_completion(&conn->conn_post_wait_comp);
4391 		spin_lock_bh(&conn->state_lock);
4392 	}
4393 
4394 	/*
4395 	 * If connection reinstatement is being performed on this connection
4396 	 * by receiving a REMOVECONNFORRECOVERY logout request, up the
4397 	 * connection wait rcfr semaphore that is being blocked on
4398 	 * an iscsit_connection_reinstatement_rcfr().
4399 	 */
4400 	if (atomic_read(&conn->connection_wait_rcfr)) {
4401 		spin_unlock_bh(&conn->state_lock);
4402 		complete(&conn->conn_wait_rcfr_comp);
4403 		wait_for_completion(&conn->conn_post_wait_comp);
4404 		spin_lock_bh(&conn->state_lock);
4405 	}
4406 	atomic_set(&conn->connection_reinstatement, 1);
4407 	spin_unlock_bh(&conn->state_lock);
4408 
4409 	/*
4410 	 * If any other processes are accessing this connection pointer we
4411 	 * must wait until they have completed.
4412 	 */
4413 	iscsit_check_conn_usage_count(conn);
4414 
4415 	ahash_request_free(conn->conn_tx_hash);
4416 	if (conn->conn_rx_hash) {
4417 		struct crypto_ahash *tfm;
4418 
4419 		tfm = crypto_ahash_reqtfm(conn->conn_rx_hash);
4420 		ahash_request_free(conn->conn_rx_hash);
4421 		crypto_free_ahash(tfm);
4422 	}
4423 
4424 	if (conn->sock)
4425 		sock_release(conn->sock);
4426 
4427 	if (conn->conn_transport->iscsit_free_conn)
4428 		conn->conn_transport->iscsit_free_conn(conn);
4429 
4430 	pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
4431 	conn->conn_state = TARG_CONN_STATE_FREE;
4432 	iscsit_free_conn(conn);
4433 
4434 	spin_lock_bh(&sess->conn_lock);
4435 	atomic_dec(&sess->nconn);
4436 	pr_debug("Decremented iSCSI connection count to %d from node:"
4437 		" %s\n", atomic_read(&sess->nconn),
4438 		sess->sess_ops->InitiatorName);
4439 	/*
4440 	 * Make sure that if one connection fails in an non ERL=2 iSCSI
4441 	 * Session that they all fail.
4442 	 */
4443 	if ((sess->sess_ops->ErrorRecoveryLevel != 2) && !conn_logout &&
4444 	     !atomic_read(&sess->session_logout))
4445 		atomic_set(&sess->session_fall_back_to_erl0, 1);
4446 
4447 	/*
4448 	 * If this was not the last connection in the session, and we are
4449 	 * performing session reinstatement or falling back to ERL=0, call
4450 	 * iscsit_stop_session() without sleeping to shutdown the other
4451 	 * active connections.
4452 	 */
4453 	if (atomic_read(&sess->nconn)) {
4454 		if (!atomic_read(&sess->session_reinstatement) &&
4455 		    !atomic_read(&sess->session_fall_back_to_erl0)) {
4456 			spin_unlock_bh(&sess->conn_lock);
4457 			return 0;
4458 		}
4459 		if (!atomic_read(&sess->session_stop_active)) {
4460 			atomic_set(&sess->session_stop_active, 1);
4461 			spin_unlock_bh(&sess->conn_lock);
4462 			iscsit_stop_session(sess, 0, 0);
4463 			return 0;
4464 		}
4465 		spin_unlock_bh(&sess->conn_lock);
4466 		return 0;
4467 	}
4468 
4469 	/*
4470 	 * If this was the last connection in the session and one of the
4471 	 * following is occurring:
4472 	 *
4473 	 * Session Reinstatement is not being performed, and are falling back
4474 	 * to ERL=0 call iscsit_close_session().
4475 	 *
4476 	 * Session Logout was requested.  iscsit_close_session() will be called
4477 	 * elsewhere.
4478 	 *
4479 	 * Session Continuation is not being performed, start the Time2Retain
4480 	 * handler and check if sleep_on_sess_wait_sem is active.
4481 	 */
4482 	if (!atomic_read(&sess->session_reinstatement) &&
4483 	     atomic_read(&sess->session_fall_back_to_erl0)) {
4484 		spin_unlock_bh(&sess->conn_lock);
4485 		complete_all(&sess->session_wait_comp);
4486 		iscsit_close_session(sess, true);
4487 
4488 		return 0;
4489 	} else if (atomic_read(&sess->session_logout)) {
4490 		pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
4491 		sess->session_state = TARG_SESS_STATE_FREE;
4492 
4493 		if (atomic_read(&sess->session_close)) {
4494 			spin_unlock_bh(&sess->conn_lock);
4495 			complete_all(&sess->session_wait_comp);
4496 			iscsit_close_session(sess, true);
4497 		} else {
4498 			spin_unlock_bh(&sess->conn_lock);
4499 		}
4500 
4501 		return 0;
4502 	} else {
4503 		pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
4504 		sess->session_state = TARG_SESS_STATE_FAILED;
4505 
4506 		if (!atomic_read(&sess->session_continuation))
4507 			iscsit_start_time2retain_handler(sess);
4508 
4509 		if (atomic_read(&sess->session_close)) {
4510 			spin_unlock_bh(&sess->conn_lock);
4511 			complete_all(&sess->session_wait_comp);
4512 			iscsit_close_session(sess, true);
4513 		} else {
4514 			spin_unlock_bh(&sess->conn_lock);
4515 		}
4516 
4517 		return 0;
4518 	}
4519 }
4520 
4521 /*
4522  * If the iSCSI Session for the iSCSI Initiator Node exists,
4523  * forcefully shutdown the iSCSI NEXUS.
4524  */
4525 int iscsit_close_session(struct iscsit_session *sess, bool can_sleep)
4526 {
4527 	struct iscsi_portal_group *tpg = sess->tpg;
4528 	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
4529 
4530 	if (atomic_read(&sess->nconn)) {
4531 		pr_err("%d connection(s) still exist for iSCSI session"
4532 			" to %s\n", atomic_read(&sess->nconn),
4533 			sess->sess_ops->InitiatorName);
4534 		BUG();
4535 	}
4536 
4537 	spin_lock_bh(&se_tpg->session_lock);
4538 	atomic_set(&sess->session_logout, 1);
4539 	atomic_set(&sess->session_reinstatement, 1);
4540 	iscsit_stop_time2retain_timer(sess);
4541 	spin_unlock_bh(&se_tpg->session_lock);
4542 
4543 	if (sess->sess_ops->ErrorRecoveryLevel == 2)
4544 		iscsit_free_connection_recovery_entries(sess);
4545 
4546 	/*
4547 	 * transport_deregister_session_configfs() will clear the
4548 	 * struct se_node_acl->nacl_sess pointer now as a iscsi_np process context
4549 	 * can be setting it again with __transport_register_session() in
4550 	 * iscsi_post_login_handler() again after the iscsit_stop_session()
4551 	 * completes in iscsi_np context.
4552 	 */
4553 	transport_deregister_session_configfs(sess->se_sess);
4554 
4555 	/*
4556 	 * If any other processes are accessing this session pointer we must
4557 	 * wait until they have completed.  If we are in an interrupt (the
4558 	 * time2retain handler) and contain and active session usage count we
4559 	 * restart the timer and exit.
4560 	 */
4561 	if (iscsit_check_session_usage_count(sess, can_sleep)) {
4562 		atomic_set(&sess->session_logout, 0);
4563 		iscsit_start_time2retain_handler(sess);
4564 		return 0;
4565 	}
4566 
4567 	transport_deregister_session(sess->se_sess);
4568 
4569 	iscsit_free_all_ooo_cmdsns(sess);
4570 
4571 	spin_lock_bh(&se_tpg->session_lock);
4572 	pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
4573 	sess->session_state = TARG_SESS_STATE_FREE;
4574 	pr_debug("Released iSCSI session from node: %s\n",
4575 			sess->sess_ops->InitiatorName);
4576 	tpg->nsessions--;
4577 	if (tpg->tpg_tiqn)
4578 		tpg->tpg_tiqn->tiqn_nsessions--;
4579 
4580 	pr_debug("Decremented number of active iSCSI Sessions on"
4581 		" iSCSI TPG: %hu to %u\n", tpg->tpgt, tpg->nsessions);
4582 
4583 	ida_free(&sess_ida, sess->session_index);
4584 	kfree(sess->sess_ops);
4585 	sess->sess_ops = NULL;
4586 	spin_unlock_bh(&se_tpg->session_lock);
4587 
4588 	kfree(sess);
4589 	return 0;
4590 }
4591 
4592 static void iscsit_logout_post_handler_closesession(
4593 	struct iscsit_conn *conn)
4594 {
4595 	struct iscsit_session *sess = conn->sess;
4596 	int sleep = 1;
4597 	/*
4598 	 * Traditional iscsi/tcp will invoke this logic from TX thread
4599 	 * context during session logout, so clear tx_thread_active and
4600 	 * sleep if iscsit_close_connection() has not already occured.
4601 	 *
4602 	 * Since iser-target invokes this logic from it's own workqueue,
4603 	 * always sleep waiting for RX/TX thread shutdown to complete
4604 	 * within iscsit_close_connection().
4605 	 */
4606 	if (!conn->conn_transport->rdma_shutdown) {
4607 		sleep = cmpxchg(&conn->tx_thread_active, true, false);
4608 		if (!sleep)
4609 			return;
4610 	}
4611 
4612 	atomic_set(&conn->conn_logout_remove, 0);
4613 	complete(&conn->conn_logout_comp);
4614 
4615 	iscsit_dec_conn_usage_count(conn);
4616 	atomic_set(&sess->session_close, 1);
4617 	iscsit_stop_session(sess, sleep, sleep);
4618 	iscsit_dec_session_usage_count(sess);
4619 }
4620 
4621 static void iscsit_logout_post_handler_samecid(
4622 	struct iscsit_conn *conn)
4623 {
4624 	int sleep = 1;
4625 
4626 	if (!conn->conn_transport->rdma_shutdown) {
4627 		sleep = cmpxchg(&conn->tx_thread_active, true, false);
4628 		if (!sleep)
4629 			return;
4630 	}
4631 
4632 	atomic_set(&conn->conn_logout_remove, 0);
4633 	complete(&conn->conn_logout_comp);
4634 
4635 	iscsit_cause_connection_reinstatement(conn, sleep);
4636 	iscsit_dec_conn_usage_count(conn);
4637 }
4638 
4639 static void iscsit_logout_post_handler_diffcid(
4640 	struct iscsit_conn *conn,
4641 	u16 cid)
4642 {
4643 	struct iscsit_conn *l_conn;
4644 	struct iscsit_session *sess = conn->sess;
4645 	bool conn_found = false;
4646 
4647 	if (!sess)
4648 		return;
4649 
4650 	spin_lock_bh(&sess->conn_lock);
4651 	list_for_each_entry(l_conn, &sess->sess_conn_list, conn_list) {
4652 		if (l_conn->cid == cid) {
4653 			iscsit_inc_conn_usage_count(l_conn);
4654 			conn_found = true;
4655 			break;
4656 		}
4657 	}
4658 	spin_unlock_bh(&sess->conn_lock);
4659 
4660 	if (!conn_found)
4661 		return;
4662 
4663 	if (l_conn->sock)
4664 		l_conn->sock->ops->shutdown(l_conn->sock, RCV_SHUTDOWN);
4665 
4666 	spin_lock_bh(&l_conn->state_lock);
4667 	pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
4668 	l_conn->conn_state = TARG_CONN_STATE_IN_LOGOUT;
4669 	spin_unlock_bh(&l_conn->state_lock);
4670 
4671 	iscsit_cause_connection_reinstatement(l_conn, 1);
4672 	iscsit_dec_conn_usage_count(l_conn);
4673 }
4674 
4675 /*
4676  *	Return of 0 causes the TX thread to restart.
4677  */
4678 int iscsit_logout_post_handler(
4679 	struct iscsit_cmd *cmd,
4680 	struct iscsit_conn *conn)
4681 {
4682 	int ret = 0;
4683 
4684 	switch (cmd->logout_reason) {
4685 	case ISCSI_LOGOUT_REASON_CLOSE_SESSION:
4686 		switch (cmd->logout_response) {
4687 		case ISCSI_LOGOUT_SUCCESS:
4688 		case ISCSI_LOGOUT_CLEANUP_FAILED:
4689 		default:
4690 			iscsit_logout_post_handler_closesession(conn);
4691 			break;
4692 		}
4693 		break;
4694 	case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
4695 		if (conn->cid == cmd->logout_cid) {
4696 			switch (cmd->logout_response) {
4697 			case ISCSI_LOGOUT_SUCCESS:
4698 			case ISCSI_LOGOUT_CLEANUP_FAILED:
4699 			default:
4700 				iscsit_logout_post_handler_samecid(conn);
4701 				break;
4702 			}
4703 		} else {
4704 			switch (cmd->logout_response) {
4705 			case ISCSI_LOGOUT_SUCCESS:
4706 				iscsit_logout_post_handler_diffcid(conn,
4707 					cmd->logout_cid);
4708 				break;
4709 			case ISCSI_LOGOUT_CID_NOT_FOUND:
4710 			case ISCSI_LOGOUT_CLEANUP_FAILED:
4711 			default:
4712 				break;
4713 			}
4714 			ret = 1;
4715 		}
4716 		break;
4717 	case ISCSI_LOGOUT_REASON_RECOVERY:
4718 		switch (cmd->logout_response) {
4719 		case ISCSI_LOGOUT_SUCCESS:
4720 		case ISCSI_LOGOUT_CID_NOT_FOUND:
4721 		case ISCSI_LOGOUT_RECOVERY_UNSUPPORTED:
4722 		case ISCSI_LOGOUT_CLEANUP_FAILED:
4723 		default:
4724 			break;
4725 		}
4726 		ret = 1;
4727 		break;
4728 	default:
4729 		break;
4730 
4731 	}
4732 	return ret;
4733 }
4734 EXPORT_SYMBOL(iscsit_logout_post_handler);
4735 
4736 void iscsit_fail_session(struct iscsit_session *sess)
4737 {
4738 	struct iscsit_conn *conn;
4739 
4740 	spin_lock_bh(&sess->conn_lock);
4741 	list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
4742 		pr_debug("Moving to TARG_CONN_STATE_CLEANUP_WAIT.\n");
4743 		conn->conn_state = TARG_CONN_STATE_CLEANUP_WAIT;
4744 	}
4745 	spin_unlock_bh(&sess->conn_lock);
4746 
4747 	pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
4748 	sess->session_state = TARG_SESS_STATE_FAILED;
4749 }
4750 
4751 void iscsit_stop_session(
4752 	struct iscsit_session *sess,
4753 	int session_sleep,
4754 	int connection_sleep)
4755 {
4756 	u16 conn_count = atomic_read(&sess->nconn);
4757 	struct iscsit_conn *conn, *conn_tmp = NULL;
4758 	int is_last;
4759 
4760 	spin_lock_bh(&sess->conn_lock);
4761 
4762 	if (connection_sleep) {
4763 		list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list,
4764 				conn_list) {
4765 			if (conn_count == 0)
4766 				break;
4767 
4768 			if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) {
4769 				is_last = 1;
4770 			} else {
4771 				iscsit_inc_conn_usage_count(conn_tmp);
4772 				is_last = 0;
4773 			}
4774 			iscsit_inc_conn_usage_count(conn);
4775 
4776 			spin_unlock_bh(&sess->conn_lock);
4777 			iscsit_cause_connection_reinstatement(conn, 1);
4778 			spin_lock_bh(&sess->conn_lock);
4779 
4780 			iscsit_dec_conn_usage_count(conn);
4781 			if (is_last == 0)
4782 				iscsit_dec_conn_usage_count(conn_tmp);
4783 			conn_count--;
4784 		}
4785 	} else {
4786 		list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
4787 			iscsit_cause_connection_reinstatement(conn, 0);
4788 	}
4789 
4790 	if (session_sleep && atomic_read(&sess->nconn)) {
4791 		spin_unlock_bh(&sess->conn_lock);
4792 		wait_for_completion(&sess->session_wait_comp);
4793 	} else
4794 		spin_unlock_bh(&sess->conn_lock);
4795 }
4796 
4797 int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
4798 {
4799 	struct iscsit_session *sess;
4800 	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
4801 	struct se_session *se_sess, *se_sess_tmp;
4802 	LIST_HEAD(free_list);
4803 	int session_count = 0;
4804 
4805 	spin_lock_bh(&se_tpg->session_lock);
4806 	if (tpg->nsessions && !force) {
4807 		spin_unlock_bh(&se_tpg->session_lock);
4808 		return -1;
4809 	}
4810 
4811 	list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
4812 			sess_list) {
4813 		sess = (struct iscsit_session *)se_sess->fabric_sess_ptr;
4814 
4815 		spin_lock(&sess->conn_lock);
4816 		if (atomic_read(&sess->session_fall_back_to_erl0) ||
4817 		    atomic_read(&sess->session_logout) ||
4818 		    atomic_read(&sess->session_close) ||
4819 		    (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
4820 			spin_unlock(&sess->conn_lock);
4821 			continue;
4822 		}
4823 		iscsit_inc_session_usage_count(sess);
4824 		atomic_set(&sess->session_reinstatement, 1);
4825 		atomic_set(&sess->session_fall_back_to_erl0, 1);
4826 		atomic_set(&sess->session_close, 1);
4827 		spin_unlock(&sess->conn_lock);
4828 
4829 		list_move_tail(&se_sess->sess_list, &free_list);
4830 	}
4831 	spin_unlock_bh(&se_tpg->session_lock);
4832 
4833 	list_for_each_entry_safe(se_sess, se_sess_tmp, &free_list, sess_list) {
4834 		sess = (struct iscsit_session *)se_sess->fabric_sess_ptr;
4835 
4836 		list_del_init(&se_sess->sess_list);
4837 		iscsit_stop_session(sess, 1, 1);
4838 		iscsit_dec_session_usage_count(sess);
4839 		session_count++;
4840 	}
4841 
4842 	pr_debug("Released %d iSCSI Session(s) from Target Portal"
4843 			" Group: %hu\n", session_count, tpg->tpgt);
4844 	return 0;
4845 }
4846 
4847 MODULE_DESCRIPTION("iSCSI-Target Driver for mainline target infrastructure");
4848 MODULE_VERSION("4.1.x");
4849 MODULE_AUTHOR("nab@Linux-iSCSI.org");
4850 MODULE_LICENSE("GPL");
4851 
4852 module_init(iscsi_target_init_module);
4853 module_exit(iscsi_target_cleanup_module);
4854