// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <linux/t10-pi.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static DEFINE_MUTEX(device_mutex);
static LIST_HEAD(device_list);
static DEFINE_IDR(devices_idr);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

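/*
 * Map se_cmd->orig_fe_lun to an active se_lun/se_device for a regular
 * command: take the per-cpu lun_ref, update per-deve and per-device
 * statistics, enforce per-deve write protection, and fall back to the
 * TPG's virtual LUN 0 when no MappedLUN 0 exists for this initiator.
 */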
sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_dev_entry *deve;
	sense_reason_t ret = TCM_NO_SENSE;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
	if (deve) {
		atomic_long_inc(&deve->total_cmds);

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->write_bytes);
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->read_bytes);

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    deve->lun_access_ro) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08llx\n",
				se_cmd->se_tfo->fabric_name,
				se_cmd->orig_fe_lun);
			rcu_read_unlock();
			return TCM_WRITE_PROTECTED;
		}

		se_lun = rcu_dereference(deve->se_lun);

		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
			se_lun = NULL;
			goto out_unlock;
		}

		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
out_unlock:
	rcu_read_unlock();

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (se_cmd->orig_fe_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08llx from %s\n",
				se_cmd->se_tfo->fabric_name,
				se_cmd->orig_fe_lun,
				nacl->initiatorname);
			return TCM_NON_EXISTENT_LUN;
		}

		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE))
			return TCM_WRITE_PROTECTED;

		se_lun = se_sess->se_tpg->tpg_virt_lun0;
		if (!percpu_ref_tryget_live(&se_lun->lun_ref))
			return TCM_NON_EXISTENT_LUN;

		se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
	/*
	 * RCU reference protected by percpu se_lun->lun_ref taken above that
	 * must drop to zero (including initial reference) before this se_lun
	 * pointer can be kfree_rcu() by the final se_lun->lun_group put via
	 * target_core_fabric_configfs.c:target_fabric_port_release
	 */
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	atomic_long_inc(&se_cmd->se_dev->num_cmds);

	if (se_cmd->data_direction == DMA_TO_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->write_bytes);
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->read_bytes);

	return ret;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);

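/*
 * Same mapped LUN lookup as transport_lookup_cmd_lun(), but for task
 * management requests; on success the TMR is also linked onto the
 * owning device's dev_tmr_list.
 */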
int transport_lookup_tmr_lun(struct se_cmd *se_cmd)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
	if (deve) {
		se_lun = rcu_dereference(deve->se_lun);

		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
			se_lun = NULL;
			goto out_unlock;
		}

		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
out_unlock:
	rcu_read_unlock();

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08llx for %s\n",
			se_cmd->se_tfo->fabric_name,
			se_cmd->orig_fe_lun,
			nacl->initiatorname);
		return -ENODEV;
	}
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

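/*
 * Return true when the command's mapped LUN is configured read-only
 * for this initiator node.
 */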
bool target_lun_is_rdonly(struct se_cmd *cmd)
{
	struct se_session *se_sess = cmd->se_sess;
	struct se_dev_entry *deve;
	bool ret;

	rcu_read_lock();
	deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
	ret = deve && deve->lun_access_ro;
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(target_lun_is_rdonly);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_portal_group *tpg = nacl->se_tpg;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		lun = rcu_dereference(deve->se_lun);
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->fabric_name);
			continue;
		}
		if (lun->lun_rtpi != rtpi)
			continue;

		kref_get(&deve->pr_kref);
		rcu_read_unlock();

		return deve;
	}
	rcu_read_unlock();

	return NULL;
}

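/*
 * Release every se_dev_entry mapping on a node ACL; called when the
 * ACL itself is being torn down.
 */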
void core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		struct se_lun *lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	}
	mutex_unlock(&nacl->lun_entry_mutex);
}

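/*
 * Update the RO/RW setting of an existing mapped LUN entry for a node ACL.
 */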
void core_update_device_list_access(
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, mapped_lun);
	if (deve)
		deve->lun_access_ro = lun_access_ro;
	mutex_unlock(&nacl->lun_entry_mutex);
}

/*
 * Called with rcu_read_lock or nacl->device_list_lock held.
 */
struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun)
{
	struct se_dev_entry *deve;

	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		if (deve->mapped_lun == mapped_lun)
			return deve;

	return NULL;
}
EXPORT_SYMBOL(target_nacl_find_deve);

void target_pr_kref_release(struct kref *kref)
{
	struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
						 pr_kref);
	complete(&deve->pr_comp);
}

static void
target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new,
			     bool skip_new)
{
	struct se_dev_entry *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &nacl->lun_entry_hlist, link) {
		if (skip_new && tmp == new)
			continue;
		core_scsi3_ua_allocate(tmp, 0x3F,
				       ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED);
	}
	rcu_read_unlock();
}

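/*
 * Install a new se_dev_entry mapping (mapped_lun -> lun) for a node ACL.
 * If a demo-mode entry already exists for this mapped_lun it is replaced
 * in place, handling the dynamic -> explicit NodeACL conversion.
 */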
int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *orig, *new;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new) {
		pr_err("Unable to allocate se_dev_entry memory\n");
		return -ENOMEM;
	}

	spin_lock_init(&new->ua_lock);
	INIT_LIST_HEAD(&new->ua_list);
	INIT_LIST_HEAD(&new->lun_link);

	new->mapped_lun = mapped_lun;
	kref_init(&new->pr_kref);
	init_completion(&new->pr_comp);

	new->lun_access_ro = lun_access_ro;
	new->creation_time = get_jiffies_64();
	new->attach_count++;

	mutex_lock(&nacl->lun_entry_mutex);
	orig = target_nacl_find_deve(nacl, mapped_lun);
	if (orig && orig->se_lun) {
		struct se_lun *orig_lun = rcu_dereference_check(orig->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

		if (orig_lun != lun) {
			pr_err("Existing orig->se_lun doesn't match new lun"
			       " for dynamic -> explicit NodeACL conversion:"
				" %s\n", nacl->initiatorname);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}
		if (orig->se_lun_acl != NULL) {
			pr_warn_ratelimited("Detected existing explicit"
				" se_lun_acl->se_lun_group reference for %s"
				" mapped_lun: %llu, failing\n",
				 nacl->initiatorname, mapped_lun);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}

		rcu_assign_pointer(new->se_lun, lun);
		rcu_assign_pointer(new->se_lun_acl, lun_acl);
		hlist_del_rcu(&orig->link);
		hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
		mutex_unlock(&nacl->lun_entry_mutex);

		spin_lock(&lun->lun_deve_lock);
		list_del(&orig->lun_link);
		list_add_tail(&new->lun_link, &lun->lun_deve_list);
		spin_unlock(&lun->lun_deve_lock);

		kref_put(&orig->pr_kref, target_pr_kref_release);
		wait_for_completion(&orig->pr_comp);

		target_luns_data_has_changed(nacl, new, true);
		kfree_rcu(orig, rcu_head);
		return 0;
	}

	rcu_assign_pointer(new->se_lun, lun);
	rcu_assign_pointer(new->se_lun_acl, lun_acl);
	hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
	mutex_unlock(&nacl->lun_entry_mutex);

	spin_lock(&lun->lun_deve_lock);
	list_add_tail(&new->lun_link, &lun->lun_deve_list);
	spin_unlock(&lun->lun_deve_lock);

	target_luns_data_has_changed(nacl, new, true);
	return 0;
}

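/*
 * Tear down a se_dev_entry mapping: unlink it from the LUN and node ACL
 * lists, release pending UAs and PR state, wait for outstanding PR
 * references, then free the entry via RCU.
 */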
void core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_dev_entry *orig,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	lockdep_assert_held(&nacl->lun_entry_mutex);

	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * lun->lun_deve_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->lun_link from
	 * lun->lun_deve_list. This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below..
	 */
	spin_lock(&lun->lun_deve_lock);
	list_del(&orig->lun_link);
	spin_unlock(&lun->lun_deve_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(orig);

	hlist_del_rcu(&orig->link);
	clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
	orig->lun_access_ro = false;
	orig->creation_time = 0;
	orig->attach_count--;
	/*
	 * Before firing off RCU callback, wait for any in process SPEC_I_PT=1
	 * or REGISTER_AND_MOVE PR operation to complete.
	 */
	kref_put(&orig->pr_kref, target_pr_kref_release);
	wait_for_completion(&orig->pr_comp);

	rcu_assign_pointer(orig->se_lun, NULL);
	rcu_assign_pointer(orig->se_lun_acl, NULL);

	kfree_rcu(orig, rcu_head);

	core_scsi3_free_pr_reg_from_nacl(dev, nacl);
	target_luns_data_has_changed(nacl, NULL, false);
}

/*      core_clear_lun_from_tpg():
 *
 *
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {

		mutex_lock(&nacl->lun_entry_mutex);
		hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
			struct se_lun *tmp_lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

			if (lun != tmp_lun)
				continue;

			core_disable_device_list_for_node(lun, deve, nacl, tpg);
		}
		mutex_unlock(&nacl->lun_entry_mutex);
	}
	mutex_unlock(&tpg->acl_node_mutex);
}

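/*
 * Assign a unique, non-zero RELATIVE TARGET PORT IDENTIFIER to the LUN.
 */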
int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev)
{
	struct se_lun *tmp;

	spin_lock(&dev->se_port_lock);
	if (dev->export_count == 0x0000ffff) {
		pr_warn("Reached dev->dev_port_count =="
				" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return -ENOSPC;
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
	 * Here is the table from spc4r17 section 7.7.3.8.
	 *
	 *    Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code      Description
	 * 0h        Reserved
	 * 1h        Relative port 1, historically known as port A
	 * 2h        Relative port 2, historically known as port B
	 * 3h to FFFFh    Relative port 3 through 65 535
	 */
	lun->lun_rtpi = dev->dev_rpti_counter++;
	if (!lun->lun_rtpi)
		goto again;

	list_for_each_entry(tmp, &dev->dev_sep_list, lun_dev_link) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap..
		 */
		if (lun->lun_rtpi == tmp->lun_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return 0;
}

static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}

static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;
	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}

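/*
 * Export a configured se_device as a LUN on the given TPG and, for
 * demo-mode TPGs, map it into any existing dynamically generated
 * node ACLs.
 */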
int core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	struct se_lun *lun)
{
	int rc;

	rc = core_tpg_add_lun(tpg, lun, false, dev);
	if (rc < 0)
		return rc;

	pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->fabric_name, dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;

		mutex_lock(&tpg->acl_node_mutex);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				core_tpg_add_node_to_devs(acl, tpg, lun);
			}
		}
		mutex_unlock(&tpg->acl_node_mutex);
	}

	return 0;
}

/*      core_dev_del_lun():
 *
 *
 */
void core_dev_del_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->fabric_name);

	core_tpg_remove_lun(tpg, lun);
}

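/*
 * Allocate a MappedLUN ACL; the mapping itself is activated later via
 * core_dev_add_initiator_node_lun_acl().
 */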
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *nacl,
	u64 mapped_lun,
	int *ret)
{
	struct se_lun_acl *lacl;

	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->fabric_name);
		*ret = -EOVERFLOW;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;

	return lacl;
}

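/*
 * Activate a previously allocated MappedLUN ACL: wire the lacl to the LUN,
 * enable the se_dev_entry mapping, and replay any matching APTPL PR
 * pre-registrations.
 */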
int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	struct se_lun *lun,
	bool lun_access_ro)
{
	struct se_node_acl *nacl = lacl->se_lun_nacl;
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	if (!nacl)
		return -EINVAL;

	if (lun->lun_access_ro)
		lun_access_ro = true;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access_ro, nacl, tpg) < 0)
		return -EINVAL;

	pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for "
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
		lun_access_ro ? "RO" : "RW",
		nacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
	 */
	core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
					    lacl->mapped_lun);
	return 0;
}

int core_dev_del_initiator_node_lun_acl(
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_portal_group *tpg = lun->lun_tpg;
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
	if (deve)
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	mutex_unlock(&nacl->lun_entry_mutex);

	pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %llu\n",
		tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		nacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %llu\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->fabric_name,
		lacl->se_lun_nacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->t10_wwn;
	int device_type = dev->transport->get_device_type(dev);

	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	pr_debug("  Vendor: %-" __stringify(INQUIRY_VENDOR_LEN) "s\n",
		wwn->vendor);
	pr_debug("  Model: %-" __stringify(INQUIRY_MODEL_LEN) "s\n",
		wwn->model);
	pr_debug("  Revision: %-" __stringify(INQUIRY_REVISION_LEN) "s\n",
		wwn->revision);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
}

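/*
 * First-stage device creation: ask the backend to allocate a se_device,
 * then initialise the common state (per-cpu submission queues, locks,
 * default attributes, the internal xcopy LUN, and default INQUIRY
 * strings). The device is not usable until target_configure_device()
 * succeeds.
 */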
struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
	struct se_device *dev;
	struct se_lun *xcopy_lun;
	int i;

	dev = hba->backend->ops->alloc_device(hba, name);
	if (!dev)
		return NULL;

	dev->queues = kcalloc(nr_cpu_ids, sizeof(*dev->queues), GFP_KERNEL);
	if (!dev->queues) {
		dev->transport->free_device(dev);
		return NULL;
	}

	dev->queue_cnt = nr_cpu_ids;
	for (i = 0; i < dev->queue_cnt; i++) {
		struct se_device_queue *q;

		q = &dev->queues[i];
		INIT_LIST_HEAD(&q->state_list);
		spin_lock_init(&q->lock);

		init_llist_head(&q->sq.cmd_list);
		INIT_WORK(&q->sq.work, target_queued_submit_work);
	}

	dev->se_hba = hba;
	dev->transport = hba->backend->ops;
	dev->transport_flags = dev->transport->transport_flags_default;
	dev->prot_length = sizeof(struct t10_pi_tuple);
	dev->hba_index = hba->hba_index;

	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	sema_init(&dev->caw_sem, 1);
	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&dev->t10_pr.registration_lock);
	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
	spin_lock_init(&dev->t10_alua.lba_map_lock);

	dev->t10_wwn.t10_dev = dev;
	dev->t10_alua.t10_dev = dev;

	dev->dev_attrib.da_dev = dev;
	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
	dev->dev_attrib.emulate_dpo = 1;
	dev->dev_attrib.emulate_fua_write = 1;
	dev->dev_attrib.emulate_fua_read = 1;
	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->dev_attrib.emulate_ua_intlck_ctrl = TARGET_UA_INTLCK_CTRL_CLEAR;
	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
	dev->dev_attrib.emulate_pr = DA_EMULATE_PR;
	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->dev_attrib.unmap_granularity_alignment =
				DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	dev->dev_attrib.unmap_zeroes_data =
				DA_UNMAP_ZEROES_DATA_DEFAULT;
	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;

	xcopy_lun = &dev->xcopy_lun;
	rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
	init_completion(&xcopy_lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
	INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
	mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
	xcopy_lun->lun_tpg = &xcopy_pt_tpg;

	/* Preload the default INQUIRY const values */
	strlcpy(dev->t10_wwn.vendor, "LIO-ORG", sizeof(dev->t10_wwn.vendor));
	strlcpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
		sizeof(dev->t10_wwn.model));
	strlcpy(dev->t10_wwn.revision, dev->transport->inquiry_rev,
		sizeof(dev->t10_wwn.revision));

	return dev;
}

/*
 * Check if the underlying struct block_device request_queue supports
 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
 * in ATA and we need to set TPE=1
 */
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
				       struct request_queue *q)
{
	int block_size = queue_logical_block_size(q);

	if (!blk_queue_discard(q))
		return false;

	attrib->max_unmap_lba_count =
		q->limits.max_discard_sectors >> (ilog2(block_size) - 9);
	/*
	 * Currently hardcoded to 1 in Linux/SCSI code..
	 */
	attrib->max_unmap_block_desc_count = 1;
	attrib->unmap_granularity = q->limits.discard_granularity / block_size;
	attrib->unmap_granularity_alignment = q->limits.discard_alignment /
								block_size;
	attrib->unmap_zeroes_data = !!(q->limits.max_write_zeroes_sectors);
	return true;
}
EXPORT_SYMBOL(target_configure_unmap_from_queue);

/*
 * Convert from blocksize advertised to the initiator to the 512 byte
 * units unconditionally used by the Linux block layer.
 */
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
{
	switch (dev->dev_attrib.block_size) {
	case 4096:
		return lb << 3;
	case 2048:
		return lb << 2;
	case 1024:
		return lb << 1;
	default:
		return lb;
	}
}
EXPORT_SYMBOL(target_to_linux_sector);

struct devices_idr_iter {
	struct config_item *prev_item;
	int (*fn)(struct se_device *dev, void *data);
	void *data;
};

static int target_devices_idr_iter(int id, void *p, void *data)
	__must_hold(&device_mutex)
{
	struct devices_idr_iter *iter = data;
	struct se_device *dev = p;
	int ret;

	config_item_put(iter->prev_item);
	iter->prev_item = NULL;

	/*
	 * We add the device early to the idr, so it can be used
	 * by backend modules during configuration. We do not want
	 * to allow other callers to access partially setup devices,
	 * so we skip them here.
	 */
	if (!target_dev_configured(dev))
		return 0;

	iter->prev_item = config_item_get_unless_zero(&dev->dev_group.cg_item);
	if (!iter->prev_item)
		return 0;
	mutex_unlock(&device_mutex);

	ret = iter->fn(dev, iter->data);

	mutex_lock(&device_mutex);
	return ret;
}

/**
 * target_for_each_device - iterate over configured devices
 * @fn: iterator function
 * @data: pointer to data that will be passed to fn
 *
 * fn must return 0 to continue looping over devices. non-zero will break
 * from the loop and return that value to the caller.
 */
int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
			   void *data)
{
	struct devices_idr_iter iter = { .fn = fn, .data = data };
	int ret;

	mutex_lock(&device_mutex);
	ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
	mutex_unlock(&device_mutex);
	config_item_put(iter.prev_item);
	return ret;
}

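/*
 * Second-stage device setup: reserve an idr index, let the backend
 * configure itself, derive block size/queue depth from the hardware
 * limits, set up ALUA, and mark the device DF_CONFIGURED.
 */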
int target_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	int ret, id;

	if (target_dev_configured(dev)) {
		pr_err("se_dev->se_dev_ptr already set for storage"
				" object\n");
		return -EEXIST;
	}

	/*
	 * Add early so modules like tcmu can use during its
	 * configuration.
	 */
	mutex_lock(&device_mutex);
	/*
	 * Use cyclic to try and avoid collisions with devices
	 * that were recently removed.
	 */
	id = idr_alloc_cyclic(&devices_idr, dev, 0, INT_MAX, GFP_KERNEL);
	mutex_unlock(&device_mutex);
	if (id < 0) {
		ret = -ENOMEM;
		goto out;
	}
	dev->dev_index = id;

	ret = dev->transport->configure_device(dev);
	if (ret)
		goto out_free_index;
	/*
	 * XXX: there is not much point to have two different values here..
	 */
	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	dev->dev_attrib.hw_max_sectors =
		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
					 dev->dev_attrib.hw_block_size);
	dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;

	dev->creation_time = get_jiffies_64();

	ret = core_setup_alua(dev);
	if (ret)
		goto out_destroy_device;

	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

	scsi_dump_inquiry(dev);

	spin_lock(&hba->device_lock);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);

	dev->dev_flags |= DF_CONFIGURED;

	return 0;

out_destroy_device:
	dev->transport->destroy_device(dev);
out_free_index:
	mutex_lock(&device_mutex);
	idr_remove(&devices_idr, dev->dev_index);
	mutex_unlock(&device_mutex);
out:
	se_release_vpd_for_dev(dev);
	return ret;
}

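/*
 * Undo target_configure_device()/target_alloc_device() and release the
 * se_device, including any remaining PR, ALUA and VPD state.
 */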
void target_free_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	WARN_ON(!list_empty(&dev->dev_sep_list));

	if (target_dev_configured(dev)) {
		dev->transport->destroy_device(dev);

		mutex_lock(&device_mutex);
		idr_remove(&devices_idr, dev->dev_index);
		mutex_unlock(&device_mutex);

		spin_lock(&hba->device_lock);
		hba->dev_count--;
		spin_unlock(&hba->device_lock);
	}

	core_alua_free_lu_gp_mem(dev);
	core_alua_set_lba_map(dev, NULL, 0, 0);
	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	if (dev->transport->free_prot)
		dev->transport->free_prot(dev);

	kfree(dev->queues);
	dev->transport->free_device(dev);
}

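/*
 * Create the global rd_mcp-backed device used as virtual LUN 0 for
 * sessions without an explicit MappedLUN 0.
 */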
int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	char buf[] = "rd_pages=8,rd_nullio=1,rd_dummy=1";
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	dev = target_alloc_device(hba, "virt_lun0");
	if (!dev) {
		ret = -ENOMEM;
		goto out_free_hba;
	}

	hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));

	ret = target_configure_device(dev);
	if (ret)
		goto out_free_se_dev;

	lun0_hba = hba;
	g_lun0_dev = dev;
	return 0;

out_free_se_dev:
	target_free_device(dev);
out_free_hba:
	core_delete_hba(hba);
	return ret;
}


void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;

	if (!hba)
		return;

	if (g_lun0_dev)
		target_free_device(g_lun0_dev);
	core_delete_hba(hba);
}

/*
 * Common CDB parsing for kernel and user passthrough.
 */
sense_reason_t
passthrough_parse_cdb(struct se_cmd *cmd,
	sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
{
	unsigned char *cdb = cmd->t_task_cdb;
	struct se_device *dev = cmd->se_dev;
	unsigned int size;

	/*
	 * For REPORT LUNS we always need to emulate the response, for everything
	 * else, pass it up.
	 */
	if (cdb[0] == REPORT_LUNS) {
		cmd->execute_cmd = spc_emulate_report_luns;
		return TCM_NO_SENSE;
	}

	/*
	 * With emulate_pr disabled, all reservation requests should fail,
	 * regardless of whether or not TRANSPORT_FLAG_PASSTHROUGH_PGR is set.
	 */
	if (!dev->dev_attrib.emulate_pr &&
	    ((cdb[0] == PERSISTENT_RESERVE_IN) ||
	     (cdb[0] == PERSISTENT_RESERVE_OUT) ||
	     (cdb[0] == RELEASE || cdb[0] == RELEASE_10) ||
	     (cdb[0] == RESERVE || cdb[0] == RESERVE_10))) {
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	/*
	 * For PERSISTENT RESERVE IN/OUT, RELEASE, and RESERVE we need to
	 * emulate the response, since tcmu does not have the information
	 * required to process these commands.
	 */
	if (!(dev->transport_flags &
	      TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
		if (cdb[0] == PERSISTENT_RESERVE_IN) {
			cmd->execute_cmd = target_scsi3_emulate_pr_in;
			size = get_unaligned_be16(&cdb[7]);
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == PERSISTENT_RESERVE_OUT) {
			cmd->execute_cmd = target_scsi3_emulate_pr_out;
			size = get_unaligned_be32(&cdb[5]);
			return target_cmd_size_check(cmd, size);
		}

		if (cdb[0] == RELEASE || cdb[0] == RELEASE_10) {
			cmd->execute_cmd = target_scsi2_reservation_release;
			if (cdb[0] == RELEASE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == RESERVE || cdb[0] == RESERVE_10) {
			cmd->execute_cmd = target_scsi2_reservation_reserve;
			if (cdb[0] == RESERVE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
	}

	/* Set DATA_CDB flag for ops that should have it */
	switch (cdb[0]) {
	case READ_6:
	case READ_10:
	case READ_12:
	case READ_16:
	case WRITE_6:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
	case WRITE_VERIFY:
	case WRITE_VERIFY_12:
	case WRITE_VERIFY_16:
	case COMPARE_AND_WRITE:
	case XDWRITEREAD_10:
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		break;
	case VARIABLE_LENGTH_CMD:
		switch (get_unaligned_be16(&cdb[8])) {
		case READ_32:
		case WRITE_32:
		case WRITE_VERIFY_32:
		case XDWRITEREAD_32:
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
			break;
		}
	}

	cmd->execute_cmd = exec_cmd;

	return TCM_NO_SENSE;
}
EXPORT_SYMBOL(passthrough_parse_cdb);