/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2003-2009 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2015 Alexander Motin <mav@FreeBSD.org>
 * Copyright (c) 2017 Jakub Wojciech Klama <jceel@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/nv.h>
#include <sys/dnv.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_error.h>

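/*
 * States used to synchronize cfi_submit_wait() with the cfi_datamove()
 * and cfi_done() callbacks for a single outstanding I/O.
 */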
typedef enum {
	CTL_IOCTL_INPROG,
	CTL_IOCTL_DATAMOVE,
	CTL_IOCTL_DONE
} ctl_fe_ioctl_state;

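/*
 * Per-I/O synchronization context, allocated on the stack in
 * cfi_submit_wait() and reached from the callbacks through
 * io_hdr.ctl_private[CTL_PRIV_FRONTEND].
 */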
struct ctl_fe_ioctl_params {
	struct cv		sem;
	struct mtx		ioctl_mtx;
	ctl_fe_ioctl_state	state;
};

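/*
 * Per-port state.  Ports created through CTL_REQ_CREATE get their own
 * character device (dev); the default port set up in cfi_init() does not.
 */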
struct cfi_port {
	TAILQ_ENTRY(cfi_port)	link;
	u_int			cur_tag_num;
	struct cdev *		dev;
	struct ctl_port		port;
};

struct cfi_softc {
	TAILQ_HEAD(, cfi_port)	ports;
};

static struct cfi_softc cfi_softc;

static int cfi_init(void);
static int cfi_shutdown(void);
static void cfi_datamove(union ctl_io *io);
static void cfi_done(union ctl_io *io);
static int cfi_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
    struct thread *td);
static void cfi_ioctl_port_create(struct ctl_req *req);
static void cfi_ioctl_port_remove(struct ctl_req *req);

static struct cdevsw cfi_cdevsw = {
	.d_version = D_VERSION,
	.d_flags = 0,
	.d_ioctl = ctl_ioctl_io
};

static struct ctl_frontend cfi_frontend =
{
	.name = "ioctl",
	.init = cfi_init,
	.ioctl = cfi_ioctl,
	.shutdown = cfi_shutdown,
};
CTL_FRONTEND_DECLARE(ctlioctl, cfi_frontend);

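/*
 * Frontend init: register the default "ioctl" port (physical port 0) with
 * the CTL core and bring it online.
 */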
static int
cfi_init(void)
{
	struct cfi_softc *isoftc = &cfi_softc;
	struct cfi_port *cfi;
	struct ctl_port *port;
	int error = 0;

	memset(isoftc, 0, sizeof(*isoftc));
	TAILQ_INIT(&isoftc->ports);

	cfi = malloc(sizeof(*cfi), M_CTL, M_WAITOK | M_ZERO);
	port = &cfi->port;
	port->frontend = &cfi_frontend;
	port->port_type = CTL_PORT_IOCTL;
	port->num_requested_ctl_io = 100;
	port->port_name = "ioctl";
	port->fe_datamove = cfi_datamove;
	port->fe_done = cfi_done;
	port->physical_port = 0;
	port->targ_port = -1;

	if ((error = ctl_port_register(port)) != 0) {
		printf("%s: ioctl port registration failed\n", __func__);
		free(cfi, M_CTL);
		return (error);
	}

	ctl_port_online(port);
	TAILQ_INSERT_TAIL(&isoftc->ports, cfi, link);
	return (0);
}

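/*
 * Frontend shutdown: take every remaining port offline, deregister it and
 * release its resources.
 */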
static int
cfi_shutdown(void)
{
	struct cfi_softc *isoftc = &cfi_softc;
	struct cfi_port *cfi, *temp;
	struct ctl_port *port;
	int error;

	TAILQ_FOREACH_SAFE(cfi, &isoftc->ports, link, temp) {
		port = &cfi->port;
		ctl_port_offline(port);
		error = ctl_port_deregister(port);
		if (error != 0) {
			printf("%s: ctl_port_deregister() failed\n",
			   __func__);
			return (error);
		}

		TAILQ_REMOVE(&isoftc->ports, cfi, link);
		free(cfi, M_CTL);
	}

	return (0);
}

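/*
 * CTL_REQ_CREATE handler: parse the optional "pp" (physical port) and "vp"
 * (virtual port) arguments, pick a free physical port number if none was
 * given, register the new port and expose it as /dev/cam/ctl<pp>.<vp>.
 * The assigned target port number is returned as "port_id" in the result
 * nvlist.
 */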
static void
cfi_ioctl_port_create(struct ctl_req *req)
{
	struct cfi_softc *isoftc = &cfi_softc;
	struct cfi_port *cfi;
	struct ctl_port *port;
	struct make_dev_args args;
	const char *val;
	int retval;
	int pp = -1, vp = 0;

	val = dnvlist_get_string(req->args_nvl, "pp", NULL);
	if (val != NULL)
		pp = strtol(val, NULL, 10);

	val = dnvlist_get_string(req->args_nvl, "vp", NULL);
	if (val != NULL)
		vp = strtol(val, NULL, 10);

	if (pp != -1) {
		/* Check for duplicates */
		TAILQ_FOREACH(cfi, &isoftc->ports, link) {
			if (pp == cfi->port.physical_port &&
			    vp == cfi->port.virtual_port) {
				req->status = CTL_LUN_ERROR;
				snprintf(req->error_str, sizeof(req->error_str),
				    "port %d already exists", pp);

				return;
			}
		}
	} else {
		/* Find free port number */
		TAILQ_FOREACH(cfi, &isoftc->ports, link) {
			pp = MAX(pp, cfi->port.physical_port);
		}

		pp++;
	}

	cfi = malloc(sizeof(*cfi), M_CTL, M_WAITOK | M_ZERO);
	port = &cfi->port;
	port->frontend = &cfi_frontend;
	port->port_type = CTL_PORT_IOCTL;
	port->num_requested_ctl_io = 100;
	port->port_name = "ioctl";
	port->fe_datamove = cfi_datamove;
	port->fe_done = cfi_done;
	port->physical_port = pp;
	port->virtual_port = vp;
	port->targ_port = -1;

	retval = ctl_port_register(port);
	if (retval != 0) {
		req->status = CTL_LUN_ERROR;
		snprintf(req->error_str, sizeof(req->error_str),
		    "ctl_port_register() failed with error %d", retval);
		free(cfi, M_CTL);
		return;
	}

	req->result_nvl = nvlist_create(0);
	nvlist_add_number(req->result_nvl, "port_id", port->targ_port);
	ctl_port_online(port);

	make_dev_args_init(&args);
	args.mda_devsw = &cfi_cdevsw;
	args.mda_uid = UID_ROOT;
	args.mda_gid = GID_OPERATOR;
	args.mda_mode = 0600;
	args.mda_si_drv1 = NULL;
	args.mda_si_drv2 = cfi;

	retval = make_dev_s(&args, &cfi->dev, "cam/ctl%d.%d", pp, vp);
	if (retval != 0) {
		req->status = CTL_LUN_ERROR;
		snprintf(req->error_str, sizeof(req->error_str),
		    "make_dev_s() failed with error %d", retval);
		ctl_port_offline(port);
		ctl_port_deregister(port);
		free(cfi, M_CTL);
		return;
	}

	req->status = CTL_LUN_OK;
	TAILQ_INSERT_TAIL(&isoftc->ports, cfi, link);
}

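/*
 * CTL_REQ_REMOVE handler: look up the port by its "port_id" argument,
 * refuse to destroy the default port, and otherwise tear the port down
 * together with its character device.
 */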
static void
cfi_ioctl_port_remove(struct ctl_req *req)
{
	struct cfi_softc *isoftc = &cfi_softc;
	struct cfi_port *cfi = NULL;
	const char *val;
	int port_id = -1;

	val = dnvlist_get_string(req->args_nvl, "port_id", NULL);
	if (val != NULL)
		port_id = strtol(val, NULL, 10);

	if (port_id == -1) {
		req->status = CTL_LUN_ERROR;
		snprintf(req->error_str, sizeof(req->error_str),
		    "port_id not provided");
		return;
	}

	TAILQ_FOREACH(cfi, &isoftc->ports, link) {
		if (cfi->port.targ_port == port_id)
			break;
	}

	if (cfi == NULL) {
		req->status = CTL_LUN_ERROR;
		snprintf(req->error_str, sizeof(req->error_str),
		    "cannot find port %d", port_id);

		return;
	}

	if (cfi->port.physical_port == 0 && cfi->port.virtual_port == 0) {
		req->status = CTL_LUN_ERROR;
		snprintf(req->error_str, sizeof(req->error_str),
		    "cannot destroy default ioctl port");

		return;
	}

	ctl_port_offline(&cfi->port);
	ctl_port_deregister(&cfi->port);
	TAILQ_REMOVE(&isoftc->ports, cfi, link);
	destroy_dev(cfi->dev);
	free(cfi, M_CTL);
	req->status = CTL_LUN_OK;
}

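/*
 * Frontend ioctl hook, called by the CTL core for requests addressed to
 * this frontend.  Only CTL_PORT_REQ (port create/remove) is handled here.
 */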
static int
cfi_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
    struct thread *td)
{
	struct ctl_req *req;

	if (cmd == CTL_PORT_REQ) {
		req = (struct ctl_req *)addr;
		switch (req->reqtype) {
		case CTL_REQ_CREATE:
			cfi_ioctl_port_create(req);
			break;
		case CTL_REQ_REMOVE:
			cfi_ioctl_port_remove(req);
			break;
		default:
			req->status = CTL_LUN_ERROR;
			snprintf(req->error_str, sizeof(req->error_str),
			    "Unsupported request type %d", req->reqtype);
		}
		return (0);
	}

	return (ENOTTY);
}

/*
 * Data movement routine for the CTL ioctl frontend port.
 */
static int
ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
{
	struct ctl_sg_entry *ext_sglist, *kern_sglist;
	struct ctl_sg_entry ext_entry, kern_entry;
	int ext_sglen, ext_sg_entries, kern_sg_entries;
	int ext_sg_start, ext_offset;
	int len_to_copy;
	int kern_watermark, ext_watermark;
	int ext_sglist_malloced;
	int i, j;

	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove\n"));

	/*
	 * If this flag is set, fake the data transfer.
	 */
	if (ctsio->io_hdr.flags & CTL_FLAG_NO_DATAMOVE) {
		ext_sglist_malloced = 0;
		ctsio->ext_data_filled += ctsio->kern_data_len;
		ctsio->kern_data_resid = 0;
		goto bailout;
	}

	/*
	 * To simplify things here, if we have a single buffer, stick it in
	 * an S/G entry and just make it a single entry S/G list.
	 */
	if (ctsio->ext_sg_entries > 0) {
		int len_seen;

		ext_sglen = ctsio->ext_sg_entries * sizeof(*ext_sglist);
		ext_sglist = (struct ctl_sg_entry *)malloc(ext_sglen, M_CTL,
							   M_WAITOK);
		ext_sglist_malloced = 1;
		if (copyin(ctsio->ext_data_ptr, ext_sglist, ext_sglen) != 0) {
			ctsio->io_hdr.port_status = 31343;
			goto bailout;
		}
		ext_sg_entries = ctsio->ext_sg_entries;
		ext_sg_start = ext_sg_entries;
		ext_offset = 0;
		len_seen = 0;
		for (i = 0; i < ext_sg_entries; i++) {
			if ((len_seen + ext_sglist[i].len) >=
			     ctsio->ext_data_filled) {
				ext_sg_start = i;
				ext_offset = ctsio->ext_data_filled - len_seen;
				break;
			}
			len_seen += ext_sglist[i].len;
		}
	} else {
		ext_sglist = &ext_entry;
		ext_sglist_malloced = 0;
		ext_sglist->addr = ctsio->ext_data_ptr;
		ext_sglist->len = ctsio->ext_data_len;
		ext_sg_entries = 1;
		ext_sg_start = 0;
		ext_offset = ctsio->ext_data_filled;
	}

	if (ctsio->kern_sg_entries > 0) {
		kern_sglist = (struct ctl_sg_entry *)ctsio->kern_data_ptr;
		kern_sg_entries = ctsio->kern_sg_entries;
	} else {
		kern_sglist = &kern_entry;
		kern_sglist->addr = ctsio->kern_data_ptr;
		kern_sglist->len = ctsio->kern_data_len;
		kern_sg_entries = 1;
	}

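	/*
	 * Walk the external (user) and kernel S/G lists in parallel, copying
	 * MIN(remaining external, remaining kernel) bytes at each step.  The
	 * watermarks track how much of the current entry on each side has
	 * already been consumed.
	 */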
	kern_watermark = 0;
	ext_watermark = ext_offset;
	for (i = ext_sg_start, j = 0;
	     i < ext_sg_entries && j < kern_sg_entries;) {
		uint8_t *ext_ptr, *kern_ptr;

		len_to_copy = MIN(ext_sglist[i].len - ext_watermark,
				  kern_sglist[j].len - kern_watermark);

		ext_ptr = (uint8_t *)ext_sglist[i].addr;
		ext_ptr = ext_ptr + ext_watermark;
		if (ctsio->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
			/*
			 * XXX KDM fix this!
			 */
			panic("need to implement bus address support");
#if 0
			kern_ptr = bus_to_virt(kern_sglist[j].addr);
#endif
		} else
			kern_ptr = (uint8_t *)kern_sglist[j].addr;
		kern_ptr = kern_ptr + kern_watermark;

		if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
		     CTL_FLAG_DATA_IN) {
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
					 "bytes to user\n", len_to_copy));
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p "
					 "to %p\n", kern_ptr, ext_ptr));
			if (copyout(kern_ptr, ext_ptr, len_to_copy) != 0) {
				ctsio->io_hdr.port_status = 31344;
				goto bailout;
			}
		} else {
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
					 "bytes from user\n", len_to_copy));
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p "
					 "to %p\n", ext_ptr, kern_ptr));
			if (copyin(ext_ptr, kern_ptr, len_to_copy) != 0) {
				ctsio->io_hdr.port_status = 31345;
				goto bailout;
			}
		}

		ctsio->ext_data_filled += len_to_copy;
		ctsio->kern_data_resid -= len_to_copy;

		ext_watermark += len_to_copy;
		if (ext_sglist[i].len == ext_watermark) {
			i++;
			ext_watermark = 0;
		}

		kern_watermark += len_to_copy;
		if (kern_sglist[j].len == kern_watermark) {
			j++;
			kern_watermark = 0;
		}
	}

	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_sg_entries: %d, "
			 "kern_sg_entries: %d\n", ext_sg_entries,
			 kern_sg_entries));
	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_data_len = %d, "
			 "kern_data_len = %d\n", ctsio->ext_data_len,
			 ctsio->kern_data_len));

bailout:
	if (ext_sglist_malloced != 0)
		free(ext_sglist, M_CTL);

	return (CTL_RETVAL_COMPLETE);
}

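/*
 * fe_datamove callback.  Called by the CTL core when data needs to move;
 * it only flags the request and wakes the thread sleeping in
 * cfi_submit_wait(), which performs the actual copy in the context of the
 * user process.
 */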
static void
cfi_datamove(union ctl_io *io)
{
	struct ctl_fe_ioctl_params *params;

	params = (struct ctl_fe_ioctl_params *)
		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;

	mtx_lock(&params->ioctl_mtx);
	params->state = CTL_IOCTL_DATAMOVE;
	cv_broadcast(&params->sem);
	mtx_unlock(&params->ioctl_mtx);
}

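/*
 * fe_done callback: mark the I/O as completed and wake cfi_submit_wait().
 */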
static void
cfi_done(union ctl_io *io)
{
	struct ctl_fe_ioctl_params *params;

	params = (struct ctl_fe_ioctl_params *)
		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;

	mtx_lock(&params->ioctl_mtx);
	params->state = CTL_IOCTL_DONE;
	cv_broadcast(&params->sem);
	mtx_unlock(&params->ioctl_mtx);
}

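/*
 * Submit the I/O to the CTL core with ctl_run() and sleep until it
 * completes, servicing any CTL_IOCTL_DATAMOVE requests in this thread's
 * context along the way.
 */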
static int
cfi_submit_wait(union ctl_io *io)
{
	struct ctl_fe_ioctl_params params;
	ctl_fe_ioctl_state last_state;
	int done, retval;

	bzero(&params, sizeof(params));
	mtx_init(&params.ioctl_mtx, "ctliocmtx", NULL, MTX_DEF);
	cv_init(&params.sem, "ctlioccv");
	params.state = CTL_IOCTL_INPROG;
	last_state = params.state;

	io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = &params;

	CTL_DEBUG_PRINT(("cfi_submit_wait\n"));

	/* This shouldn't happen */
	if ((retval = ctl_run(io)) != CTL_RETVAL_COMPLETE)
		return (retval);

	done = 0;

	do {
		mtx_lock(&params.ioctl_mtx);
		/*
		 * Check the state here, and don't sleep if the state has
		 * already changed (i.e. wakeup has already occurred, but we
		 * weren't waiting yet).
		 */
		if (params.state == last_state) {
			/* XXX KDM cv_wait_sig instead? */
			cv_wait(&params.sem, &params.ioctl_mtx);
		}
		last_state = params.state;

		switch (params.state) {
		case CTL_IOCTL_INPROG:
			/* Why did we wake up? */
			/* XXX KDM error here? */
			mtx_unlock(&params.ioctl_mtx);
			break;
		case CTL_IOCTL_DATAMOVE:
			CTL_DEBUG_PRINT(("got CTL_IOCTL_DATAMOVE\n"));

			/*
			 * change last_state back to INPROG to avoid
			 * deadlock on subsequent data moves.
			 */
			params.state = last_state = CTL_IOCTL_INPROG;

			mtx_unlock(&params.ioctl_mtx);
			ctl_ioctl_do_datamove(&io->scsiio);
			/*
			 * Note that in some cases, most notably writes,
			 * this will queue the I/O and call us back later.
			 * In other cases, generally reads, this routine
			 * will immediately call back and wake us up,
			 * probably using our own context.
			 */
			ctl_datamove_done(io, false);
			break;
		case CTL_IOCTL_DONE:
			mtx_unlock(&params.ioctl_mtx);
			CTL_DEBUG_PRINT(("got CTL_IOCTL_DONE\n"));
			done = 1;
			break;
		default:
			mtx_unlock(&params.ioctl_mtx);
			/* XXX KDM error here? */
			break;
		}
	} while (done == 0);

	mtx_destroy(&params.ioctl_mtx);
	cv_destroy(&params.sem);

	return (CTL_RETVAL_COMPLETE);
}

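/*
 * CTL_IO ioctl handler.  Copies the caller's union ctl_io into a
 * pool-allocated I/O, runs it synchronously through cfi_submit_wait() and
 * copies the result back to user space.  The device node is either the
 * main CTL node (si_drv2 == NULL, served by the default port) or one of
 * the per-port cam/ctl<pp>.<vp> nodes created above.
 */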
int
ctl_ioctl_io(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
    struct thread *td)
{
	struct cfi_port *cfi;
	union ctl_io *io;
	void *pool_tmp, *sc_tmp;
	int retval = 0;

	if (cmd != CTL_IO)
		return (ENOTTY);

	cfi = dev->si_drv2 == NULL
	    ? TAILQ_FIRST(&cfi_softc.ports)
	    : dev->si_drv2;

	/*
	 * If we haven't been "enabled", don't allow any SCSI I/O
	 * to this FETD.
	 */
	if ((cfi->port.status & CTL_PORT_STATUS_ONLINE) == 0)
		return (EPERM);

	io = ctl_alloc_io(cfi->port.ctl_pool_ref);

	/*
	 * Save the pool reference and softc pointer so they don't get
	 * overwritten by the copy of the user's ctl_io.
	 */
	pool_tmp = io->io_hdr.pool;
	sc_tmp = CTL_SOFTC(io);
	memcpy(io, (void *)addr, sizeof(*io));
	io->io_hdr.pool = pool_tmp;
	CTL_SOFTC(io) = sc_tmp;
	TAILQ_INIT(&io->io_hdr.blocked_queue);

	/*
	 * No status yet, so make sure the status is set properly.
	 */
	io->io_hdr.status = CTL_STATUS_NONE;

	/*
	 * The user sets the initiator ID, target and LUN IDs.
	 */
	io->io_hdr.nexus.targ_port = cfi->port.targ_port;
	io->io_hdr.flags |= CTL_FLAG_USER_REQ;
	if ((io->io_hdr.flags & CTL_FLAG_USER_TAG) == 0 &&
	    io->io_hdr.io_type == CTL_IO_SCSI &&
	    io->scsiio.tag_type != CTL_TAG_UNTAGGED)
		io->scsiio.tag_num = atomic_fetchadd_int(&cfi->cur_tag_num, 1);

	retval = cfi_submit_wait(io);
	if (retval == 0)
		memcpy((void *)addr, io, sizeof(*io));

	ctl_free_io(io);
	return (retval);
}