/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2015 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * derived from ata_xpt.c: Copyright (c) 2009 Alexander Motin <mav@FreeBSD.org>
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/interrupt.h>
#include <sys/sbuf.h>

#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_queue.h>
#include <cam/cam_periph.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_xpt_internal.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/nvme/nvme_all.h>
#include <machine/stdarg.h>	/* for xpt_print below */
#include "opt_cam.h"

struct nvme_quirk_entry {
	u_int quirks;
#define CAM_QUIRK_MAXTAGS 1
	u_int mintags;
	u_int maxtags;
};

/* Not even sure why we need this */
static periph_init_t nvme_probe_periph_init;

static struct periph_driver nvme_probe_driver =
{
	nvme_probe_periph_init, "nvme_probe",
	TAILQ_HEAD_INITIALIZER(nvme_probe_driver.units), /* generation */ 0,
	CAM_PERIPH_DRV_EARLY
};

PERIPHDRIVER_DECLARE(nvme_probe, nvme_probe_driver);

typedef enum {
	NVME_PROBE_IDENTIFY,
	NVME_PROBE_DONE,
	NVME_PROBE_INVALID,
	NVME_PROBE_RESET
} nvme_probe_action;

static const char *nvme_probe_action_text[] = {
	"NVME_PROBE_IDENTIFY",
	"NVME_PROBE_DONE",
	"NVME_PROBE_INVALID",
	"NVME_PROBE_RESET",
};

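/*
 * Log probe state transitions under CAM_DEBUG_PROBE and record the new
 * action in the probe softc.
 */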
#define NVME_PROBE_SET_ACTION(softc, newaction)	\
do {									\
	const char **text;						\
	text = nvme_probe_action_text;					\
	CAM_DEBUG((softc)->periph->path, CAM_DEBUG_PROBE,		\
	    ("Probe %s to %s\n", text[(softc)->action],			\
	    text[(newaction)]));					\
	(softc)->action = (newaction);					\
} while(0)

typedef enum {
	NVME_PROBE_NO_ANNOUNCE	= 0x04
} nvme_probe_flags;

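/*
 * Per-probe-periph state: the queue of outstanding scan request CCBs, the
 * current probe action, announcement flags, and a restart flag set when a
 * new scan request arrives while a probe is already in progress.
 */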
typedef struct {
	TAILQ_HEAD(, ccb_hdr) request_ccbs;
	nvme_probe_action	action;
	nvme_probe_flags	flags;
	int		restart;
	struct cam_periph *periph;
} nvme_probe_softc;

static struct nvme_quirk_entry nvme_quirk_table[] =
{
	{
//		{
//		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
//		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
//		},
		.quirks = 0, .mintags = 0, .maxtags = 0
	},
};

static const int nvme_quirk_table_size =
	sizeof(nvme_quirk_table) / sizeof(*nvme_quirk_table);

static cam_status	nvme_probe_register(struct cam_periph *periph,
				      void *arg);
static void	 nvme_probe_schedule(struct cam_periph *nvme_probe_periph);
static void	 nvme_probe_start(struct cam_periph *periph, union ccb *start_ccb);
static void	 nvme_probe_cleanup(struct cam_periph *periph);
//static void	 nvme_find_quirk(struct cam_ed *device);
static void	 nvme_scan_lun(struct cam_periph *periph,
			       struct cam_path *path, cam_flags flags,
			       union ccb *ccb);
static struct cam_ed *
		 nvme_alloc_device(struct cam_eb *bus, struct cam_et *target,
				   lun_id_t lun_id);
static void	 nvme_device_transport(struct cam_path *path);
static void	 nvme_dev_async(u_int32_t async_code,
				struct cam_eb *bus,
				struct cam_et *target,
				struct cam_ed *device,
				void *async_arg);
static void	 nvme_action(union ccb *start_ccb);
static void	 nvme_announce_periph(struct cam_periph *periph);
static void	 nvme_proto_announce(struct cam_ed *device);
static void	 nvme_proto_denounce(struct cam_ed *device);
static void	 nvme_proto_debug_out(union ccb *ccb);

static struct xpt_xport_ops nvme_xport_ops = {
	.alloc_device = nvme_alloc_device,
	.action = nvme_action,
	.async = nvme_dev_async,
	.announce = nvme_announce_periph,
};
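/*
 * Declare and register an xpt_xport for a given XPORT_* code, all sharing
 * nvme_xport_ops.
 */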
#define NVME_XPT_XPORT(x, X)			\
static struct xpt_xport nvme_xport_ ## x = {	\
	.xport = XPORT_ ## X,			\
	.name = #x,				\
	.ops = &nvme_xport_ops,			\
};						\
CAM_XPT_XPORT(nvme_xport_ ## x);

NVME_XPT_XPORT(nvme, NVME);

#undef NVME_XPT_XPORT

static struct xpt_proto_ops nvme_proto_ops = {
	.announce = nvme_proto_announce,
	.denounce = nvme_proto_denounce,
	.debug_out = nvme_proto_debug_out,
};
static struct xpt_proto nvme_proto = {
	.proto = PROTO_NVME,
	.name = "nvme",
	.ops = &nvme_proto_ops,
};
CAM_XPT_PROTO(nvme_proto);

static void
nvme_probe_periph_init(void)
{

}

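/*
 * Create the probe periph for a path: allocate the softc, queue the
 * incoming request CCB on it, and schedule the probe to run.
 */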
static cam_status
nvme_probe_register(struct cam_periph *periph, void *arg)
{
	union ccb *request_ccb;	/* CCB representing the probe request */
	nvme_probe_softc *softc;

	request_ccb = (union ccb *)arg;
	if (request_ccb == NULL) {
		printf("nvme_probe_register: no probe CCB, "
		       "can't register device\n");
		return(CAM_REQ_CMP_ERR);
	}

	softc = (nvme_probe_softc *)malloc(sizeof(*softc), M_CAMXPT, M_ZERO | M_NOWAIT);

	if (softc == NULL) {
		printf("nvme_probe_register: Unable to probe new device. "
		       "Unable to allocate softc\n");
		return(CAM_REQ_CMP_ERR);
	}
	TAILQ_INIT(&softc->request_ccbs);
	TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
			  periph_links.tqe);
	softc->flags = 0;
	periph->softc = softc;
	softc->periph = periph;
	softc->action = NVME_PROBE_INVALID;
	if (cam_periph_acquire(periph) != 0)
		return (CAM_REQ_CMP_ERR);

	CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe started\n"));

//	nvme_device_transport(periph->path);
	nvme_probe_schedule(periph);

	return(CAM_REQ_CMP);
}

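/*
 * Kick off the probe: start in the IDENTIFY state, honor the caller's
 * CAM_EXPECT_INQ_CHANGE hint to suppress re-announcement, and schedule
 * the periph at XPT priority.
 */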
static void
nvme_probe_schedule(struct cam_periph *periph)
{
	union ccb *ccb;
	nvme_probe_softc *softc;

	softc = (nvme_probe_softc *)periph->softc;
	ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);

	NVME_PROBE_SET_ACTION(softc, NVME_PROBE_IDENTIFY);

	if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
		softc->flags |= NVME_PROBE_NO_ANNOUNCE;
	else
		softc->flags &= ~NVME_PROBE_NO_ANNOUNCE;

	xpt_schedule(periph, CAM_PRIORITY_XPT);
}

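/*
 * Run the probe state machine.  NVMe needs no SIM round trips to gather
 * identify data, so RESET/IDENTIFY simply (re)announce the device, after
 * which any queued request CCBs are completed and the periph is torn down.
 */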
static void
nvme_probe_start(struct cam_periph *periph, union ccb *start_ccb)
{
	struct ccb_nvmeio *nvmeio;
	struct ccb_scsiio *csio;
	nvme_probe_softc *softc;
	struct cam_path *path;
	const struct nvme_namespace_data *nvme_data;
	lun_id_t lun;

	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("nvme_probe_start\n"));

	softc = (nvme_probe_softc *)periph->softc;
	path = start_ccb->ccb_h.path;
	nvmeio = &start_ccb->nvmeio;
	csio = &start_ccb->csio;
	nvme_data = periph->path->device->nvme_data;

	if (softc->restart) {
		softc->restart = 0;
		if (periph->path->device->flags & CAM_DEV_UNCONFIGURED)
			NVME_PROBE_SET_ACTION(softc, NVME_PROBE_RESET);
		else
			NVME_PROBE_SET_ACTION(softc, NVME_PROBE_IDENTIFY);
	}

	/*
	 * Other transports have to ask their SIM to do a lot of action.
	 * NVMe doesn't, so don't do the dance. Just do things
	 * directly.
	 */
	switch (softc->action) {
	case NVME_PROBE_RESET:
		/* FALLTHROUGH */
	case NVME_PROBE_IDENTIFY:
		nvme_device_transport(path);
		/*
		 * Test for lun == CAM_LUN_WILDCARD is lame, but
		 * appears to be necessary here. XXX
		 */
		lun = xpt_path_lun_id(periph->path);
		if (lun == CAM_LUN_WILDCARD ||
		    periph->path->device->flags & CAM_DEV_UNCONFIGURED) {
			path->device->flags &= ~CAM_DEV_UNCONFIGURED;
			xpt_acquire_device(path->device);
			start_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_action(start_ccb);
			xpt_async(AC_FOUND_DEVICE, path, start_ccb);
		}
		NVME_PROBE_SET_ACTION(softc, NVME_PROBE_DONE);
		break;
	default:
		panic("nvme_probe_start: invalid action state 0x%x\n", softc->action);
	}
	/*
	 * Probing is now done. We need to complete any lingering items
	 * in the queue, though there shouldn't be any.
	 */
	xpt_release_ccb(start_ccb);
	CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe completed\n"));
	while ((start_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs))) {
		TAILQ_REMOVE(&softc->request_ccbs,
		    &start_ccb->ccb_h, periph_links.tqe);
		start_ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(start_ccb);
	}
	cam_periph_invalidate(periph);
	cam_periph_release_locked(periph);
}

static void
nvme_probe_cleanup(struct cam_periph *periph)
{

	free(periph->softc, M_CAMXPT);
}

#if 0
/* XXX should be used, don't delete */
static void
nvme_find_quirk(struct cam_ed *device)
{
	struct nvme_quirk_entry *quirk;
	caddr_t	match;

	match = cam_quirkmatch((caddr_t)&device->nvme_data,
			       (caddr_t)nvme_quirk_table,
			       nvme_quirk_table_size,
			       sizeof(*nvme_quirk_table), nvme_identify_match);

	if (match == NULL)
		panic("xpt_find_quirk: device didn't match wildcard entry!!");

	quirk = (struct nvme_quirk_entry *)match;
	device->quirk = quirk;
	if (quirk->quirks & CAM_QUIRK_MAXTAGS) {
		device->mintags = quirk->mintags;
		device->maxtags = quirk->maxtags;
	}
}
#endif

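/*
 * Entry point for XPT_SCAN_BUS/TGT/LUN requests.  If a probe periph is
 * already attached to the path, queue the request CCB on it and flag a
 * restart; otherwise allocate a fresh nvme_probe periph to do the work.
 */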
static void
nvme_scan_lun(struct cam_periph *periph, struct cam_path *path,
	     cam_flags flags, union ccb *request_ccb)
{
	struct ccb_pathinq cpi;
	cam_status status;
	struct cam_periph *old_periph;
	int lock;

	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("nvme_scan_lun\n"));

	xpt_path_inq(&cpi, path);

	if (cpi.ccb_h.status != CAM_REQ_CMP) {
		if (request_ccb != NULL) {
			request_ccb->ccb_h.status = cpi.ccb_h.status;
			xpt_done(request_ccb);
		}
		return;
	}

	if (xpt_path_lun_id(path) == CAM_LUN_WILDCARD) {
		CAM_DEBUG(path, CAM_DEBUG_TRACE, ("nvme_scan_lun ignoring bus\n"));
		request_ccb->ccb_h.status = CAM_REQ_CMP;	/* XXX signal error ? */
		xpt_done(request_ccb);
		return;
	}

	lock = (xpt_path_owned(path) == 0);
	if (lock)
		xpt_path_lock(path);
	if ((old_periph = cam_periph_find(path, "nvme_probe")) != NULL) {
		if ((old_periph->flags & CAM_PERIPH_INVALID) == 0) {
			nvme_probe_softc *softc;

			softc = (nvme_probe_softc *)old_periph->softc;
			TAILQ_INSERT_TAIL(&softc->request_ccbs,
				&request_ccb->ccb_h, periph_links.tqe);
			softc->restart = 1;
			CAM_DEBUG(path, CAM_DEBUG_TRACE,
			    ("restarting nvme_probe device\n"));
		} else {
			request_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			CAM_DEBUG(path, CAM_DEBUG_TRACE,
			    ("Failing to restart nvme_probe device\n"));
			xpt_done(request_ccb);
		}
	} else {
		CAM_DEBUG(path, CAM_DEBUG_TRACE,
		    ("Adding nvme_probe device\n"));
		status = cam_periph_alloc(nvme_probe_register, NULL, nvme_probe_cleanup,
					  nvme_probe_start, "nvme_probe",
					  CAM_PERIPH_BIO,
					  request_ccb->ccb_h.path, NULL, 0,
					  request_ccb);

		if (status != CAM_REQ_CMP) {
			xpt_print(path, "nvme_scan_lun: cam_periph_alloc "
			    "returned an error, can't continue probe\n");
			request_ccb->ccb_h.status = status;
			xpt_done(request_ccb);
		}
	}
	if (lock)
		xpt_path_unlock(path);
}

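/*
 * Transport hook to allocate a CAM device.  Until identify data is
 * available the device is seeded with the default (last) entry in the
 * quirk table.
 */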
static struct cam_ed *
nvme_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
{
	struct nvme_quirk_entry *quirk;
	struct cam_ed *device;

	device = xpt_alloc_device(bus, target, lun_id);
	if (device == NULL)
		return (NULL);

	/*
	 * Take the default quirk entry until we have inquiry
	 * data from nvme and can determine a better quirk to use.
	 */
	quirk = &nvme_quirk_table[nvme_quirk_table_size - 1];
	device->quirk = (void *)quirk;
	device->mintags = 0;
	device->maxtags = 0;
	device->inq_flags = 0;
	device->queue_flags = 0;
	device->device_id = NULL;	/* XXX Need to set this somewhere */
	device->device_id_len = 0;
	device->serial_num = NULL;	/* XXX Need to set this somewhere */
	device->serial_num_len = 0;
	return (device);
}

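/*
 * Copy the transport and protocol identifiers reported by the SIM's path
 * inquiry into the device, then push them back as the current settings.
 */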
static void
nvme_device_transport(struct cam_path *path)
{
	struct ccb_pathinq cpi;
	struct ccb_trans_settings cts;
	/* XXX get data from nvme namespace and other info ??? */

	/* Get transport information from the SIM */
	xpt_path_inq(&cpi, path);

	path->device->transport = cpi.transport;
	path->device->transport_version = cpi.transport_version;

	path->device->protocol = cpi.protocol;
	path->device->protocol_version = cpi.protocol_version;

	/* Tell the controller what we think */
	xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE);
	cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	cts.transport = path->device->transport;
	cts.transport_version = path->device->transport_version;
	cts.protocol = path->device->protocol;
	cts.protocol_version = path->device->protocol_version;
	cts.proto_specific.valid = 0;
	cts.xport_specific.valid = 0;
	xpt_action((union ccb *)&cts);
}

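/*
 * Service XPT_DEV_ADVINFO: report the device ID, serial number, physical
 * path, and cached NVMe controller/namespace identify data.  Only the
 * physical path supports CDAI_FLAG_STORE.
 */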
static void
nvme_dev_advinfo(union ccb *start_ccb)
{
	struct cam_ed *device;
	struct ccb_dev_advinfo *cdai;
	off_t amt;

	start_ccb->ccb_h.status = CAM_REQ_INVALID;
	device = start_ccb->ccb_h.path->device;
	cdai = &start_ccb->cdai;
	switch(cdai->buftype) {
	case CDAI_TYPE_SCSI_DEVID:
		if (cdai->flags & CDAI_FLAG_STORE)
			return;
		cdai->provsiz = device->device_id_len;
		if (device->device_id_len == 0)
			break;
		amt = device->device_id_len;
		if (cdai->provsiz > cdai->bufsiz)
			amt = cdai->bufsiz;
		memcpy(cdai->buf, device->device_id, amt);
		break;
	case CDAI_TYPE_SERIAL_NUM:
		if (cdai->flags & CDAI_FLAG_STORE)
			return;
		cdai->provsiz = device->serial_num_len;
		if (device->serial_num_len == 0)
			break;
		amt = device->serial_num_len;
		if (cdai->provsiz > cdai->bufsiz)
			amt = cdai->bufsiz;
		memcpy(cdai->buf, device->serial_num, amt);
		break;
	case CDAI_TYPE_PHYS_PATH:
		if (cdai->flags & CDAI_FLAG_STORE) {
			if (device->physpath != NULL)
				free(device->physpath, M_CAMXPT);
			device->physpath_len = cdai->bufsiz;
			/* Clear existing buffer if zero length */
			if (cdai->bufsiz == 0)
				break;
			device->physpath = malloc(cdai->bufsiz, M_CAMXPT, M_NOWAIT);
			if (device->physpath == NULL) {
				start_ccb->ccb_h.status = CAM_REQ_ABORTED;
				return;
			}
			memcpy(device->physpath, cdai->buf, cdai->bufsiz);
		} else {
			cdai->provsiz = device->physpath_len;
			if (device->physpath_len == 0)
				break;
			amt = device->physpath_len;
			if (cdai->provsiz > cdai->bufsiz)
				amt = cdai->bufsiz;
			memcpy(cdai->buf, device->physpath, amt);
		}
		break;
	case CDAI_TYPE_NVME_CNTRL:
		if (cdai->flags & CDAI_FLAG_STORE)
			return;
		amt = sizeof(struct nvme_controller_data);
		cdai->provsiz = amt;
		if (amt > cdai->bufsiz)
			amt = cdai->bufsiz;
		memcpy(cdai->buf, device->nvme_cdata, amt);
		break;
	case CDAI_TYPE_NVME_NS:
		if (cdai->flags & CDAI_FLAG_STORE)
			return;
		amt = sizeof(struct nvme_namespace_data);
		cdai->provsiz = amt;
		if (amt > cdai->bufsiz)
			amt = cdai->bufsiz;
		memcpy(cdai->buf, device->nvme_data, amt);
		break;
	default:
		return;
	}
	start_ccb->ccb_h.status = CAM_REQ_CMP;

	if (cdai->flags & CDAI_FLAG_STORE) {
		xpt_async(AC_ADVINFO_CHANGED, start_ccb->ccb_h.path,
			  (void *)(uintptr_t)cdai->buftype);
	}
}

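/*
 * Transport action routine: handle scan and advinfo requests locally and
 * hand everything else to xpt_action_default().
 */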
static void
nvme_action(union ccb *start_ccb)
{
	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE,
	    ("nvme_action: func= %#x\n", start_ccb->ccb_h.func_code));

	switch (start_ccb->ccb_h.func_code) {
	case XPT_SCAN_BUS:
	case XPT_SCAN_TGT:
	case XPT_SCAN_LUN:
		nvme_scan_lun(start_ccb->ccb_h.path->periph,
			      start_ccb->ccb_h.path, start_ccb->crcn.flags,
			      start_ccb);
		break;
	case XPT_DEV_ADVINFO:
		nvme_dev_advinfo(start_ccb);
		break;

	default:
		xpt_action_default(start_ccb);
		break;
	}
}

/*
 * Handle any per-device event notifications that require action by the XPT.
 */
static void
nvme_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target,
	      struct cam_ed *device, void *async_arg)
{

	/*
	 * We only need to handle events for real devices.
	 */
	if (target->target_id == CAM_TARGET_WILDCARD
	 || device->lun_id == CAM_LUN_WILDCARD)
		return;

	if (async_code == AC_LOST_DEVICE &&
	    (device->flags & CAM_DEV_UNCONFIGURED) == 0) {
		device->flags |= CAM_DEV_UNCONFIGURED;
		xpt_release_device(device);
	}
}

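/*
 * Print the negotiated link for a periph: NVMe spec version plus current
 * and maximum PCIe lane count and generation, as reported by the SIM via
 * XPT_GET_TRAN_SETTINGS.
 */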
static void
nvme_announce_periph(struct cam_periph *periph)
{
	struct	ccb_pathinq cpi;
	struct	ccb_trans_settings cts;
	struct	cam_path *path = periph->path;
	struct ccb_trans_settings_nvme	*nvmex;

	cam_periph_assert(periph, MA_OWNED);

	/* Ask the SIM for connection details */
	xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NORMAL);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	xpt_action((union ccb*)&cts);
	if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
		return;
	nvmex = &cts.xport_specific.nvme;

	/* Ask the SIM for its base transfer speed */
	xpt_path_inq(&cpi, periph->path);
	printf("%s%d: nvme version %d.%d x%d (max x%d) lanes PCIe Gen%d (max Gen%d) link",
	    periph->periph_name, periph->unit_number,
	    NVME_MAJOR(nvmex->spec),
	    NVME_MINOR(nvmex->spec),
	    nvmex->lanes, nvmex->max_lanes,
	    nvmex->speed, nvmex->max_speed);
	printf("\n");
}

static void
nvme_proto_announce(struct cam_ed *device)
{
	struct sbuf	sb;
	char		buffer[120];

	sbuf_new(&sb, buffer, sizeof(buffer), SBUF_FIXEDLEN);
	nvme_print_ident(device->nvme_cdata, device->nvme_data, &sb);
	sbuf_finish(&sb);
	sbuf_putbuf(&sb);
}

static void
nvme_proto_denounce(struct cam_ed *device)
{

	nvme_proto_announce(device);
}

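/*
 * Decode the NVMe command in an XPT_NVME_IO CCB and emit it under
 * CAM_DEBUG_CDB.
 */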
static void
nvme_proto_debug_out(union ccb *ccb)
{
	char cdb_str[(sizeof(struct nvme_command) * 3) + 1];

	if (ccb->ccb_h.func_code != XPT_NVME_IO)
		return;

	CAM_DEBUG(ccb->ccb_h.path,
	    CAM_DEBUG_CDB,("%s. NCB: %s\n", nvme_op_string(&ccb->nvmeio.cmd),
		nvme_cmd_string(&ccb->nvmeio.cmd, cdb_str, sizeof(cdb_str))));
}