xref: /openbsd/sys/dev/ic/nvme.c (revision fc6d48fd)
1*fc6d48fdSkrw /*	$OpenBSD: nvme.c,v 1.117 2024/06/04 20:31:35 krw Exp $ */
2282c0692Sdlg 
3282c0692Sdlg /*
4282c0692Sdlg  * Copyright (c) 2014 David Gwynne <dlg@openbsd.org>
5282c0692Sdlg  *
6282c0692Sdlg  * Permission to use, copy, modify, and distribute this software for any
7282c0692Sdlg  * purpose with or without fee is hereby granted, provided that the above
8282c0692Sdlg  * copyright notice and this permission notice appear in all copies.
9282c0692Sdlg  *
10282c0692Sdlg  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11282c0692Sdlg  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12282c0692Sdlg  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13282c0692Sdlg  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14282c0692Sdlg  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15282c0692Sdlg  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16282c0692Sdlg  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17282c0692Sdlg  */
18282c0692Sdlg 
197f4636ceSkrw #include "bio.h"
207f4636ceSkrw 
21282c0692Sdlg #include <sys/param.h>
227f4636ceSkrw #include <sys/ioctl.h>
23282c0692Sdlg #include <sys/systm.h>
24282c0692Sdlg #include <sys/buf.h>
25282c0692Sdlg #include <sys/kernel.h>
26282c0692Sdlg #include <sys/malloc.h>
27282c0692Sdlg #include <sys/device.h>
28282c0692Sdlg #include <sys/queue.h>
29282c0692Sdlg #include <sys/mutex.h>
30282c0692Sdlg #include <sys/pool.h>
317f4636ceSkrw #include <sys/disk.h>
32282c0692Sdlg 
3303d86467Sjmatthew #include <sys/atomic.h>
3403d86467Sjmatthew 
35282c0692Sdlg #include <machine/bus.h>
36282c0692Sdlg 
37282c0692Sdlg #include <scsi/scsi_all.h>
38c947e044Sdlg #include <scsi/scsi_disk.h>
39282c0692Sdlg #include <scsi/scsiconf.h>
407f4636ceSkrw #include <scsi/sdvar.h>
41282c0692Sdlg 
427f4636ceSkrw #include <dev/biovar.h>
43448b3c09Sdlg #include <dev/ic/nvmereg.h>
44448b3c09Sdlg #include <dev/ic/nvmevar.h>
454e9514d6Skrw #include <dev/ic/nvmeio.h>
46448b3c09Sdlg 
/* Autoconf glue: driver name and (non-disk) device class for nvme(4). */
struct cfdriver nvme_cd = {
	NULL,
	"nvme",
	DV_DULL
};
52282c0692Sdlg 
53282c0692Sdlg int	nvme_ready(struct nvme_softc *, u_int32_t);
54ee8b2d53Skrw int	nvme_enable(struct nvme_softc *);
55282c0692Sdlg int	nvme_disable(struct nvme_softc *);
565313ab17Sdlg int	nvme_shutdown(struct nvme_softc *);
57aa5ddcf9Ssf int	nvme_resume(struct nvme_softc *);
58282c0692Sdlg 
59282c0692Sdlg void	nvme_dumpregs(struct nvme_softc *);
6088aa9192Sdlg int	nvme_identify(struct nvme_softc *, u_int);
61e623ca5aSdlg void	nvme_fill_identify(struct nvme_softc *, struct nvme_ccb *, void *);
62282c0692Sdlg 
63448b3c09Sdlg int	nvme_ccbs_alloc(struct nvme_softc *, u_int);
64a1141c40Stedu void	nvme_ccbs_free(struct nvme_softc *, u_int);
65448b3c09Sdlg 
66448b3c09Sdlg void *	nvme_ccb_get(void *);
67448b3c09Sdlg void	nvme_ccb_put(void *, void *);
68448b3c09Sdlg 
69e623ca5aSdlg int	nvme_poll(struct nvme_softc *, struct nvme_queue *, struct nvme_ccb *,
7054904088Skrw 	    void (*)(struct nvme_softc *, struct nvme_ccb *, void *), u_int32_t);
71e623ca5aSdlg void	nvme_poll_fill(struct nvme_softc *, struct nvme_ccb *, void *);
72e623ca5aSdlg void	nvme_poll_done(struct nvme_softc *, struct nvme_ccb *,
73e623ca5aSdlg 	    struct nvme_cqe *);
74ad9e5681Sdlg void	nvme_sqe_fill(struct nvme_softc *, struct nvme_ccb *, void *);
75e623ca5aSdlg void	nvme_empty_done(struct nvme_softc *, struct nvme_ccb *,
76e623ca5aSdlg 	    struct nvme_cqe *);
77448b3c09Sdlg 
78e623ca5aSdlg struct nvme_queue *
7959968badSdlg 	nvme_q_alloc(struct nvme_softc *, u_int16_t, u_int, u_int);
80cd15a86fSdlg int	nvme_q_create(struct nvme_softc *, struct nvme_queue *);
81aa5ddcf9Ssf int	nvme_q_reset(struct nvme_softc *, struct nvme_queue *);
825313ab17Sdlg int	nvme_q_delete(struct nvme_softc *, struct nvme_queue *);
83448b3c09Sdlg void	nvme_q_submit(struct nvme_softc *,
84448b3c09Sdlg 	    struct nvme_queue *, struct nvme_ccb *,
85e623ca5aSdlg 	    void (*)(struct nvme_softc *, struct nvme_ccb *, void *));
86e623ca5aSdlg int	nvme_q_complete(struct nvme_softc *, struct nvme_queue *);
87e623ca5aSdlg void	nvme_q_free(struct nvme_softc *, struct nvme_queue *);
88282c0692Sdlg 
8911624e4cSdlg void	nvme_scsi_cmd(struct scsi_xfer *);
90767e8532Skrw void	nvme_minphys(struct buf *, struct scsi_link *);
9111624e4cSdlg int	nvme_scsi_probe(struct scsi_link *);
9211624e4cSdlg void	nvme_scsi_free(struct scsi_link *);
937f4636ceSkrw uint64_t nvme_scsi_size(const struct nvm_identify_namespace *);
944e9514d6Skrw int	nvme_scsi_ioctl(struct scsi_link *, u_long, caddr_t, int);
954e9514d6Skrw int	nvme_passthrough_cmd(struct nvme_softc *, struct nvme_pt_cmd *,
964e9514d6Skrw 	int, int);
9711624e4cSdlg 
9803d86467Sjmatthew #ifdef HIBERNATE
9903d86467Sjmatthew #include <uvm/uvm_extern.h>
10003d86467Sjmatthew #include <sys/hibernate.h>
10103d86467Sjmatthew #include <sys/disklabel.h>
10203d86467Sjmatthew 
10303d86467Sjmatthew int	nvme_hibernate_io(dev_t, daddr_t, vaddr_t, size_t, int, void *);
10403d86467Sjmatthew #endif
10503d86467Sjmatthew 
1067f4636ceSkrw #if NBIO > 0
1077f4636ceSkrw void	nvme_bio_status(struct bio_status *, const char *, ...);
1087f4636ceSkrw 
1097f4636ceSkrw const char *nvme_bioctl_sdname(const struct nvme_softc *, int);
1107f4636ceSkrw 
1117f4636ceSkrw int	nvme_bioctl(struct device *, u_long, caddr_t);
1127f4636ceSkrw int	nvme_bioctl_inq(struct nvme_softc *, struct bioc_inq *);
1137f4636ceSkrw int	nvme_bioctl_vol(struct nvme_softc *, struct bioc_vol *);
1147f4636ceSkrw int	nvme_bioctl_disk(struct nvme_softc *, struct bioc_disk *);
1157f4636ceSkrw #endif	/* NBIO > 0 */
1167f4636ceSkrw 
117a454aff3Snaddy const struct scsi_adapter nvme_switch = {
1184e9514d6Skrw 	nvme_scsi_cmd, nvme_minphys, nvme_scsi_probe, nvme_scsi_free,
1194e9514d6Skrw 	nvme_scsi_ioctl
12011624e4cSdlg };
12111624e4cSdlg 
1221eef25a1Sdlg void	nvme_scsi_io(struct scsi_xfer *, int);
1231eef25a1Sdlg void	nvme_scsi_io_fill(struct nvme_softc *, struct nvme_ccb *, void *);
124a1e87500Sdlg void	nvme_scsi_io_done(struct nvme_softc *, struct nvme_ccb *,
125a1e87500Sdlg 	    struct nvme_cqe *);
126a1e87500Sdlg 
127689c2c68Sdlg void	nvme_scsi_sync(struct scsi_xfer *);
128689c2c68Sdlg void	nvme_scsi_sync_fill(struct nvme_softc *, struct nvme_ccb *, void *);
129689c2c68Sdlg void	nvme_scsi_sync_done(struct nvme_softc *, struct nvme_ccb *,
130689c2c68Sdlg 	    struct nvme_cqe *);
131689c2c68Sdlg 
1329856392eSdlg void	nvme_scsi_inq(struct scsi_xfer *);
1339856392eSdlg void	nvme_scsi_inquiry(struct scsi_xfer *);
134c947e044Sdlg void	nvme_scsi_capacity16(struct scsi_xfer *);
135c947e044Sdlg void	nvme_scsi_capacity(struct scsi_xfer *);
1369856392eSdlg 
1377599295eSdlg uint32_t	nvme_op_sq_enter(struct nvme_softc *,
1387599295eSdlg 		    struct nvme_queue *, struct nvme_ccb *);
1397599295eSdlg void		nvme_op_sq_leave(struct nvme_softc *,
1407599295eSdlg 		    struct nvme_queue *, struct nvme_ccb *);
1417599295eSdlg uint32_t	nvme_op_sq_enter_locked(struct nvme_softc *,
1427599295eSdlg 		    struct nvme_queue *, struct nvme_ccb *);
1437599295eSdlg void		nvme_op_sq_leave_locked(struct nvme_softc *,
1447599295eSdlg 		    struct nvme_queue *, struct nvme_ccb *);
1457599295eSdlg 
1467599295eSdlg void		nvme_op_cq_done(struct nvme_softc *,
1477599295eSdlg 		    struct nvme_queue *, struct nvme_ccb *);
1487599295eSdlg 
/*
 * Default register-level queue operations.  Bus glue may supply its own
 * sc_ops; when it does not, nvme_attach() installs this table.
 */
static const struct nvme_ops nvme_ops = {
	.op_sq_enter		= nvme_op_sq_enter,
	.op_sq_leave		= nvme_op_sq_leave,
	.op_sq_enter_locked	= nvme_op_sq_enter_locked,
	.op_sq_leave_locked	= nvme_op_sq_leave_locked,

	.op_cq_done		= nvme_op_cq_done,
};
1577599295eSdlg 
15854904088Skrw #define NVME_TIMO_QOP			5000	/* ms to create/delete queue */
1591488c4e9Skrw #define NVME_TIMO_PT			5000	/* ms to complete passthrough */
16054904088Skrw #define NVME_TIMO_IDENT			10000	/* ms to probe/identify */
16154904088Skrw #define NVME_TIMO_DELAYNS		10	/* ns to delay() in poll loop */
16254904088Skrw 
163343e9e5aSmpi /*
164343e9e5aSmpi  * Some controllers, at least Apple NVMe, always require split
165343e9e5aSmpi  * transfers, so don't use bus_space_{read,write}_8() on LP64.
166343e9e5aSmpi  */
167a30550dbSdlg u_int64_t
nvme_read8(struct nvme_softc * sc,bus_size_t r)168282c0692Sdlg nvme_read8(struct nvme_softc *sc, bus_size_t r)
169282c0692Sdlg {
170282c0692Sdlg 	u_int64_t v;
171282c0692Sdlg 
1727bd1ac27Sdlg 	v = (u_int64_t)nvme_read4(sc, r) |
1737bd1ac27Sdlg 	    (u_int64_t)nvme_read4(sc, r + 4) << 32;
174282c0692Sdlg 
175282c0692Sdlg 	return (v);
176282c0692Sdlg }
177282c0692Sdlg 
178a30550dbSdlg void
nvme_write8(struct nvme_softc * sc,bus_size_t r,u_int64_t v)179282c0692Sdlg nvme_write8(struct nvme_softc *sc, bus_size_t r, u_int64_t v)
180282c0692Sdlg {
1817bd1ac27Sdlg 	nvme_write4(sc, r, v);
1827bd1ac27Sdlg 	nvme_write4(sc, r + 4, v >> 32);
183282c0692Sdlg }
184282c0692Sdlg 
185282c0692Sdlg void
nvme_dumpregs(struct nvme_softc * sc)186282c0692Sdlg nvme_dumpregs(struct nvme_softc *sc)
187282c0692Sdlg {
188282c0692Sdlg 	u_int64_t r8;
189282c0692Sdlg 	u_int32_t r4;
190282c0692Sdlg 
191282c0692Sdlg 	r8 = nvme_read8(sc, NVME_CAP);
192282c0692Sdlg 	printf("%s: cap  0x%016llx\n", DEVNAME(sc), nvme_read8(sc, NVME_CAP));
193282c0692Sdlg 	printf("%s:  mpsmax %u (%u)\n", DEVNAME(sc),
194282c0692Sdlg 	    (u_int)NVME_CAP_MPSMAX(r8), (1 << NVME_CAP_MPSMAX(r8)));
195282c0692Sdlg 	printf("%s:  mpsmin %u (%u)\n", DEVNAME(sc),
196282c0692Sdlg 	    (u_int)NVME_CAP_MPSMIN(r8), (1 << NVME_CAP_MPSMIN(r8)));
197282c0692Sdlg 	printf("%s:  css %llu\n", DEVNAME(sc), NVME_CAP_CSS(r8));
198282c0692Sdlg 	printf("%s:  nssrs %llu\n", DEVNAME(sc), NVME_CAP_NSSRS(r8));
199a7f13332Sdlg 	printf("%s:  dstrd %u\n", DEVNAME(sc), NVME_CAP_DSTRD(r8));
200282c0692Sdlg 	printf("%s:  to %llu msec\n", DEVNAME(sc), NVME_CAP_TO(r8));
201282c0692Sdlg 	printf("%s:  ams %llu\n", DEVNAME(sc), NVME_CAP_AMS(r8));
202282c0692Sdlg 	printf("%s:  cqr %llu\n", DEVNAME(sc), NVME_CAP_CQR(r8));
203282c0692Sdlg 	printf("%s:  mqes %llu\n", DEVNAME(sc), NVME_CAP_MQES(r8));
204282c0692Sdlg 
205a7f13332Sdlg 	printf("%s: vs   0x%04x\n", DEVNAME(sc), nvme_read4(sc, NVME_VS));
206282c0692Sdlg 
207282c0692Sdlg 	r4 = nvme_read4(sc, NVME_CC);
208a7f13332Sdlg 	printf("%s: cc   0x%04x\n", DEVNAME(sc), r4);
209282c0692Sdlg 	printf("%s:  iocqes %u\n", DEVNAME(sc), NVME_CC_IOCQES_R(r4));
210282c0692Sdlg 	printf("%s:  iosqes %u\n", DEVNAME(sc), NVME_CC_IOSQES_R(r4));
211282c0692Sdlg 	printf("%s:  shn %u\n", DEVNAME(sc), NVME_CC_SHN_R(r4));
212282c0692Sdlg 	printf("%s:  ams %u\n", DEVNAME(sc), NVME_CC_AMS_R(r4));
213282c0692Sdlg 	printf("%s:  mps %u\n", DEVNAME(sc), NVME_CC_MPS_R(r4));
214282c0692Sdlg 	printf("%s:  css %u\n", DEVNAME(sc), NVME_CC_CSS_R(r4));
215282c0692Sdlg 	printf("%s:  en %u\n", DEVNAME(sc), ISSET(r4, NVME_CC_EN));
216282c0692Sdlg 
217a7f13332Sdlg 	printf("%s: csts 0x%08x\n", DEVNAME(sc), nvme_read4(sc, NVME_CSTS));
218a7f13332Sdlg 	printf("%s: aqa  0x%08x\n", DEVNAME(sc), nvme_read4(sc, NVME_AQA));
219282c0692Sdlg 	printf("%s: asq  0x%016llx\n", DEVNAME(sc), nvme_read8(sc, NVME_ASQ));
220282c0692Sdlg 	printf("%s: acq  0x%016llx\n", DEVNAME(sc), nvme_read8(sc, NVME_ACQ));
221282c0692Sdlg }
222282c0692Sdlg 
223282c0692Sdlg int
nvme_ready(struct nvme_softc * sc,u_int32_t rdy)224282c0692Sdlg nvme_ready(struct nvme_softc *sc, u_int32_t rdy)
225282c0692Sdlg {
2269fb4d6d3Skrw 	u_int i = 0;
227282c0692Sdlg 
2289fb4d6d3Skrw 	while ((nvme_read4(sc, NVME_CSTS) & NVME_CSTS_RDY) != rdy) {
2299fb4d6d3Skrw 		if (i++ > sc->sc_rdy_to)
2309fb4d6d3Skrw 			return (1);
231282c0692Sdlg 
232282c0692Sdlg 		delay(1000);
233282c0692Sdlg 		nvme_barrier(sc, NVME_CSTS, 4, BUS_SPACE_BARRIER_READ);
234282c0692Sdlg 	}
235282c0692Sdlg 
2369fb4d6d3Skrw 	return (0);
237282c0692Sdlg }
238282c0692Sdlg 
/*
 * Bring the controller to the enabled state.  If it is already enabled,
 * just wait for CSTS.RDY.  Otherwise program the admin queue registers
 * (AQA/ASQ/ACQ) and controller configuration, then set CC.EN and wait
 * for readiness.  Returns 0 on success, non-zero on timeout.
 *
 * The write order (AQA, then ASQ/ACQ, then CC) with barriers between
 * each step is deliberate; the admin queue must be fully described
 * before EN is set.
 */
int
nvme_enable(struct nvme_softc *sc)
{
	u_int32_t cc;

	cc = nvme_read4(sc, NVME_CC);
	if (ISSET(cc, NVME_CC_EN))
		return (nvme_ready(sc, NVME_CSTS_RDY));

	/* Bus glue hook, run before touching the queue registers. */
	if (sc->sc_ops->op_enable != NULL)
		sc->sc_ops->op_enable(sc);

	/* Admin submission and completion queues share one entry count. */
	nvme_write4(sc, NVME_AQA, NVME_AQA_ACQS(sc->sc_admin_q->q_entries) |
	    NVME_AQA_ASQS(sc->sc_admin_q->q_entries));
	nvme_barrier(sc, 0, sc->sc_ios, BUS_SPACE_BARRIER_WRITE);

	nvme_write8(sc, NVME_ASQ, NVME_DMA_DVA(sc->sc_admin_q->q_sq_dmamem));
	nvme_barrier(sc, 0, sc->sc_ios, BUS_SPACE_BARRIER_WRITE);
	nvme_write8(sc, NVME_ACQ, NVME_DMA_DVA(sc->sc_admin_q->q_cq_dmamem));
	nvme_barrier(sc, 0, sc->sc_ios, BUS_SPACE_BARRIER_WRITE);

	CLR(cc, NVME_CC_IOCQES_MASK | NVME_CC_IOSQES_MASK | NVME_CC_SHN_MASK |
	    NVME_CC_AMS_MASK | NVME_CC_MPS_MASK | NVME_CC_CSS_MASK);
	SET(cc, NVME_CC_IOSQES(6));	/* Submission queue size == 2**6 (64) */
	SET(cc, NVME_CC_IOCQES(4));	/* Completion queue size == 2**4 (16) */
	SET(cc, NVME_CC_SHN(NVME_CC_SHN_NONE));
	SET(cc, NVME_CC_CSS(NVME_CC_CSS_NVM));
	SET(cc, NVME_CC_AMS(NVME_CC_AMS_RR));
	SET(cc, NVME_CC_MPS(ffs(sc->sc_mps) - 1));	/* MPS is log2(mps) - 12 */
	SET(cc, NVME_CC_EN);

	nvme_write4(sc, NVME_CC, cc);
	nvme_barrier(sc, 0, sc->sc_ios,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);

	return (nvme_ready(sc, NVME_CSTS_RDY));
}
276282c0692Sdlg 
/*
 * Disable the controller by clearing CC.EN, then wait for CSTS.RDY to
 * drop.  If the controller is currently enabled and not in a fatal
 * state (CSTS.CFS), wait for it to become ready first so the disable
 * transition is well defined.  Returns 0 on success, non-zero on
 * timeout.
 */
int
nvme_disable(struct nvme_softc *sc)
{
	u_int32_t cc, csts;

	cc = nvme_read4(sc, NVME_CC);
	if (ISSET(cc, NVME_CC_EN)) {
		csts = nvme_read4(sc, NVME_CSTS);
		/* A fatal-status controller will never reach RDY; skip the wait. */
		if (!ISSET(csts, NVME_CSTS_CFS) &&
		    nvme_ready(sc, NVME_CSTS_RDY) != 0)
			return (1);
	}

	CLR(cc, NVME_CC_EN);

	nvme_write4(sc, NVME_CC, cc);
	nvme_barrier(sc, 0, sc->sc_ios,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);

	return (nvme_ready(sc, 0));
}
298282c0692Sdlg 
/*
 * Common attach path for all nvme(4) bus front-ends.  Latches controller
 * capabilities, resets and enables the controller, identifies it,
 * creates the I/O queue(s), and attaches a scsibus on top.  Returns 0
 * on success, 1 on failure (resources unwound via the goto chain).
 */
int
nvme_attach(struct nvme_softc *sc)
{
	struct scsibus_attach_args saa;
	u_int64_t cap;
	u_int32_t reg;
	u_int nccbs = 0;		/* how many ccbs to free on unwind */

	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
	rw_init(&sc->sc_lock, "nvme_lock");
	SIMPLEQ_INIT(&sc->sc_ccb_list);
	scsi_iopool_init(&sc->sc_iopool, sc, nvme_ccb_get, nvme_ccb_put);
	/* Fall back to the default register ops if bus glue set none. */
	if (sc->sc_ops == NULL)
		sc->sc_ops = &nvme_ops;
	if (sc->sc_openings == 0)
		sc->sc_openings = 64;

	reg = nvme_read4(sc, NVME_VS);
	if (reg == 0xffffffff) {
		/* All-ones reads mean the BAR mapping is dead. */
		printf("invalid mapping\n");
		return (1);
	}

	printf("NVMe %d.%d\n", NVME_VS_MJR(reg), NVME_VS_MNR(reg));

	cap = nvme_read8(sc, NVME_CAP);
	sc->sc_dstrd = NVME_CAP_DSTRD(cap);
	if (NVME_CAP_MPSMIN(cap) > PAGE_SHIFT) {
		printf("%s: NVMe minimum page size %u "
		    "is greater than CPU page size %u\n", DEVNAME(sc),
		    1 << NVME_CAP_MPSMIN(cap), 1 << PAGE_SHIFT);
		return (1);
	}
	/* Use the largest page size both the CPU and controller support. */
	if (NVME_CAP_MPSMAX(cap) < PAGE_SHIFT)
		sc->sc_mps = 1 << NVME_CAP_MPSMAX(cap);
	else
		sc->sc_mps = 1 << PAGE_SHIFT;

	sc->sc_rdy_to = NVME_CAP_TO(cap);
	/* Provisional transfer limits until IDENTIFY reports real MDTS. */
	sc->sc_mdts = MAXPHYS;
	sc->sc_max_prpl = sc->sc_mdts / sc->sc_mps;

	if (nvme_disable(sc) != 0) {
		printf("%s: unable to disable controller\n", DEVNAME(sc));
		return (1);
	}

	sc->sc_admin_q = nvme_q_alloc(sc, NVME_ADMIN_Q, 128, sc->sc_dstrd);
	if (sc->sc_admin_q == NULL) {
		printf("%s: unable to allocate admin queue\n", DEVNAME(sc));
		return (1);
	}

	/* A small initial pool, enough to run the identify commands. */
	if (nvme_ccbs_alloc(sc, 16) != 0) {
		printf("%s: unable to allocate initial ccbs\n", DEVNAME(sc));
		goto free_admin_q;
	}
	nccbs = 16;

	if (nvme_enable(sc) != 0) {
		printf("%s: unable to enable controller\n", DEVNAME(sc));
		goto free_ccbs;
	}

	if (nvme_identify(sc, NVME_CAP_MPSMIN(cap)) != 0) {
		printf("%s: unable to identify controller\n", DEVNAME(sc));
		goto disable;
	}

	/* We now know the real values of sc_mdts and sc_max_prpl. */
	nvme_ccbs_free(sc, nccbs);
	if (nvme_ccbs_alloc(sc, 64) != 0) {
		printf("%s: unable to allocate ccbs\n", DEVNAME(sc));
		goto free_admin_q;
	}
	nccbs = 64;

	sc->sc_q = nvme_q_alloc(sc, NVME_IO_Q, 128, sc->sc_dstrd);
	if (sc->sc_q == NULL) {
		printf("%s: unable to allocate io q\n", DEVNAME(sc));
		goto disable;
	}

	if (nvme_q_create(sc, sc->sc_q) != 0) {
		printf("%s: unable to create io q\n", DEVNAME(sc));
		goto free_q;
	}

#ifdef HIBERNATE
	/* Small dedicated queue so hibernate I/O avoids the normal path. */
	sc->sc_hib_q = nvme_q_alloc(sc, NVME_HIB_Q, 4, sc->sc_dstrd);
	if (sc->sc_hib_q == NULL) {
		printf("%s: unable to allocate hibernate io queue\n", DEVNAME(sc));
		goto free_q;
	}
#endif

	nvme_write4(sc, NVME_INTMC, 1);

	/* Namespace IDs are 1-based, hence the +1 sizing. */
	sc->sc_namespaces = mallocarray(sc->sc_nn + 1,
	    sizeof(*sc->sc_namespaces), M_DEVBUF, M_WAITOK|M_ZERO);

	saa.saa_adapter = &nvme_switch;
	saa.saa_adapter_softc = sc;
	saa.saa_adapter_buswidth = sc->sc_nn + 1;
	saa.saa_luns = 1;
	saa.saa_adapter_target = 0;
	saa.saa_openings = sc->sc_openings;
	saa.saa_pool = &sc->sc_iopool;
	saa.saa_quirks = saa.saa_flags = 0;
	saa.saa_wwpn = saa.saa_wwnn = 0;

	sc->sc_scsibus = (struct scsibus_softc *)config_found(&sc->sc_dev,
	    &saa, scsiprint);
#if NBIO > 0
	/* bioctl registration failure is non-fatal; attach proceeds. */
	if (bio_register(&sc->sc_dev, nvme_bioctl) != 0)
		printf("%s: unable to register bioctl\n", DEVNAME(sc));
#endif	/* NBIO > 0 */

	return (0);

free_q:
	nvme_q_free(sc, sc->sc_q);
disable:
	nvme_disable(sc);
free_ccbs:
	nvme_ccbs_free(sc, nccbs);
free_admin_q:
	nvme_q_free(sc, sc->sc_admin_q);

	return (1);
}
430282c0692Sdlg 
/*
 * Resume path: reset and re-enable the controller after suspend,
 * rebuild the admin queue state and recreate the I/O queue, then
 * unmask interrupts.  Returns 0 on success, 1 on failure.
 */
int
nvme_resume(struct nvme_softc *sc)
{
	if (nvme_disable(sc) != 0) {
		printf("%s: unable to disable controller\n", DEVNAME(sc));
		return (1);
	}

	if (nvme_q_reset(sc, sc->sc_admin_q) != 0) {
		printf("%s: unable to reset admin queue\n", DEVNAME(sc));
		return (1);
	}

	if (nvme_enable(sc) != 0) {
		printf("%s: unable to enable controller\n", DEVNAME(sc));
		return (1);
	}

	/* The I/O queue was deleted at suspend; allocate a fresh one. */
	sc->sc_q = nvme_q_alloc(sc, NVME_IO_Q, 128, sc->sc_dstrd);
	if (sc->sc_q == NULL) {
		printf("%s: unable to allocate io q\n", DEVNAME(sc));
		goto disable;
	}

	if (nvme_q_create(sc, sc->sc_q) != 0) {
		printf("%s: unable to create io q\n", DEVNAME(sc));
		goto free_q;
	}

	nvme_write4(sc, NVME_INTMC, 1);

	return (0);

free_q:
	nvme_q_free(sc, sc->sc_q);
disable:
	nvme_disable(sc);

	return (1);
}
471aa5ddcf9Ssf 
472aa5ddcf9Ssf int
nvme_scsi_probe(struct scsi_link * link)47311624e4cSdlg nvme_scsi_probe(struct scsi_link *link)
47411624e4cSdlg {
4750b29cb40Skrw 	struct nvme_softc *sc = link->bus->sb_adapter_softc;
47665dd51f8Sdlg 	struct nvme_sqe sqe;
47765dd51f8Sdlg 	struct nvm_identify_namespace *identify;
47865dd51f8Sdlg 	struct nvme_dmamem *mem;
47965dd51f8Sdlg 	struct nvme_ccb *ccb;
48065dd51f8Sdlg 	int rv;
48165dd51f8Sdlg 
48265dd51f8Sdlg 	ccb = scsi_io_get(&sc->sc_iopool, 0);
48365dd51f8Sdlg 	KASSERT(ccb != NULL);
48465dd51f8Sdlg 
48565dd51f8Sdlg 	mem = nvme_dmamem_alloc(sc, sizeof(*identify));
48665dd51f8Sdlg 	if (mem == NULL)
48765dd51f8Sdlg 		return (ENOMEM);
48865dd51f8Sdlg 
48965dd51f8Sdlg 	memset(&sqe, 0, sizeof(sqe));
49065dd51f8Sdlg 	sqe.opcode = NVM_ADMIN_IDENTIFY;
491397f5692Skettenis 	htolem32(&sqe.nsid, link->target);
49265dd51f8Sdlg 	htolem64(&sqe.entry.prp[0], NVME_DMA_DVA(mem));
49365dd51f8Sdlg 	htolem32(&sqe.cdw10, 0);
49465dd51f8Sdlg 
49565dd51f8Sdlg 	ccb->ccb_done = nvme_empty_done;
49665dd51f8Sdlg 	ccb->ccb_cookie = &sqe;
49765dd51f8Sdlg 
49865dd51f8Sdlg 	nvme_dmamem_sync(sc, mem, BUS_DMASYNC_PREREAD);
49954904088Skrw 	rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill, NVME_TIMO_IDENT);
50065dd51f8Sdlg 	nvme_dmamem_sync(sc, mem, BUS_DMASYNC_POSTREAD);
50165dd51f8Sdlg 
50265dd51f8Sdlg 	scsi_io_put(&sc->sc_iopool, ccb);
50365dd51f8Sdlg 
5043f48a60eSkrw 	identify = NVME_DMA_KVA(mem);
505badf3925Sjan 	if (rv == 0) {
506daef0c50Skrw 		if (nvme_scsi_size(identify) > 0) {
5073f48a60eSkrw 			/* Commit namespace if it has a size greater than zero. */
5083f48a60eSkrw 			identify = malloc(sizeof(*identify), M_DEVBUF, M_WAITOK);
5093f48a60eSkrw 			memcpy(identify, NVME_DMA_KVA(mem), sizeof(*identify));
5103f48a60eSkrw 			sc->sc_namespaces[link->target].ident = identify;
511badf3925Sjan 		} else {
512badf3925Sjan 			/* Don't attach a namespace if its size is zero. */
513badf3925Sjan 			rv = ENXIO;
514badf3925Sjan 		}
51565dd51f8Sdlg 	}
51665dd51f8Sdlg 
51765dd51f8Sdlg 	nvme_dmamem_free(sc, mem);
51865dd51f8Sdlg 
51965dd51f8Sdlg 	return (rv);
52011624e4cSdlg }
52111624e4cSdlg 
/*
 * Orderly controller shutdown: mask interrupts, delete the I/O queue,
 * request a normal shutdown via CC.SHN, and poll CSTS.SHST for up to
 * ~4 seconds.  Falls back to a plain disable if any step times out.
 * Always returns 0.
 */
int
nvme_shutdown(struct nvme_softc *sc)
{
	u_int32_t cc, csts;
	int i;

	nvme_write4(sc, NVME_INTMC, 0);

	if (nvme_q_delete(sc, sc->sc_q) != 0) {
		printf("%s: unable to delete q, disabling\n", DEVNAME(sc));
		goto disable;
	}

	cc = nvme_read4(sc, NVME_CC);
	CLR(cc, NVME_CC_SHN_MASK);
	SET(cc, NVME_CC_SHN(NVME_CC_SHN_NORMAL));
	nvme_write4(sc, NVME_CC, cc);

	/* Poll once per millisecond for shutdown-complete status. */
	for (i = 0; i < 4000; i++) {
		nvme_barrier(sc, 0, sc->sc_ios,
		    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
		csts = nvme_read4(sc, NVME_CSTS);
		if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_DONE)
			return (0);

		delay(1000);
	}

	printf("%s: unable to shutdown, disabling\n", DEVNAME(sc));

disable:
	nvme_disable(sc);
	return (0);
}
5565313ab17Sdlg 
5575313ab17Sdlg int
nvme_activate(struct nvme_softc * sc,int act)5585313ab17Sdlg nvme_activate(struct nvme_softc *sc, int act)
5595313ab17Sdlg {
5605313ab17Sdlg 	int rv;
5615313ab17Sdlg 
5625313ab17Sdlg 	switch (act) {
5635313ab17Sdlg 	case DVACT_POWERDOWN:
5645313ab17Sdlg 		rv = config_activate_children(&sc->sc_dev, act);
5655313ab17Sdlg 		nvme_shutdown(sc);
5665313ab17Sdlg 		break;
567aa5ddcf9Ssf 	case DVACT_RESUME:
568aa5ddcf9Ssf 		rv = nvme_resume(sc);
569aa5ddcf9Ssf 		if (rv == 0)
570aa5ddcf9Ssf 			rv = config_activate_children(&sc->sc_dev, act);
571aa5ddcf9Ssf 		break;
5725313ab17Sdlg 	default:
5735313ab17Sdlg 		rv = config_activate_children(&sc->sc_dev, act);
5745313ab17Sdlg 		break;
5755313ab17Sdlg 	}
5765313ab17Sdlg 
5775313ab17Sdlg 	return (rv);
5785313ab17Sdlg }
5795313ab17Sdlg 
58011624e4cSdlg void
nvme_scsi_cmd(struct scsi_xfer * xs)58111624e4cSdlg nvme_scsi_cmd(struct scsi_xfer *xs)
58211624e4cSdlg {
583664c6166Skrw 	switch (xs->cmd.opcode) {
584a1e87500Sdlg 	case READ_COMMAND:
585eccd596dSkrw 	case READ_10:
586a1e87500Sdlg 	case READ_12:
587a1e87500Sdlg 	case READ_16:
5881eef25a1Sdlg 		nvme_scsi_io(xs, SCSI_DATA_IN);
589a1e87500Sdlg 		return;
590a1e87500Sdlg 	case WRITE_COMMAND:
591eccd596dSkrw 	case WRITE_10:
592a1e87500Sdlg 	case WRITE_12:
593a1e87500Sdlg 	case WRITE_16:
5941eef25a1Sdlg 		nvme_scsi_io(xs, SCSI_DATA_OUT);
595d3f19a0bSdlg 		return;
596a1e87500Sdlg 
597689c2c68Sdlg 	case SYNCHRONIZE_CACHE:
598689c2c68Sdlg 		nvme_scsi_sync(xs);
599689c2c68Sdlg 		return;
600689c2c68Sdlg 
6019856392eSdlg 	case INQUIRY:
6029856392eSdlg 		nvme_scsi_inq(xs);
6039856392eSdlg 		return;
604c947e044Sdlg 	case READ_CAPACITY_16:
605c947e044Sdlg 		nvme_scsi_capacity16(xs);
606c947e044Sdlg 		return;
607c947e044Sdlg 	case READ_CAPACITY:
608c947e044Sdlg 		nvme_scsi_capacity(xs);
609c947e044Sdlg 		return;
610c947e044Sdlg 
611a50b57eaSdlg 	case TEST_UNIT_READY:
612a50b57eaSdlg 	case PREVENT_ALLOW:
613a50b57eaSdlg 	case START_STOP:
614a50b57eaSdlg 		xs->error = XS_NOERROR;
615a50b57eaSdlg 		scsi_done(xs);
616a50b57eaSdlg 		return;
617a50b57eaSdlg 
6189856392eSdlg 	default:
6199856392eSdlg 		break;
6209856392eSdlg 	}
6219856392eSdlg 
62211624e4cSdlg 	xs->error = XS_DRIVER_STUFFUP;
62311624e4cSdlg 	scsi_done(xs);
62411624e4cSdlg }
62511624e4cSdlg 
62611624e4cSdlg void
nvme_minphys(struct buf * bp,struct scsi_link * link)627767e8532Skrw nvme_minphys(struct buf *bp, struct scsi_link *link)
628767e8532Skrw {
6290b29cb40Skrw 	struct nvme_softc *sc = link->bus->sb_adapter_softc;
630767e8532Skrw 
631767e8532Skrw 	if (bp->b_bcount > sc->sc_mdts)
632767e8532Skrw 		bp->b_bcount = sc->sc_mdts;
633767e8532Skrw }
634767e8532Skrw 
/*
 * Common READ/WRITE path.  Maps the transfer for DMA, builds and syncs
 * the PRP list when more than two segments are needed, and submits (or
 * polls, for SCSI_POLL) the command on the I/O queue.  `dir' is the
 * expected direction flag and must match the xfer's flags.
 */
void
nvme_scsi_io(struct scsi_xfer *xs, int dir)
{
	struct scsi_link *link = xs->sc_link;
	struct nvme_softc *sc = link->bus->sb_adapter_softc;
	struct nvme_ccb *ccb = xs->io;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	int i;

	/* Reject a read opcode on a write path and vice versa. */
	if ((xs->flags & (SCSI_DATA_IN|SCSI_DATA_OUT)) != dir)
		goto stuffup;

	ccb->ccb_done = nvme_scsi_io_done;
	ccb->ccb_cookie = xs;

	if (bus_dmamap_load(sc->sc_dmat, dmap,
	    xs->data, xs->datalen, NULL, ISSET(xs->flags, SCSI_NOSLEEP) ?
	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK) != 0)
		goto stuffup;

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    ISSET(xs->flags, SCSI_DATA_IN) ?
	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	/*
	 * Two segments fit directly in the sqe's PRP entries; more than
	 * that needs the per-ccb PRP list, filled with segments 1..n-1
	 * (segment 0 always goes in prp[0], see nvme_scsi_io_fill()).
	 */
	if (dmap->dm_nsegs > 2) {
		for (i = 1; i < dmap->dm_nsegs; i++) {
			htolem64(&ccb->ccb_prpl[i - 1],
			    dmap->dm_segs[i].ds_addr);
		}
		bus_dmamap_sync(sc->sc_dmat,
		    NVME_DMA_MAP(sc->sc_ccb_prpls),
		    ccb->ccb_prpl_off,
		    sizeof(*ccb->ccb_prpl) * (dmap->dm_nsegs - 1),
		    BUS_DMASYNC_PREWRITE);
	}

	if (ISSET(xs->flags, SCSI_POLL)) {
		nvme_poll(sc, sc->sc_q, ccb, nvme_scsi_io_fill, xs->timeout);
		return;
	}

	nvme_q_submit(sc, sc->sc_q, ccb, nvme_scsi_io_fill);
	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
	scsi_done(xs);
}
683a1e87500Sdlg 
684a1e87500Sdlg void
nvme_scsi_io_fill(struct nvme_softc * sc,struct nvme_ccb * ccb,void * slot)6851eef25a1Sdlg nvme_scsi_io_fill(struct nvme_softc *sc, struct nvme_ccb *ccb, void *slot)
686a1e87500Sdlg {
687a1e87500Sdlg 	struct nvme_sqe_io *sqe = slot;
688a1e87500Sdlg 	struct scsi_xfer *xs = ccb->ccb_cookie;
689a1e87500Sdlg 	struct scsi_link *link = xs->sc_link;
690a1e87500Sdlg 	bus_dmamap_t dmap = ccb->ccb_dmamap;
691a1e87500Sdlg 	u_int64_t lba;
692a1e87500Sdlg 	u_int32_t blocks;
693a1e87500Sdlg 
694664c6166Skrw 	scsi_cmd_rw_decode(&xs->cmd, &lba, &blocks);
695a1e87500Sdlg 
6961eef25a1Sdlg 	sqe->opcode = ISSET(xs->flags, SCSI_DATA_IN) ?
6971eef25a1Sdlg 	    NVM_CMD_READ : NVM_CMD_WRITE;
698397f5692Skettenis 	htolem32(&sqe->nsid, link->target);
699a1e87500Sdlg 
700a1e87500Sdlg 	htolem64(&sqe->entry.prp[0], dmap->dm_segs[0].ds_addr);
701a1e87500Sdlg 	switch (dmap->dm_nsegs) {
702a1e87500Sdlg 	case 1:
703a1e87500Sdlg 		break;
704a1e87500Sdlg 	case 2:
705a1e87500Sdlg 		htolem64(&sqe->entry.prp[1], dmap->dm_segs[1].ds_addr);
706a1e87500Sdlg 		break;
707a1e87500Sdlg 	default:
708ecfae151Sdlg 		/* the prp list is already set up and synced */
709ecfae151Sdlg 		htolem64(&sqe->entry.prp[1], ccb->ccb_prpl_dva);
710ecfae151Sdlg 		break;
711a1e87500Sdlg 	}
712a1e87500Sdlg 
713a1e87500Sdlg 	htolem64(&sqe->slba, lba);
714a1e87500Sdlg 	htolem16(&sqe->nlb, blocks - 1);
715a1e87500Sdlg }
716a1e87500Sdlg 
/*
 * Completion handler for READ/WRITE commands: tear down the DMA
 * mappings set up by nvme_scsi_io() and translate the NVMe status code
 * into a scsi result.
 */
void
nvme_scsi_io_done(struct nvme_softc *sc, struct nvme_ccb *ccb,
    struct nvme_cqe *cqe)
{
	struct scsi_xfer *xs = ccb->ccb_cookie;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	u_int16_t flags;

	/* Mirror the PREWRITE sync of the PRP list done at submit time. */
	if (dmap->dm_nsegs > 2) {
		bus_dmamap_sync(sc->sc_dmat,
		    NVME_DMA_MAP(sc->sc_ccb_prpls),
		    ccb->ccb_prpl_off,
		    sizeof(*ccb->ccb_prpl) * (dmap->dm_nsegs - 1),
		    BUS_DMASYNC_POSTWRITE);
	}

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    ISSET(xs->flags, SCSI_DATA_IN) ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

	bus_dmamap_unload(sc->sc_dmat, dmap);

	flags = lemtoh16(&cqe->flags);

	/* Any non-success status code is reported as a driver error. */
	xs->error = (NVME_CQE_SC(flags) == NVME_CQE_SC_SUCCESS) ?
	    XS_NOERROR : XS_DRIVER_STUFFUP;
	xs->status = SCSI_OK;
	xs->resid = 0;
	scsi_done(xs);
}
747a1e87500Sdlg 
748a1e87500Sdlg void
nvme_scsi_sync(struct scsi_xfer * xs)749689c2c68Sdlg nvme_scsi_sync(struct scsi_xfer *xs)
750689c2c68Sdlg {
751689c2c68Sdlg 	struct scsi_link *link = xs->sc_link;
7520b29cb40Skrw 	struct nvme_softc *sc = link->bus->sb_adapter_softc;
753689c2c68Sdlg 	struct nvme_ccb *ccb = xs->io;
754689c2c68Sdlg 
755689c2c68Sdlg 	ccb->ccb_done = nvme_scsi_sync_done;
756689c2c68Sdlg 	ccb->ccb_cookie = xs;
757689c2c68Sdlg 
758689c2c68Sdlg 	if (ISSET(xs->flags, SCSI_POLL)) {
75954904088Skrw 		nvme_poll(sc, sc->sc_q, ccb, nvme_scsi_sync_fill, xs->timeout);
760689c2c68Sdlg 		return;
761689c2c68Sdlg 	}
762689c2c68Sdlg 
763689c2c68Sdlg 	nvme_q_submit(sc, sc->sc_q, ccb, nvme_scsi_sync_fill);
764689c2c68Sdlg }
765689c2c68Sdlg 
766689c2c68Sdlg void
nvme_scsi_sync_fill(struct nvme_softc * sc,struct nvme_ccb * ccb,void * slot)767689c2c68Sdlg nvme_scsi_sync_fill(struct nvme_softc *sc, struct nvme_ccb *ccb, void *slot)
768689c2c68Sdlg {
769689c2c68Sdlg 	struct nvme_sqe *sqe = slot;
770689c2c68Sdlg 	struct scsi_xfer *xs = ccb->ccb_cookie;
771689c2c68Sdlg 	struct scsi_link *link = xs->sc_link;
772689c2c68Sdlg 
773689c2c68Sdlg 	sqe->opcode = NVM_CMD_FLUSH;
774397f5692Skettenis 	htolem32(&sqe->nsid, link->target);
775689c2c68Sdlg }
776689c2c68Sdlg 
777689c2c68Sdlg void
nvme_scsi_sync_done(struct nvme_softc * sc,struct nvme_ccb * ccb,struct nvme_cqe * cqe)778689c2c68Sdlg nvme_scsi_sync_done(struct nvme_softc *sc, struct nvme_ccb *ccb,
779689c2c68Sdlg     struct nvme_cqe *cqe)
780689c2c68Sdlg {
781689c2c68Sdlg 	struct scsi_xfer *xs = ccb->ccb_cookie;
782689c2c68Sdlg 	u_int16_t flags;
783689c2c68Sdlg 
784689c2c68Sdlg 	flags = lemtoh16(&cqe->flags);
785689c2c68Sdlg 
786689c2c68Sdlg 	xs->error = (NVME_CQE_SC(flags) == NVME_CQE_SC_SUCCESS) ?
787689c2c68Sdlg 	    XS_NOERROR : XS_DRIVER_STUFFUP;
788689c2c68Sdlg 	xs->status = SCSI_OK;
789689c2c68Sdlg 	xs->resid = 0;
790689c2c68Sdlg 	scsi_done(xs);
791689c2c68Sdlg }
792689c2c68Sdlg 
793689c2c68Sdlg void
nvme_scsi_inq(struct scsi_xfer * xs)7949856392eSdlg nvme_scsi_inq(struct scsi_xfer *xs)
7959856392eSdlg {
796664c6166Skrw 	struct scsi_inquiry *inq = (struct scsi_inquiry *)&xs->cmd;
7979856392eSdlg 
7989856392eSdlg 	if (!ISSET(inq->flags, SI_EVPD)) {
7999856392eSdlg 		nvme_scsi_inquiry(xs);
8009856392eSdlg 		return;
8019856392eSdlg 	}
8029856392eSdlg 
8039856392eSdlg 	switch (inq->pagecode) {
8049856392eSdlg 	default:
8059856392eSdlg 		/* printf("%s: %d\n", __func__, inq->pagecode); */
8069856392eSdlg 		break;
8079856392eSdlg 	}
8089856392eSdlg 
8099856392eSdlg 	xs->error = XS_DRIVER_STUFFUP;
8109856392eSdlg 	scsi_done(xs);
8119856392eSdlg }
8129856392eSdlg 
8139856392eSdlg void
nvme_scsi_inquiry(struct scsi_xfer * xs)8149856392eSdlg nvme_scsi_inquiry(struct scsi_xfer *xs)
8159856392eSdlg {
8169856392eSdlg 	struct scsi_inquiry_data inq;
8179856392eSdlg 	struct scsi_link *link = xs->sc_link;
8180b29cb40Skrw 	struct nvme_softc *sc = link->bus->sb_adapter_softc;
8199856392eSdlg 	struct nvm_identify_namespace *ns;
8209856392eSdlg 
8219856392eSdlg 	ns = sc->sc_namespaces[link->target].ident;
8229856392eSdlg 
8239856392eSdlg 	memset(&inq, 0, sizeof(inq));
8249856392eSdlg 
8259856392eSdlg 	inq.device = T_DIRECT;
826b291b595Skrw 	inq.version = SCSI_REV_SPC4;
827bb4b71ebSkrw 	inq.response_format = SID_SCSI2_RESPONSE;
8283ca8dabfSkrw 	inq.additional_length = SID_SCSI2_ALEN;
8299856392eSdlg 	inq.flags |= SID_CmdQue;
8309856392eSdlg 	memcpy(inq.vendor, "NVMe    ", sizeof(inq.vendor));
8319856392eSdlg 	memcpy(inq.product, sc->sc_identify.mn, sizeof(inq.product));
8329856392eSdlg 	memcpy(inq.revision, sc->sc_identify.fr, sizeof(inq.revision));
8339856392eSdlg 
834a83ec286Skrw 	scsi_copy_internal_data(xs, &inq, sizeof(inq));
8359856392eSdlg 
8369856392eSdlg 	xs->error = XS_NOERROR;
8379856392eSdlg 	scsi_done(xs);
8389856392eSdlg }
8399856392eSdlg 
8409856392eSdlg void
nvme_scsi_capacity16(struct scsi_xfer * xs)841c947e044Sdlg nvme_scsi_capacity16(struct scsi_xfer *xs)
842c947e044Sdlg {
843c947e044Sdlg 	struct scsi_read_cap_data_16 rcd;
844c947e044Sdlg 	struct scsi_link *link = xs->sc_link;
8450b29cb40Skrw 	struct nvme_softc *sc = link->bus->sb_adapter_softc;
846c947e044Sdlg 	struct nvm_identify_namespace *ns;
847c947e044Sdlg 	struct nvm_namespace_format *f;
848daef0c50Skrw 	u_int64_t addr;
849c947e044Sdlg 	u_int16_t tpe = READ_CAP_16_TPE;
850c947e044Sdlg 
851c947e044Sdlg 	ns = sc->sc_namespaces[link->target].ident;
852c947e044Sdlg 
853c947e044Sdlg 	if (xs->cmdlen != sizeof(struct scsi_read_capacity_16)) {
854c947e044Sdlg 		xs->error = XS_DRIVER_STUFFUP;
855c947e044Sdlg 		scsi_done(xs);
856c947e044Sdlg 		return;
857c947e044Sdlg 	}
858c947e044Sdlg 
859daef0c50Skrw 	addr = nvme_scsi_size(ns) - 1;
860c947e044Sdlg 	f = &ns->lbaf[NVME_ID_NS_FLBAS(ns->flbas)];
861c947e044Sdlg 
862c947e044Sdlg 	memset(&rcd, 0, sizeof(rcd));
863daef0c50Skrw 	_lto8b(addr, rcd.addr);
864c947e044Sdlg 	_lto4b(1 << f->lbads, rcd.length);
865c947e044Sdlg 	_lto2b(tpe, rcd.lowest_aligned);
866c947e044Sdlg 
867c947e044Sdlg 	memcpy(xs->data, &rcd, MIN(sizeof(rcd), xs->datalen));
868c947e044Sdlg 
869c947e044Sdlg 	xs->error = XS_NOERROR;
870c947e044Sdlg 	scsi_done(xs);
871c947e044Sdlg }
872c947e044Sdlg 
873c947e044Sdlg void
nvme_scsi_capacity(struct scsi_xfer * xs)874c947e044Sdlg nvme_scsi_capacity(struct scsi_xfer *xs)
875c947e044Sdlg {
876c947e044Sdlg 	struct scsi_read_cap_data rcd;
877c947e044Sdlg 	struct scsi_link *link = xs->sc_link;
8780b29cb40Skrw 	struct nvme_softc *sc = link->bus->sb_adapter_softc;
879c947e044Sdlg 	struct nvm_identify_namespace *ns;
880c947e044Sdlg 	struct nvm_namespace_format *f;
881daef0c50Skrw 	u_int64_t addr;
882c947e044Sdlg 
883c947e044Sdlg 	ns = sc->sc_namespaces[link->target].ident;
884c947e044Sdlg 
885c947e044Sdlg 	if (xs->cmdlen != sizeof(struct scsi_read_capacity)) {
886c947e044Sdlg 		xs->error = XS_DRIVER_STUFFUP;
887c947e044Sdlg 		scsi_done(xs);
888c947e044Sdlg 		return;
889c947e044Sdlg 	}
890c947e044Sdlg 
891daef0c50Skrw 	addr = nvme_scsi_size(ns) - 1;
892daef0c50Skrw 	if (addr > 0xffffffff)
893daef0c50Skrw 		addr = 0xffffffff;
894c947e044Sdlg 
895c947e044Sdlg 	f = &ns->lbaf[NVME_ID_NS_FLBAS(ns->flbas)];
896c947e044Sdlg 
897c947e044Sdlg 	memset(&rcd, 0, sizeof(rcd));
898daef0c50Skrw 	_lto4b(addr, rcd.addr);
899c947e044Sdlg 	_lto4b(1 << f->lbads, rcd.length);
900c947e044Sdlg 
901c947e044Sdlg 	memcpy(xs->data, &rcd, MIN(sizeof(rcd), xs->datalen));
902c947e044Sdlg 
903c947e044Sdlg 	xs->error = XS_NOERROR;
904c947e044Sdlg 	scsi_done(xs);
905c947e044Sdlg }
906c947e044Sdlg 
907c947e044Sdlg void
nvme_scsi_free(struct scsi_link * link)90811624e4cSdlg nvme_scsi_free(struct scsi_link *link)
90911624e4cSdlg {
9100b29cb40Skrw 	struct nvme_softc *sc = link->bus->sb_adapter_softc;
91165dd51f8Sdlg 	struct nvm_identify_namespace *identify;
91211624e4cSdlg 
91365dd51f8Sdlg 	identify = sc->sc_namespaces[link->target].ident;
91465dd51f8Sdlg 	sc->sc_namespaces[link->target].ident = NULL;
91565dd51f8Sdlg 
91665dd51f8Sdlg 	free(identify, M_DEVBUF, sizeof(*identify));
91711624e4cSdlg }
91811624e4cSdlg 
919daef0c50Skrw uint64_t
nvme_scsi_size(const struct nvm_identify_namespace * ns)9207f4636ceSkrw nvme_scsi_size(const struct nvm_identify_namespace *ns)
921daef0c50Skrw {
922daef0c50Skrw 	uint64_t		ncap, nsze;
923daef0c50Skrw 
924daef0c50Skrw 	ncap = lemtoh64(&ns->ncap); /* Max allowed allocation. */
925daef0c50Skrw 	nsze = lemtoh64(&ns->nsze);
926daef0c50Skrw 
927daef0c50Skrw 	if ((ns->nsfeat & NVME_ID_NS_NSFEAT_THIN_PROV) && ncap < nsze)
928daef0c50Skrw 		return ncap;
929daef0c50Skrw 	else
930daef0c50Skrw 		return nsze;
931daef0c50Skrw }
932daef0c50Skrw 
/*
 * Execute a NVME_PASSTHROUGH_CMD ioctl: build an admin submission
 * queue entry from the user-supplied nvme_pt_cmd, run it synchronously
 * on the admin queue and copy any returned data and status back out.
 * Returns 0 or an errno; the NVMe completion status itself is reported
 * through pt_status (ps_flags), not the return value.
 */
int
nvme_passthrough_cmd(struct nvme_softc *sc, struct nvme_pt_cmd *pt, int dv_unit,
    int nsid)
{
	struct nvme_pt_status		 pt_status;
	struct nvme_sqe			 sqe;
	struct nvme_dmamem		*mem = NULL;
	struct nvme_ccb			*ccb = NULL;
	int				 flags;
	int				 rv = 0;

	ccb = nvme_ccb_get(sc);
	if (ccb == NULL)
		panic("nvme_passthrough_cmd: nvme_ccb_get returned NULL");

	/* Copy the caller's command words straight into the SQE. */
	memset(&sqe, 0, sizeof(sqe));
	sqe.opcode = pt->pt_opcode;
	htolem32(&sqe.nsid, pt->pt_nsid);
	htolem32(&sqe.cdw10, pt->pt_cdw10);
	htolem32(&sqe.cdw11, pt->pt_cdw11);
	htolem32(&sqe.cdw12, pt->pt_cdw12);
	htolem32(&sqe.cdw13, pt->pt_cdw13);
	htolem32(&sqe.cdw14, pt->pt_cdw14);
	htolem32(&sqe.cdw15, pt->pt_cdw15);

	ccb->ccb_done = nvme_empty_done;
	ccb->ccb_cookie = &sqe;

	/* Only this allowlist of admin opcodes may pass through. */
	switch (pt->pt_opcode) {
	case NVM_ADMIN_IDENTIFY:
	case NVM_ADMIN_GET_LOG_PG:
	case NVM_ADMIN_SELFTEST:
		break;

	default:
		rv = ENOTTY;
		goto done;
	}

	if (pt->pt_databuflen > 0) {
		/* Kernel bounce buffer for the data phase. */
		mem = nvme_dmamem_alloc(sc, pt->pt_databuflen);
		if (mem == NULL) {
			rv = ENOMEM;
			goto done;
		}
		htolem64(&sqe.entry.prp[0], NVME_DMA_DVA(mem));
		nvme_dmamem_sync(sc, mem, BUS_DMASYNC_PREREAD);
	}

	/* Polled execution; flags is the completion status, 0 = success. */
	flags = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill, NVME_TIMO_PT);

	if (pt->pt_databuflen > 0) {
		nvme_dmamem_sync(sc, mem, BUS_DMASYNC_POSTREAD);
		/* Only hand data back if the command succeeded. */
		if (flags == 0)
			rv = copyout(NVME_DMA_KVA(mem), pt->pt_databuf,
			    pt->pt_databuflen);
	}

	if (rv == 0 && pt->pt_statuslen > 0) {
		pt_status.ps_dv_unit = dv_unit;
		pt_status.ps_nsid = nsid;
		pt_status.ps_flags = flags;
		pt_status.ps_cc = nvme_read4(sc, NVME_CC);
		pt_status.ps_csts = nvme_read4(sc, NVME_CSTS);
		rv = copyout(&pt_status, pt->pt_status, pt->pt_statuslen);
	}

 done:
	if (mem)
		nvme_dmamem_free(sc, mem);
	if (ccb)
		nvme_ccb_put(sc, ccb);

	return rv;
}
10084e9514d6Skrw 
10094e9514d6Skrw int
nvme_scsi_ioctl(struct scsi_link * link,u_long cmd,caddr_t addr,int flag)10104e9514d6Skrw nvme_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag)
10114e9514d6Skrw {
10124e9514d6Skrw 	struct nvme_softc		*sc = link->bus->sb_adapter_softc;
10134e9514d6Skrw 	struct nvme_pt_cmd		*pt = (struct nvme_pt_cmd *)addr;
10144e9514d6Skrw 	int				 rv;
10154e9514d6Skrw 
10164e9514d6Skrw 	switch (cmd) {
10174e9514d6Skrw 	case NVME_PASSTHROUGH_CMD:
10184e9514d6Skrw 		break;
10194e9514d6Skrw 	default:
10204e9514d6Skrw 		return ENOTTY;
10214e9514d6Skrw 	}
10224e9514d6Skrw 
10234e9514d6Skrw 	if ((pt->pt_cdw10 & 0xff) == 0)
10244e9514d6Skrw 		pt->pt_nsid = link->target;
10254e9514d6Skrw 
10264e9514d6Skrw 	rv = nvme_passthrough_cmd(sc, pt, sc->sc_dev.dv_unit, link->target);
10274e9514d6Skrw 	if (rv)
10284e9514d6Skrw 		goto done;
10294e9514d6Skrw 
10304e9514d6Skrw  done:
10314e9514d6Skrw 	return rv;
10324e9514d6Skrw }
10334e9514d6Skrw 
10347599295eSdlg uint32_t
nvme_op_sq_enter(struct nvme_softc * sc,struct nvme_queue * q,struct nvme_ccb * ccb)10357599295eSdlg nvme_op_sq_enter(struct nvme_softc *sc,
10367599295eSdlg     struct nvme_queue *q, struct nvme_ccb *ccb)
10377599295eSdlg {
10387599295eSdlg 	mtx_enter(&q->q_sq_mtx);
10397599295eSdlg 	return (nvme_op_sq_enter_locked(sc, q, ccb));
10407599295eSdlg }
10417599295eSdlg 
/* Return the next free SQ slot index; caller holds q_sq_mtx. */
uint32_t
nvme_op_sq_enter_locked(struct nvme_softc *sc,
    struct nvme_queue *q, struct nvme_ccb *ccb)
{
	return (q->q_sq_tail);
}
10487599295eSdlg 
10497599295eSdlg void
nvme_op_sq_leave_locked(struct nvme_softc * sc,struct nvme_queue * q,struct nvme_ccb * ccb)10507599295eSdlg nvme_op_sq_leave_locked(struct nvme_softc *sc,
10517599295eSdlg     struct nvme_queue *q, struct nvme_ccb *ccb)
10527599295eSdlg {
10537599295eSdlg 	uint32_t tail;
10547599295eSdlg 
10557599295eSdlg 	tail = ++q->q_sq_tail;
10567599295eSdlg 	if (tail >= q->q_entries)
10577599295eSdlg 		tail = 0;
10587599295eSdlg 	q->q_sq_tail = tail;
10597599295eSdlg 	nvme_write4(sc, q->q_sqtdbl, tail);
10607599295eSdlg }
10617599295eSdlg 
/* Default op: publish the new tail, then drop the SQ lock. */
void
nvme_op_sq_leave(struct nvme_softc *sc,
    struct nvme_queue *q, struct nvme_ccb *ccb)
{
	nvme_op_sq_leave_locked(sc, q, ccb);
	mtx_leave(&q->q_sq_mtx);
}
10697599295eSdlg 
/*
 * Submit one command: claim the next SQ slot via the op hooks, let
 * the caller's fill routine build the command in it, tag it with the
 * ccb id (used to find the ccb at completion time) and ring the
 * doorbell.  The POSTWRITE/PREWRITE pair brackets the CPU's write of
 * the slot so the device sees a coherent entry.
 */
void
nvme_q_submit(struct nvme_softc *sc, struct nvme_queue *q, struct nvme_ccb *ccb,
    void (*fill)(struct nvme_softc *, struct nvme_ccb *, void *))
{
	struct nvme_sqe *sqe = NVME_DMA_KVA(q->q_sq_dmamem);
	u_int32_t tail;

	tail = sc->sc_ops->op_sq_enter(sc, q, ccb);

	sqe += tail;

	bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_sq_dmamem),
	    sizeof(*sqe) * tail, sizeof(*sqe), BUS_DMASYNC_POSTWRITE);
	memset(sqe, 0, sizeof(*sqe));
	(*fill)(sc, ccb, sqe);
	sqe->cid = ccb->ccb_id;
	bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_sq_dmamem),
	    sizeof(*sqe) * tail, sizeof(*sqe), BUS_DMASYNC_PREWRITE);

	sc->sc_ops->op_sq_leave(sc, q, ccb);
}
1091448b3c09Sdlg 
/*
 * Scratch state for polled commands: the prebuilt submission entry
 * to send (s) and the completion entry copied back by
 * nvme_poll_done() (c).
 */
struct nvme_poll_state {
	struct nvme_sqe s;
	struct nvme_cqe c;
};
1096448b3c09Sdlg 
/*
 * Submit a command and busy-wait for its completion, reaping the
 * completion queue by hand.  ms is the timeout in milliseconds; 0
 * means wait forever.  The ccb's done/cookie are temporarily swapped
 * so nvme_poll_done() copies the cqe into our local state; the phase
 * bit it sets there is what the loop watches for.  Returns the cqe
 * status flags with the phase bit masked off, so 0 means success.
 */
int
nvme_poll(struct nvme_softc *sc, struct nvme_queue *q, struct nvme_ccb *ccb,
    void (*fill)(struct nvme_softc *, struct nvme_ccb *, void *), u_int32_t ms)
{
	struct nvme_poll_state state;
	void (*done)(struct nvme_softc *, struct nvme_ccb *, struct nvme_cqe *);
	void *cookie;
	int64_t us;
	u_int16_t flags;

	memset(&state, 0, sizeof(state));
	(*fill)(sc, ccb, &state.s);

	/* Save the real completion handler; restored below. */
	done = ccb->ccb_done;
	cookie = ccb->ccb_cookie;

	ccb->ccb_done = nvme_poll_done;
	ccb->ccb_cookie = &state;

	nvme_q_submit(sc, q, ccb, nvme_poll_fill);
	for (us = ms * 1000; ms == 0 || us > 0; us -= NVME_TIMO_DELAYNS) {
		/* nvme_poll_done() sets the phase bit once it fires. */
		if (ISSET(state.c.flags, htole16(NVME_CQE_PHASE)))
			break;
		if (nvme_q_complete(sc, q) == 0)
			delay(NVME_TIMO_DELAYNS);
		nvme_barrier(sc, NVME_CSTS, 4, BUS_SPACE_BARRIER_READ);
	}

	/* Run the saved handler with the (possibly zeroed) cqe copy. */
	ccb->ccb_cookie = cookie;
	done(sc, ccb, &state.c);

	flags = lemtoh16(&state.c.flags);

	return (flags & ~NVME_CQE_PHASE);
}
1132448b3c09Sdlg 
1133448b3c09Sdlg void
nvme_poll_fill(struct nvme_softc * sc,struct nvme_ccb * ccb,void * slot)1134448b3c09Sdlg nvme_poll_fill(struct nvme_softc *sc, struct nvme_ccb *ccb, void *slot)
1135448b3c09Sdlg {
1136448b3c09Sdlg 	struct nvme_sqe *sqe = slot;
1137448b3c09Sdlg 	struct nvme_poll_state *state = ccb->ccb_cookie;
1138448b3c09Sdlg 
1139448b3c09Sdlg 	*sqe = state->s;
1140448b3c09Sdlg }
1141448b3c09Sdlg 
/*
 * Completion side of nvme_poll(): copy the cqe into the poll state
 * and force the phase bit on in the copy, so the polling loop's
 * ISSET() check sees the command has completed.
 */
void
nvme_poll_done(struct nvme_softc *sc, struct nvme_ccb *ccb,
    struct nvme_cqe *cqe)
{
	struct nvme_poll_state *state = ccb->ccb_cookie;

	state->c = *cqe;
	SET(state->c.flags, htole16(NVME_CQE_PHASE));
}
1151448b3c09Sdlg 
11529f81ea3bSdlg void
nvme_sqe_fill(struct nvme_softc * sc,struct nvme_ccb * ccb,void * slot)1153ad9e5681Sdlg nvme_sqe_fill(struct nvme_softc *sc, struct nvme_ccb *ccb, void *slot)
1154ad9e5681Sdlg {
1155ad9e5681Sdlg 	struct nvme_sqe *src = ccb->ccb_cookie;
1156ad9e5681Sdlg 	struct nvme_sqe *dst = slot;
1157ad9e5681Sdlg 
1158ad9e5681Sdlg 	*dst = *src;
1159ad9e5681Sdlg }
1160ad9e5681Sdlg 
/* No-op completion handler for commands with nothing to finish. */
void
nvme_empty_done(struct nvme_softc *sc, struct nvme_ccb *ccb,
    struct nvme_cqe *cqe)
{
}
11669f81ea3bSdlg 
/* Default per-completion op hook: nothing to do. */
void
nvme_op_cq_done(struct nvme_softc *sc,
    struct nvme_queue *q, struct nvme_ccb *ccb)
{
	/* nop */
}
11737599295eSdlg 
/*
 * Reap completed commands from a completion queue.  Returns 1 if any
 * entries were processed, 0 if none were pending, and -1 if the queue
 * was already being reaped (the cq mutex could not be taken).  An
 * entry is new when its phase bit matches q_cq_phase, which flips
 * each time we wrap around the ring.
 */
int
nvme_q_complete(struct nvme_softc *sc, struct nvme_queue *q)
{
	struct nvme_ccb *ccb;
	struct nvme_cqe *ring = NVME_DMA_KVA(q->q_cq_dmamem), *cqe;
	u_int32_t head;
	u_int16_t flags;
	int rv = 0;

	if (!mtx_enter_try(&q->q_cq_mtx))
		return (-1);

	head = q->q_cq_head;

	nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_POSTREAD);
	for (;;) {
		cqe = &ring[head];
		flags = lemtoh16(&cqe->flags);
		if ((flags & NVME_CQE_PHASE) != q->q_cq_phase)
			break;

		/* Read the rest of the cqe only after the phase check. */
		membar_consumer();

		/* cid is the ccb id stamped on the sqe at submit time. */
		ccb = &sc->sc_ccbs[cqe->cid];
		sc->sc_ops->op_cq_done(sc, q, ccb);
		ccb->ccb_done(sc, ccb, cqe);

		if (++head >= q->q_entries) {
			head = 0;
			q->q_cq_phase ^= NVME_CQE_PHASE;
		}

		rv = 1;
	}
	nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_PREREAD);

	/* Only ring the doorbell if we actually consumed entries. */
	if (rv)
		nvme_write4(sc, q->q_cqhdbl, q->q_cq_head = head);
	mtx_leave(&q->q_cq_mtx);

	return (rv);
}
1216448b3c09Sdlg 
1217448b3c09Sdlg int
nvme_identify(struct nvme_softc * sc,u_int mpsmin)121898b2ae82Skrw nvme_identify(struct nvme_softc *sc, u_int mpsmin)
1219448b3c09Sdlg {
12209f81ea3bSdlg 	char sn[41], mn[81], fr[17];
12219f81ea3bSdlg 	struct nvm_identify_controller *identify;
1222448b3c09Sdlg 	struct nvme_dmamem *mem;
12239f81ea3bSdlg 	struct nvme_ccb *ccb;
1224448b3c09Sdlg 	int rv = 1;
1225448b3c09Sdlg 
1226448b3c09Sdlg 	ccb = nvme_ccb_get(sc);
1227448b3c09Sdlg 	if (ccb == NULL)
1228448b3c09Sdlg 		panic("nvme_identify: nvme_ccb_get returned NULL");
1229448b3c09Sdlg 
12309f81ea3bSdlg 	mem = nvme_dmamem_alloc(sc, sizeof(*identify));
1231448b3c09Sdlg 	if (mem == NULL)
1232448b3c09Sdlg 		return (1);
1233448b3c09Sdlg 
12349f81ea3bSdlg 	ccb->ccb_done = nvme_empty_done;
1235448b3c09Sdlg 	ccb->ccb_cookie = mem;
1236448b3c09Sdlg 
12370ee935ccSdlg 	nvme_dmamem_sync(sc, mem, BUS_DMASYNC_PREREAD);
123854904088Skrw 	rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_fill_identify,
123954904088Skrw 	    NVME_TIMO_IDENT);
12400ee935ccSdlg 	nvme_dmamem_sync(sc, mem, BUS_DMASYNC_POSTREAD);
1241448b3c09Sdlg 
1242b62ffe02Sdlg 	nvme_ccb_put(sc, ccb);
1243b62ffe02Sdlg 
12449f81ea3bSdlg 	if (rv != 0)
12459f81ea3bSdlg 		goto done;
12469f81ea3bSdlg 
124717756a2bSdlg 	identify = NVME_DMA_KVA(mem);
124817756a2bSdlg 
12499f81ea3bSdlg 	scsi_strvis(sn, identify->sn, sizeof(identify->sn));
12509f81ea3bSdlg 	scsi_strvis(mn, identify->mn, sizeof(identify->mn));
12519f81ea3bSdlg 	scsi_strvis(fr, identify->fr, sizeof(identify->fr));
12529f81ea3bSdlg 
12539f81ea3bSdlg 	printf("%s: %s, firmware %s, serial %s\n", DEVNAME(sc), mn, fr, sn);
12549f81ea3bSdlg 
125588aa9192Sdlg 	if (identify->mdts > 0) {
125698b2ae82Skrw 		sc->sc_mdts = (1 << identify->mdts) * (1 << mpsmin);
1257767e8532Skrw 		if (sc->sc_mdts > NVME_MAXPHYS)
1258767e8532Skrw 			sc->sc_mdts = NVME_MAXPHYS;
1259a8751b7cSkrw 		sc->sc_max_prpl = sc->sc_mdts / sc->sc_mps;
126088aa9192Sdlg 	}
126188aa9192Sdlg 
126217756a2bSdlg 	sc->sc_nn = lemtoh32(&identify->nn);
126317756a2bSdlg 
1264e276f6b1Sjcs 	/*
1265e276f6b1Sjcs 	 * At least one Apple NVMe device presents a second, bogus disk that is
1266e276f6b1Sjcs 	 * inaccessible, so cap targets at 1.
1267e276f6b1Sjcs 	 *
1268397f5692Skettenis 	 * sd1 at scsibus1 targ 2 lun 0: <NVMe, APPLE SSD AP0512, 16.1> [..]
1269e276f6b1Sjcs 	 * sd1: 0MB, 4096 bytes/sector, 2 sectors
1270e276f6b1Sjcs 	 */
1271e276f6b1Sjcs 	if (sc->sc_nn > 1 &&
1272e276f6b1Sjcs 	    mn[0] == 'A' && mn[1] == 'P' && mn[2] == 'P' && mn[3] == 'L' &&
1273e276f6b1Sjcs 	    mn[4] == 'E')
1274e276f6b1Sjcs 		sc->sc_nn = 1;
1275e276f6b1Sjcs 
127617756a2bSdlg 	memcpy(&sc->sc_identify, identify, sizeof(sc->sc_identify));
127717756a2bSdlg 
12789f81ea3bSdlg done:
1279448b3c09Sdlg 	nvme_dmamem_free(sc, mem);
1280448b3c09Sdlg 
1281448b3c09Sdlg 	return (rv);
1282448b3c09Sdlg }
1283448b3c09Sdlg 
/*
 * Create an I/O queue pair on the controller via admin commands.  The
 * completion queue is created first, since the submission queue's
 * command refers to it by cqid.  Returns 0 on success.  Note the
 * success path deliberately falls through the fail: label, which only
 * releases the ccb.
 */
int
nvme_q_create(struct nvme_softc *sc, struct nvme_queue *q)
{
	struct nvme_sqe_q sqe;
	struct nvme_ccb *ccb;
	int rv;

	ccb = scsi_io_get(&sc->sc_iopool, 0);
	KASSERT(ccb != NULL);

	ccb->ccb_done = nvme_empty_done;
	ccb->ccb_cookie = &sqe;

	/* Create the completion queue (interrupts on, contiguous). */
	memset(&sqe, 0, sizeof(sqe));
	sqe.opcode = NVM_ADMIN_ADD_IOCQ;
	htolem64(&sqe.prp1, NVME_DMA_DVA(q->q_cq_dmamem));
	htolem16(&sqe.qsize, q->q_entries - 1);
	htolem16(&sqe.qid, q->q_id);
	sqe.qflags = NVM_SQE_CQ_IEN | NVM_SQE_Q_PC;

	rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill, NVME_TIMO_QOP);
	if (rv != 0)
		goto fail;

	ccb->ccb_done = nvme_empty_done;
	ccb->ccb_cookie = &sqe;

	/* Create the submission queue, bound to the cq above. */
	memset(&sqe, 0, sizeof(sqe));
	sqe.opcode = NVM_ADMIN_ADD_IOSQ;
	htolem64(&sqe.prp1, NVME_DMA_DVA(q->q_sq_dmamem));
	htolem16(&sqe.qsize, q->q_entries - 1);
	htolem16(&sqe.qid, q->q_id);
	htolem16(&sqe.cqid, q->q_id);
	sqe.qflags = NVM_SQE_Q_PC;

	rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill, NVME_TIMO_QOP);
	if (rv != 0)
		goto fail;

fail:
	scsi_io_put(&sc->sc_iopool, ccb);
	return (rv);
}
1327cd15a86fSdlg 
/*
 * Tear down an I/O queue pair on the controller: the submission queue
 * must be deleted before the completion queue it points at.  On
 * success the host-side ring memory is released via nvme_q_free().
 * Returns 0 on success.
 */
int
nvme_q_delete(struct nvme_softc *sc, struct nvme_queue *q)
{
	struct nvme_sqe_q sqe;
	struct nvme_ccb *ccb;
	int rv;

	ccb = scsi_io_get(&sc->sc_iopool, 0);
	KASSERT(ccb != NULL);

	ccb->ccb_done = nvme_empty_done;
	ccb->ccb_cookie = &sqe;

	memset(&sqe, 0, sizeof(sqe));
	sqe.opcode = NVM_ADMIN_DEL_IOSQ;
	htolem16(&sqe.qid, q->q_id);

	rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill, NVME_TIMO_QOP);
	if (rv != 0)
		goto fail;

	ccb->ccb_done = nvme_empty_done;
	ccb->ccb_cookie = &sqe;

	memset(&sqe, 0, sizeof(sqe));
	sqe.opcode = NVM_ADMIN_DEL_IOCQ;
	htolem16(&sqe.qid, q->q_id);

	rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill, NVME_TIMO_QOP);
	if (rv != 0)
		goto fail;

	nvme_q_free(sc, q);

fail:
	scsi_io_put(&sc->sc_iopool, ccb);
	return (rv);

}
13675313ab17Sdlg 
1368448b3c09Sdlg void
nvme_fill_identify(struct nvme_softc * sc,struct nvme_ccb * ccb,void * slot)1369448b3c09Sdlg nvme_fill_identify(struct nvme_softc *sc, struct nvme_ccb *ccb, void *slot)
1370448b3c09Sdlg {
1371448b3c09Sdlg 	struct nvme_sqe *sqe = slot;
1372448b3c09Sdlg 	struct nvme_dmamem *mem = ccb->ccb_cookie;
1373448b3c09Sdlg 
1374448b3c09Sdlg 	sqe->opcode = NVM_ADMIN_IDENTIFY;
1375448b3c09Sdlg 	htolem64(&sqe->entry.prp[0], NVME_DMA_DVA(mem));
1376448b3c09Sdlg 	htolem32(&sqe->cdw10, 1);
1377448b3c09Sdlg }
1378448b3c09Sdlg 
1379448b3c09Sdlg int
nvme_ccbs_alloc(struct nvme_softc * sc,u_int nccbs)1380448b3c09Sdlg nvme_ccbs_alloc(struct nvme_softc *sc, u_int nccbs)
1381448b3c09Sdlg {
1382448b3c09Sdlg 	struct nvme_ccb *ccb;
1383b1699ccdSdlg 	bus_addr_t off;
1384b1699ccdSdlg 	u_int64_t *prpl;
1385448b3c09Sdlg 	u_int i;
1386448b3c09Sdlg 
13879f6fb5c7Sderaadt 	sc->sc_ccbs = mallocarray(nccbs, sizeof(*ccb), M_DEVBUF,
1388448b3c09Sdlg 	    M_WAITOK | M_CANFAIL);
1389448b3c09Sdlg 	if (sc->sc_ccbs == NULL)
1390448b3c09Sdlg 		return (1);
1391448b3c09Sdlg 
1392b1699ccdSdlg 	sc->sc_ccb_prpls = nvme_dmamem_alloc(sc,
1393a8751b7cSkrw 	    sizeof(*prpl) * sc->sc_max_prpl * nccbs);
1394b1699ccdSdlg 
1395b1699ccdSdlg 	prpl = NVME_DMA_KVA(sc->sc_ccb_prpls);
1396b1699ccdSdlg 	off = 0;
1397b1699ccdSdlg 
1398448b3c09Sdlg 	for (i = 0; i < nccbs; i++) {
1399448b3c09Sdlg 		ccb = &sc->sc_ccbs[i];
1400448b3c09Sdlg 
1401b1699ccdSdlg 		if (bus_dmamap_create(sc->sc_dmat, sc->sc_mdts,
1402a8751b7cSkrw 		    sc->sc_max_prpl + 1, /* we get a free prp in the sqe */
14032a4e68fdSdlg 		    sc->sc_mps, sc->sc_mps,
14042a4e68fdSdlg 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
1405448b3c09Sdlg 		    &ccb->ccb_dmamap) != 0)
1406448b3c09Sdlg 			goto free_maps;
1407448b3c09Sdlg 
1408448b3c09Sdlg 		ccb->ccb_id = i;
1409b1699ccdSdlg 		ccb->ccb_prpl = prpl;
1410b1699ccdSdlg 		ccb->ccb_prpl_off = off;
1411b1699ccdSdlg 		ccb->ccb_prpl_dva = NVME_DMA_DVA(sc->sc_ccb_prpls) + off;
1412b1699ccdSdlg 
1413448b3c09Sdlg 		SIMPLEQ_INSERT_TAIL(&sc->sc_ccb_list, ccb, ccb_entry);
1414b1699ccdSdlg 
1415a8751b7cSkrw 		prpl += sc->sc_max_prpl;
1416a8751b7cSkrw 		off += sizeof(*prpl) * sc->sc_max_prpl;
1417448b3c09Sdlg 	}
1418448b3c09Sdlg 
1419448b3c09Sdlg 	return (0);
1420448b3c09Sdlg 
1421448b3c09Sdlg free_maps:
1422a1141c40Stedu 	nvme_ccbs_free(sc, nccbs);
1423448b3c09Sdlg 	return (1);
1424448b3c09Sdlg }
1425448b3c09Sdlg 
1426448b3c09Sdlg void *
nvme_ccb_get(void * cookie)1427448b3c09Sdlg nvme_ccb_get(void *cookie)
1428448b3c09Sdlg {
1429448b3c09Sdlg 	struct nvme_softc *sc = cookie;
1430448b3c09Sdlg 	struct nvme_ccb *ccb;
1431448b3c09Sdlg 
1432448b3c09Sdlg 	mtx_enter(&sc->sc_ccb_mtx);
1433448b3c09Sdlg 	ccb = SIMPLEQ_FIRST(&sc->sc_ccb_list);
1434448b3c09Sdlg 	if (ccb != NULL)
1435448b3c09Sdlg 		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_list, ccb_entry);
1436448b3c09Sdlg 	mtx_leave(&sc->sc_ccb_mtx);
1437448b3c09Sdlg 
1438448b3c09Sdlg 	return (ccb);
1439448b3c09Sdlg }
1440448b3c09Sdlg 
1441448b3c09Sdlg void
nvme_ccb_put(void * cookie,void * io)1442448b3c09Sdlg nvme_ccb_put(void *cookie, void *io)
1443448b3c09Sdlg {
1444448b3c09Sdlg 	struct nvme_softc *sc = cookie;
1445448b3c09Sdlg 	struct nvme_ccb *ccb = io;
1446448b3c09Sdlg 
1447448b3c09Sdlg 	mtx_enter(&sc->sc_ccb_mtx);
1448448b3c09Sdlg 	SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_list, ccb, ccb_entry);
1449448b3c09Sdlg 	mtx_leave(&sc->sc_ccb_mtx);
1450448b3c09Sdlg }
1451448b3c09Sdlg 
/*
 * Tear down what nvme_ccbs_alloc() built: destroy the DMA map of
 * every ccb on the free list, then release the shared PRP area and
 * the ccb array.  nccbs must match the count given to
 * nvme_ccbs_alloc().  NOTE(review): only ccbs currently on the free
 * list have their maps destroyed — callers appear to drain all
 * outstanding ccbs first; confirm.
 */
void
nvme_ccbs_free(struct nvme_softc *sc, unsigned int nccbs)
{
	struct nvme_ccb *ccb;

	while ((ccb = SIMPLEQ_FIRST(&sc->sc_ccb_list)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_list, ccb_entry);
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
	}

	nvme_dmamem_free(sc, sc->sc_ccb_prpls);
	free(sc->sc_ccbs, M_DEVBUF, nccbs * sizeof(*ccb));
}
1465448b3c09Sdlg 
/*
 * Allocate the host-side state for a queue pair: the queue structure,
 * DMA memory for the submission and completion rings, mutexes and
 * doorbell offsets (computed from the queue id and doorbell stride).
 * Returns NULL on failure with everything unwound.
 */
struct nvme_queue *
nvme_q_alloc(struct nvme_softc *sc, u_int16_t id, u_int entries, u_int dstrd)
{
	struct nvme_queue *q;

	q = malloc(sizeof(*q), M_DEVBUF, M_WAITOK | M_CANFAIL);
	if (q == NULL)
		return (NULL);

	q->q_sq_dmamem = nvme_dmamem_alloc(sc,
	    sizeof(struct nvme_sqe) * entries);
	if (q->q_sq_dmamem == NULL)
		goto free;

	q->q_cq_dmamem = nvme_dmamem_alloc(sc,
	    sizeof(struct nvme_cqe) * entries);
	if (q->q_cq_dmamem == NULL)
		goto free_sq;

	memset(NVME_DMA_KVA(q->q_sq_dmamem), 0, NVME_DMA_LEN(q->q_sq_dmamem));
	memset(NVME_DMA_KVA(q->q_cq_dmamem), 0, NVME_DMA_LEN(q->q_cq_dmamem));

	mtx_init(&q->q_sq_mtx, IPL_BIO);
	mtx_init(&q->q_cq_mtx, IPL_BIO);
	q->q_sqtdbl = NVME_SQTDBL(id, dstrd);
	q->q_cqhdbl = NVME_CQHDBL(id, dstrd);

	q->q_id = id;
	q->q_entries = entries;
	q->q_sq_tail = 0;
	q->q_cq_head = 0;
	/* A fresh ring starts expecting phase 1 entries. */
	q->q_cq_phase = NVME_CQE_PHASE;

	/* Give bus-specific code a chance to set up its own state. */
	if (sc->sc_ops->op_q_alloc != NULL) {
		if (sc->sc_ops->op_q_alloc(sc, q) != 0)
			goto free_cq;
	}

	nvme_dmamem_sync(sc, q->q_sq_dmamem, BUS_DMASYNC_PREWRITE);
	nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_PREREAD);

	return (q);

free_cq:
	nvme_dmamem_free(sc, q->q_cq_dmamem);
free_sq:
	nvme_dmamem_free(sc, q->q_sq_dmamem);
free:
	free(q, M_DEVBUF, sizeof *q);

	return (NULL);
}
1518282c0692Sdlg 
/*
 * nvme_q_reset: return an already-allocated queue pair to its initial
 * state (zeroed entries, indices at 0, phase bit set) so it can be
 * re-registered with the controller, e.g. after resume.  Always returns 0.
 */
1519aa5ddcf9Ssf int
nvme_q_reset(struct nvme_softc * sc,struct nvme_queue * q)1520aa5ddcf9Ssf nvme_q_reset(struct nvme_softc *sc, struct nvme_queue *q)
1521aa5ddcf9Ssf {
1522aa5ddcf9Ssf 	memset(NVME_DMA_KVA(q->q_sq_dmamem), 0, NVME_DMA_LEN(q->q_sq_dmamem));
1523aa5ddcf9Ssf 	memset(NVME_DMA_KVA(q->q_cq_dmamem), 0, NVME_DMA_LEN(q->q_cq_dmamem));
1524aa5ddcf9Ssf 
1525aa5ddcf9Ssf 	q->q_sq_tail = 0;
1526aa5ddcf9Ssf 	q->q_cq_head = 0;
1527aa5ddcf9Ssf 	q->q_cq_phase = NVME_CQE_PHASE;
1528aa5ddcf9Ssf 
1529aa5ddcf9Ssf 	nvme_dmamem_sync(sc, q->q_sq_dmamem, BUS_DMASYNC_PREWRITE);
1530aa5ddcf9Ssf 	nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_PREREAD);
1531aa5ddcf9Ssf 
1532aa5ddcf9Ssf 	return (0);
1533aa5ddcf9Ssf }
1534aa5ddcf9Ssf 
/*
 * nvme_q_free: release a queue pair allocated by nvme_q_alloc().  Completes
 * the outstanding DMA syncs, lets bus-specific code undo its op_q_alloc()
 * work, then frees the DMA memory and the queue structure itself.
 */
1535282c0692Sdlg void
nvme_q_free(struct nvme_softc * sc,struct nvme_queue * q)1536448b3c09Sdlg nvme_q_free(struct nvme_softc *sc, struct nvme_queue *q)
1537282c0692Sdlg {
15380ee935ccSdlg 	nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_POSTREAD);
15390ee935ccSdlg 	nvme_dmamem_sync(sc, q->q_sq_dmamem, BUS_DMASYNC_POSTWRITE);
15407599295eSdlg 
	/* op_q_free is assumed to exist whenever op_q_alloc does. */
15417599295eSdlg 	if (sc->sc_ops->op_q_alloc != NULL)
15427599295eSdlg 		sc->sc_ops->op_q_free(sc, q);
15437599295eSdlg 
1544282c0692Sdlg 	nvme_dmamem_free(sc, q->q_cq_dmamem);
1545282c0692Sdlg 	nvme_dmamem_free(sc, q->q_sq_dmamem);
1546234dfda1Sderaadt 	free(q, M_DEVBUF, sizeof *q);
1547282c0692Sdlg }
1548282c0692Sdlg 
/*
 * nvme_intr: interrupt handler.  Processes completions on both the I/O
 * queue and the admin queue; returns 1 if either queue had work (i.e. the
 * interrupt was ours), 0 otherwise.
 */
1549282c0692Sdlg int
nvme_intr(void * xsc)1550282c0692Sdlg nvme_intr(void *xsc)
1551282c0692Sdlg {
1552448b3c09Sdlg 	struct nvme_softc *sc = xsc;
155387cbc23fSdlg 	int rv = 0;
1554448b3c09Sdlg 
	/* Service the I/O queue first, then the admin queue. */
155587cbc23fSdlg 	if (nvme_q_complete(sc, sc->sc_q))
155687cbc23fSdlg 		rv = 1;
155787cbc23fSdlg 	if (nvme_q_complete(sc, sc->sc_admin_q))
155887cbc23fSdlg 		rv = 1;
155987cbc23fSdlg 
156087cbc23fSdlg 	return (rv);
1561282c0692Sdlg }
1562282c0692Sdlg 
/*
 * nvme_intr_intx: interrupt handler for legacy INTx (pin-based) interrupts.
 * Masks the controller interrupt (INTMS) while completions are processed,
 * then unmasks it (INTMC), as required for INTx operation.
 */
1563eb77e636Sdlg int
nvme_intr_intx(void * xsc)1564eb77e636Sdlg nvme_intr_intx(void *xsc)
1565eb77e636Sdlg {
1566eb77e636Sdlg 	struct nvme_softc *sc = xsc;
1567eb77e636Sdlg 	int rv;
1568eb77e636Sdlg 
1569eb77e636Sdlg 	nvme_write4(sc, NVME_INTMS, 1);
1570eb77e636Sdlg 	rv = nvme_intr(sc);
1571eb77e636Sdlg 	nvme_write4(sc, NVME_INTMC, 1);
1572eb77e636Sdlg 
1573eb77e636Sdlg 	return (rv);
1574eb77e636Sdlg }
1575eb77e636Sdlg 
/*
 * nvme_dmamem_alloc: allocate `size' bytes of DMA-safe, zeroed memory in a
 * single segment, map it into kernel VA and load it into a DMA map so both
 * the kva and the device address are available.  Returns NULL on failure;
 * unwinding is done via the goto cleanup chain below.
 */
1576282c0692Sdlg struct nvme_dmamem *
nvme_dmamem_alloc(struct nvme_softc * sc,size_t size)1577282c0692Sdlg nvme_dmamem_alloc(struct nvme_softc *sc, size_t size)
1578282c0692Sdlg {
1579282c0692Sdlg 	struct nvme_dmamem *ndm;
1580282c0692Sdlg 	int nsegs;
1581282c0692Sdlg 
158272f0e87bSdlg 	ndm = malloc(sizeof(*ndm), M_DEVBUF, M_WAITOK | M_ZERO);
1583282c0692Sdlg 	if (ndm == NULL)
1584282c0692Sdlg 		return (NULL);
1585282c0692Sdlg 
1586282c0692Sdlg 	ndm->ndm_size = size;
1587282c0692Sdlg 
	/* One segment only; the controller is given a single bus address. */
1588282c0692Sdlg 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
15892a4e68fdSdlg 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
15902a4e68fdSdlg 	    &ndm->ndm_map) != 0)
1591282c0692Sdlg 		goto ndmfree;
1592282c0692Sdlg 
	/* Align to the controller's memory page size (sc_mps). */
1593448b3c09Sdlg 	if (bus_dmamem_alloc(sc->sc_dmat, size, sc->sc_mps, 0, &ndm->ndm_seg,
159472f0e87bSdlg 	    1, &nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
1595282c0692Sdlg 		goto destroy;
1596282c0692Sdlg 
1597282c0692Sdlg 	if (bus_dmamem_map(sc->sc_dmat, &ndm->ndm_seg, nsegs, size,
159872f0e87bSdlg 	    &ndm->ndm_kva, BUS_DMA_WAITOK) != 0)
1599282c0692Sdlg 		goto free;
1600282c0692Sdlg 
1601282c0692Sdlg 	if (bus_dmamap_load(sc->sc_dmat, ndm->ndm_map, ndm->ndm_kva, size,
160272f0e87bSdlg 	    NULL, BUS_DMA_WAITOK) != 0)
1603282c0692Sdlg 		goto unmap;
1604282c0692Sdlg 
1605282c0692Sdlg 	return (ndm);
1606282c0692Sdlg 
1607282c0692Sdlg unmap:
1608282c0692Sdlg 	bus_dmamem_unmap(sc->sc_dmat, ndm->ndm_kva, size);
1609282c0692Sdlg free:
1610282c0692Sdlg 	bus_dmamem_free(sc->sc_dmat, &ndm->ndm_seg, 1);
1611282c0692Sdlg destroy:
1612282c0692Sdlg 	bus_dmamap_destroy(sc->sc_dmat, ndm->ndm_map);
1613282c0692Sdlg ndmfree:
1614234dfda1Sderaadt 	free(ndm, M_DEVBUF, sizeof *ndm);
1615282c0692Sdlg 
1616282c0692Sdlg 	return (NULL);
1617282c0692Sdlg }
1618282c0692Sdlg 
/*
 * nvme_dmamem_sync: bus_dmamap_sync(9) wrapper covering the whole length
 * of an nvme_dmamem allocation with the given sync ops.
 */
1619282c0692Sdlg void
nvme_dmamem_sync(struct nvme_softc * sc,struct nvme_dmamem * mem,int ops)16200ee935ccSdlg nvme_dmamem_sync(struct nvme_softc *sc, struct nvme_dmamem *mem, int ops)
16210ee935ccSdlg {
16220ee935ccSdlg 	bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(mem),
16230ee935ccSdlg 	    0, NVME_DMA_LEN(mem), ops);
16240ee935ccSdlg }
16250ee935ccSdlg 
/*
 * nvme_dmamem_free: undo nvme_dmamem_alloc() — unload the DMA map, unmap
 * and free the memory, destroy the map, and free the descriptor.
 */
16260ee935ccSdlg void
nvme_dmamem_free(struct nvme_softc * sc,struct nvme_dmamem * ndm)1627282c0692Sdlg nvme_dmamem_free(struct nvme_softc *sc, struct nvme_dmamem *ndm)
1628282c0692Sdlg {
1629282c0692Sdlg 	bus_dmamap_unload(sc->sc_dmat, ndm->ndm_map);
1630282c0692Sdlg 	bus_dmamem_unmap(sc->sc_dmat, ndm->ndm_kva, ndm->ndm_size);
1631282c0692Sdlg 	bus_dmamem_free(sc->sc_dmat, &ndm->ndm_seg, 1);
1632282c0692Sdlg 	bus_dmamap_destroy(sc->sc_dmat, ndm->ndm_map);
1633234dfda1Sderaadt 	free(ndm, M_DEVBUF, sizeof *ndm);
1634282c0692Sdlg }
1635282c0692Sdlg 
163603d86467Sjmatthew #ifdef HIBERNATE
163703d86467Sjmatthew 
/*
 * nvme_hibernate_admin_cmd: submit one admin command and busy-wait for its
 * completion.  Used only during hibernate, when interrupts and normal ccb
 * accounting are unavailable.  `cid' is the caller-chosen command id used
 * to match the completion.  Returns 0 on success, EIO on error status or
 * a cid mismatch.
 */
163803d86467Sjmatthew int
nvme_hibernate_admin_cmd(struct nvme_softc * sc,struct nvme_sqe * sqe,struct nvme_cqe * cqe,int cid)163903d86467Sjmatthew nvme_hibernate_admin_cmd(struct nvme_softc *sc, struct nvme_sqe *sqe,
164003d86467Sjmatthew     struct nvme_cqe *cqe, int cid)
164103d86467Sjmatthew {
164203d86467Sjmatthew 	struct nvme_sqe *asqe = NVME_DMA_KVA(sc->sc_admin_q->q_sq_dmamem);
164303d86467Sjmatthew 	struct nvme_cqe *acqe = NVME_DMA_KVA(sc->sc_admin_q->q_cq_dmamem);
164403d86467Sjmatthew 	struct nvme_queue *q = sc->sc_admin_q;
164503d86467Sjmatthew 	int tail;
164603d86467Sjmatthew 	u_int16_t flags;
164703d86467Sjmatthew 
164803d86467Sjmatthew 	/* submit command */
16497599295eSdlg 	tail = sc->sc_ops->op_sq_enter_locked(sc, q, /* XXX ccb */ NULL);
165003d86467Sjmatthew 
	/* Copy the caller's SQE into the admin submission queue slot. */
165103d86467Sjmatthew 	asqe += tail;
165203d86467Sjmatthew 	bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_sq_dmamem),
165303d86467Sjmatthew 	    sizeof(*sqe) * tail, sizeof(*sqe), BUS_DMASYNC_POSTWRITE);
165403d86467Sjmatthew 	*asqe = *sqe;
165503d86467Sjmatthew 	asqe->cid = cid;
165603d86467Sjmatthew 	bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_sq_dmamem),
165703d86467Sjmatthew 	    sizeof(*sqe) * tail, sizeof(*sqe), BUS_DMASYNC_PREWRITE);
165803d86467Sjmatthew 
16597599295eSdlg 	sc->sc_ops->op_sq_leave_locked(sc, q, /* XXX ccb */ NULL);
166003d86467Sjmatthew 
166103d86467Sjmatthew 	/* wait for completion */
166203d86467Sjmatthew 	acqe += q->q_cq_head;
166303d86467Sjmatthew 	for (;;) {
166403d86467Sjmatthew 		nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_POSTREAD);
166503d86467Sjmatthew 		flags = lemtoh16(&acqe->flags);
		/* The phase bit flips when a new completion is posted. */
166603d86467Sjmatthew 		if ((flags & NVME_CQE_PHASE) == q->q_cq_phase)
166703d86467Sjmatthew 			break;
166803d86467Sjmatthew 
166903d86467Sjmatthew 		delay(10);
167003d86467Sjmatthew 	}
167103d86467Sjmatthew 
	/* Advance the CQ head, flipping the expected phase on wrap. */
167203d86467Sjmatthew 	if (++q->q_cq_head >= q->q_entries) {
167303d86467Sjmatthew 		q->q_cq_head = 0;
167403d86467Sjmatthew 		q->q_cq_phase ^= NVME_CQE_PHASE;
167503d86467Sjmatthew 	}
167603d86467Sjmatthew 	nvme_write4(sc, q->q_cqhdbl, q->q_cq_head);
167703d86467Sjmatthew 	if ((NVME_CQE_SC(flags) != NVME_CQE_SC_SUCCESS) || (acqe->cid != cid))
167803d86467Sjmatthew 		return (EIO);
167903d86467Sjmatthew 
168003d86467Sjmatthew 	return (0);
168103d86467Sjmatthew }
168203d86467Sjmatthew 
/*
 * nvme_hibernate_io: write hibernate image data directly to the disk,
 * bypassing the normal scsi/ccb machinery (interrupts are off and no
 * allocations are possible at this point).  `page' is a scratch page the
 * hibernate code hands us; we overlay struct nvme_hibernate_page on it to
 * keep our state (and a PRP list) between calls.
 *
 * op == HIB_INIT: locate the softc and namespace for `dev', record the
 * partition offset/size and sector size, and create a dedicated I/O queue
 * pair (sc_hib_q) via polled admin commands.
 * op == HIB_W: submit one polled WRITE for `size' bytes at `addr'.
 * Other ops are no-ops.  Returns 0 on success or an errno.
 */
168303d86467Sjmatthew int
nvme_hibernate_io(dev_t dev,daddr_t blkno,vaddr_t addr,size_t size,int op,void * page)168403d86467Sjmatthew nvme_hibernate_io(dev_t dev, daddr_t blkno, vaddr_t addr, size_t size,
168503d86467Sjmatthew     int op, void *page)
168603d86467Sjmatthew {
168703d86467Sjmatthew 	struct nvme_hibernate_page {
		/* PRP list for transfers larger than two controller pages. */
168803d86467Sjmatthew 		u_int64_t		prpl[MAXPHYS / PAGE_SIZE];
168903d86467Sjmatthew 
169003d86467Sjmatthew 		struct nvme_softc	*sc;
169103d86467Sjmatthew 		int			nsid;
169203d86467Sjmatthew 		int			sq_tail;
169303d86467Sjmatthew 		int			cq_head;
169403d86467Sjmatthew 		int			cqe_phase;
169503d86467Sjmatthew 
		/* Partition offset and size, in DEV_BSIZE blocks. */
169603d86467Sjmatthew 		daddr_t			poffset;
169703d86467Sjmatthew 		size_t			psize;
		/* Namespace sector size in bytes (from the active LBA format). */
1698*fc6d48fdSkrw 		u_int32_t		secsize;
169903d86467Sjmatthew 	} *my = page;
170003d86467Sjmatthew 	struct nvme_sqe_io *isqe;
170103d86467Sjmatthew 	struct nvme_cqe *icqe;
170203d86467Sjmatthew 	paddr_t data_phys, page_phys;
170303d86467Sjmatthew 	u_int64_t data_bus_phys, page_bus_phys;
170403d86467Sjmatthew 	u_int16_t flags;
170503d86467Sjmatthew 	int i;
1706cad24394Sdlg 	int error;
170703d86467Sjmatthew 
170803d86467Sjmatthew 	if (op == HIB_INIT) {
170903d86467Sjmatthew 		struct device *disk;
171003d86467Sjmatthew 		struct device *scsibus;
1711*fc6d48fdSkrw 		struct nvm_identify_namespace *ns;
1712*fc6d48fdSkrw 		struct nvm_namespace_format *f;
171303d86467Sjmatthew 		extern struct cfdriver sd_cd;
171403d86467Sjmatthew 		struct scsi_link *link;
171503d86467Sjmatthew 		struct scsibus_softc *bus_sc;
171603d86467Sjmatthew 		struct nvme_sqe_q qsqe;
171703d86467Sjmatthew 		struct nvme_cqe qcqe;
171803d86467Sjmatthew 
171903d86467Sjmatthew 		/* find nvme softc */
172003d86467Sjmatthew 		disk = disk_lookup(&sd_cd, DISKUNIT(dev));
172103d86467Sjmatthew 		scsibus = disk->dv_parent;
172203d86467Sjmatthew 		my->sc = (struct nvme_softc *)disk->dv_parent->dv_parent;
172303d86467Sjmatthew 
172403d86467Sjmatthew 		/* find scsi_link, which tells us the target */
172503d86467Sjmatthew 		my->nsid = 0;
172603d86467Sjmatthew 		bus_sc = (struct scsibus_softc *)scsibus;
172703d86467Sjmatthew 		SLIST_FOREACH(link, &bus_sc->sc_link_list, bus_list) {
172803d86467Sjmatthew 			if (link->device_softc == disk) {
				/* scsi target number doubles as the NVMe nsid */
1729397f5692Skettenis 				my->nsid = link->target;
173003d86467Sjmatthew 				break;
173103d86467Sjmatthew 			}
173203d86467Sjmatthew 		}
173303d86467Sjmatthew 		if (my->nsid == 0)
173403d86467Sjmatthew 			return (EIO);
		/* Sector size comes from the namespace's formatted LBA format. */
1735*fc6d48fdSkrw 		ns = my->sc->sc_namespaces[my->nsid].ident;
1736*fc6d48fdSkrw 		f = &ns->lbaf[NVME_ID_NS_FLBAS(ns->flbas)];
173703d86467Sjmatthew 
173803d86467Sjmatthew 		my->poffset = blkno;
173903d86467Sjmatthew 		my->psize = size;
1740*fc6d48fdSkrw 		my->secsize = 1 << f->lbads;
174103d86467Sjmatthew 
		/* Zero the hibernate queue pair before registering it. */
174203d86467Sjmatthew 		memset(NVME_DMA_KVA(my->sc->sc_hib_q->q_cq_dmamem), 0,
174303d86467Sjmatthew 		    my->sc->sc_hib_q->q_entries * sizeof(struct nvme_cqe));
174403d86467Sjmatthew 		memset(NVME_DMA_KVA(my->sc->sc_hib_q->q_sq_dmamem), 0,
174503d86467Sjmatthew 		    my->sc->sc_hib_q->q_entries * sizeof(struct nvme_sqe));
174603d86467Sjmatthew 
174703d86467Sjmatthew 		my->sq_tail = 0;
174803d86467Sjmatthew 		my->cq_head = 0;
174903d86467Sjmatthew 		my->cqe_phase = NVME_CQE_PHASE;
175003d86467Sjmatthew 
		/* Create the completion queue first, then the submission queue. */
175103d86467Sjmatthew 		memset(&qsqe, 0, sizeof(qsqe));
175203d86467Sjmatthew 		qsqe.opcode = NVM_ADMIN_ADD_IOCQ;
175303d86467Sjmatthew 		htolem64(&qsqe.prp1,
175403d86467Sjmatthew 		    NVME_DMA_DVA(my->sc->sc_hib_q->q_cq_dmamem));
175503d86467Sjmatthew 		htolem16(&qsqe.qsize, my->sc->sc_hib_q->q_entries - 1);
175603d86467Sjmatthew 		htolem16(&qsqe.qid, my->sc->sc_hib_q->q_id);
175703d86467Sjmatthew 		qsqe.qflags = NVM_SQE_CQ_IEN | NVM_SQE_Q_PC;
175803d86467Sjmatthew 		if (nvme_hibernate_admin_cmd(my->sc, (struct nvme_sqe *)&qsqe,
175903d86467Sjmatthew 		    &qcqe, 1) != 0)
176003d86467Sjmatthew 			return (EIO);
176103d86467Sjmatthew 
176203d86467Sjmatthew 		memset(&qsqe, 0, sizeof(qsqe));
176303d86467Sjmatthew 		qsqe.opcode = NVM_ADMIN_ADD_IOSQ;
176403d86467Sjmatthew 		htolem64(&qsqe.prp1,
176503d86467Sjmatthew 		    NVME_DMA_DVA(my->sc->sc_hib_q->q_sq_dmamem));
176603d86467Sjmatthew 		htolem16(&qsqe.qsize, my->sc->sc_hib_q->q_entries - 1);
176703d86467Sjmatthew 		htolem16(&qsqe.qid, my->sc->sc_hib_q->q_id);
176803d86467Sjmatthew 		htolem16(&qsqe.cqid, my->sc->sc_hib_q->q_id);
176903d86467Sjmatthew 		qsqe.qflags = NVM_SQE_Q_PC;
177003d86467Sjmatthew 		if (nvme_hibernate_admin_cmd(my->sc, (struct nvme_sqe *)&qsqe,
177103d86467Sjmatthew 		    &qcqe, 2) != 0)
177203d86467Sjmatthew 			return (EIO);
177303d86467Sjmatthew 
177403d86467Sjmatthew 		return (0);
177503d86467Sjmatthew 	}
177603d86467Sjmatthew 
177703d86467Sjmatthew 	if (op != HIB_W)
177803d86467Sjmatthew 		return (0);
177903d86467Sjmatthew 
	/* Refuse writes that would run past the end of the partition. */
1780*fc6d48fdSkrw 	if (blkno + (size / DEV_BSIZE) > my->psize)
1781*fc6d48fdSkrw 		return E2BIG;
1782*fc6d48fdSkrw 
	/* Claim the next submission queue slot. */
178303d86467Sjmatthew 	isqe = NVME_DMA_KVA(my->sc->sc_hib_q->q_sq_dmamem);
178403d86467Sjmatthew 	isqe += my->sq_tail;
178503d86467Sjmatthew 	if (++my->sq_tail == my->sc->sc_hib_q->q_entries)
178603d86467Sjmatthew 		my->sq_tail = 0;
178703d86467Sjmatthew 
178803d86467Sjmatthew 	memset(isqe, 0, sizeof(*isqe));
178903d86467Sjmatthew 	isqe->opcode = NVM_CMD_WRITE;
179003d86467Sjmatthew 	htolem32(&isqe->nsid, my->nsid);
179103d86467Sjmatthew 
	/*
	 * PRP setup: prp[0] is the data's bus address.  Up to two controller
	 * pages fit in prp[0]/prp[1]; larger transfers point prp[1] at the
	 * PRP list kept in this scratch page.
	 */
179203d86467Sjmatthew 	pmap_extract(pmap_kernel(), addr, &data_phys);
179303d86467Sjmatthew 	data_bus_phys = data_phys;
179403d86467Sjmatthew 	htolem64(&isqe->entry.prp[0], data_bus_phys);
179503d86467Sjmatthew 	if ((size > my->sc->sc_mps) && (size <= my->sc->sc_mps * 2)) {
179603d86467Sjmatthew 		htolem64(&isqe->entry.prp[1], data_bus_phys + my->sc->sc_mps);
179703d86467Sjmatthew 	} else if (size > my->sc->sc_mps * 2) {
179803d86467Sjmatthew 		pmap_extract(pmap_kernel(), (vaddr_t)page, &page_phys);
179903d86467Sjmatthew 		page_bus_phys = page_phys;
180003d86467Sjmatthew 		htolem64(&isqe->entry.prp[1], page_bus_phys +
180103d86467Sjmatthew 		    offsetof(struct nvme_hibernate_page, prpl));
180203d86467Sjmatthew 		for (i = 1; i < (size / my->sc->sc_mps); i++) {
180303d86467Sjmatthew 			htolem64(&my->prpl[i - 1], data_bus_phys +
180403d86467Sjmatthew 			    (i * my->sc->sc_mps));
180503d86467Sjmatthew 		}
180603d86467Sjmatthew 	}
180703d86467Sjmatthew 
	/* Convert DEV_BSIZE blocks to device sectors; nlb is 0-based. */
1808*fc6d48fdSkrw 	isqe->slba = (blkno + my->poffset) / (my->secsize / DEV_BSIZE);
1809*fc6d48fdSkrw 	isqe->nlb = (size / my->secsize) - 1;
181003d86467Sjmatthew 	isqe->cid = blkno % 0xffff;
181103d86467Sjmatthew 
	/* Ring the submission doorbell and poll for the completion. */
181203d86467Sjmatthew 	nvme_write4(my->sc, NVME_SQTDBL(NVME_HIB_Q, my->sc->sc_dstrd),
181303d86467Sjmatthew 	    my->sq_tail);
1814cad24394Sdlg 	nvme_barrier(my->sc, NVME_SQTDBL(NVME_HIB_Q, my->sc->sc_dstrd), 4,
1815cad24394Sdlg 	    BUS_SPACE_BARRIER_WRITE);
1816cad24394Sdlg 
1817cad24394Sdlg 	error = 0;
181803d86467Sjmatthew 
181903d86467Sjmatthew 	icqe = NVME_DMA_KVA(my->sc->sc_hib_q->q_cq_dmamem);
182003d86467Sjmatthew 	icqe += my->cq_head;
1821cad24394Sdlg 
1822cad24394Sdlg 	nvme_dmamem_sync(my->sc, my->sc->sc_hib_q->q_cq_dmamem,
1823cad24394Sdlg 	    BUS_DMASYNC_POSTREAD);
182403d86467Sjmatthew 	for (;;) {
182503d86467Sjmatthew 		flags = lemtoh16(&icqe->flags);
1826cad24394Sdlg 		if ((flags & NVME_CQE_PHASE) == my->cqe_phase) {
			/* Completion arrived: check status and matching cid. */
1827cad24394Sdlg 			if ((NVME_CQE_SC(flags) != NVME_CQE_SC_SUCCESS) ||
1828cad24394Sdlg 			    (icqe->cid != blkno % 0xffff))
1829cad24394Sdlg 				error = EIO;
183003d86467Sjmatthew 
1831cad24394Sdlg 			break;
183203d86467Sjmatthew 		}
183303d86467Sjmatthew 
1834cad24394Sdlg 		delay(1);
1835cad24394Sdlg 		nvme_dmamem_sync(my->sc, my->sc->sc_hib_q->q_cq_dmamem,
1836cad24394Sdlg 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_POSTREAD);
1837cad24394Sdlg 	}
1838cad24394Sdlg 	nvme_dmamem_sync(my->sc, my->sc->sc_hib_q->q_cq_dmamem,
1839cad24394Sdlg 	    BUS_DMASYNC_PREREAD);
1840cad24394Sdlg 
	/* Advance the CQ head, flipping the expected phase on wrap. */
184103d86467Sjmatthew 	if (++my->cq_head == my->sc->sc_hib_q->q_entries) {
184203d86467Sjmatthew 		my->cq_head = 0;
184303d86467Sjmatthew 		my->cqe_phase ^= NVME_CQE_PHASE;
184403d86467Sjmatthew 	}
1845cad24394Sdlg 
184603d86467Sjmatthew 	nvme_write4(my->sc, NVME_CQHDBL(NVME_HIB_Q, my->sc->sc_dstrd),
184703d86467Sjmatthew 	    my->cq_head);
1848cad24394Sdlg 	nvme_barrier(my->sc, NVME_CQHDBL(NVME_HIB_Q, my->sc->sc_dstrd), 4,
1849cad24394Sdlg 	    BUS_SPACE_BARRIER_WRITE);
185003d86467Sjmatthew 
1851cad24394Sdlg 	return (error);
185203d86467Sjmatthew }
185303d86467Sjmatthew 
185403d86467Sjmatthew #endif
18557f4636ceSkrw 
18567f4636ceSkrw #if NBIO > 0
/*
 * nvme_bioctl: bio(4) ioctl entry point for the controller.  Dispatches
 * BIOCINQ/BIOCVOL/BIOCDISK and NVMe passthrough commands, serialised by
 * the softc's rwlock.  Returns 0 or an errno (ENOTTY for unknown cmds).
 */
18577f4636ceSkrw int
nvme_bioctl(struct device * self,u_long cmd,caddr_t data)18587f4636ceSkrw nvme_bioctl(struct device *self, u_long cmd, caddr_t data)
18597f4636ceSkrw {
18607f4636ceSkrw 	struct nvme_softc	*sc = (struct nvme_softc *)self;
18614e9514d6Skrw 	struct nvme_pt_cmd	*pt;
18627f4636ceSkrw 	int			 error = 0;
18637f4636ceSkrw 
18647f4636ceSkrw 	rw_enter_write(&sc->sc_lock);
18657f4636ceSkrw 
18667f4636ceSkrw 	switch (cmd) {
18677f4636ceSkrw 	case BIOCINQ:
18687f4636ceSkrw 		error = nvme_bioctl_inq(sc, (struct bioc_inq *)data);
18697f4636ceSkrw 		break;
18707f4636ceSkrw 	case BIOCVOL:
18717f4636ceSkrw 		error = nvme_bioctl_vol(sc, (struct bioc_vol *)data);
18727f4636ceSkrw 		break;
18737f4636ceSkrw 	case BIOCDISK:
18747f4636ceSkrw 		error = nvme_bioctl_disk(sc, (struct bioc_disk *)data);
18757f4636ceSkrw 		break;
18764e9514d6Skrw 	case NVME_PASSTHROUGH_CMD:
18774e9514d6Skrw 		pt = (struct nvme_pt_cmd *)data;
		/* -1: command is aimed at the controller, not a namespace. */
18784e9514d6Skrw 		error = nvme_passthrough_cmd(sc, pt, sc->sc_dev.dv_unit, -1);
18794e9514d6Skrw 		break;
18807f4636ceSkrw 	default:
18817f4636ceSkrw 		printf("nvme_bioctl() Unknown command (%lu)\n", cmd);
18827f4636ceSkrw 		error = ENOTTY;
18837f4636ceSkrw 	}
18847f4636ceSkrw 
18857f4636ceSkrw 	rw_exit_write(&sc->sc_lock);
18867f4636ceSkrw 
18877f4636ceSkrw 	return error;
18887f4636ceSkrw }
18897f4636ceSkrw 
/*
 * nvme_bio_status: printf-style convenience wrapper that appends an
 * informational message to a bio_status.  bio_status() takes a va_list *,
 * hence &ap.
 */
18907f4636ceSkrw void
nvme_bio_status(struct bio_status * bs,const char * fmt,...)18917f4636ceSkrw nvme_bio_status(struct bio_status *bs, const char *fmt, ...)
18927f4636ceSkrw {
18937f4636ceSkrw 	va_list			ap;
18947f4636ceSkrw 
18957f4636ceSkrw 	va_start(ap, fmt);
18967f4636ceSkrw 	bio_status(bs, 0, BIO_MSG_INFO, fmt, &ap);
18977f4636ceSkrw 	va_end(ap);
18987f4636ceSkrw }
18997f4636ceSkrw 
/*
 * nvme_bioctl_sdname: map a scsi target (namespace) to its sd(4) device
 * name, or NULL if there is no attached, live disk — or if the controller's
 * registers read back as all-ones (device gone/unmapped).
 */
19007f4636ceSkrw const char *
nvme_bioctl_sdname(const struct nvme_softc * sc,int target)19017f4636ceSkrw nvme_bioctl_sdname(const struct nvme_softc *sc, int target)
19027f4636ceSkrw {
19037f4636ceSkrw 	const struct scsi_link		*link;
19047f4636ceSkrw 	const struct sd_softc		*sd;
19057f4636ceSkrw 
19067f4636ceSkrw 	link = scsi_get_link(sc->sc_scsibus, target, 0);
19079a0b8a7eSjsg 	if (link == NULL)
19089a0b8a7eSjsg 		return NULL;
19097f4636ceSkrw 	sd = (struct sd_softc *)(link->device_softc);
19107f4636ceSkrw 	if (ISSET(link->state, SDEV_S_DYING) || sd == NULL ||
19117f4636ceSkrw 	    ISSET(sd->flags, SDF_DYING))
19127f4636ceSkrw 		return NULL;
19137f4636ceSkrw 
	/* All-ones from NVME_VS means the PCI mapping is no longer valid. */
191498f39564Skrw 	if (nvme_read4(sc, NVME_VS) == 0xffffffff)
19157f4636ceSkrw 		return NULL;
19167f4636ceSkrw 
19177f4636ceSkrw 	return DEVNAME(sd);
19187f4636ceSkrw }
19197f4636ceSkrw 
/*
 * nvme_bioctl_inq: fill a BIOCINQ request with controller-level details —
 * model/firmware/serial, max transfer size, feature and command-set flags,
 * and the decoded CC/CSTS/VS register state.  Always returns 0; register
 * read failures are reported through the bio_status messages.
 */
19207f4636ceSkrw int
nvme_bioctl_inq(struct nvme_softc * sc,struct bioc_inq * bi)19217f4636ceSkrw nvme_bioctl_inq(struct nvme_softc *sc, struct bioc_inq *bi)
19227f4636ceSkrw {
19237f4636ceSkrw 	char				 sn[41], mn[81], fr[17];
19247f4636ceSkrw 	struct nvm_identify_controller	*idctrl = &sc->sc_identify;
19257f4636ceSkrw 	struct bio_status		*bs;
19267f4636ceSkrw 	unsigned int			 nn;
19277f4636ceSkrw 	uint32_t			 cc, csts, vs;
19287f4636ceSkrw 
19297f4636ceSkrw 	/* Don't tell bioctl about namespaces > last configured namespace. */
19307f4636ceSkrw 	for (nn = sc->sc_nn; nn > 0; nn--) {
19317f4636ceSkrw 		if (sc->sc_namespaces[nn].ident)
19327f4636ceSkrw 			break;
19337f4636ceSkrw 	}
19347f4636ceSkrw 	bi->bi_novol = bi->bi_nodisk = nn;
19357f4636ceSkrw 	strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
19367f4636ceSkrw 
19377f4636ceSkrw 	bs = &bi->bi_bio.bio_status;
19387f4636ceSkrw 	bio_status_init(bs, &sc->sc_dev);
19397f4636ceSkrw 	bs->bs_status = BIO_STATUS_SUCCESS;
19407f4636ceSkrw 
	/* Identify strings are fixed-width, space padded; make them visible. */
19417f4636ceSkrw 	scsi_strvis(sn, idctrl->sn, sizeof(idctrl->sn));
19427f4636ceSkrw 	scsi_strvis(mn, idctrl->mn, sizeof(idctrl->mn));
19437f4636ceSkrw 	scsi_strvis(fr, idctrl->fr, sizeof(idctrl->fr));
19447f4636ceSkrw 
19457f4636ceSkrw 	nvme_bio_status(bs, "%s, %s, %s", mn, fr, sn);
	/* Fixed typo in user-visible message: "Persisent" -> "Persistent". */
19467f4636ceSkrw 	nvme_bio_status(bs, "Max i/o %zu bytes%s%s%s, Sanitize 0x%b",
19477f4636ceSkrw 	    sc->sc_mdts,
19487f4636ceSkrw 	    ISSET(idctrl->lpa, NVM_ID_CTRL_LPA_PE) ?
19497f4636ceSkrw 	    ", Persistent Event Log" : "",
19507f4636ceSkrw 	    ISSET(idctrl->fna, NVM_ID_CTRL_FNA_CRYPTOFORMAT) ?
19517f4636ceSkrw 	    ", CryptoFormat" : "",
19527f4636ceSkrw 	    ISSET(idctrl->vwc, NVM_ID_CTRL_VWC_PRESENT) ?
19537f4636ceSkrw 	    ", Volatile Write Cache" : "",
19547f4636ceSkrw 	    lemtoh32(&idctrl->sanicap), NVM_ID_CTRL_SANICAP_FMT
19557f4636ceSkrw 	);
19567f4636ceSkrw 
19577f4636ceSkrw 	if (idctrl->ctratt != 0)
19587f4636ceSkrw 		nvme_bio_status(bs, "Features 0x%b", lemtoh32(&idctrl->ctratt),
19597f4636ceSkrw 		    NVM_ID_CTRL_CTRATT_FMT);
19607f4636ceSkrw 
19617f4636ceSkrw 	if (idctrl->oacs || idctrl->oncs) {
19627f4636ceSkrw 		nvme_bio_status(bs, "Admin commands 0x%b, NVM commands 0x%b",
19637f4636ceSkrw 		    lemtoh16(&idctrl->oacs), NVM_ID_CTRL_OACS_FMT,
19647f4636ceSkrw 		    lemtoh16(&idctrl->oncs), NVM_ID_CTRL_ONCS_FMT);
19657f4636ceSkrw 	}
19667f4636ceSkrw 
19677f4636ceSkrw 	cc = nvme_read4(sc, NVME_CC);
19687f4636ceSkrw 	csts = nvme_read4(sc, NVME_CSTS);
19697f4636ceSkrw 	vs = nvme_read4(sc, NVME_VS);
19707f4636ceSkrw 
	/* All-ones means the register window is gone; report and bail. */
197198f39564Skrw 	if (vs == 0xffffffff) {
19727f4636ceSkrw 		nvme_bio_status(bs, "Invalid PCIe register mapping");
19737f4636ceSkrw 		return 0;
19747f4636ceSkrw 	}
19757f4636ceSkrw 
19767f4636ceSkrw 	nvme_bio_status(bs, "NVMe %u.%u%s%s%sabled, %sReady%s%s%s%s",
19777f4636ceSkrw 	    NVME_VS_MJR(vs), NVME_VS_MNR(vs),
19787f4636ceSkrw 	    (NVME_CC_CSS_R(cc) == NVME_CC_CSS_NVM) ? ", NVM I/O command set" : "",
19797f4636ceSkrw 	    (NVME_CC_CSS_R(cc) == 0x7) ? ", Admin command set only" : "",
19807f4636ceSkrw 	    ISSET(cc, NVME_CC_EN) ? ", En" : "Dis",
19817f4636ceSkrw 	    ISSET(csts, NVME_CSTS_RDY) ? "" : "Not ",
19827f4636ceSkrw 	    ISSET(csts, NVME_CSTS_CFS) ? ", Fatal Error, " : "",
19837f4636ceSkrw 	    (NVME_CC_SHN_R(cc) == NVME_CC_SHN_NORMAL) ? ", Normal shutdown" : "",
19847f4636ceSkrw 	    (NVME_CC_SHN_R(cc) == NVME_CC_SHN_ABRUPT) ? ", Abrupt shutdown" : "",
19857f4636ceSkrw 	    ISSET(csts, NVME_CSTS_SHST_DONE) ? " complete" : "");
19867f4636ceSkrw 
19877f4636ceSkrw 	return 0;
19887f4636ceSkrw }
19897f4636ceSkrw 
/*
 * nvme_bioctl_vol: fill a BIOCVOL request for one namespace (bioctl volume
 * ids are 0-based; namespace ids are 1-based, hence the +1).  Reports the
 * namespace size in bytes and whether an sd(4) device is attached.
 */
19907f4636ceSkrw int
nvme_bioctl_vol(struct nvme_softc * sc,struct bioc_vol * bv)19917f4636ceSkrw nvme_bioctl_vol(struct nvme_softc *sc, struct bioc_vol *bv)
19927f4636ceSkrw {
19937f4636ceSkrw 	const struct nvm_identify_namespace	*idns;
19947f4636ceSkrw 	const char				*sd;
19957f4636ceSkrw 	int					 target;
19967f4636ceSkrw 	unsigned int 				 lbaf;
19977f4636ceSkrw 
19987f4636ceSkrw 	target = bv->bv_volid + 1;
19997f4636ceSkrw 	if (target > sc->sc_nn) {
20007f4636ceSkrw 		bv->bv_status = BIOC_SVINVALID;
20017f4636ceSkrw 		return 0;
20027f4636ceSkrw 	}
20037f4636ceSkrw 
20047f4636ceSkrw 	bv->bv_level = 'c';
20057f4636ceSkrw 	bv->bv_nodisk = 1;
20067f4636ceSkrw 
20077f4636ceSkrw 	idns = sc->sc_namespaces[target].ident;
20087f4636ceSkrw 	if (idns == NULL) {
20097f4636ceSkrw 		bv->bv_status = BIOC_SVINVALID;
20107f4636ceSkrw 		return 0;
20117f4636ceSkrw 	}
20127f4636ceSkrw 
	/* With >16 formats, FLBAS bits 5:6 extend the format index. */
20137f4636ceSkrw 	lbaf = NVME_ID_NS_FLBAS(idns->flbas);
20147f4636ceSkrw 	if (idns->nlbaf > 16)
20157f4636ceSkrw 		lbaf |= (idns->flbas >> 1) & 0x3f;
	/* Size in bytes = sector count << log2(sector size). */
20167f4636ceSkrw 	bv->bv_size = nvme_scsi_size(idns) << idns->lbaf[lbaf].lbads;
20177f4636ceSkrw 
20187f4636ceSkrw 	sd = nvme_bioctl_sdname(sc, target);
20197f4636ceSkrw 	if (sd) {
20207f4636ceSkrw 		strlcpy(bv->bv_dev, sd, sizeof(bv->bv_dev));
20217f4636ceSkrw 		bv->bv_status = BIOC_SVONLINE;
20227f4636ceSkrw 	} else
20237f4636ceSkrw 		bv->bv_status = BIOC_SVOFFLINE;
20247f4636ceSkrw 
20257f4636ceSkrw 	return 0;
20267f4636ceSkrw }
20277f4636ceSkrw 
/*
 * nvme_bioctl_disk: fill a BIOCDISK request for one namespace — address,
 * size, GUID/EUI64 serial, the supported LBA formats (the in-use format is
 * marked with '*'), namespace features, and end-to-end data protection
 * settings.  Returns 0, or EINVAL for an out-of-range target.
 */
20287f4636ceSkrw int
nvme_bioctl_disk(struct nvme_softc * sc,struct bioc_disk * bd)20297f4636ceSkrw nvme_bioctl_disk(struct nvme_softc *sc, struct bioc_disk *bd)
20307f4636ceSkrw {
20317f4636ceSkrw 	const char 			*rpdesc[4] = {
20327f4636ceSkrw 		" (Best)",
20337f4636ceSkrw 		" (Better)",
20347f4636ceSkrw 		" (Good)",
20357f4636ceSkrw 		" (Degraded)"
20367f4636ceSkrw 	};
20377f4636ceSkrw 	const char			*protection[4] = {
20387f4636ceSkrw 		"not enabled",
20397f4636ceSkrw 		"Type 1",
20407f4636ceSkrw 		"Type 2",
20417f4636ceSkrw 		"Type 3",
20427f4636ceSkrw 	};
20437f4636ceSkrw 	char				 buf[32], msg[BIO_MSG_LEN];
20447f4636ceSkrw 	struct nvm_identify_namespace	*idns;
20457f4636ceSkrw 	struct bio_status		*bs;
20467f4636ceSkrw 	uint64_t			 id1, id2;
20477f4636ceSkrw 	unsigned int			 i, lbaf, target;
20487f4636ceSkrw 	uint16_t			 ms;
20497f4636ceSkrw 	uint8_t				 dps;
20507f4636ceSkrw 
	/* bioctl volume ids are 0-based; namespace ids are 1-based. */
20517f4636ceSkrw 	target = bd->bd_volid + 1;
20527f4636ceSkrw 	if (target > sc->sc_nn)
20537f4636ceSkrw 		return EINVAL;
20547f4636ceSkrw 	bd->bd_channel = sc->sc_scsibus->sc_dev.dv_unit;
20557f4636ceSkrw 	bd->bd_target = target;
20567f4636ceSkrw 	bd->bd_lun = 0;
20577f4636ceSkrw 	snprintf(bd->bd_procdev, sizeof(bd->bd_procdev), "Namespace %u", target);
20587f4636ceSkrw 
20597f4636ceSkrw 	bs = &bd->bd_bio.bio_status;
20607f4636ceSkrw 	bs->bs_status = BIO_STATUS_SUCCESS;
20617f4636ceSkrw 	snprintf(bs->bs_controller, sizeof(bs->bs_controller), "%11u",
20627f4636ceSkrw 	    bd->bd_diskid);
20637f4636ceSkrw 
20647f4636ceSkrw 	idns = sc->sc_namespaces[target].ident;
20657f4636ceSkrw 	if (idns == NULL) {
20667f4636ceSkrw 		bd->bd_status = BIOC_SDUNUSED;
20677f4636ceSkrw 		return 0;
20687f4636ceSkrw 	}
20697f4636ceSkrw 
	/* With >16 formats, FLBAS bits 5:6 extend the format index. */
20707f4636ceSkrw 	lbaf = NVME_ID_NS_FLBAS(idns->flbas);
20717f4636ceSkrw 	if (idns->nlbaf > nitems(idns->lbaf))
20727f4636ceSkrw 		lbaf |= (idns->flbas >> 1) & 0x3f;
20737f4636ceSkrw 	bd->bd_size = lemtoh64(&idns->nsze) << idns->lbaf[lbaf].lbads;
20747f4636ceSkrw 
	/* Prefer the 128-bit NGUID as a serial, fall back to EUI64. */
20757f4636ceSkrw 	if (memcmp(idns->nguid, "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0", 16)) {
20767f4636ceSkrw 		memcpy(&id1, idns->nguid, sizeof(uint64_t));
20777f4636ceSkrw 		memcpy(&id2, idns->nguid + sizeof(uint64_t), sizeof(uint64_t));
20787f4636ceSkrw 		snprintf(bd->bd_serial, sizeof(bd->bd_serial), "%08llx%08llx",
20797f4636ceSkrw 		    id1, id2);
20807f4636ceSkrw 	} else if (memcmp(idns->eui64, "\0\0\0\0\0\0\0\0", 8)) {
20817f4636ceSkrw 		memcpy(&id1, idns->eui64, sizeof(uint64_t));
20827f4636ceSkrw 		snprintf(bd->bd_serial, sizeof(bd->bd_serial), "%08llx", id1);
20837f4636ceSkrw 	}
20847f4636ceSkrw 
	/*
	 * List each supported LBA format as "<sector size>[+<metadata>]"
	 * with its relative performance; '*' marks the format in use.
	 * NOTE(review): the loop trusts nlbaf to index lbaf[]; formats past
	 * nitems(idns->lbaf) would read reserved identify bytes — confirm
	 * against controllers reporting >16 formats.
	 */
20857f4636ceSkrw 	msg[0] = '\0';
20867f4636ceSkrw 	for (i = 0; i <= idns->nlbaf; i++) {
20877f4636ceSkrw 		if (idns->lbaf[i].lbads == 0)
20887f4636ceSkrw 			continue;
20897f4636ceSkrw 		snprintf(buf, sizeof(buf), "%s%s%u",
20907f4636ceSkrw 		    strlen(msg) ? ", " : "", (i == lbaf) ? "*" : "",
20917f4636ceSkrw 		    1 << idns->lbaf[i].lbads);
20927f4636ceSkrw 		strlcat(msg, buf, sizeof(msg));
20937f4636ceSkrw 		ms = lemtoh16(&idns->lbaf[i].ms);
20947f4636ceSkrw 		if (ms) {
20957f4636ceSkrw 			snprintf(buf, sizeof(buf), "+%u", ms);
20967f4636ceSkrw 			strlcat(msg, buf, sizeof(msg));
20977f4636ceSkrw 		}
20987f4636ceSkrw 		strlcat(msg, rpdesc[idns->lbaf[i].rp], sizeof(msg));
20997f4636ceSkrw 	}
21007f4636ceSkrw 	nvme_bio_status(bs, "Formats %s", msg);
21017f4636ceSkrw 
21027f4636ceSkrw 	if (idns->nsfeat)
21037f4636ceSkrw 		nvme_bio_status(bs, "Features 0x%b", idns->nsfeat,
21047f4636ceSkrw 		    NVME_ID_NS_NSFEAT_FMT);
21057f4636ceSkrw 
21067f4636ceSkrw 	if (idns->dps) {
21077f4636ceSkrw 		dps = idns->dps;
21087f4636ceSkrw 		snprintf(msg, sizeof(msg), "Data Protection (0x%02x) "
21097f4636ceSkrw 		    "Protection Data in ", dps);
21107f4636ceSkrw 		if (ISSET(dps, NVME_ID_NS_DPS_PIP))
21117f4636ceSkrw 			strlcat(msg, "first", sizeof(msg));
21127f4636ceSkrw 		else
21137f4636ceSkrw 			strlcat(msg, "last", sizeof(msg));
		/* Leading space added: was "firstbytes"/"lastbytes". */
21147f4636ceSkrw 		strlcat(msg, " bytes of metadata, Protection ", sizeof(msg));
21157f4636ceSkrw 		if (NVME_ID_NS_DPS_TYPE(dps) >= nitems(protection))
21167f4636ceSkrw 			strlcat(msg, "Type unknown", sizeof(msg));
21177f4636ceSkrw 		else
21187f4636ceSkrw 			strlcat(msg, protection[NVME_ID_NS_DPS_TYPE(dps)],
21197f4636ceSkrw 			    sizeof(msg));
21207f4636ceSkrw 		nvme_bio_status(bs, "%s", msg);
21217f4636ceSkrw 	}
21227f4636ceSkrw 
21237f4636ceSkrw 	if (nvme_bioctl_sdname(sc, target) == NULL)
21247f4636ceSkrw 		bd->bd_status = BIOC_SDOFFLINE;
21257f4636ceSkrw 	else
21267f4636ceSkrw 		bd->bd_status = BIOC_SDONLINE;
21277f4636ceSkrw 
21287f4636ceSkrw 	return 0;
21297f4636ceSkrw }
21307f4636ceSkrw #endif	/* NBIO > 0 */
2131