xref: /freebsd/sys/dev/nvme/nvme_ns_cmd.c (revision 685dc743)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2012 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "nvme_private.h"

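/*
 * Per-namespace I/O command wrappers.  Each routine allocates an
 * nvme_request, builds the corresponding NVMe command for this namespace,
 * and submits it to the controller's I/O path.  All of them complete
 * asynchronously: cb_fn is invoked with cb_arg once the command finishes,
 * and the payload must remain valid until then.
 */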
int
nvme_ns_cmd_read(struct nvme_namespace *ns, void *payload, uint64_t lba,
    uint32_t lba_count, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request	*req;

	req = nvme_allocate_request_vaddr(payload,
	    lba_count * nvme_ns_get_sector_size(ns), cb_fn, cb_arg);

	if (req == NULL)
		return (ENOMEM);

	nvme_ns_read_cmd(&req->cmd, ns->id, lba, lba_count);

	nvme_ctrlr_submit_io_request(ns->ctrlr, req);

	return (0);
}
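/*
 * Illustrative sketch (not part of the driver): how a hypothetical
 * in-kernel consumer might issue an asynchronous 8-sector read.
 * "my_read_done" and "sc" are invented names; nvme_cb_fn_t and
 * nvme_completion_is_error() come from nvme.h.  The buffer must remain
 * valid until the callback fires.
 *
 *	static void
 *	my_read_done(void *arg, const struct nvme_completion *cpl)
 *	{
 *		if (nvme_completion_is_error(cpl))
 *			printf("nvme: read failed\n");
 *		wakeup(arg);
 *	}
 *
 *	error = nvme_ns_cmd_read(ns, buf, lba, 8, my_read_done, sc);
 *
 * A non-zero return (ENOMEM) means no request could be allocated and the
 * callback will never run, so the caller must handle that failure itself.
 */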

int
nvme_ns_cmd_read_bio(struct nvme_namespace *ns, struct bio *bp,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request	*req;
	uint64_t		lba;
	uint64_t		lba_count;

	req = nvme_allocate_request_bio(bp, cb_fn, cb_arg);

	if (req == NULL)
		return (ENOMEM);

	lba = bp->bio_offset / nvme_ns_get_sector_size(ns);
	lba_count = bp->bio_bcount / nvme_ns_get_sector_size(ns);
	nvme_ns_read_cmd(&req->cmd, ns->id, lba, lba_count);

	nvme_ctrlr_submit_io_request(ns->ctrlr, req);

	return (0);
}

int
nvme_ns_cmd_write(struct nvme_namespace *ns, void *payload, uint64_t lba,
    uint32_t lba_count, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request	*req;

	req = nvme_allocate_request_vaddr(payload,
	    lba_count * nvme_ns_get_sector_size(ns), cb_fn, cb_arg);

	if (req == NULL)
		return (ENOMEM);

	nvme_ns_write_cmd(&req->cmd, ns->id, lba, lba_count);

	nvme_ctrlr_submit_io_request(ns->ctrlr, req);

	return (0);
}

int
nvme_ns_cmd_write_bio(struct nvme_namespace *ns, struct bio *bp,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request	*req;
	uint64_t		lba;
	uint64_t		lba_count;

	req = nvme_allocate_request_bio(bp, cb_fn, cb_arg);

	if (req == NULL)
		return (ENOMEM);

	lba = bp->bio_offset / nvme_ns_get_sector_size(ns);
	lba_count = bp->bio_bcount / nvme_ns_get_sector_size(ns);
	nvme_ns_write_cmd(&req->cmd, ns->id, lba, lba_count);

	nvme_ctrlr_submit_io_request(ns->ctrlr, req);

	return (0);
}

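/*
 * Dataset Management (deallocate/TRIM).  The payload is an array of
 * num_ranges nvme_dsm_range structures describing the LBA ranges to
 * deallocate.  Per the NVMe spec, cdw10 holds the number of ranges as a
 * 0-based count (hence num_ranges - 1) and cdw11 holds the Dataset
 * Management attributes, here just the AD (deallocate) bit.
 */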
int
nvme_ns_cmd_deallocate(struct nvme_namespace *ns, void *payload,
    uint8_t num_ranges, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request	*req;
	struct nvme_command	*cmd;

	req = nvme_allocate_request_vaddr(payload,
	    num_ranges * sizeof(struct nvme_dsm_range), cb_fn, cb_arg);

	if (req == NULL)
		return (ENOMEM);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_DATASET_MANAGEMENT;
	cmd->nsid = htole32(ns->id);

	/* TODO: create a delete command data structure */
	cmd->cdw10 = htole32(num_ranges - 1);
	cmd->cdw11 = htole32(NVME_DSM_ATTR_DEALLOCATE);

	nvme_ctrlr_submit_io_request(ns->ctrlr, req);

	return (0);
}
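/*
 * Illustrative sketch (not part of the driver): a hypothetical caller
 * trimming a single LBA range.  "my_trim_done" and "sc" are invented
 * names; the nvme_dsm_range fields and byte-order conversions mirror how
 * the rest of the driver builds these ranges.  The range array must stay
 * allocated until the completion callback runs.
 *
 *	struct nvme_dsm_range *r;
 *
 *	r = malloc(sizeof(*r), M_TEMP, M_ZERO | M_WAITOK);
 *	r->starting_lba = htole64(lba);
 *	r->length = htole32(lba_count);
 *	error = nvme_ns_cmd_deallocate(ns, r, 1, my_trim_done, sc);
 */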

int
nvme_ns_cmd_flush(struct nvme_namespace *ns, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request	*req;

	req = nvme_allocate_request_null(cb_fn, cb_arg);

	if (req == NULL)
		return (ENOMEM);

	nvme_ns_flush_cmd(&req->cmd, ns->id);
	nvme_ctrlr_submit_io_request(ns->ctrlr, req);

	return (0);
}

/* Timeout = 1 sec (200000 polls with a 5us DELAY between each) */
#define NVD_DUMP_TIMEOUT	200000

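/*
 * Polled write path used while taking a kernel crash dump.  Interrupts
 * are disabled at that point, so completions must be reaped by polling
 * the I/O qpair directly rather than waiting for an interrupt.
 */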
int
nvme_ns_dump(struct nvme_namespace *ns, void *virt, off_t offset, size_t len)
{
	struct nvme_completion_poll_status status;
	struct nvme_request *req;
	struct nvme_command *cmd;
	uint64_t lba, lba_count;
	int i;

	status.done = FALSE;
	req = nvme_allocate_request_vaddr(virt, len, nvme_completion_poll_cb,
	    &status);
	if (req == NULL)
		return (ENOMEM);

	cmd = &req->cmd;

	if (len > 0) {
		lba = offset / nvme_ns_get_sector_size(ns);
		lba_count = len / nvme_ns_get_sector_size(ns);
		nvme_ns_write_cmd(cmd, ns->id, lba, lba_count);
	} else
		nvme_ns_flush_cmd(cmd, ns->id);

	nvme_ctrlr_submit_io_request(ns->ctrlr, req);
	if (req->qpair == NULL)
		return (ENXIO);

	i = 0;
	while ((i++ < NVD_DUMP_TIMEOUT) && (status.done == FALSE)) {
		DELAY(5);
		nvme_qpair_process_completions(req->qpair);
	}

	/*
	 * Normally, when using the polling interface, we can't return a
	 * timeout error, because we don't know when the completion routines
	 * would be called if the command later completed.  However, in this
	 * case we're running a system dump: all interrupts are turned off
	 * and the scheduler isn't running, so nothing else will ever
	 * complete the transaction.
	 */
	if (status.done == FALSE)
		return (ETIMEDOUT);

	return (0);
}