xref: /freebsd/sys/dev/nvme/nvme_test.c (revision 315ee00f)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2012-2013 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/unistd.h>

#include <geom/geom.h>

#include "nvme_private.h"

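/*
 * Per-thread context for the NVME_IO_TEST path, which drives the
 * namespace directly through nvme_ns_cmd_read()/nvme_ns_cmd_write().
 */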
struct nvme_io_test_thread {
	uint32_t		idx;
	struct nvme_namespace	*ns;
	enum nvme_nvm_opcode	opc;
	struct timeval		start;
	void			*buf;
	uint32_t		size;
	uint32_t		time;
	uint64_t		io_completed;
};

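/*
 * State shared between nvme_ns_test() and its worker threads.  td_idx
 * hands each worker a unique slot, td_active counts workers that have
 * not yet exited, and per-thread completion counts accumulate in
 * io_completed[].
 */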
struct nvme_io_test_internal {
	struct nvme_namespace	*ns;
	enum nvme_nvm_opcode	opc;
	struct timeval		start;
	uint32_t		time;
	uint32_t		size;
	uint32_t		td_active;
	uint32_t		td_idx;
	uint32_t		flags;
	uint64_t		io_completed[NVME_TEST_MAX_THREADS];
};

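/*
 * bio completion handler: wake the worker sleeping on this bio.  The
 * lock/unlock of the pool mutex closes the race against msleep() in
 * nvme_ns_bio_test().
 */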
static void
nvme_ns_bio_test_cb(struct bio *bio)
{
	struct mtx *mtx;

	mtx = mtx_pool_find(mtxpool_sleep, bio);
	mtx_lock(mtx);
	wakeup(bio);
	mtx_unlock(mtx);
}

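/*
 * Worker thread for the bio-based test: issue one synchronous bio at a
 * time through the character device's d_strategy routine, starting at a
 * per-thread offset, until an error is returned or the configured test
 * time elapses.  The completion count is reported back through
 * io_test->io_completed[] before the thread exits.
 */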
static void
nvme_ns_bio_test(void *arg)
{
	struct nvme_io_test_internal	*io_test = arg;
	struct cdevsw			*csw;
	struct mtx			*mtx;
	struct bio			*bio;
	struct cdev			*dev;
	void				*buf;
	struct timeval			t;
	uint64_t			io_completed = 0, offset;
	uint32_t			idx;
	int				ref;

	buf = malloc(io_test->size, M_NVME, M_WAITOK);
	idx = atomic_fetchadd_int(&io_test->td_idx, 1);
	dev = io_test->ns->cdev;

	offset = idx * 2048ULL * nvme_ns_get_sector_size(io_test->ns);

	while (1) {
		bio = g_alloc_bio();

		memset(bio, 0, sizeof(*bio));
		bio->bio_cmd = (io_test->opc == NVME_OPC_READ) ?
		    BIO_READ : BIO_WRITE;
		bio->bio_done = nvme_ns_bio_test_cb;
		bio->bio_dev = dev;
		bio->bio_offset = offset;
		bio->bio_data = buf;
		bio->bio_bcount = io_test->size;

		if (io_test->flags & NVME_TEST_FLAG_REFTHREAD) {
			csw = dev_refthread(dev, &ref);
		} else
			csw = dev->si_devsw;

		if (csw == NULL)
			panic("Unable to retrieve device switch");
		mtx = mtx_pool_find(mtxpool_sleep, bio);
		mtx_lock(mtx);
		(*csw->d_strategy)(bio);
		msleep(bio, mtx, PRIBIO, "biotestwait", 0);
		mtx_unlock(mtx);

		if (io_test->flags & NVME_TEST_FLAG_REFTHREAD) {
			dev_relthread(dev, ref);
		}

		if ((bio->bio_flags & BIO_ERROR) || (bio->bio_resid > 0))
			break;

		g_destroy_bio(bio);

		io_completed++;

		getmicrouptime(&t);
		timevalsub(&t, &io_test->start);

		if (t.tv_sec >= io_test->time)
			break;

		offset += io_test->size;
		if ((offset + io_test->size) > nvme_ns_get_size(io_test->ns))
			offset = 0;
	}

	io_test->io_completed[idx] = io_completed;
	wakeup_one(io_test);

	free(buf, M_NVME);

	atomic_subtract_int(&io_test->td_active, 1);
	mb();

	kthread_exit();
}

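/*
 * Completion callback for the NVME_IO_TEST path: count the completed
 * I/O, stop on error or once the test time has elapsed (waking the
 * worker thread), otherwise immediately resubmit a read or write at
 * this thread's fixed starting LBA.
 */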
static void
nvme_ns_io_test_cb(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_io_test_thread	*tth = arg;
	struct timeval			t;

	tth->io_completed++;

	if (nvme_completion_is_error(cpl)) {
		printf("%s: error occurred\n", __func__);
		wakeup_one(tth);
		return;
	}

	getmicrouptime(&t);
	timevalsub(&t, &tth->start);

	if (t.tv_sec >= tth->time) {
		wakeup_one(tth);
		return;
	}

	switch (tth->opc) {
	case NVME_OPC_WRITE:
		nvme_ns_cmd_write(tth->ns, tth->buf, tth->idx * 2048,
		    tth->size/nvme_ns_get_sector_size(tth->ns),
		    nvme_ns_io_test_cb, tth);
		break;
	case NVME_OPC_READ:
		nvme_ns_cmd_read(tth->ns, tth->buf, tth->idx * 2048,
		    tth->size/nvme_ns_get_sector_size(tth->ns),
		    nvme_ns_io_test_cb, tth);
		break;
	default:
		break;
	}
}

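/*
 * Worker thread for the NVME_IO_TEST path: set up a per-thread context,
 * prime the I/O loop by invoking the callback with a zeroed (successful)
 * completion, then sleep until the callback signals that the test is
 * done and report the result.
 */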
static void
nvme_ns_io_test(void *arg)
{
	struct nvme_io_test_internal	*io_test = arg;
	struct nvme_io_test_thread	*tth;
	struct nvme_completion		cpl;
	int				error;

	tth = malloc(sizeof(*tth), M_NVME, M_WAITOK | M_ZERO);
	tth->ns = io_test->ns;
	tth->opc = io_test->opc;
	memcpy(&tth->start, &io_test->start, sizeof(tth->start));
	tth->buf = malloc(io_test->size, M_NVME, M_WAITOK);
	tth->size = io_test->size;
	tth->time = io_test->time;
	tth->idx = atomic_fetchadd_int(&io_test->td_idx, 1);

	memset(&cpl, 0, sizeof(cpl));

	nvme_ns_io_test_cb(tth, &cpl);

	error = tsleep(tth, 0, "test_wait", tth->time*hz*2);

	if (error)
		printf("%s: error = %d\n", __func__, error);

	io_test->io_completed[tth->idx] = tth->io_completed;
	wakeup_one(io_test);

	free(tth->buf, M_NVME);
	free(tth, M_NVME);

	atomic_subtract_int(&io_test->td_active, 1);
	mb();

	kthread_exit();
}

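/*
 * Handler for the NVME_IO_TEST and NVME_BIO_TEST ioctls: validate the
 * request, spawn the requested number of worker threads, wait for them
 * to finish, and copy the per-thread completion counts back to the
 * caller.
 */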
void
nvme_ns_test(struct nvme_namespace *ns, u_long cmd, caddr_t arg)
{
	struct nvme_io_test		*io_test;
	struct nvme_io_test_internal	*io_test_internal;
	void				(*fn)(void *);
	int				i;

	io_test = (struct nvme_io_test *)arg;

	if ((io_test->opc != NVME_OPC_READ) &&
	    (io_test->opc != NVME_OPC_WRITE))
		return;

	if (io_test->size % nvme_ns_get_sector_size(ns))
		return;

	io_test_internal = malloc(sizeof(*io_test_internal), M_NVME,
	    M_WAITOK | M_ZERO);
	io_test_internal->opc = io_test->opc;
	io_test_internal->ns = ns;
	io_test_internal->td_active = io_test->num_threads;
	io_test_internal->time = io_test->time;
	io_test_internal->size = io_test->size;
	io_test_internal->flags = io_test->flags;

	if (cmd == NVME_IO_TEST)
		fn = nvme_ns_io_test;
	else
		fn = nvme_ns_bio_test;

	getmicrouptime(&io_test_internal->start);

	for (i = 0; i < io_test->num_threads; i++)
		kthread_add(fn, io_test_internal,
		    NULL, NULL, 0, 0, "nvme_io_test[%d]", i);

	tsleep(io_test_internal, 0, "nvme_test", io_test->time * 2 * hz);

	while (io_test_internal->td_active > 0)
		DELAY(10);

	memcpy(io_test->io_completed, io_test_internal->io_completed,
	    sizeof(io_test->io_completed));

	free(io_test_internal, M_NVME);
}