/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/disk.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/devicestat.h>
#include <sys/sdt.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/conf.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/atomic.h>

SDT_PROVIDER_DEFINE(io);

SDT_PROBE_DEFINE2(io, , , start, "struct bio *", "struct devstat *");
SDT_PROBE_DEFINE2(io, , , done, "struct bio *", "struct devstat *");

#define	DTRACE_DEVSTAT_BIO_START()	SDT_PROBE2(io, , , start, bp, ds)
#define	DTRACE_DEVSTAT_BIO_DONE()	SDT_PROBE2(io, , , done, bp, ds)
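
/*
 * These probes surface as the DTrace "io" provider's start/done probes.
 * A minimal usage sketch from userland (using only DTrace builtins, so
 * it makes no assumptions about translated arguments):
 *
 *	dtrace -n 'io:::start { @starts[execname] = count(); }'
 */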

static int devstat_num_devs;
static long devstat_generation = 1;
static int devstat_version = DEVSTAT_VERSION;
static int devstat_current_devnumber;
static struct mtx devstat_mutex;
MTX_SYSINIT(devstat_mutex, &devstat_mutex, "devstat", MTX_DEF);

static struct devstatlist device_statq = STAILQ_HEAD_INITIALIZER(device_statq);
static struct devstat *devstat_alloc(void);
static void devstat_free(struct devstat *);
static void devstat_add_entry(struct devstat *ds, const void *dev_name,
		       int unit_number, uint32_t block_size,
		       devstat_support_flags flags,
		       devstat_type_flags device_type,
		       devstat_priority priority);

/*
 * Allocate a devstat and initialize it
 */
struct devstat *
devstat_new_entry(const void *dev_name,
		  int unit_number, uint32_t block_size,
		  devstat_support_flags flags,
		  devstat_type_flags device_type,
		  devstat_priority priority)
{
	struct devstat *ds;

	mtx_assert(&devstat_mutex, MA_NOTOWNED);

	ds = devstat_alloc();
	mtx_lock(&devstat_mutex);
	if (unit_number == -1) {
		ds->unit_number = unit_number;
		ds->id = dev_name;
		binuptime(&ds->creation_time);
		devstat_generation++;
	} else {
		devstat_add_entry(ds, dev_name, unit_number, block_size,
				  flags, device_type, priority);
	}
	mtx_unlock(&devstat_mutex);
	return (ds);
}

/*
 * Take a malloced and zeroed devstat structure given to us, fill it in
 * and add it to the queue of devices.
 */
static void
devstat_add_entry(struct devstat *ds, const void *dev_name,
		  int unit_number, uint32_t block_size,
		  devstat_support_flags flags,
		  devstat_type_flags device_type,
		  devstat_priority priority)
{
	struct devstatlist *devstat_head;
	struct devstat *ds_tmp;

	mtx_assert(&devstat_mutex, MA_OWNED);
	devstat_num_devs++;

	devstat_head = &device_statq;

	/*
	 * Priority sort.  Each driver passes in its priority when it adds
	 * its devstat entry.  Drivers are sorted first by priority, and
	 * then by probe order.
	 *
	 * For the first device, we just insert it, since the priority
	 * doesn't really matter yet.  Subsequent devices are inserted into
	 * the list using the order outlined above.
	 */
	if (devstat_num_devs == 1)
		STAILQ_INSERT_TAIL(devstat_head, ds, dev_links);
	else {
		STAILQ_FOREACH(ds_tmp, devstat_head, dev_links) {
			struct devstat *ds_next;

			ds_next = STAILQ_NEXT(ds_tmp, dev_links);

			/*
			 * If we find a break between higher and lower
			 * priority items, and if this item fits in the
			 * break, insert it.  This also applies if the
			 * "lower priority item" is the end of the list.
			 */
			if ((priority <= ds_tmp->priority)
			 && ((ds_next == NULL)
			   || (priority > ds_next->priority))) {
				STAILQ_INSERT_AFTER(devstat_head, ds_tmp, ds,
						    dev_links);
				break;
			} else if (priority > ds_tmp->priority) {
				/*
				 * If this is the case, we should be able
				 * to insert ourselves at the head of the
				 * list.  If we can't, something is wrong.
				 */
				if (ds_tmp == STAILQ_FIRST(devstat_head)) {
					STAILQ_INSERT_HEAD(devstat_head,
							   ds, dev_links);
					break;
				} else {
					STAILQ_INSERT_TAIL(devstat_head,
							   ds, dev_links);
					printf("devstat_add_entry: HELP! "
					       "sorting problem detected "
					       "for name %p unit %d\n",
					       dev_name, unit_number);
					break;
				}
			}
		}
	}

	ds->device_number = devstat_current_devnumber++;
	ds->unit_number = unit_number;
	strlcpy(ds->device_name, dev_name, DEVSTAT_NAME_LEN);
	ds->block_size = block_size;
	ds->flags = flags;
	ds->device_type = device_type;
	ds->priority = priority;
	binuptime(&ds->creation_time);
	devstat_generation++;
}

/*
 * Remove a devstat structure from the list of devices.
 */
void
devstat_remove_entry(struct devstat *ds)
{
	struct devstatlist *devstat_head;

	mtx_assert(&devstat_mutex, MA_NOTOWNED);
	if (ds == NULL)
		return;

	mtx_lock(&devstat_mutex);

	devstat_head = &device_statq;

	/* Remove this entry from the devstat queue */
	atomic_add_acq_int(&ds->sequence1, 1);
	if (ds->unit_number != -1) {
		devstat_num_devs--;
		STAILQ_REMOVE(devstat_head, ds, devstat, dev_links);
	}
	devstat_free(ds);
	devstat_generation++;
	mtx_unlock(&devstat_mutex);
}

/*
 * Record a transaction start.
 *
 * See comments for devstat_end_transaction().  Ordering is very important
 * here.
 */
void
devstat_start_transaction(struct devstat *ds, const struct bintime *now)
{

	/* sanity check */
	if (ds == NULL)
		return;

	atomic_add_acq_int(&ds->sequence1, 1);
	/*
	 * We only want to set the start time when we are going from idle
	 * to busy.  The start time is really the start of the latest busy
	 * period.
	 */
	if (atomic_fetchadd_int(&ds->start_count, 1) == ds->end_count) {
		if (now != NULL)
			ds->busy_from = *now;
		else
			binuptime(&ds->busy_from);
	}
	atomic_add_rel_int(&ds->sequence0, 1);
}

void
devstat_start_transaction_bio(struct devstat *ds, struct bio *bp)
{

	/* sanity check */
	if (ds == NULL)
		return;

	binuptime(&bp->bio_t0);
	devstat_start_transaction_bio_t0(ds, bp);
}

void
devstat_start_transaction_bio_t0(struct devstat *ds, struct bio *bp)
{

	/* sanity check */
	if (ds == NULL)
		return;

	devstat_start_transaction(ds, &bp->bio_t0);
	DTRACE_DEVSTAT_BIO_START();
}

/*
 * Record the ending of a transaction, and increment the various counters.
 *
 * Ordering in this function, and in devstat_start_transaction() is VERY
 * important.  The idea here is to run without locks, so we are very
 * careful to only modify some fields on the way "down" (i.e. at
 * transaction start) and some fields on the way "up" (i.e. at transaction
 * completion).  One exception is busy_from, which we only modify in
 * devstat_start_transaction() when there are no outstanding transactions,
 * and thus it can't be modified in devstat_end_transaction()
 * simultaneously.
 *
 * The sequence0 and sequence1 fields are provided to enable an application
 * spying on the structures with mmap(2) to tell when a structure is in a
 * consistent state or not.
 *
 * For this to work 100% reliably, it is important that the two fields
 * are at opposite ends of the structure and that they are incremented
 * in the opposite order of how a memcpy(3) in userland would copy them.
 * We assume that the copying happens front to back, but there is actually
 * no way short of writing your own memcpy(3) replacement to guarantee
 * this will be the case.
 *
 * In addition to this, being a kind of lock, they must be updated with
 * atomic instructions using appropriate memory barriers.
 */
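
/*
 * A minimal sketch of such a userland reader ("mapped_ds" is a
 * hypothetical pointer to one mmap(2)'ed struct devstat; retry loop
 * only, no backoff):
 *
 *	struct devstat snap;
 *
 *	do {
 *		memcpy(&snap, mapped_ds, sizeof(snap));
 *	} while (snap.sequence0 != snap.sequence1);
 *
 * Since sequence1 is bumped before an update and sequence0 after it,
 * a front-to-back copy that comes back with equal values cannot have
 * overlapped an update in progress.
 */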
void
devstat_end_transaction(struct devstat *ds, uint32_t bytes,
			devstat_tag_type tag_type, devstat_trans_flags flags,
			const struct bintime *now, const struct bintime *then)
{
	struct bintime dt, lnow;

	/* sanity check */
	if (ds == NULL)
		return;

	if (now == NULL) {
		binuptime(&lnow);
		now = &lnow;
	}

	atomic_add_acq_int(&ds->sequence1, 1);
	/* Update byte and operations counts */
	ds->bytes[flags] += bytes;
	ds->operations[flags]++;

	/*
	 * Keep a count of the various tag types sent.
	 */
	if ((ds->flags & DEVSTAT_NO_ORDERED_TAGS) == 0 &&
	    tag_type != DEVSTAT_TAG_NONE)
		ds->tag_types[tag_type]++;

	if (then != NULL) {
		/* Update duration of operations */
		dt = *now;
		bintime_sub(&dt, then);
		bintime_add(&ds->duration[flags], &dt);
	}

	/* Accumulate busy time */
	dt = *now;
	bintime_sub(&dt, &ds->busy_from);
	bintime_add(&ds->busy_time, &dt);
	ds->busy_from = *now;

	ds->end_count++;
	atomic_add_rel_int(&ds->sequence0, 1);
}

void
devstat_end_transaction_bio(struct devstat *ds, const struct bio *bp)
{

	devstat_end_transaction_bio_bt(ds, bp, NULL);
}

void
devstat_end_transaction_bio_bt(struct devstat *ds, const struct bio *bp,
    const struct bintime *now)
{
	devstat_trans_flags flg;
	devstat_tag_type tag;

	/* sanity check */
	if (ds == NULL)
		return;

	if (bp->bio_flags & BIO_ORDERED)
		tag = DEVSTAT_TAG_ORDERED;
	else
		tag = DEVSTAT_TAG_SIMPLE;
	if (bp->bio_cmd == BIO_DELETE)
		flg = DEVSTAT_FREE;
	else if ((bp->bio_cmd == BIO_READ)
	      || ((bp->bio_cmd == BIO_ZONE)
	       && (bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES)))
		flg = DEVSTAT_READ;
	else if (bp->bio_cmd == BIO_WRITE)
		flg = DEVSTAT_WRITE;
	else
		flg = DEVSTAT_NO_DATA;

	devstat_end_transaction(ds, bp->bio_bcount - bp->bio_resid,
				tag, flg, now, &bp->bio_t0);
	DTRACE_DEVSTAT_BIO_DONE();
}

/*
 * This is the sysctl handler for the devstat package.  The data pushed out
 * on the kern.devstat.all sysctl variable consists of the current devstat
 * generation number, and then an array of devstat structures, one for each
 * device in the system.
 *
 * This is more cryptic than obvious, but basically we neither can nor
 * want to hold the devstat_mutex for any amount of time, so we grab it
 * only when we need to and keep an eye on devstat_generation all the time.
 */
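
/*
 * A sketch of the userland side (hypothetical names; error handling and
 * the retry-on-changed-generation loop omitted):
 *
 *	size_t len = 0;
 *
 *	sysctlbyname("kern.devstat.all", NULL, &len, NULL, 0);
 *	void *buf = malloc(len);
 *	sysctlbyname("kern.devstat.all", buf, &len, NULL, 0);
 *
 * The buffer then holds the generation number (a long) followed by one
 * struct devstat per device.
 */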
static int
sysctl_devstat(SYSCTL_HANDLER_ARGS)
{
	int error;
	long mygen;
	struct devstat *nds;

	mtx_assert(&devstat_mutex, MA_NOTOWNED);

	/*
	 * XXX devstat_generation should really be "volatile" but that
	 * XXX freaks out the sysctl macro below.  The places where we
	 * XXX change it and inspect it are bracketed in the mutex which
	 * XXX guarantees us proper write barriers.  I don't believe the
	 * XXX compiler is allowed to optimize mygen away across calls
	 * XXX to other functions, so the following is believed to be safe.
	 */
	mygen = devstat_generation;

	error = SYSCTL_OUT(req, &mygen, sizeof(mygen));

	if (devstat_num_devs == 0)
		return (0);

	if (error != 0)
		return (error);

	mtx_lock(&devstat_mutex);
	nds = STAILQ_FIRST(&device_statq);
	if (mygen != devstat_generation)
		error = EBUSY;
	mtx_unlock(&devstat_mutex);

	if (error != 0)
		return (error);

	for (;nds != NULL;) {
		error = SYSCTL_OUT(req, nds, sizeof(struct devstat));
		if (error != 0)
			return (error);
		mtx_lock(&devstat_mutex);
		if (mygen != devstat_generation)
			error = EBUSY;
		else
			nds = STAILQ_NEXT(nds, dev_links);
		mtx_unlock(&devstat_mutex);
		if (error != 0)
			return (error);
	}
	return (error);
}

/*
 * Sysctl entries for devstat.  The first one is a node that all the rest
 * hang off of.
 */
static SYSCTL_NODE(_kern, OID_AUTO, devstat, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
    "Device Statistics");

SYSCTL_PROC(_kern_devstat, OID_AUTO, all,
    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_devstat, "S,devstat",
    "All devices in the devstat list");
/*
 * Export the number of devices in the system so that userland utilities
 * can determine how much memory to allocate to hold all the devices.
 */
SYSCTL_INT(_kern_devstat, OID_AUTO, numdevs, CTLFLAG_RD,
    &devstat_num_devs, 0, "Number of devices in the devstat list");
SYSCTL_LONG(_kern_devstat, OID_AUTO, generation, CTLFLAG_RD,
    &devstat_generation, 0, "Devstat list generation");
SYSCTL_INT(_kern_devstat, OID_AUTO, version, CTLFLAG_RD,
    &devstat_version, 0, "Devstat list version number");

/*
 * Allocator for struct devstat structures.  We sub-allocate these from pages
 * which we get from malloc.  These pages are exported for mmap(2)'ing through
 * a miniature device driver.
 */
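
/*
 * A sketch of a userland consumer of that driver, assuming
 * DEVSTAT_DEVICE_NAME shows up as /dev/devstat (error handling omitted;
 * the mapping must be read-only, see devstat_mmap() below):
 *
 *	int fd = open("/dev/devstat", O_RDONLY);
 *	off_t size;
 *
 *	ioctl(fd, DIOCGMEDIASIZE, &size);
 *	void *p = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, 0);
 *
 * Each mapped page holds "statsperpage" struct devstat slots, in the
 * order the pages sit on the pagelist below.
 */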

#define statsperpage (PAGE_SIZE / sizeof(struct devstat))

static d_ioctl_t devstat_ioctl;
static d_mmap_t devstat_mmap;

static struct cdevsw devstat_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	devstat_ioctl,
	.d_mmap =	devstat_mmap,
	.d_name =	"devstat",
};

struct statspage {
	TAILQ_ENTRY(statspage)	list;
	struct devstat		*stat;
	u_int			nfree;
};

static size_t pagelist_pages = 0;
static TAILQ_HEAD(, statspage)	pagelist = TAILQ_HEAD_INITIALIZER(pagelist);
static MALLOC_DEFINE(M_DEVSTAT, "devstat", "Device statistics");

static int
devstat_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	int error = ENOTTY;

	switch (cmd) {
	case DIOCGMEDIASIZE:
		error = 0;
		*(off_t *)data = pagelist_pages * PAGE_SIZE;
		break;
	}

	return (error);
}

static int
devstat_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
    int nprot, vm_memattr_t *memattr)
{
	struct statspage *spp;

	if (nprot != VM_PROT_READ)
		return (-1);
	mtx_lock(&devstat_mutex);
	TAILQ_FOREACH(spp, &pagelist, list) {
		if (offset == 0) {
			*paddr = vtophys(spp->stat);
			mtx_unlock(&devstat_mutex);
			return (0);
		}
		offset -= PAGE_SIZE;
	}
	mtx_unlock(&devstat_mutex);
	return (-1);
}

static struct devstat *
devstat_alloc(void)
{
	struct devstat *dsp;
	struct statspage *spp, *spp2;
	u_int u;
	static int once;

	mtx_assert(&devstat_mutex, MA_NOTOWNED);
	if (!once) {
		make_dev_credf(MAKEDEV_ETERNAL | MAKEDEV_CHECKNAME,
		    &devstat_cdevsw, 0, NULL, UID_ROOT, GID_WHEEL, 0444,
		    DEVSTAT_DEVICE_NAME);
		once = 1;
	}
	spp2 = NULL;
	mtx_lock(&devstat_mutex);
	for (;;) {
		TAILQ_FOREACH(spp, &pagelist, list) {
			if (spp->nfree > 0)
				break;
		}
		if (spp != NULL)
			break;
		mtx_unlock(&devstat_mutex);
		spp2 = malloc(sizeof *spp, M_DEVSTAT, M_ZERO | M_WAITOK);
		spp2->stat = malloc(PAGE_SIZE, M_DEVSTAT, M_ZERO | M_WAITOK);
		spp2->nfree = statsperpage;

		/*
		 * If free statspages were added while the lock was released,
		 * just reuse them.
		 */
		mtx_lock(&devstat_mutex);
		TAILQ_FOREACH(spp, &pagelist, list)
			if (spp->nfree > 0)
				break;
		if (spp == NULL) {
			spp = spp2;

			/*
			 * It would make more sense to add the new page at
			 * the head, but the order on the list determines
			 * the sequence of the mapping, so we can't do that.
			 */
			pagelist_pages++;
			TAILQ_INSERT_TAIL(&pagelist, spp, list);
		} else
			break;
	}
	dsp = spp->stat;
	for (u = 0; u < statsperpage; u++) {
		if (dsp->allocated == 0)
			break;
		dsp++;
	}
	spp->nfree--;
	dsp->allocated = 1;
	mtx_unlock(&devstat_mutex);
	if (spp2 != NULL && spp2 != spp) {
		free(spp2->stat, M_DEVSTAT);
		free(spp2, M_DEVSTAT);
	}
	return (dsp);
}

static void
devstat_free(struct devstat *dsp)
{
	struct statspage *spp;

	mtx_assert(&devstat_mutex, MA_OWNED);
	bzero(dsp, sizeof *dsp);
	TAILQ_FOREACH(spp, &pagelist, list) {
		if (dsp >= spp->stat && dsp < (spp->stat + statsperpage)) {
			spp->nfree++;
			return;
		}
	}
}

SYSCTL_INT(_debug_sizeof, OID_AUTO, devstat, CTLFLAG_RD,
    SYSCTL_NULL_INT_PTR, sizeof(struct devstat), "sizeof(struct devstat)");
607