/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bio.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/vmem.h>
#include <machine/atomic.h>
#include <machine/stack.h>
#include <machine/stdarg.h>

#include <sys/errno.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
#include <sys/devicestat.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>

static int	g_io_transient_map_bio(struct bio *bp);

static struct g_bioq g_bio_run_down;
static struct g_bioq g_bio_run_up;

/*
 * Pace is a hint that we've had some trouble recently allocating
 * bios, so we should back off trying to send I/O down the stack
 * a bit to let the problem resolve. When pacing, we also turn
 * off direct dispatch to further reduce memory pressure from I/Os
 * there, at the expense of some added latency while the memory
 * pressures exist. See g_io_schedule_down() for more details
 * and limitations.
 */
static volatile u_int __read_mostly pace;

static uma_zone_t __read_mostly biozone;

static void
g_bioq_lock(struct g_bioq *bq)
{

	mtx_lock(&bq->bio_queue_lock);
}

static void
g_bioq_unlock(struct g_bioq *bq)
{

	mtx_unlock(&bq->bio_queue_lock);
}

#if 0
static void
g_bioq_destroy(struct g_bioq *bq)
{

	mtx_destroy(&bq->bio_queue_lock);
}
#endif

static void
g_bioq_init(struct g_bioq *bq)
{

	TAILQ_INIT(&bq->bio_queue);
	mtx_init(&bq->bio_queue_lock, "bio queue", NULL, MTX_DEF);
}

static struct bio *
g_bioq_first(struct g_bioq *bq)
{
	struct bio *bp;

	bp = TAILQ_FIRST(&bq->bio_queue);
	if (bp != NULL) {
		KASSERT((bp->bio_flags & BIO_ONQUEUE),
		    ("Bio not on queue bp=%p target %p", bp, bq));
		bp->bio_flags &= ~BIO_ONQUEUE;
		TAILQ_REMOVE(&bq->bio_queue, bp, bio_queue);
		bq->bio_queue_length--;
	}
	return (bp);
}

struct bio *
g_new_bio(void)
{
	struct bio *bp;

	bp = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR1(KTR_GEOM, "g_new_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3);
	}
#endif
	return (bp);
}

struct bio *
g_alloc_bio(void)
{
	struct bio *bp;

	bp = uma_zalloc(biozone, M_WAITOK | M_ZERO);
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR1(KTR_GEOM, "g_alloc_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3);
	}
#endif
	return (bp);
}

void
g_destroy_bio(struct bio *bp)
{
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR1(KTR_GEOM, "g_destroy_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3);
	}
#endif
	uma_zfree(biozone, bp);
}

struct bio *
g_clone_bio(struct bio *bp)
{
	struct bio *bp2;

	bp2 = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
	if (bp2 != NULL) {
		bp2->bio_parent = bp;
		bp2->bio_cmd = bp->bio_cmd;
		/*
		 * The BIO_ORDERED flag may be used by disk drivers to enforce
		 * ordering restrictions, so this flag needs to be cloned.
		 * BIO_UNMAPPED, BIO_VLIST, and BIO_SWAP should be inherited,
		 * to properly indicate which way the buffer is passed.
		 * Other bio flags are not suitable for cloning.
		 */
		bp2->bio_flags = bp->bio_flags &
		    (BIO_ORDERED | BIO_UNMAPPED | BIO_VLIST | BIO_SWAP);
		bp2->bio_length = bp->bio_length;
		bp2->bio_offset = bp->bio_offset;
		bp2->bio_data = bp->bio_data;
		bp2->bio_ma = bp->bio_ma;
		bp2->bio_ma_n = bp->bio_ma_n;
		bp2->bio_ma_offset = bp->bio_ma_offset;
		bp2->bio_attribute = bp->bio_attribute;
		if (bp->bio_cmd == BIO_ZONE)
			bcopy(&bp->bio_zone, &bp2->bio_zone,
			    sizeof(bp->bio_zone));
#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
		bp2->bio_track_bp = bp->bio_track_bp;
#endif
		bp->bio_children++;
	}
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR2(KTR_GEOM, "g_clone_bio(%p): %p", bp, bp2);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3);
	}
#endif
	return (bp2);
}

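#if 0
/*
 * Hedged usage sketch (not part of this file): the canonical
 * clone-and-forward pattern used in a class's start method.  The
 * function name is a hypothetical example; g_std_done() is the stock
 * completion handler that finishes the parent bio when the clone
 * completes.
 */
static void
example_start(struct bio *bp)
{
	struct bio *cbp;

	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	/* Complete the parent once the clone is done. */
	cbp->bio_done = g_std_done;
	/* Forward the clone to our geom's (first) consumer. */
	g_io_request(cbp, LIST_FIRST(&bp->bio_to->geom->consumer));
}
#endif
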
struct bio *
g_duplicate_bio(struct bio *bp)
{
	struct bio *bp2;

	bp2 = uma_zalloc(biozone, M_WAITOK | M_ZERO);
	bp2->bio_flags = bp->bio_flags & (BIO_UNMAPPED | BIO_VLIST | BIO_SWAP);
	bp2->bio_parent = bp;
	bp2->bio_cmd = bp->bio_cmd;
	bp2->bio_length = bp->bio_length;
	bp2->bio_offset = bp->bio_offset;
	bp2->bio_data = bp->bio_data;
	bp2->bio_ma = bp->bio_ma;
	bp2->bio_ma_n = bp->bio_ma_n;
	bp2->bio_ma_offset = bp->bio_ma_offset;
	bp2->bio_attribute = bp->bio_attribute;
	bp->bio_children++;
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR2(KTR_GEOM, "g_duplicate_bio(%p): %p", bp, bp2);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3);
	}
#endif
	return (bp2);
}

void
g_reset_bio(struct bio *bp)
{

	bzero(bp, sizeof(*bp));
}

void
g_io_init(void)
{

	g_bioq_init(&g_bio_run_down);
	g_bioq_init(&g_bio_run_up);
	biozone = uma_zcreate("g_bio", sizeof(struct bio),
	    NULL, NULL, NULL, NULL, 0, 0);
}

int
g_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_getattr(%s)", attr);
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_GETATTR;
	bp->bio_done = NULL;
	bp->bio_attribute = attr;
	bp->bio_length = *len;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "ggetattr");
	*len = bp->bio_completed;
	g_destroy_bio(bp);
	return (error);
}

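#if 0
/*
 * Hedged usage sketch (not part of this file): fetching a well-known
 * integer attribute with g_io_getattr().  "GEOM::candelete" is a real
 * attribute name; the wrapper function itself is hypothetical.
 */
static int
example_candelete(struct g_consumer *cp)
{
	int candelete, len, error;

	candelete = 0;
	len = sizeof(candelete);
	/* On success, len is updated to the number of bytes completed. */
	error = g_io_getattr("GEOM::candelete", cp, &len, &candelete);
	return (error == 0 && candelete != 0);
}
#endif
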
int
g_io_zonecmd(struct disk_zone_args *zone_args, struct g_consumer *cp)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_zone(%d)", zone_args->zone_cmd);
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_ZONE;
	bp->bio_done = NULL;
	/*
	 * XXX KDM need to handle report zone data.
	 */
	bcopy(zone_args, &bp->bio_zone, sizeof(*zone_args));
	if (zone_args->zone_cmd == DISK_ZONE_REPORT_ZONES)
		bp->bio_length =
		    zone_args->zone_params.report.entries_allocated *
		    sizeof(struct disk_zone_rep_entry);
	else
		bp->bio_length = 0;

	g_io_request(bp, cp);
	error = biowait(bp, "gzone");
	bcopy(&bp->bio_zone, zone_args, sizeof(*zone_args));
	g_destroy_bio(bp);
	return (error);
}

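#if 0
/*
 * Hedged usage sketch (not part of this file): querying the zone
 * parameters of a zoned device.  DISK_ZONE_GET_PARAMS and struct
 * disk_zone_args come from <sys/disk_zone.h>; the wrapper function
 * itself, and the exact union member used here, are illustrative
 * assumptions.
 */
static int
example_zone_params(struct g_consumer *cp, struct disk_zone_disk_params *dp)
{
	struct disk_zone_args za;
	int error;

	bzero(&za, sizeof(za));
	za.zone_cmd = DISK_ZONE_GET_PARAMS;
	error = g_io_zonecmd(&za, cp);
	if (error == 0)
		*dp = za.zone_params.disk_params;
	return (error);
}
#endif
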
/*
 * Send a BIO_SPEEDUP down the stack. This is used to tell the lower layers
 * that the upper layers have detected a resource shortage. The lower layers
 * are advised to stop delaying I/O that they might be holding for performance
 * reasons and to schedule it (non-trims) or complete it successfully (trims)
 * as quickly as they can. bio_length is the amount of the shortage.  This
 * call should be non-blocking. bio_resid is used to communicate back if the
 * lower layers couldn't find bio_length worth of I/O to schedule or discard.
 * A length of 0 means to do as much as you can (schedule the h/w queues full,
 * discard all trims). flags are a hint from the upper layers to the lower
 * layers what operation should be done.
 */
int
g_io_speedup(off_t shortage, u_int flags, size_t *resid, struct g_consumer *cp)
{
	struct bio *bp;
	int error;

	KASSERT((flags & (BIO_SPEEDUP_TRIM | BIO_SPEEDUP_WRITE)) != 0,
	    ("Invalid flags passed to g_io_speedup: %#x", flags));
	g_trace(G_T_BIO, "bio_speedup(%s, %jd, %#x)", cp->provider->name,
	    (intmax_t)shortage, flags);
	bp = g_new_bio();
	if (bp == NULL)
		return (ENOMEM);
	bp->bio_cmd = BIO_SPEEDUP;
	bp->bio_length = shortage;
	bp->bio_done = NULL;
	bp->bio_flags |= flags;
	g_io_request(bp, cp);
	error = biowait(bp, "gflush");
	*resid = bp->bio_resid;
	g_destroy_bio(bp);
	return (error);
}

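#if 0
/*
 * Hedged usage sketch (not part of this file): how an upper layer might
 * react to a resource shortage.  The function name and the policy of
 * asking for both writes and trims are illustrative assumptions.
 */
static void
example_speedup(struct g_consumer *cp, off_t shortage)
{
	size_t resid;

	if (g_io_speedup(shortage, BIO_SPEEDUP_WRITE | BIO_SPEEDUP_TRIM,
	    &resid, cp) == 0 && resid != 0)
		printf("speedup: %ju bytes of the shortage not covered\n",
		    (uintmax_t)resid);
}
#endif
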
int
g_io_flush(struct g_consumer *cp)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_flush(%s)", cp->provider->name);
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_FLUSH;
	bp->bio_flags |= BIO_ORDERED;
	bp->bio_done = NULL;
	bp->bio_attribute = NULL;
	bp->bio_offset = cp->provider->mediasize;
	bp->bio_length = 0;
	bp->bio_data = NULL;
	g_io_request(bp, cp);
	error = biowait(bp, "gflush");
	g_destroy_bio(bp);
	return (error);
}

static int
g_io_check(struct bio *bp)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	off_t excess;
	int error;

	biotrack(bp, __func__);

	cp = bp->bio_from;
	pp = bp->bio_to;

	/* Fail if access counters don't allow the operation. */
	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_GETATTR:
		if (cp->acr == 0)
			return (EPERM);
		break;
	case BIO_WRITE:
	case BIO_DELETE:
	case BIO_SPEEDUP:
	case BIO_FLUSH:
		if (cp->acw == 0)
			return (EPERM);
		break;
	case BIO_ZONE:
		if ((bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES) ||
		    (bp->bio_zone.zone_cmd == DISK_ZONE_GET_PARAMS)) {
			if (cp->acr == 0)
				return (EPERM);
		} else if (cp->acw == 0)
			return (EPERM);
		break;
	default:
		return (EPERM);
	}
	/* If the provider is marked for error, don't disturb it. */
	if (pp->error)
		return (pp->error);
	if (cp->flags & G_CF_ORPHAN)
		return (ENXIO);

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		/* Zero sectorsize or mediasize is probably a lack of media. */
		if (pp->sectorsize == 0 || pp->mediasize == 0)
			return (ENXIO);
		/* Reject I/O not starting on a sector boundary. */
		if (bp->bio_offset % pp->sectorsize)
			return (EINVAL);
		/* Reject I/O not an integral number of sectors long. */
		if (bp->bio_length % pp->sectorsize)
			return (EINVAL);
		/* Reject requests before or past the end of media. */
		if (bp->bio_offset < 0)
			return (EIO);
		if (bp->bio_offset > pp->mediasize)
			return (EIO);

		/* Truncate requests to the end of the provider's media. */
		excess = bp->bio_offset + bp->bio_length;
		if (excess > bp->bio_to->mediasize) {
			KASSERT((bp->bio_flags & BIO_UNMAPPED) == 0 ||
			    round_page(bp->bio_ma_offset +
			    bp->bio_length) / PAGE_SIZE == bp->bio_ma_n,
			    ("excess bio %p too short", bp));
			excess -= bp->bio_to->mediasize;
			bp->bio_length -= excess;
			if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
				bp->bio_ma_n = round_page(bp->bio_ma_offset +
				    bp->bio_length) / PAGE_SIZE;
			}
			if (excess > 0)
				CTR3(KTR_GEOM, "g_down truncated bio "
				    "%p provider %s by %d", bp,
				    bp->bio_to->name, excess);
		}

		/* Deliver zero length transfers right here. */
		if (bp->bio_length == 0) {
			CTR2(KTR_GEOM, "g_down terminated 0-length "
			    "bp %p provider %s", bp, bp->bio_to->name);
			return (0);
		}

		if ((bp->bio_flags & BIO_UNMAPPED) != 0 &&
		    (bp->bio_to->flags & G_PF_ACCEPT_UNMAPPED) == 0 &&
		    (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE)) {
			if ((error = g_io_transient_map_bio(bp)) >= 0)
				return (error);
		}
		break;
	default:
		break;
	}
	return (EJUSTRETURN);
}

void
g_io_request(struct bio *bp, struct g_consumer *cp)
{
	struct g_provider *pp;
	int direct, error, first;
	uint8_t cmd;

	biotrack(bp, __func__);

	KASSERT(cp != NULL, ("NULL cp in g_io_request"));
	KASSERT(bp != NULL, ("NULL bp in g_io_request"));
	pp = cp->provider;
	KASSERT(pp != NULL, ("consumer not attached in g_io_request"));
#ifdef DIAGNOSTIC
	KASSERT(bp->bio_driver1 == NULL,
	    ("bio_driver1 used by the consumer (geom %s)", cp->geom->name));
	KASSERT(bp->bio_driver2 == NULL,
	    ("bio_driver2 used by the consumer (geom %s)", cp->geom->name));
	KASSERT(bp->bio_pflags == 0,
	    ("bio_pflags used by the consumer (geom %s)", cp->geom->name));
	/*
	 * Remember the consumer's private fields, so we can detect if they
	 * were modified by the provider.
	 */
	bp->_bio_caller1 = bp->bio_caller1;
	bp->_bio_caller2 = bp->bio_caller2;
	bp->_bio_cflags = bp->bio_cflags;
#endif

	cmd = bp->bio_cmd;
	if (cmd == BIO_READ || cmd == BIO_WRITE || cmd == BIO_GETATTR) {
		KASSERT(bp->bio_data != NULL,
		    ("NULL bp->data in g_io_request(cmd=%hu)", bp->bio_cmd));
	}
	if (cmd == BIO_DELETE || cmd == BIO_FLUSH || cmd == BIO_SPEEDUP) {
		KASSERT(bp->bio_data == NULL,
		    ("non-NULL bp->data in g_io_request(cmd=%hu)",
		    bp->bio_cmd));
	}
	if (cmd == BIO_READ || cmd == BIO_WRITE || cmd == BIO_DELETE) {
		KASSERT(bp->bio_offset % cp->provider->sectorsize == 0,
		    ("wrong offset %jd for sectorsize %u",
		    bp->bio_offset, cp->provider->sectorsize));
		KASSERT(bp->bio_length % cp->provider->sectorsize == 0,
		    ("wrong length %jd for sectorsize %u",
		    bp->bio_length, cp->provider->sectorsize));
	}

	g_trace(G_T_BIO, "bio_request(%p) from %p(%s) to %p(%s) cmd %d",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd);

	bp->bio_from = cp;
	bp->bio_to = pp;
	bp->bio_error = 0;
	bp->bio_completed = 0;

	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p", bp));

	if ((g_collectstats & G_STATS_CONSUMERS) != 0 ||
	    ((g_collectstats & G_STATS_PROVIDERS) != 0 && pp->stat != NULL))
		binuptime(&bp->bio_t0);
	else
		getbinuptime(&bp->bio_t0);
	if (g_collectstats & G_STATS_CONSUMERS)
		devstat_start_transaction_bio_t0(cp->stat, bp);
	if (g_collectstats & G_STATS_PROVIDERS)
		devstat_start_transaction_bio_t0(pp->stat, bp);
#ifdef INVARIANTS
	atomic_add_int(&cp->nstart, 1);
#endif

	direct = (cp->flags & G_CF_DIRECT_SEND) != 0 &&
	    (pp->flags & G_PF_DIRECT_RECEIVE) != 0 &&
	    curthread != g_down_td &&
	    ((pp->flags & G_PF_ACCEPT_UNMAPPED) != 0 ||
	    (bp->bio_flags & BIO_UNMAPPED) == 0 || THREAD_CAN_SLEEP()) &&
	    pace == 0;
	if (direct) {
		/* Block direct execution if less than half of stack left. */
		size_t	st, su;
		GET_STACK_USAGE(st, su);
		if (su * 2 > st)
			direct = 0;
	}

	if (direct) {
		error = g_io_check(bp);
		if (error >= 0) {
			CTR3(KTR_GEOM, "g_io_request g_io_check on bp %p "
			    "provider %s returned %d", bp, bp->bio_to->name,
			    error);
			g_io_deliver(bp, error);
			return;
		}
		bp->bio_to->geom->start(bp);
	} else {
		g_bioq_lock(&g_bio_run_down);
		first = TAILQ_EMPTY(&g_bio_run_down.bio_queue);
		TAILQ_INSERT_TAIL(&g_bio_run_down.bio_queue, bp, bio_queue);
		bp->bio_flags |= BIO_ONQUEUE;
		g_bio_run_down.bio_queue_length++;
		g_bioq_unlock(&g_bio_run_down);
		/* Pass it on down. */
		if (first)
			wakeup(&g_wait_down);
	}
}

void
g_io_deliver(struct bio *bp, int error)
{
	struct bintime now;
	struct g_consumer *cp;
	struct g_provider *pp;
	struct mtx *mtxp;
	int direct, first;

	biotrack(bp, __func__);

	KASSERT(bp != NULL, ("NULL bp in g_io_deliver"));
	pp = bp->bio_to;
	KASSERT(pp != NULL, ("NULL bio_to in g_io_deliver"));
	cp = bp->bio_from;
	if (cp == NULL) {
		bp->bio_error = error;
		bp->bio_done(bp);
		return;
	}
	KASSERT(cp != NULL, ("NULL bio_from in g_io_deliver"));
	KASSERT(cp->geom != NULL, ("NULL bio_from->geom in g_io_deliver"));
#ifdef DIAGNOSTIC
	/*
	 * Some classes - GJournal in particular - can modify bio's
	 * private fields while the bio is in transit; the
	 * G_GEOM_VOLATILE_BIO flag means it's an expected behaviour
	 * for that particular geom.
	 */
	if ((cp->geom->flags & G_GEOM_VOLATILE_BIO) == 0) {
		KASSERT(bp->bio_caller1 == bp->_bio_caller1,
		    ("bio_caller1 used by the provider %s", pp->name));
		KASSERT(bp->bio_caller2 == bp->_bio_caller2,
		    ("bio_caller2 used by the provider %s", pp->name));
		KASSERT(bp->bio_cflags == bp->_bio_cflags,
		    ("bio_cflags used by the provider %s", pp->name));
	}
#endif
	KASSERT(bp->bio_completed >= 0, ("bio_completed can't be less than 0"));
	KASSERT(bp->bio_completed <= bp->bio_length,
	    ("bio_completed can't be greater than bio_length"));

	g_trace(G_T_BIO,
"g_io_deliver(%p) from %p(%s) to %p(%s) cmd %d error %d off %jd len %jd",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd, error,
	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);

	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p", bp));

	/*
	 * XXX: the next two assignments don't belong here.
	 */
	bp->bio_bcount = bp->bio_length;
	bp->bio_resid = bp->bio_bcount - bp->bio_completed;

	direct = (pp->flags & G_PF_DIRECT_SEND) &&
		 (cp->flags & G_CF_DIRECT_RECEIVE) &&
		 curthread != g_up_td;
	if (direct) {
		/* Block direct execution if less than half of stack left. */
		size_t	st, su;
		GET_STACK_USAGE(st, su);
		if (su * 2 > st)
			direct = 0;
	}

	/*
	 * The statistics collection is lockless, as such, but we
	 * cannot update one instance of the statistics from more
	 * than one thread at a time, so grab the lock first.
	 */
	if ((g_collectstats & G_STATS_CONSUMERS) != 0 ||
	    ((g_collectstats & G_STATS_PROVIDERS) != 0 && pp->stat != NULL))
		binuptime(&now);
	mtxp = mtx_pool_find(mtxpool_sleep, pp);
	mtx_lock(mtxp);
	if (g_collectstats & G_STATS_PROVIDERS)
		devstat_end_transaction_bio_bt(pp->stat, bp, &now);
	if (g_collectstats & G_STATS_CONSUMERS)
		devstat_end_transaction_bio_bt(cp->stat, bp, &now);
#ifdef INVARIANTS
	cp->nend++;
#endif
	mtx_unlock(mtxp);

	if (error != ENOMEM) {
		bp->bio_error = error;
		if (direct) {
			biodone(bp);
		} else {
			g_bioq_lock(&g_bio_run_up);
			first = TAILQ_EMPTY(&g_bio_run_up.bio_queue);
			TAILQ_INSERT_TAIL(&g_bio_run_up.bio_queue, bp, bio_queue);
			bp->bio_flags |= BIO_ONQUEUE;
			g_bio_run_up.bio_queue_length++;
			g_bioq_unlock(&g_bio_run_up);
			if (first)
				wakeup(&g_wait_up);
		}
		return;
	}

	if (bootverbose)
		printf("ENOMEM %p on %p(%s)\n", bp, pp, pp->name);
	bp->bio_children = 0;
	bp->bio_inbed = 0;
	bp->bio_driver1 = NULL;
	bp->bio_driver2 = NULL;
	bp->bio_pflags = 0;
	g_io_request(bp, cp);
	pace = 1;
	return;
}

SYSCTL_DECL(_kern_geom);

static long transient_maps;
SYSCTL_LONG(_kern_geom, OID_AUTO, transient_maps, CTLFLAG_RD,
    &transient_maps, 0,
    "Total count of the transient mapping requests");
u_int transient_map_retries = 10;
SYSCTL_UINT(_kern_geom, OID_AUTO, transient_map_retries, CTLFLAG_RW,
    &transient_map_retries, 0,
    "Max count of retries used before giving up on creating transient map");
int transient_map_hard_failures;
SYSCTL_INT(_kern_geom, OID_AUTO, transient_map_hard_failures, CTLFLAG_RD,
    &transient_map_hard_failures, 0,
    "Failures to establish the transient mapping due to retry attempts "
    "exhausted");
int transient_map_soft_failures;
SYSCTL_INT(_kern_geom, OID_AUTO, transient_map_soft_failures, CTLFLAG_RD,
    &transient_map_soft_failures, 0,
    "Count of retried failures to establish the transient mapping");
int inflight_transient_maps;
SYSCTL_INT(_kern_geom, OID_AUTO, inflight_transient_maps, CTLFLAG_RD,
    &inflight_transient_maps, 0,
    "Current count of the active transient maps");

static int
g_io_transient_map_bio(struct bio *bp)
{
	vm_offset_t addr;
	long size;
	u_int retried;

	KASSERT(unmapped_buf_allowed, ("unmapped disabled"));

	size = round_page(bp->bio_ma_offset + bp->bio_length);
	KASSERT(size / PAGE_SIZE == bp->bio_ma_n, ("Bio too short %p", bp));
	addr = 0;
	retried = 0;
	atomic_add_long(&transient_maps, 1);
retry:
	if (vmem_alloc(transient_arena, size, M_BESTFIT | M_NOWAIT, &addr)) {
		if (transient_map_retries != 0 &&
		    retried >= transient_map_retries) {
			CTR2(KTR_GEOM, "g_down cannot map bp %p provider %s",
			    bp, bp->bio_to->name);
			atomic_add_int(&transient_map_hard_failures, 1);
			return (EDEADLK/* XXXKIB */);
		} else {
			/*
			 * Naive attempt to quiesce the I/O to get more
			 * in-flight requests completed and defragment
			 * the transient_arena.
			 */
			CTR3(KTR_GEOM, "g_down retrymap bp %p provider %s r %d",
			    bp, bp->bio_to->name, retried);
			pause("g_d_tra", hz / 10);
			retried++;
			atomic_add_int(&transient_map_soft_failures, 1);
			goto retry;
		}
	}
	atomic_add_int(&inflight_transient_maps, 1);
	pmap_qenter((vm_offset_t)addr, bp->bio_ma, OFF_TO_IDX(size));
	bp->bio_data = (caddr_t)addr + bp->bio_ma_offset;
	bp->bio_flags |= BIO_TRANSIENT_MAPPING;
	bp->bio_flags &= ~BIO_UNMAPPED;
	return (EJUSTRETURN);
}

void
g_io_schedule_down(struct thread *tp __unused)
{
	struct bio *bp;
	int error;

	for (;;) {
		g_bioq_lock(&g_bio_run_down);
		bp = g_bioq_first(&g_bio_run_down);
		if (bp == NULL) {
			CTR0(KTR_GEOM, "g_down going to sleep");
			msleep(&g_wait_down, &g_bio_run_down.bio_queue_lock,
			    PRIBIO | PDROP, "-", 0);
			continue;
		}
		CTR0(KTR_GEOM, "g_down has work to do");
		g_bioq_unlock(&g_bio_run_down);
		biotrack(bp, __func__);
		if (pace != 0) {
			/*
			 * There has been at least one memory allocation
			 * failure since the last I/O completed. Pause 1ms to
			 * give the system a chance to free up memory. We only
			 * do this once because a large number of allocations
			 * can fail in the direct dispatch case and there's no
			 * relationship between the number of these failures and
			 * the length of the outage. If there's still an outage,
			 * we'll pause again and again until it's
			 * resolved. Older versions paused longer and once per
			 * allocation failure. This was OK for a single-threaded
			 * g_down, but with direct dispatch would lead to max of
			 * 10 IOPs for minutes at a time when transient memory
			 * issues prevented allocation for a batch of requests
			 * from the upper layers.
			 *
			 * XXX This pacing is really lame. It needs to be solved
			 * by other methods. This is OK only because the worst
			 * case scenario is so rare. In the worst case scenario
			 * all memory is tied up waiting for I/O to complete
			 * which can never happen since we can't allocate bios
			 * for that I/O.
			 */
			CTR0(KTR_GEOM, "g_down pacing self");
			pause("g_down", min(hz/1000, 1));
			pace = 0;
		}
		CTR2(KTR_GEOM, "g_down processing bp %p provider %s", bp,
		    bp->bio_to->name);
		error = g_io_check(bp);
		if (error >= 0) {
			CTR3(KTR_GEOM, "g_down g_io_check on bp %p provider "
			    "%s returned %d", bp, bp->bio_to->name, error);
			g_io_deliver(bp, error);
			continue;
		}
		THREAD_NO_SLEEPING();
		CTR4(KTR_GEOM, "g_down starting bp %p provider %s off %ld "
		    "len %ld", bp, bp->bio_to->name, bp->bio_offset,
		    bp->bio_length);
		bp->bio_to->geom->start(bp);
		THREAD_SLEEPING_OK();
	}
}

void
g_io_schedule_up(struct thread *tp __unused)
{
	struct bio *bp;

	for (;;) {
		g_bioq_lock(&g_bio_run_up);
		bp = g_bioq_first(&g_bio_run_up);
		if (bp == NULL) {
			CTR0(KTR_GEOM, "g_up going to sleep");
			msleep(&g_wait_up, &g_bio_run_up.bio_queue_lock,
			    PRIBIO | PDROP, "-", 0);
			continue;
		}
		g_bioq_unlock(&g_bio_run_up);
		THREAD_NO_SLEEPING();
		CTR4(KTR_GEOM, "g_up biodone bp %p provider %s off "
		    "%jd len %ld", bp, bp->bio_to->name,
		    bp->bio_offset, bp->bio_length);
		biodone(bp);
		THREAD_SLEEPING_OK();
	}
}

void *
g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error)
{
	struct bio *bp;
	void *ptr;
	int errorc;

	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
	    length <= maxphys, ("g_read_data(): invalid length %jd",
	    (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_READ;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	ptr = g_malloc(length, M_WAITOK);
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	errorc = biowait(bp, "gread");
	if (errorc == 0 && bp->bio_completed != length)
		errorc = EIO;
	if (error != NULL)
		*error = errorc;
	g_destroy_bio(bp);
	if (errorc) {
		g_free(ptr);
		ptr = NULL;
	}
	return (ptr);
}

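#if 0
/*
 * Hedged usage sketch (not part of this file): reading one sector of
 * metadata from the end of a provider, the common pattern in class
 * taste methods.  The caller must g_free() the returned buffer; the
 * function name is hypothetical.
 */
static void *
example_read_meta(struct g_consumer *cp, int *errp)
{
	struct g_provider *pp;

	pp = cp->provider;
	return (g_read_data(cp, pp->mediasize - pp->sectorsize,
	    pp->sectorsize, errp));
}
#endif
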
/*
 * A read function for use by ffs_sbget() when used by GEOM-layer routines.
 */
int
g_use_g_read_data(void *devfd, off_t loc, void **bufp, int size)
{
	struct g_consumer *cp;

	KASSERT(*bufp == NULL,
	    ("g_use_g_read_data: non-NULL *bufp %p\n", *bufp));

	cp = (struct g_consumer *)devfd;
	/*
	 * Take care not to issue an invalid I/O request.  The offset of
	 * the superblock candidate must be a multiple of the provider's
	 * sector size, otherwise an FFS can't exist on the provider
	 * anyway.
	 */
	if (loc % cp->provider->sectorsize != 0)
		return (ENOENT);
	*bufp = g_read_data(cp, loc, size, NULL);
	if (*bufp == NULL)
		return (ENOENT);
	return (0);
}

int
g_write_data(struct g_consumer *cp, off_t offset, void *ptr, off_t length)
{
	struct bio *bp;
	int error;

	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
	    length <= maxphys, ("g_write_data(): invalid length %jd",
	    (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_WRITE;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "gwrite");
	if (error == 0 && bp->bio_completed != length)
		error = EIO;
	g_destroy_bio(bp);
	return (error);
}

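#if 0
/*
 * Hedged usage sketch (not part of this file): overwriting the last
 * sector of a provider with zeroes, e.g. to clear on-disk metadata.
 * The function name is hypothetical.
 */
static int
example_clear_meta(struct g_consumer *cp)
{
	struct g_provider *pp;
	void *buf;
	int error;

	pp = cp->provider;
	buf = g_malloc(pp->sectorsize, M_WAITOK | M_ZERO);
	error = g_write_data(cp, pp->mediasize - pp->sectorsize, buf,
	    pp->sectorsize);
	g_free(buf);
	return (error);
}
#endif
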
/*
 * A write function for use by ffs_sbput() when used by GEOM-layer routines.
 */
int
g_use_g_write_data(void *devfd, off_t loc, void *buf, int size)
{

	return (g_write_data((struct g_consumer *)devfd, loc, buf, size));
}

int
g_delete_data(struct g_consumer *cp, off_t offset, off_t length)
{
	struct bio *bp;
	int error;

	KASSERT(length > 0 && length >= cp->provider->sectorsize,
	    ("g_delete_data(): invalid length %jd", (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_DELETE;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	bp->bio_data = NULL;
	g_io_request(bp, cp);
	error = biowait(bp, "gdelete");
	if (error == 0 && bp->bio_completed != length)
		error = EIO;
	g_destroy_bio(bp);
	return (error);
}

void
g_print_bio(const char *prefix, const struct bio *bp, const char *fmtsuffix,
    ...)
{
#ifndef PRINTF_BUFR_SIZE
#define PRINTF_BUFR_SIZE 64
#endif
	char bufr[PRINTF_BUFR_SIZE];
	struct sbuf sb, *sbp __unused;
	va_list ap;

	sbp = sbuf_new(&sb, bufr, sizeof(bufr), SBUF_FIXEDLEN);
	KASSERT(sbp != NULL, ("sbuf_new misused?"));

	sbuf_set_drain(&sb, sbuf_printf_drain, NULL);

	sbuf_cat(&sb, prefix);
	g_format_bio(&sb, bp);

	va_start(ap, fmtsuffix);
	sbuf_vprintf(&sb, fmtsuffix, ap);
	va_end(ap);

	sbuf_nl_terminate(&sb);

	sbuf_finish(&sb);
	sbuf_delete(&sb);
}

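#if 0
/*
 * Hedged usage sketch (not part of this file): logging a request from
 * error-handling code.  The prefix and suffix strings, and the function
 * name, are illustrative.
 */
static void
example_log_error(struct bio *bp)
{

	g_print_bio("GEOM: ", bp, " (error=%d)", bp->bio_error);
}
#endif
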
void
g_format_bio(struct sbuf *sb, const struct bio *bp)
{
	const char *pname, *cmd = NULL;

	if (bp->bio_to != NULL)
		pname = bp->bio_to->name;
	else if (bp->bio_parent != NULL && bp->bio_parent->bio_to != NULL)
		pname = bp->bio_parent->bio_to->name;
	else
		pname = "[unknown]";

	switch (bp->bio_cmd) {
	case BIO_GETATTR:
		cmd = "GETATTR";
		sbuf_printf(sb, "%s[%s(attr=%s)]", pname, cmd,
		    bp->bio_attribute);
		return;
	case BIO_FLUSH:
		cmd = "FLUSH";
		sbuf_printf(sb, "%s[%s]", pname, cmd);
		return;
	case BIO_ZONE: {
		char *subcmd = NULL;
		cmd = "ZONE";
		switch (bp->bio_zone.zone_cmd) {
		case DISK_ZONE_OPEN:
			subcmd = "OPEN";
			break;
		case DISK_ZONE_CLOSE:
			subcmd = "CLOSE";
			break;
		case DISK_ZONE_FINISH:
			subcmd = "FINISH";
			break;
		case DISK_ZONE_RWP:
			subcmd = "RWP";
			break;
		case DISK_ZONE_REPORT_ZONES:
			subcmd = "REPORT ZONES";
			break;
		case DISK_ZONE_GET_PARAMS:
			subcmd = "GET PARAMS";
			break;
		default:
			subcmd = "UNKNOWN";
			break;
		}
		sbuf_printf(sb, "%s[%s,%s]", pname, cmd, subcmd);
		return;
	}
	case BIO_READ:
		cmd = "READ";
		break;
	case BIO_WRITE:
		cmd = "WRITE";
		break;
	case BIO_DELETE:
		cmd = "DELETE";
		break;
	default:
		cmd = "UNKNOWN";
		sbuf_printf(sb, "%s[%s()]", pname, cmd);
		return;
	}
	sbuf_printf(sb, "%s[%s(offset=%jd, length=%jd)]", pname, cmd,
	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);
}