sys/geom/geom_io.c
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bio.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/vmem.h>

#include <sys/errno.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
#include <sys/devicestat.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>

static int g_io_transient_map_bio(struct bio *bp);

static struct g_bioq g_bio_run_down;
static struct g_bioq g_bio_run_up;

/*
 * Pace is a hint that we've had some trouble recently allocating
 * bios, so we should back off trying to send I/O down the stack
 * a bit to let the problem resolve.  When pacing, we also turn
 * off direct dispatch to reduce memory pressure from I/Os there,
 * at the expense of some added latency while the memory
 * pressures exist.  See g_io_schedule_down() for more details
 * and limitations.
 */
static volatile u_int __read_mostly pace;

static uma_zone_t __read_mostly biozone;

/*
 * The head of the list of classifiers used in g_io_request.
 * Use g_register_classifier() and g_unregister_classifier()
 * to add and remove entries from the list.
 * Classifiers are invoked in registration order.
 */
static TAILQ_HEAD(, g_classifier_hook) g_classifier_tailq __read_mostly =
    TAILQ_HEAD_INITIALIZER(g_classifier_tailq);

static void
g_bioq_lock(struct g_bioq *bq)
{

        mtx_lock(&bq->bio_queue_lock);
}

static void
g_bioq_unlock(struct g_bioq *bq)
{

        mtx_unlock(&bq->bio_queue_lock);
}

#if 0
static void
g_bioq_destroy(struct g_bioq *bq)
{

        mtx_destroy(&bq->bio_queue_lock);
}
#endif

static void
g_bioq_init(struct g_bioq *bq)
{

        TAILQ_INIT(&bq->bio_queue);
        mtx_init(&bq->bio_queue_lock, "bio queue", NULL, MTX_DEF);
}

static struct bio *
g_bioq_first(struct g_bioq *bq)
{
        struct bio *bp;

        bp = TAILQ_FIRST(&bq->bio_queue);
        if (bp != NULL) {
                KASSERT((bp->bio_flags & BIO_ONQUEUE),
                    ("Bio not on queue bp=%p target %p", bp, bq));
                bp->bio_flags &= ~BIO_ONQUEUE;
                TAILQ_REMOVE(&bq->bio_queue, bp, bio_queue);
                bq->bio_queue_length--;
        }
        return (bp);
}

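/*
 * Bio allocators.  g_new_bio() allocates with M_NOWAIT and can therefore
 * fail and return NULL, so callers must be prepared to handle that;
 * g_alloc_bio() allocates with M_WAITOK and always returns a bio, but may
 * sleep and so must not be called from non-sleepable contexts.
 */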
struct bio *
g_new_bio(void)
{
        struct bio *bp;

        bp = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
#ifdef KTR
        if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
                struct stack st;

                CTR1(KTR_GEOM, "g_new_bio(): %p", bp);
                stack_save(&st);
                CTRSTACK(KTR_GEOM, &st, 3, 0);
        }
#endif
        return (bp);
}

struct bio *
g_alloc_bio(void)
{
        struct bio *bp;

        bp = uma_zalloc(biozone, M_WAITOK | M_ZERO);
#ifdef KTR
        if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
                struct stack st;

                CTR1(KTR_GEOM, "g_alloc_bio(): %p", bp);
                stack_save(&st);
                CTRSTACK(KTR_GEOM, &st, 3, 0);
        }
#endif
        return (bp);
}

void
g_destroy_bio(struct bio *bp)
{
#ifdef KTR
        if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
                struct stack st;

                CTR1(KTR_GEOM, "g_destroy_bio(): %p", bp);
                stack_save(&st);
                CTRSTACK(KTR_GEOM, &st, 3, 0);
        }
#endif
        uma_zfree(biozone, bp);
}

struct bio *
g_clone_bio(struct bio *bp)
{
        struct bio *bp2;

        bp2 = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
        if (bp2 != NULL) {
                bp2->bio_parent = bp;
                bp2->bio_cmd = bp->bio_cmd;
                /*
                 * BIO_ORDERED flag may be used by disk drivers to enforce
                 * ordering restrictions, so this flag needs to be cloned.
                 * BIO_UNMAPPED and BIO_VLIST should be inherited, to properly
                 * indicate which way the buffer is passed.
                 * Other bio flags are not suitable for cloning.
                 */
                bp2->bio_flags = bp->bio_flags &
                    (BIO_ORDERED | BIO_UNMAPPED | BIO_VLIST);
                bp2->bio_length = bp->bio_length;
                bp2->bio_offset = bp->bio_offset;
                bp2->bio_data = bp->bio_data;
                bp2->bio_ma = bp->bio_ma;
                bp2->bio_ma_n = bp->bio_ma_n;
                bp2->bio_ma_offset = bp->bio_ma_offset;
                bp2->bio_attribute = bp->bio_attribute;
                if (bp->bio_cmd == BIO_ZONE)
                        bcopy(&bp->bio_zone, &bp2->bio_zone,
                            sizeof(bp->bio_zone));
                /* Inherit classification info from the parent */
                bp2->bio_classifier1 = bp->bio_classifier1;
                bp2->bio_classifier2 = bp->bio_classifier2;
#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
                bp2->bio_track_bp = bp->bio_track_bp;
#endif
                bp->bio_children++;
        }
#ifdef KTR
        if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
                struct stack st;

                CTR2(KTR_GEOM, "g_clone_bio(%p): %p", bp, bp2);
                stack_save(&st);
                CTRSTACK(KTR_GEOM, &st, 3, 0);
        }
#endif
        return (bp2);
}

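/*
 * Example (an illustrative sketch, not code from this file): a GEOM
 * class's start method typically clones the incoming bio, points the
 * clone's bio_done at a completion handler, and passes the clone to the
 * consumer below, failing the original with ENOMEM if the clone cannot
 * be allocated:
 *
 *      struct bio *cbp;
 *
 *      cbp = g_clone_bio(bp);
 *      if (cbp == NULL) {
 *              g_io_deliver(bp, ENOMEM);
 *              return;
 *      }
 *      cbp->bio_done = g_std_done;
 *      g_io_request(cbp, cp);
 *
 * g_duplicate_bio() below is the M_WAITOK variant for callers that may
 * sleep and therefore never need the NULL check.
 */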
struct bio *
g_duplicate_bio(struct bio *bp)
{
        struct bio *bp2;

        bp2 = uma_zalloc(biozone, M_WAITOK | M_ZERO);
        bp2->bio_flags = bp->bio_flags & (BIO_UNMAPPED | BIO_VLIST);
        bp2->bio_parent = bp;
        bp2->bio_cmd = bp->bio_cmd;
        bp2->bio_length = bp->bio_length;
        bp2->bio_offset = bp->bio_offset;
        bp2->bio_data = bp->bio_data;
        bp2->bio_ma = bp->bio_ma;
        bp2->bio_ma_n = bp->bio_ma_n;
        bp2->bio_ma_offset = bp->bio_ma_offset;
        bp2->bio_attribute = bp->bio_attribute;
        bp->bio_children++;
#ifdef KTR
        if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
                struct stack st;

                CTR2(KTR_GEOM, "g_duplicate_bio(%p): %p", bp, bp2);
                stack_save(&st);
                CTRSTACK(KTR_GEOM, &st, 3, 0);
        }
#endif
        return (bp2);
}

void
g_reset_bio(struct bio *bp)
{

        bzero(bp, sizeof(*bp));
}

void
g_io_init(void)
{

        g_bioq_init(&g_bio_run_down);
        g_bioq_init(&g_bio_run_up);
        biozone = uma_zcreate("g_bio", sizeof(struct bio),
            NULL, NULL, NULL, NULL, 0, 0);
}

int
g_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr)
{
        struct bio *bp;
        int error;

        g_trace(G_T_BIO, "bio_getattr(%s)", attr);
        bp = g_alloc_bio();
        bp->bio_cmd = BIO_GETATTR;
        bp->bio_done = NULL;
        bp->bio_attribute = attr;
        bp->bio_length = *len;
        bp->bio_data = ptr;
        g_io_request(bp, cp);
        error = biowait(bp, "ggetattr");
        *len = bp->bio_completed;
        g_destroy_bio(bp);
        return (error);
}

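/*
 * Example (a sketch, not code from this file): probing whether a
 * provider supports BIO_DELETE by fetching the "GEOM::candelete"
 * attribute into an int-sized buffer:
 *
 *      int candelete, len;
 *
 *      len = sizeof(candelete);
 *      if (g_io_getattr("GEOM::candelete", cp, &len, &candelete) == 0 &&
 *          candelete != 0)
 *              ...provider honours BIO_DELETE...
 */
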
int
g_io_zonecmd(struct disk_zone_args *zone_args, struct g_consumer *cp)
{
        struct bio *bp;
        int error;

        g_trace(G_T_BIO, "bio_zone(%d)", zone_args->zone_cmd);
        bp = g_alloc_bio();
        bp->bio_cmd = BIO_ZONE;
        bp->bio_done = NULL;
        /*
         * XXX KDM need to handle report zone data.
         */
        bcopy(zone_args, &bp->bio_zone, sizeof(*zone_args));
        if (zone_args->zone_cmd == DISK_ZONE_REPORT_ZONES)
                bp->bio_length =
                    zone_args->zone_params.report.entries_allocated *
                    sizeof(struct disk_zone_rep_entry);
        else
                bp->bio_length = 0;

        g_io_request(bp, cp);
        error = biowait(bp, "gzone");
        bcopy(&bp->bio_zone, zone_args, sizeof(*zone_args));
        g_destroy_bio(bp);
        return (error);
}

int
g_io_flush(struct g_consumer *cp)
{
        struct bio *bp;
        int error;

        g_trace(G_T_BIO, "bio_flush(%s)", cp->provider->name);
        bp = g_alloc_bio();
        bp->bio_cmd = BIO_FLUSH;
        bp->bio_flags |= BIO_ORDERED;
        bp->bio_done = NULL;
        bp->bio_attribute = NULL;
        bp->bio_offset = cp->provider->mediasize;
        bp->bio_length = 0;
        bp->bio_data = NULL;
        g_io_request(bp, cp);
        error = biowait(bp, "gflush");
        g_destroy_bio(bp);
        return (error);
}

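/*
 * Validate and pre-process a bio before it is passed to the provider.
 * Returns EJUSTRETURN (a negative value) when the caller should go on
 * to hand the bio to the provider's start routine, and zero or a
 * positive errno when the bio must instead be completed immediately
 * with g_io_deliver(); zero is used for successful zero-length
 * transfers, which never reach the provider.
 */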
static int
g_io_check(struct bio *bp)
{
        struct g_consumer *cp;
        struct g_provider *pp;
        off_t excess;
        int error;

        biotrack(bp, __func__);

        cp = bp->bio_from;
        pp = bp->bio_to;

        /* Fail if the access counters don't allow the operation. */
        switch (bp->bio_cmd) {
        case BIO_READ:
        case BIO_GETATTR:
                if (cp->acr == 0)
                        return (EPERM);
                break;
        case BIO_WRITE:
        case BIO_DELETE:
        case BIO_FLUSH:
                if (cp->acw == 0)
                        return (EPERM);
                break;
        case BIO_ZONE:
                if ((bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES) ||
                    (bp->bio_zone.zone_cmd == DISK_ZONE_GET_PARAMS)) {
                        if (cp->acr == 0)
                                return (EPERM);
                } else if (cp->acw == 0)
                        return (EPERM);
                break;
        default:
                return (EPERM);
        }
        /* If the provider is marked for error, don't disturb it. */
        if (pp->error)
                return (pp->error);
        if (cp->flags & G_CF_ORPHAN)
                return (ENXIO);

        switch (bp->bio_cmd) {
        case BIO_READ:
        case BIO_WRITE:
        case BIO_DELETE:
                /* Zero sectorsize or mediasize is probably a lack of media. */
                if (pp->sectorsize == 0 || pp->mediasize == 0)
                        return (ENXIO);
                /* Reject I/O not on a sector boundary. */
                if (bp->bio_offset % pp->sectorsize)
                        return (EINVAL);
                /* Reject I/O not an integral number of sectors long. */
                if (bp->bio_length % pp->sectorsize)
                        return (EINVAL);
                /* Reject requests before or past the end of media. */
                if (bp->bio_offset < 0)
                        return (EIO);
                if (bp->bio_offset > pp->mediasize)
                        return (EIO);

                /* Truncate requests to the end of the provider's media. */
                excess = bp->bio_offset + bp->bio_length;
                if (excess > bp->bio_to->mediasize) {
                        KASSERT((bp->bio_flags & BIO_UNMAPPED) == 0 ||
                            round_page(bp->bio_ma_offset +
                            bp->bio_length) / PAGE_SIZE == bp->bio_ma_n,
                            ("excess bio %p too short", bp));
                        excess -= bp->bio_to->mediasize;
                        bp->bio_length -= excess;
                        if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
                                bp->bio_ma_n = round_page(bp->bio_ma_offset +
                                    bp->bio_length) / PAGE_SIZE;
                        }
                        if (excess > 0)
                                CTR3(KTR_GEOM, "g_down truncated bio "
                                    "%p provider %s by %d", bp,
                                    bp->bio_to->name, excess);
                }

                /* Deliver zero length transfers right here. */
                if (bp->bio_length == 0) {
                        CTR2(KTR_GEOM, "g_down terminated 0-length "
                            "bp %p provider %s", bp, bp->bio_to->name);
                        return (0);
                }

                if ((bp->bio_flags & BIO_UNMAPPED) != 0 &&
                    (bp->bio_to->flags & G_PF_ACCEPT_UNMAPPED) == 0 &&
                    (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE)) {
                        if ((error = g_io_transient_map_bio(bp)) >= 0)
                                return (error);
                }
                break;
        default:
                break;
        }
        return (EJUSTRETURN);
}

/*
 * bio classification support.
 *
 * g_register_classifier() and g_unregister_classifier()
 * are used to add/remove a classifier from the list.
 * The list is protected using the g_bio_run_down lock,
 * because the classifiers are called in this path.
 *
 * g_io_request() passes bio's that are not already classified
 * (i.e. those with bio_classifier1 == NULL) to g_run_classifiers().
 * Classifiers can store their result in the two fields
 * bio_classifier1 and bio_classifier2.
 * A classifier that updates one of the fields should
 * return a non-zero value.
 * If no classifier updates the field, g_run_classifiers() sets
 * bio_classifier1 = BIO_NOTCLASSIFIED to avoid further calls.
 */

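/*
 * Example (a sketch; the hook and tag names are illustrative only and
 * assume a caller-supplied tag object): a classifier that marks every
 * write so that an I/O scheduler can recognize it later:
 *
 *      static int
 *      example_classify(void *arg, struct bio *bp)
 *      {
 *
 *              if (bp->bio_cmd != BIO_WRITE)
 *                      return (0);
 *              bp->bio_classifier1 = arg;
 *              return (1);
 *      }
 *
 *      static struct g_classifier_hook example_hook = {
 *              .func = example_classify,
 *              .arg = &example_tag,
 *      };
 *
 *      g_register_classifier(&example_hook);
 */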
int
g_register_classifier(struct g_classifier_hook *hook)
{

        g_bioq_lock(&g_bio_run_down);
        TAILQ_INSERT_TAIL(&g_classifier_tailq, hook, link);
        g_bioq_unlock(&g_bio_run_down);

        return (0);
}

void
g_unregister_classifier(struct g_classifier_hook *hook)
{
        struct g_classifier_hook *entry;

        g_bioq_lock(&g_bio_run_down);
        TAILQ_FOREACH(entry, &g_classifier_tailq, link) {
                if (entry == hook) {
                        TAILQ_REMOVE(&g_classifier_tailq, hook, link);
                        break;
                }
        }
        g_bioq_unlock(&g_bio_run_down);
}

static void
g_run_classifiers(struct bio *bp)
{
        struct g_classifier_hook *hook;
        int classified = 0;

        biotrack(bp, __func__);

        TAILQ_FOREACH(hook, &g_classifier_tailq, link)
                classified |= hook->func(hook->arg, bp);

        if (!classified)
                bp->bio_classifier1 = BIO_NOTCLASSIFIED;
}

void
g_io_request(struct bio *bp, struct g_consumer *cp)
{
        struct g_provider *pp;
        struct mtx *mtxp;
        int direct, error, first;
        uint8_t cmd;

        biotrack(bp, __func__);

        KASSERT(cp != NULL, ("NULL cp in g_io_request"));
        KASSERT(bp != NULL, ("NULL bp in g_io_request"));
        pp = cp->provider;
        KASSERT(pp != NULL, ("consumer not attached in g_io_request"));
#ifdef DIAGNOSTIC
        KASSERT(bp->bio_driver1 == NULL,
            ("bio_driver1 used by the consumer (geom %s)", cp->geom->name));
        KASSERT(bp->bio_driver2 == NULL,
            ("bio_driver2 used by the consumer (geom %s)", cp->geom->name));
        KASSERT(bp->bio_pflags == 0,
            ("bio_pflags used by the consumer (geom %s)", cp->geom->name));
        /*
         * Remember consumer's private fields, so we can detect if they were
         * modified by the provider.
         */
        bp->_bio_caller1 = bp->bio_caller1;
        bp->_bio_caller2 = bp->bio_caller2;
        bp->_bio_cflags = bp->bio_cflags;
#endif

        cmd = bp->bio_cmd;
        if (cmd == BIO_READ || cmd == BIO_WRITE || cmd == BIO_GETATTR) {
                KASSERT(bp->bio_data != NULL,
                    ("NULL bp->data in g_io_request(cmd=%hu)", bp->bio_cmd));
        }
        if (cmd == BIO_DELETE || cmd == BIO_FLUSH) {
                KASSERT(bp->bio_data == NULL,
                    ("non-NULL bp->data in g_io_request(cmd=%hu)",
                    bp->bio_cmd));
        }
        if (cmd == BIO_READ || cmd == BIO_WRITE || cmd == BIO_DELETE) {
                KASSERT(bp->bio_offset % cp->provider->sectorsize == 0,
                    ("wrong offset %jd for sectorsize %u",
                    bp->bio_offset, cp->provider->sectorsize));
                KASSERT(bp->bio_length % cp->provider->sectorsize == 0,
                    ("wrong length %jd for sectorsize %u",
                    bp->bio_length, cp->provider->sectorsize));
        }

        g_trace(G_T_BIO, "bio_request(%p) from %p(%s) to %p(%s) cmd %d",
            bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd);

        bp->bio_from = cp;
        bp->bio_to = pp;
        bp->bio_error = 0;
        bp->bio_completed = 0;

        KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
            ("Bio already on queue bp=%p", bp));
        if ((g_collectstats & G_STATS_CONSUMERS) != 0 ||
            ((g_collectstats & G_STATS_PROVIDERS) != 0 && pp->stat != NULL))
                binuptime(&bp->bio_t0);
        else
                getbinuptime(&bp->bio_t0);

        direct = (cp->flags & G_CF_DIRECT_SEND) != 0 &&
            (pp->flags & G_PF_DIRECT_RECEIVE) != 0 &&
            !g_is_geom_thread(curthread) &&
            ((pp->flags & G_PF_ACCEPT_UNMAPPED) != 0 ||
            (bp->bio_flags & BIO_UNMAPPED) == 0 || THREAD_CAN_SLEEP()) &&
            pace == 0;
        if (direct) {
                /* Block direct execution if less than half of the stack is left. */
                size_t st, su;
                GET_STACK_USAGE(st, su);
                if (su * 2 > st)
                        direct = 0;
        }

        if (!TAILQ_EMPTY(&g_classifier_tailq) && !bp->bio_classifier1) {
                g_bioq_lock(&g_bio_run_down);
                g_run_classifiers(bp);
                g_bioq_unlock(&g_bio_run_down);
        }

        /*
         * The statistics collection is itself lockless, but we cannot
         * update one instance of the statistics from more than one
         * thread at a time, so grab the lock first.
         */
        mtxp = mtx_pool_find(mtxpool_sleep, pp);
        mtx_lock(mtxp);
        if (g_collectstats & G_STATS_PROVIDERS)
                devstat_start_transaction_bio_t0(pp->stat, bp);
        if (g_collectstats & G_STATS_CONSUMERS)
                devstat_start_transaction_bio_t0(cp->stat, bp);
        pp->nstart++;
        cp->nstart++;
        mtx_unlock(mtxp);

        if (direct) {
                error = g_io_check(bp);
                if (error >= 0) {
                        CTR3(KTR_GEOM, "g_io_request g_io_check on bp %p "
                            "provider %s returned %d", bp, bp->bio_to->name,
                            error);
                        g_io_deliver(bp, error);
                        return;
                }
                bp->bio_to->geom->start(bp);
        } else {
                g_bioq_lock(&g_bio_run_down);
                first = TAILQ_EMPTY(&g_bio_run_down.bio_queue);
                TAILQ_INSERT_TAIL(&g_bio_run_down.bio_queue, bp, bio_queue);
                bp->bio_flags |= BIO_ONQUEUE;
                g_bio_run_down.bio_queue_length++;
                g_bioq_unlock(&g_bio_run_down);
                /* Pass it on down. */
                if (first)
                        wakeup(&g_wait_down);
        }
}

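/*
 * Deliver a completed bio back towards the consumer.  Note the ENOMEM
 * handling at the bottom: rather than failing the request, the bio is
 * reset and reissued with g_io_request(), and `pace' is set so that the
 * g_down thread backs off until the memory shortage has cleared.
 */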
void
g_io_deliver(struct bio *bp, int error)
{
        struct bintime now;
        struct g_consumer *cp;
        struct g_provider *pp;
        struct mtx *mtxp;
        int direct, first;

        biotrack(bp, __func__);

        KASSERT(bp != NULL, ("NULL bp in g_io_deliver"));
        pp = bp->bio_to;
        KASSERT(pp != NULL, ("NULL bio_to in g_io_deliver"));
        cp = bp->bio_from;
        if (cp == NULL) {
                bp->bio_error = error;
                bp->bio_done(bp);
                return;
        }
        KASSERT(cp != NULL, ("NULL bio_from in g_io_deliver"));
        KASSERT(cp->geom != NULL, ("NULL bio_from->geom in g_io_deliver"));
#ifdef DIAGNOSTIC
        /*
         * Some classes - GJournal in particular - can modify bio's
         * private fields while the bio is in transit; G_GEOM_VOLATILE_BIO
         * flag means it's an expected behaviour for that particular geom.
         */
        if ((cp->geom->flags & G_GEOM_VOLATILE_BIO) == 0) {
                KASSERT(bp->bio_caller1 == bp->_bio_caller1,
                    ("bio_caller1 used by the provider %s", pp->name));
                KASSERT(bp->bio_caller2 == bp->_bio_caller2,
                    ("bio_caller2 used by the provider %s", pp->name));
                KASSERT(bp->bio_cflags == bp->_bio_cflags,
                    ("bio_cflags used by the provider %s", pp->name));
        }
#endif
        KASSERT(bp->bio_completed >= 0, ("bio_completed can't be less than 0"));
        KASSERT(bp->bio_completed <= bp->bio_length,
            ("bio_completed can't be greater than bio_length"));

        g_trace(G_T_BIO,
            "g_io_deliver(%p) from %p(%s) to %p(%s) cmd %d error %d off %jd len %jd",
            bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd, error,
            (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);

        KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
            ("Bio already on queue bp=%p", bp));

        /*
         * XXX: the next two lines don't belong here.
         */
        bp->bio_bcount = bp->bio_length;
        bp->bio_resid = bp->bio_bcount - bp->bio_completed;

        direct = (pp->flags & G_PF_DIRECT_SEND) &&
            (cp->flags & G_CF_DIRECT_RECEIVE) &&
            !g_is_geom_thread(curthread);
        if (direct) {
                /* Block direct execution if less than half of the stack is left. */
                size_t st, su;
                GET_STACK_USAGE(st, su);
                if (su * 2 > st)
                        direct = 0;
        }

        /*
         * The statistics collection is itself lockless, but we cannot
         * update one instance of the statistics from more than one
         * thread at a time, so grab the lock first.
         */
        if ((g_collectstats & G_STATS_CONSUMERS) != 0 ||
            ((g_collectstats & G_STATS_PROVIDERS) != 0 && pp->stat != NULL))
                binuptime(&now);
        mtxp = mtx_pool_find(mtxpool_sleep, cp);
        mtx_lock(mtxp);
        if (g_collectstats & G_STATS_PROVIDERS)
                devstat_end_transaction_bio_bt(pp->stat, bp, &now);
        if (g_collectstats & G_STATS_CONSUMERS)
                devstat_end_transaction_bio_bt(cp->stat, bp, &now);
        cp->nend++;
        pp->nend++;
        mtx_unlock(mtxp);

        if (error != ENOMEM) {
                bp->bio_error = error;
                if (direct) {
                        biodone(bp);
                } else {
                        g_bioq_lock(&g_bio_run_up);
                        first = TAILQ_EMPTY(&g_bio_run_up.bio_queue);
                        TAILQ_INSERT_TAIL(&g_bio_run_up.bio_queue, bp, bio_queue);
                        bp->bio_flags |= BIO_ONQUEUE;
                        g_bio_run_up.bio_queue_length++;
                        g_bioq_unlock(&g_bio_run_up);
                        if (first)
                                wakeup(&g_wait_up);
                }
                return;
        }

        if (bootverbose)
                printf("ENOMEM %p on %p(%s)\n", bp, pp, pp->name);
        bp->bio_children = 0;
        bp->bio_inbed = 0;
        bp->bio_driver1 = NULL;
        bp->bio_driver2 = NULL;
        bp->bio_pflags = 0;
        g_io_request(bp, cp);
        pace = 1;
        return;
}

SYSCTL_DECL(_kern_geom);

static long transient_maps;
SYSCTL_LONG(_kern_geom, OID_AUTO, transient_maps, CTLFLAG_RD,
    &transient_maps, 0,
    "Total count of the transient mapping requests");
u_int transient_map_retries = 10;
SYSCTL_UINT(_kern_geom, OID_AUTO, transient_map_retries, CTLFLAG_RW,
    &transient_map_retries, 0,
    "Max count of retries used before giving up on creating transient map");
int transient_map_hard_failures;
SYSCTL_INT(_kern_geom, OID_AUTO, transient_map_hard_failures, CTLFLAG_RD,
    &transient_map_hard_failures, 0,
    "Failures to establish the transient mapping due to retry attempts "
    "exhausted");
int transient_map_soft_failures;
SYSCTL_INT(_kern_geom, OID_AUTO, transient_map_soft_failures, CTLFLAG_RD,
    &transient_map_soft_failures, 0,
    "Count of retried failures to establish the transient mapping");
int inflight_transient_maps;
SYSCTL_INT(_kern_geom, OID_AUTO, inflight_transient_maps, CTLFLAG_RD,
    &inflight_transient_maps, 0,
    "Current count of the active transient maps");

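/*
 * Map the pages of an unmapped bio into the transient KVA arena so that
 * classes which cannot handle unmapped I/O can access the data.  Returns
 * EJUSTRETURN (a negative value) on success, so that g_io_check() keeps
 * processing the bio, or a positive errno if the mapping could not be
 * established.
 */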
static int
g_io_transient_map_bio(struct bio *bp)
{
        vm_offset_t addr;
        long size;
        u_int retried;

        KASSERT(unmapped_buf_allowed, ("unmapped disabled"));

        size = round_page(bp->bio_ma_offset + bp->bio_length);
        KASSERT(size / PAGE_SIZE == bp->bio_ma_n, ("Bio too short %p", bp));
        addr = 0;
        retried = 0;
        atomic_add_long(&transient_maps, 1);
retry:
        if (vmem_alloc(transient_arena, size, M_BESTFIT | M_NOWAIT, &addr)) {
                if (transient_map_retries != 0 &&
                    retried >= transient_map_retries) {
                        CTR2(KTR_GEOM, "g_down cannot map bp %p provider %s",
                            bp, bp->bio_to->name);
                        atomic_add_int(&transient_map_hard_failures, 1);
                        return (EDEADLK/* XXXKIB */);
                } else {
                        /*
                         * Naive attempt to quiesce the I/O so as to get
                         * more in-flight requests completed and defragment
                         * the transient_arena.
                         */
                        CTR3(KTR_GEOM, "g_down retrymap bp %p provider %s r %d",
                            bp, bp->bio_to->name, retried);
                        pause("g_d_tra", hz / 10);
                        retried++;
                        atomic_add_int(&transient_map_soft_failures, 1);
                        goto retry;
                }
        }
        atomic_add_int(&inflight_transient_maps, 1);
        pmap_qenter((vm_offset_t)addr, bp->bio_ma, OFF_TO_IDX(size));
        bp->bio_data = (caddr_t)addr + bp->bio_ma_offset;
        bp->bio_flags |= BIO_TRANSIENT_MAPPING;
        bp->bio_flags &= ~BIO_UNMAPPED;
        return (EJUSTRETURN);
}

void
g_io_schedule_down(struct thread *tp __unused)
{
        struct bio *bp;
        int error;

        for (;;) {
                g_bioq_lock(&g_bio_run_down);
                bp = g_bioq_first(&g_bio_run_down);
                if (bp == NULL) {
                        CTR0(KTR_GEOM, "g_down going to sleep");
                        msleep(&g_wait_down, &g_bio_run_down.bio_queue_lock,
                            PRIBIO | PDROP, "-", 0);
                        continue;
                }
                CTR0(KTR_GEOM, "g_down has work to do");
                g_bioq_unlock(&g_bio_run_down);
                biotrack(bp, __func__);
                if (pace != 0) {
                        /*
                         * There has been at least one memory allocation
                         * failure since the last I/O completed.  Pause 1ms to
                         * give the system a chance to free up memory.  We only
                         * do this once because a large number of allocations
                         * can fail in the direct dispatch case and there's no
                         * relationship between the number of these failures and
                         * the length of the outage.  If there's still an outage,
                         * we'll pause again and again until it's
                         * resolved.  Older versions paused longer and once per
                         * allocation failure.  This was OK for a single threaded
                         * g_down, but with direct dispatch would lead to max of
                         * 10 IOPs for minutes at a time when transient memory
                         * issues prevented allocation for a batch of requests
                         * from the upper layers.
                         *
                         * XXX This pacing is really lame.  It needs to be solved
                         * by other methods.  This is OK only because the worst
                         * case scenario is so rare.  In the worst case scenario
                         * all memory is tied up waiting for I/O to complete
                         * which can never happen since we can't allocate bios
                         * for that I/O.
                         */
                        CTR0(KTR_GEOM, "g_down pacing self");
                        pause("g_down", min(hz/1000, 1));
                        pace = 0;
                }
                CTR2(KTR_GEOM, "g_down processing bp %p provider %s", bp,
                    bp->bio_to->name);
                error = g_io_check(bp);
                if (error >= 0) {
                        CTR3(KTR_GEOM, "g_down g_io_check on bp %p provider "
                            "%s returned %d", bp, bp->bio_to->name, error);
                        g_io_deliver(bp, error);
                        continue;
                }
                THREAD_NO_SLEEPING();
                CTR4(KTR_GEOM, "g_down starting bp %p provider %s off %ld "
                    "len %ld", bp, bp->bio_to->name, bp->bio_offset,
                    bp->bio_length);
                bp->bio_to->geom->start(bp);
                THREAD_SLEEPING_OK();
        }
}

void
g_io_schedule_up(struct thread *tp __unused)
{
        struct bio *bp;

        for (;;) {
                g_bioq_lock(&g_bio_run_up);
                bp = g_bioq_first(&g_bio_run_up);
                if (bp == NULL) {
                        CTR0(KTR_GEOM, "g_up going to sleep");
                        msleep(&g_wait_up, &g_bio_run_up.bio_queue_lock,
                            PRIBIO | PDROP, "-", 0);
                        continue;
                }
                g_bioq_unlock(&g_bio_run_up);
                THREAD_NO_SLEEPING();
                CTR4(KTR_GEOM, "g_up biodone bp %p provider %s off "
                    "%jd len %ld", bp, bp->bio_to->name,
                    bp->bio_offset, bp->bio_length);
                biodone(bp);
                THREAD_SLEEPING_OK();
        }
}

void *
g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error)
{
        struct bio *bp;
        void *ptr;
        int errorc;

        KASSERT(length > 0 && length >= cp->provider->sectorsize &&
            length <= MAXPHYS, ("g_read_data(): invalid length %jd",
            (intmax_t)length));

        bp = g_alloc_bio();
        bp->bio_cmd = BIO_READ;
        bp->bio_done = NULL;
        bp->bio_offset = offset;
        bp->bio_length = length;
        ptr = g_malloc(length, M_WAITOK);
        bp->bio_data = ptr;
        g_io_request(bp, cp);
        errorc = biowait(bp, "gread");
        if (error != NULL)
                *error = errorc;
        g_destroy_bio(bp);
        if (errorc) {
                g_free(ptr);
                ptr = NULL;
        }
        return (ptr);
}

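/*
 * Example (a sketch, not code from this file): a typical taste-time read
 * of a provider's first sector; the buffer is allocated by g_read_data()
 * and must be released with g_free():
 *
 *      u_char *buf;
 *      int error;
 *
 *      buf = g_read_data(cp, 0, cp->provider->sectorsize, &error);
 *      if (buf == NULL)
 *              return (error);
 *      ...examine buf...
 *      g_free(buf);
 */
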
/*
 * A read function for use by ffs_sbget when used by GEOM-layer routines.
 */
int
g_use_g_read_data(void *devfd, off_t loc, void **bufp, int size)
{
        struct g_consumer *cp;

        KASSERT(*bufp == NULL,
            ("g_use_g_read_data: non-NULL *bufp %p\n", *bufp));

        cp = (struct g_consumer *)devfd;
        /*
         * Take care not to issue an invalid I/O request.  The offset of
         * the superblock candidate must be a multiple of the provider's
         * sector size, otherwise an FFS can't exist on the provider
         * anyway.
         */
        if (loc % cp->provider->sectorsize != 0)
                return (ENOENT);
        *bufp = g_read_data(cp, loc, size, NULL);
        if (*bufp == NULL)
                return (ENOENT);
        return (0);
}

int
g_write_data(struct g_consumer *cp, off_t offset, void *ptr, off_t length)
{
        struct bio *bp;
        int error;

        KASSERT(length > 0 && length >= cp->provider->sectorsize &&
            length <= MAXPHYS, ("g_write_data(): invalid length %jd",
            (intmax_t)length));

        bp = g_alloc_bio();
        bp->bio_cmd = BIO_WRITE;
        bp->bio_done = NULL;
        bp->bio_offset = offset;
        bp->bio_length = length;
        bp->bio_data = ptr;
        g_io_request(bp, cp);
        error = biowait(bp, "gwrite");
        g_destroy_bio(bp);
        return (error);
}

/*
 * A write function for use by ffs_sbput when used by GEOM-layer routines.
 */
int
g_use_g_write_data(void *devfd, off_t loc, void *buf, int size)
{

        return (g_write_data((struct g_consumer *)devfd, loc, buf, size));
}

int
g_delete_data(struct g_consumer *cp, off_t offset, off_t length)
{
        struct bio *bp;
        int error;

        KASSERT(length > 0 && length >= cp->provider->sectorsize,
            ("g_delete_data(): invalid length %jd", (intmax_t)length));

        bp = g_alloc_bio();
        bp->bio_cmd = BIO_DELETE;
        bp->bio_done = NULL;
        bp->bio_offset = offset;
        bp->bio_length = length;
        bp->bio_data = NULL;
        g_io_request(bp, cp);
        error = biowait(bp, "gdelete");
        g_destroy_bio(bp);
        return (error);
}

void
g_print_bio(struct bio *bp)
{
        const char *pname, *cmd = NULL;

        if (bp->bio_to != NULL)
                pname = bp->bio_to->name;
        else
                pname = "[unknown]";

        switch (bp->bio_cmd) {
        case BIO_GETATTR:
                cmd = "GETATTR";
                printf("%s[%s(attr=%s)]", pname, cmd, bp->bio_attribute);
                return;
        case BIO_FLUSH:
                cmd = "FLUSH";
                printf("%s[%s]", pname, cmd);
                return;
        case BIO_ZONE: {
                char *subcmd = NULL;
                cmd = "ZONE";
                switch (bp->bio_zone.zone_cmd) {
                case DISK_ZONE_OPEN:
                        subcmd = "OPEN";
                        break;
                case DISK_ZONE_CLOSE:
                        subcmd = "CLOSE";
                        break;
                case DISK_ZONE_FINISH:
                        subcmd = "FINISH";
                        break;
                case DISK_ZONE_RWP:
                        subcmd = "RWP";
                        break;
                case DISK_ZONE_REPORT_ZONES:
                        subcmd = "REPORT ZONES";
                        break;
                case DISK_ZONE_GET_PARAMS:
                        subcmd = "GET PARAMS";
                        break;
                default:
                        subcmd = "UNKNOWN";
                        break;
                }
                printf("%s[%s,%s]", pname, cmd, subcmd);
                return;
        }
        case BIO_READ:
                cmd = "READ";
                break;
        case BIO_WRITE:
                cmd = "WRITE";
                break;
        case BIO_DELETE:
                cmd = "DELETE";
                break;
        default:
                cmd = "UNKNOWN";
                printf("%s[%s()]", pname, cmd);
                return;
        }
        printf("%s[%s(offset=%jd, length=%jd)]", pname, cmd,
            (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);
}