FreeBSD/Linux Kernel Cross Reference
sys/geom/geom_io.c
/*-
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/11.2/sys/geom/geom_io.c 300207 2016-05-19 14:08:36Z ken $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bio.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/vmem.h>

#include <sys/errno.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
#include <sys/devicestat.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>

static int g_io_transient_map_bio(struct bio *bp);

static struct g_bioq g_bio_run_down;
static struct g_bioq g_bio_run_up;
static struct g_bioq g_bio_run_task;

/*
 * Pace is a hint that we've had some trouble recently allocating
 * bios, so we should back off trying to send I/O down the stack
 * a bit to let the problem resolve. When pacing, we also turn
 * off direct dispatch to also reduce memory pressure from I/Os
 * there, at the expense of some added latency while the memory
 * pressures exist. See g_io_schedule_down() for more details
 * and limitations.
 */
static volatile u_int pace;

static uma_zone_t biozone;

/*
 * The head of the list of classifiers used in g_io_request.
 * Use g_register_classifier() and g_unregister_classifier()
 * to add/remove entries to the list.
 * Classifiers are invoked in registration order.
 */
static TAILQ_HEAD(g_classifier_tailq, g_classifier_hook)
    g_classifier_tailq = TAILQ_HEAD_INITIALIZER(g_classifier_tailq);

#include <machine/atomic.h>

static void
g_bioq_lock(struct g_bioq *bq)
{

	mtx_lock(&bq->bio_queue_lock);
}

static void
g_bioq_unlock(struct g_bioq *bq)
{

	mtx_unlock(&bq->bio_queue_lock);
}

#if 0
static void
g_bioq_destroy(struct g_bioq *bq)
{

	mtx_destroy(&bq->bio_queue_lock);
}
#endif

static void
g_bioq_init(struct g_bioq *bq)
{

	TAILQ_INIT(&bq->bio_queue);
	mtx_init(&bq->bio_queue_lock, "bio queue", NULL, MTX_DEF);
}

static struct bio *
g_bioq_first(struct g_bioq *bq)
{
	struct bio *bp;

	bp = TAILQ_FIRST(&bq->bio_queue);
	if (bp != NULL) {
		KASSERT((bp->bio_flags & BIO_ONQUEUE),
		    ("Bio not on queue bp=%p target %p", bp, bq));
		bp->bio_flags &= ~BIO_ONQUEUE;
		TAILQ_REMOVE(&bq->bio_queue, bp, bio_queue);
		bq->bio_queue_length--;
	}
	return (bp);
}

struct bio *
g_new_bio(void)
{
	struct bio *bp;

	bp = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR1(KTR_GEOM, "g_new_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return (bp);
}

struct bio *
g_alloc_bio(void)
{
	struct bio *bp;

	bp = uma_zalloc(biozone, M_WAITOK | M_ZERO);
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR1(KTR_GEOM, "g_alloc_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return (bp);
}
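
/*
 * Illustrative sketch (not part of the original source): g_new_bio()
 * allocates with M_NOWAIT and may therefore return NULL under memory
 * pressure, while g_alloc_bio() uses M_WAITOK and always succeeds.
 * Code running in a non-sleepable context must use g_new_bio() and
 * handle failure, typically by completing the parent with ENOMEM.
 * The function below is hypothetical.
 */
#if 0
static void
example_start(struct bio *parent)
{
	struct bio *bp;

	bp = g_new_bio();		/* may fail; we must not sleep here */
	if (bp == NULL) {
		g_io_deliver(parent, ENOMEM);
		return;
	}
	/* ... fill in bp and hand it down ... */
}
#endif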

void
g_destroy_bio(struct bio *bp)
{
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR1(KTR_GEOM, "g_destroy_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	uma_zfree(biozone, bp);
}

struct bio *
g_clone_bio(struct bio *bp)
{
	struct bio *bp2;

	bp2 = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
	if (bp2 != NULL) {
		bp2->bio_parent = bp;
		bp2->bio_cmd = bp->bio_cmd;
		/*
		 * BIO_ORDERED flag may be used by disk drivers to enforce
		 * ordering restrictions, so this flag needs to be cloned.
		 * BIO_UNMAPPED and BIO_VLIST should be inherited, to properly
		 * indicate which way the buffer is passed.
		 * Other bio flags are not suitable for cloning.
		 */
		bp2->bio_flags = bp->bio_flags &
		    (BIO_ORDERED | BIO_UNMAPPED | BIO_VLIST);
		bp2->bio_length = bp->bio_length;
		bp2->bio_offset = bp->bio_offset;
		bp2->bio_data = bp->bio_data;
		bp2->bio_ma = bp->bio_ma;
		bp2->bio_ma_n = bp->bio_ma_n;
		bp2->bio_ma_offset = bp->bio_ma_offset;
		bp2->bio_attribute = bp->bio_attribute;
		if (bp->bio_cmd == BIO_ZONE)
			bcopy(&bp->bio_zone, &bp2->bio_zone,
			    sizeof(bp->bio_zone));
		/* Inherit classification info from the parent */
		bp2->bio_classifier1 = bp->bio_classifier1;
		bp2->bio_classifier2 = bp->bio_classifier2;
		bp->bio_children++;
	}
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR2(KTR_GEOM, "g_clone_bio(%p): %p", bp, bp2);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return (bp2);
}
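
/*
 * Illustrative sketch (not part of the original source): the typical
 * consumer of g_clone_bio() is a class's start method, which clones
 * the incoming bio, points bio_done at g_std_done (or at its own
 * completion routine) and sends the clone down to its consumer.
 * When the clone completes, biodone() propagates the result to the
 * parent.  The names example_geom_start and sc_consumer are
 * hypothetical.
 */
#if 0
static void
example_geom_start(struct bio *bp)
{
	struct bio *bp2;

	bp2 = g_clone_bio(bp);		/* M_NOWAIT; may fail */
	if (bp2 == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	bp2->bio_done = g_std_done;	/* finishes the parent for us */
	g_io_request(bp2, sc_consumer);
}
#endif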

struct bio *
g_duplicate_bio(struct bio *bp)
{
	struct bio *bp2;

	bp2 = uma_zalloc(biozone, M_WAITOK | M_ZERO);
	bp2->bio_flags = bp->bio_flags & (BIO_UNMAPPED | BIO_VLIST);
	bp2->bio_parent = bp;
	bp2->bio_cmd = bp->bio_cmd;
	bp2->bio_length = bp->bio_length;
	bp2->bio_offset = bp->bio_offset;
	bp2->bio_data = bp->bio_data;
	bp2->bio_ma = bp->bio_ma;
	bp2->bio_ma_n = bp->bio_ma_n;
	bp2->bio_ma_offset = bp->bio_ma_offset;
	bp2->bio_attribute = bp->bio_attribute;
	bp->bio_children++;
#ifdef KTR
	if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) {
		struct stack st;

		CTR2(KTR_GEOM, "g_duplicate_bio(%p): %p", bp, bp2);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return (bp2);
}

void
g_reset_bio(struct bio *bp)
{

	bzero(bp, sizeof(*bp));
}

void
g_io_init()
{

	g_bioq_init(&g_bio_run_down);
	g_bioq_init(&g_bio_run_up);
	g_bioq_init(&g_bio_run_task);
	biozone = uma_zcreate("g_bio", sizeof (struct bio),
	    NULL, NULL, NULL, NULL, 0, 0);
}

int
g_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_getattr(%s)", attr);
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_GETATTR;
	bp->bio_done = NULL;
	bp->bio_attribute = attr;
	bp->bio_length = *len;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "ggetattr");
	*len = bp->bio_completed;
	g_destroy_bio(bp);
	return (error);
}
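
/*
 * Illustrative sketch (not part of the original source): a typical
 * g_io_getattr() call queries a well-known attribute such as
 * "GEOM::candelete".  The caller supplies the buffer and its size;
 * on return *len holds the number of bytes actually completed.
 */
#if 0
	int candelete, len, error;

	len = sizeof(candelete);
	error = g_io_getattr("GEOM::candelete", cp, &len, &candelete);
	if (error == 0 && candelete != 0) {
		/* The provider supports BIO_DELETE. */
	}
#endif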

int
g_io_zonecmd(struct disk_zone_args *zone_args, struct g_consumer *cp)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_zone(%d)", zone_args->zone_cmd);
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_ZONE;
	bp->bio_done = NULL;
	/*
	 * XXX KDM need to handle report zone data.
	 */
	bcopy(zone_args, &bp->bio_zone, sizeof(*zone_args));
	if (zone_args->zone_cmd == DISK_ZONE_REPORT_ZONES)
		bp->bio_length =
		    zone_args->zone_params.report.entries_allocated *
		    sizeof(struct disk_zone_rep_entry);
	else
		bp->bio_length = 0;

	g_io_request(bp, cp);
	error = biowait(bp, "gzone");
	bcopy(&bp->bio_zone, zone_args, sizeof(*zone_args));
	g_destroy_bio(bp);
	return (error);
}
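
/*
 * Illustrative sketch (not part of the original source): querying a
 * zoned provider's parameters.  The disk_zone_args layout is assumed
 * from sys/disk_zone.h; treat the zone_params.disk_params access as
 * an unverified assumption.
 */
#if 0
	struct disk_zone_args zone_args;
	int error;

	bzero(&zone_args, sizeof(zone_args));
	zone_args.zone_cmd = DISK_ZONE_GET_PARAMS;
	error = g_io_zonecmd(&zone_args, cp);
	if (error == 0) {
		/* zone_args.zone_params.disk_params describes the device. */
	}
#endif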

int
g_io_flush(struct g_consumer *cp)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_flush(%s)", cp->provider->name);
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_FLUSH;
	bp->bio_flags |= BIO_ORDERED;
	bp->bio_done = NULL;
	bp->bio_attribute = NULL;
	bp->bio_offset = cp->provider->mediasize;
	bp->bio_length = 0;
	bp->bio_data = NULL;
	g_io_request(bp, cp);
	error = biowait(bp, "gflush");
	g_destroy_bio(bp);
	return (error);
}

static int
g_io_check(struct bio *bp)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	off_t excess;
	int error;

	cp = bp->bio_from;
	pp = bp->bio_to;

	/* Fail if access counters don't allow the operation */
	switch(bp->bio_cmd) {
	case BIO_READ:
	case BIO_GETATTR:
		if (cp->acr == 0)
			return (EPERM);
		break;
	case BIO_WRITE:
	case BIO_DELETE:
	case BIO_FLUSH:
		if (cp->acw == 0)
			return (EPERM);
		break;
	case BIO_ZONE:
		if ((bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES) ||
		    (bp->bio_zone.zone_cmd == DISK_ZONE_GET_PARAMS)) {
			if (cp->acr == 0)
				return (EPERM);
		} else if (cp->acw == 0)
			return (EPERM);
		break;
	default:
		return (EPERM);
	}
	/* if provider is marked for error, don't disturb. */
	if (pp->error)
		return (pp->error);
	if (cp->flags & G_CF_ORPHAN)
		return (ENXIO);

	switch(bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		/* Zero sectorsize or mediasize is probably a lack of media. */
		if (pp->sectorsize == 0 || pp->mediasize == 0)
			return (ENXIO);
		/* Reject I/O not on sector boundary */
		if (bp->bio_offset % pp->sectorsize)
			return (EINVAL);
		/* Reject I/O not integral sector long */
		if (bp->bio_length % pp->sectorsize)
			return (EINVAL);
		/* Reject requests before or past the end of media. */
		if (bp->bio_offset < 0)
			return (EIO);
		if (bp->bio_offset > pp->mediasize)
			return (EIO);

		/* Truncate requests to the end of the provider's media. */
		excess = bp->bio_offset + bp->bio_length;
		if (excess > bp->bio_to->mediasize) {
			KASSERT((bp->bio_flags & BIO_UNMAPPED) == 0 ||
			    round_page(bp->bio_ma_offset +
			    bp->bio_length) / PAGE_SIZE == bp->bio_ma_n,
			    ("excess bio %p too short", bp));
			excess -= bp->bio_to->mediasize;
			bp->bio_length -= excess;
			if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
				bp->bio_ma_n = round_page(bp->bio_ma_offset +
				    bp->bio_length) / PAGE_SIZE;
			}
			if (excess > 0)
				CTR3(KTR_GEOM, "g_down truncated bio "
				    "%p provider %s by %d", bp,
				    bp->bio_to->name, excess);
		}

		/* Deliver zero length transfers right here. */
		if (bp->bio_length == 0) {
			CTR2(KTR_GEOM, "g_down terminated 0-length "
			    "bp %p provider %s", bp, bp->bio_to->name);
			return (0);
		}

		if ((bp->bio_flags & BIO_UNMAPPED) != 0 &&
		    (bp->bio_to->flags & G_PF_ACCEPT_UNMAPPED) == 0 &&
		    (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE)) {
			if ((error = g_io_transient_map_bio(bp)) >= 0)
				return (error);
		}
		break;
	default:
		break;
	}
	return (EJUSTRETURN);
}

/*
 * bio classification support.
 *
 * g_register_classifier() and g_unregister_classifier()
 * are used to add/remove a classifier from the list.
 * The list is protected using the g_bio_run_down lock,
 * because the classifiers are called in this path.
 *
 * g_io_request() passes bio's that are not already classified
 * (i.e. those with bio_classifier1 == NULL) to g_run_classifiers().
 * Classifiers can store their result in the two fields
 * bio_classifier1 and bio_classifier2.
 * A classifier that updates one of the fields should
 * return a non-zero value.
 * If no classifier updates the field, g_run_classifiers() sets
 * bio_classifier1 = BIO_NOTCLASSIFIED to avoid further calls.
 */
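
/*
 * Illustrative sketch (not part of the original source): a minimal
 * classifier hook following the contract above.  It stores its result
 * in bio_classifier1 and returns non-zero when it classified the bio;
 * if every registered hook returns zero, g_run_classifiers() marks
 * the bio BIO_NOTCLASSIFIED.  Hooks run under the g_bio_run_down
 * lock, so they must not sleep.  The tag object is hypothetical.
 */
#if 0
static int
example_classify(void *arg, struct bio *bp)
{

	if (bp->bio_cmd != BIO_READ && bp->bio_cmd != BIO_WRITE)
		return (0);
	bp->bio_classifier1 = arg;	/* e.g. a per-scheduler tag */
	return (1);
}

static struct g_classifier_hook example_hook = {
	.func = example_classify,
	.arg = &example_tag,		/* hypothetical tag object */
};

/* ... later: g_register_classifier(&example_hook); ... */
#endif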

int
g_register_classifier(struct g_classifier_hook *hook)
{

	g_bioq_lock(&g_bio_run_down);
	TAILQ_INSERT_TAIL(&g_classifier_tailq, hook, link);
	g_bioq_unlock(&g_bio_run_down);

	return (0);
}

void
g_unregister_classifier(struct g_classifier_hook *hook)
{
	struct g_classifier_hook *entry;

	g_bioq_lock(&g_bio_run_down);
	TAILQ_FOREACH(entry, &g_classifier_tailq, link) {
		if (entry == hook) {
			TAILQ_REMOVE(&g_classifier_tailq, hook, link);
			break;
		}
	}
	g_bioq_unlock(&g_bio_run_down);
}

static void
g_run_classifiers(struct bio *bp)
{
	struct g_classifier_hook *hook;
	int classified = 0;

	TAILQ_FOREACH(hook, &g_classifier_tailq, link)
		classified |= hook->func(hook->arg, bp);

	if (!classified)
		bp->bio_classifier1 = BIO_NOTCLASSIFIED;
}

void
g_io_request(struct bio *bp, struct g_consumer *cp)
{
	struct g_provider *pp;
	struct mtx *mtxp;
	int direct, error, first;
	uint8_t cmd;

	KASSERT(cp != NULL, ("NULL cp in g_io_request"));
	KASSERT(bp != NULL, ("NULL bp in g_io_request"));
	pp = cp->provider;
	KASSERT(pp != NULL, ("consumer not attached in g_io_request"));
#ifdef DIAGNOSTIC
	KASSERT(bp->bio_driver1 == NULL,
	    ("bio_driver1 used by the consumer (geom %s)", cp->geom->name));
	KASSERT(bp->bio_driver2 == NULL,
	    ("bio_driver2 used by the consumer (geom %s)", cp->geom->name));
	KASSERT(bp->bio_pflags == 0,
	    ("bio_pflags used by the consumer (geom %s)", cp->geom->name));
	/*
	 * Remember consumer's private fields, so we can detect if they were
	 * modified by the provider.
	 */
	bp->_bio_caller1 = bp->bio_caller1;
	bp->_bio_caller2 = bp->bio_caller2;
	bp->_bio_cflags = bp->bio_cflags;
#endif

	cmd = bp->bio_cmd;
	if (cmd == BIO_READ || cmd == BIO_WRITE || cmd == BIO_GETATTR) {
		KASSERT(bp->bio_data != NULL,
		    ("NULL bp->data in g_io_request(cmd=%hu)", bp->bio_cmd));
	}
	if (cmd == BIO_DELETE || cmd == BIO_FLUSH) {
		KASSERT(bp->bio_data == NULL,
		    ("non-NULL bp->data in g_io_request(cmd=%hu)",
		    bp->bio_cmd));
	}
	if (cmd == BIO_READ || cmd == BIO_WRITE || cmd == BIO_DELETE) {
		KASSERT(bp->bio_offset % cp->provider->sectorsize == 0,
		    ("wrong offset %jd for sectorsize %u",
		    bp->bio_offset, cp->provider->sectorsize));
		KASSERT(bp->bio_length % cp->provider->sectorsize == 0,
		    ("wrong length %jd for sectorsize %u",
		    bp->bio_length, cp->provider->sectorsize));
	}

	g_trace(G_T_BIO, "bio_request(%p) from %p(%s) to %p(%s) cmd %d",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd);

	bp->bio_from = cp;
	bp->bio_to = pp;
	bp->bio_error = 0;
	bp->bio_completed = 0;

	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p", bp));
	if ((g_collectstats & G_STATS_CONSUMERS) != 0 ||
	    ((g_collectstats & G_STATS_PROVIDERS) != 0 && pp->stat != NULL))
		binuptime(&bp->bio_t0);
	else
		getbinuptime(&bp->bio_t0);

#ifdef GET_STACK_USAGE
	direct = (cp->flags & G_CF_DIRECT_SEND) != 0 &&
	    (pp->flags & G_PF_DIRECT_RECEIVE) != 0 &&
	    !g_is_geom_thread(curthread) &&
	    ((pp->flags & G_PF_ACCEPT_UNMAPPED) != 0 ||
	    (bp->bio_flags & BIO_UNMAPPED) == 0 || THREAD_CAN_SLEEP()) &&
	    pace == 0;
	if (direct) {
		/* Block direct execution if less than half of stack left. */
		size_t st, su;
		GET_STACK_USAGE(st, su);
		if (su * 2 > st)
			direct = 0;
	}
#else
	direct = 0;
#endif

	if (!TAILQ_EMPTY(&g_classifier_tailq) && !bp->bio_classifier1) {
		g_bioq_lock(&g_bio_run_down);
		g_run_classifiers(bp);
		g_bioq_unlock(&g_bio_run_down);
	}

	/*
	 * The statistics collection is lockless, as such, but we
	 * can not update one instance of the statistics from more
	 * than one thread at a time, so grab the lock first.
	 */
	mtxp = mtx_pool_find(mtxpool_sleep, pp);
	mtx_lock(mtxp);
	if (g_collectstats & G_STATS_PROVIDERS)
		devstat_start_transaction(pp->stat, &bp->bio_t0);
	if (g_collectstats & G_STATS_CONSUMERS)
		devstat_start_transaction(cp->stat, &bp->bio_t0);
	pp->nstart++;
	cp->nstart++;
	mtx_unlock(mtxp);

	if (direct) {
		error = g_io_check(bp);
		if (error >= 0) {
			CTR3(KTR_GEOM, "g_io_request g_io_check on bp %p "
			    "provider %s returned %d", bp, bp->bio_to->name,
			    error);
			g_io_deliver(bp, error);
			return;
		}
		bp->bio_to->geom->start(bp);
	} else {
		g_bioq_lock(&g_bio_run_down);
		first = TAILQ_EMPTY(&g_bio_run_down.bio_queue);
		TAILQ_INSERT_TAIL(&g_bio_run_down.bio_queue, bp, bio_queue);
		bp->bio_flags |= BIO_ONQUEUE;
		g_bio_run_down.bio_queue_length++;
		g_bioq_unlock(&g_bio_run_down);
		/* Pass it on down. */
		if (first)
			wakeup(&g_wait_down);
	}
}
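
/*
 * Illustrative sketch (not part of the original source): the two ways
 * to consume g_io_request().  Synchronous callers leave bio_done NULL
 * and block in biowait(), as g_io_getattr() above does; asynchronous
 * callers set bio_done and are called back from the g_up thread (or
 * directly, when direct dispatch applies).  The buffer is
 * hypothetical.
 */
#if 0
static void
example_done(struct bio *bp)
{

	if (bp->bio_error != 0)
		printf("example: read error %d\n", bp->bio_error);
	g_destroy_bio(bp);
}

	/* ... in some request path: */
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_READ;
	bp->bio_offset = 0;
	bp->bio_length = cp->provider->sectorsize;
	bp->bio_data = buf;		/* hypothetical buffer */
	bp->bio_done = example_done;	/* no biowait(); done() fires later */
	g_io_request(bp, cp);
#endif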

void
g_io_deliver(struct bio *bp, int error)
{
	struct bintime now;
	struct g_consumer *cp;
	struct g_provider *pp;
	struct mtx *mtxp;
	int direct, first;

	KASSERT(bp != NULL, ("NULL bp in g_io_deliver"));
	pp = bp->bio_to;
	KASSERT(pp != NULL, ("NULL bio_to in g_io_deliver"));
	cp = bp->bio_from;
	if (cp == NULL) {
		bp->bio_error = error;
		bp->bio_done(bp);
		return;
	}
	KASSERT(cp != NULL, ("NULL bio_from in g_io_deliver"));
	KASSERT(cp->geom != NULL, ("NULL bio_from->geom in g_io_deliver"));
#ifdef DIAGNOSTIC
	/*
	 * Some classes - GJournal in particular - can modify bio's
	 * private fields while the bio is in transit; G_GEOM_VOLATILE_BIO
	 * flag means it's an expected behaviour for that particular geom.
	 */
	if ((cp->geom->flags & G_GEOM_VOLATILE_BIO) == 0) {
		KASSERT(bp->bio_caller1 == bp->_bio_caller1,
		    ("bio_caller1 used by the provider %s", pp->name));
		KASSERT(bp->bio_caller2 == bp->_bio_caller2,
		    ("bio_caller2 used by the provider %s", pp->name));
		KASSERT(bp->bio_cflags == bp->_bio_cflags,
		    ("bio_cflags used by the provider %s", pp->name));
	}
#endif
	KASSERT(bp->bio_completed >= 0, ("bio_completed can't be less than 0"));
	KASSERT(bp->bio_completed <= bp->bio_length,
	    ("bio_completed can't be greater than bio_length"));

	g_trace(G_T_BIO,
"g_io_deliver(%p) from %p(%s) to %p(%s) cmd %d error %d off %jd len %jd",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd, error,
	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);

	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p", bp));

	/*
	 * XXX: the next two don't belong here.
	 */
	bp->bio_bcount = bp->bio_length;
	bp->bio_resid = bp->bio_bcount - bp->bio_completed;

#ifdef GET_STACK_USAGE
	direct = (pp->flags & G_PF_DIRECT_SEND) &&
	    (cp->flags & G_CF_DIRECT_RECEIVE) &&
	    !g_is_geom_thread(curthread);
	if (direct) {
		/* Block direct execution if less than half of stack left. */
		size_t st, su;
		GET_STACK_USAGE(st, su);
		if (su * 2 > st)
			direct = 0;
	}
#else
	direct = 0;
#endif

	/*
	 * The statistics collection is lockless, as such, but we
	 * can not update one instance of the statistics from more
	 * than one thread at a time, so grab the lock first.
	 */
	if ((g_collectstats & G_STATS_CONSUMERS) != 0 ||
	    ((g_collectstats & G_STATS_PROVIDERS) != 0 && pp->stat != NULL))
		binuptime(&now);
	mtxp = mtx_pool_find(mtxpool_sleep, cp);
	mtx_lock(mtxp);
	if (g_collectstats & G_STATS_PROVIDERS)
		devstat_end_transaction_bio_bt(pp->stat, bp, &now);
	if (g_collectstats & G_STATS_CONSUMERS)
		devstat_end_transaction_bio_bt(cp->stat, bp, &now);
	cp->nend++;
	pp->nend++;
	mtx_unlock(mtxp);

	if (error != ENOMEM) {
		bp->bio_error = error;
		if (direct) {
			biodone(bp);
		} else {
			g_bioq_lock(&g_bio_run_up);
			first = TAILQ_EMPTY(&g_bio_run_up.bio_queue);
			TAILQ_INSERT_TAIL(&g_bio_run_up.bio_queue, bp, bio_queue);
			bp->bio_flags |= BIO_ONQUEUE;
			g_bio_run_up.bio_queue_length++;
			g_bioq_unlock(&g_bio_run_up);
			if (first)
				wakeup(&g_wait_up);
		}
		return;
	}

	if (bootverbose)
		printf("ENOMEM %p on %p(%s)\n", bp, pp, pp->name);
	bp->bio_children = 0;
	bp->bio_inbed = 0;
	bp->bio_driver1 = NULL;
	bp->bio_driver2 = NULL;
	bp->bio_pflags = 0;
	g_io_request(bp, cp);
	pace = 1;
	return;
}

SYSCTL_DECL(_kern_geom);

static long transient_maps;
SYSCTL_LONG(_kern_geom, OID_AUTO, transient_maps, CTLFLAG_RD,
    &transient_maps, 0,
    "Total count of the transient mapping requests");
u_int transient_map_retries = 10;
SYSCTL_UINT(_kern_geom, OID_AUTO, transient_map_retries, CTLFLAG_RW,
    &transient_map_retries, 0,
    "Max count of retries used before giving up on creating transient map");
int transient_map_hard_failures;
SYSCTL_INT(_kern_geom, OID_AUTO, transient_map_hard_failures, CTLFLAG_RD,
    &transient_map_hard_failures, 0,
    "Failures to establish the transient mapping due to retry attempts "
    "exhausted");
int transient_map_soft_failures;
SYSCTL_INT(_kern_geom, OID_AUTO, transient_map_soft_failures, CTLFLAG_RD,
    &transient_map_soft_failures, 0,
    "Count of retried failures to establish the transient mapping");
int inflight_transient_maps;
SYSCTL_INT(_kern_geom, OID_AUTO, inflight_transient_maps, CTLFLAG_RD,
    &inflight_transient_maps, 0,
    "Current count of the active transient maps");

static int
g_io_transient_map_bio(struct bio *bp)
{
	vm_offset_t addr;
	long size;
	u_int retried;

	KASSERT(unmapped_buf_allowed, ("unmapped disabled"));

	size = round_page(bp->bio_ma_offset + bp->bio_length);
	KASSERT(size / PAGE_SIZE == bp->bio_ma_n, ("Bio too short %p", bp));
	addr = 0;
	retried = 0;
	atomic_add_long(&transient_maps, 1);
retry:
	if (vmem_alloc(transient_arena, size, M_BESTFIT | M_NOWAIT, &addr)) {
		if (transient_map_retries != 0 &&
		    retried >= transient_map_retries) {
			CTR2(KTR_GEOM, "g_down cannot map bp %p provider %s",
			    bp, bp->bio_to->name);
			atomic_add_int(&transient_map_hard_failures, 1);
			return (EDEADLK/* XXXKIB */);
		} else {
			/*
			 * Naive attempt to quiesce the I/O to get more
			 * in-flight requests completed and defragment
			 * the transient_arena.
			 */
			CTR3(KTR_GEOM, "g_down retrymap bp %p provider %s r %d",
			    bp, bp->bio_to->name, retried);
			pause("g_d_tra", hz / 10);
			retried++;
			atomic_add_int(&transient_map_soft_failures, 1);
			goto retry;
		}
	}
	atomic_add_int(&inflight_transient_maps, 1);
	pmap_qenter((vm_offset_t)addr, bp->bio_ma, OFF_TO_IDX(size));
	bp->bio_data = (caddr_t)addr + bp->bio_ma_offset;
	bp->bio_flags |= BIO_TRANSIENT_MAPPING;
	bp->bio_flags &= ~BIO_UNMAPPED;
	return (EJUSTRETURN);
}

void
g_io_schedule_down(struct thread *tp __unused)
{
	struct bio *bp;
	int error;

	for(;;) {
		g_bioq_lock(&g_bio_run_down);
		bp = g_bioq_first(&g_bio_run_down);
		if (bp == NULL) {
			CTR0(KTR_GEOM, "g_down going to sleep");
			msleep(&g_wait_down, &g_bio_run_down.bio_queue_lock,
			    PRIBIO | PDROP, "-", 0);
			continue;
		}
		CTR0(KTR_GEOM, "g_down has work to do");
		g_bioq_unlock(&g_bio_run_down);
		if (pace != 0) {
			/*
			 * There has been at least one memory allocation
			 * failure since the last I/O completed. Pause 1ms to
			 * give the system a chance to free up memory. We only
			 * do this once because a large number of allocations
			 * can fail in the direct dispatch case and there's no
			 * relationship between the number of these failures and
			 * the length of the outage. If there's still an outage,
			 * we'll pause again and again until it's
			 * resolved. Older versions paused longer and once per
			 * allocation failure. This was OK for a single threaded
			 * g_down, but with direct dispatch would lead to max of
			 * 10 IOPs for minutes at a time when transient memory
			 * issues prevented allocation for a batch of requests
			 * from the upper layers.
			 *
			 * XXX This pacing is really lame. It needs to be solved
			 * by other methods. This is OK only because the worst
			 * case scenario is so rare. In the worst case scenario
			 * all memory is tied up waiting for I/O to complete
			 * which can never happen since we can't allocate bios
			 * for that I/O.
			 */
			CTR0(KTR_GEOM, "g_down pacing self");
			pause("g_down", min(hz/1000, 1));
			pace = 0;
		}
		CTR2(KTR_GEOM, "g_down processing bp %p provider %s", bp,
		    bp->bio_to->name);
		error = g_io_check(bp);
		if (error >= 0) {
			CTR3(KTR_GEOM, "g_down g_io_check on bp %p provider "
			    "%s returned %d", bp, bp->bio_to->name, error);
			g_io_deliver(bp, error);
			continue;
		}
		THREAD_NO_SLEEPING();
		CTR4(KTR_GEOM, "g_down starting bp %p provider %s off %ld "
		    "len %ld", bp, bp->bio_to->name, bp->bio_offset,
		    bp->bio_length);
		bp->bio_to->geom->start(bp);
		THREAD_SLEEPING_OK();
	}
}

void
bio_taskqueue(struct bio *bp, bio_task_t *func, void *arg)
{

	bp->bio_task = func;
	bp->bio_task_arg = arg;
	/*
	 * The taskqueue is actually just a second queue off the "up"
	 * queue, so we use the same lock.
	 */
	g_bioq_lock(&g_bio_run_up);
	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p target taskq", bp));
	bp->bio_flags |= BIO_ONQUEUE;
	TAILQ_INSERT_TAIL(&g_bio_run_task.bio_queue, bp, bio_queue);
	g_bio_run_task.bio_queue_length++;
	wakeup(&g_wait_up);
	g_bioq_unlock(&g_bio_run_up);
}

void
g_io_schedule_up(struct thread *tp __unused)
{
	struct bio *bp;

	for(;;) {
		g_bioq_lock(&g_bio_run_up);
		bp = g_bioq_first(&g_bio_run_task);
		if (bp != NULL) {
			g_bioq_unlock(&g_bio_run_up);
			THREAD_NO_SLEEPING();
			CTR1(KTR_GEOM, "g_up processing task bp %p", bp);
			bp->bio_task(bp->bio_task_arg);
			THREAD_SLEEPING_OK();
			continue;
		}
		bp = g_bioq_first(&g_bio_run_up);
		if (bp != NULL) {
			g_bioq_unlock(&g_bio_run_up);
			THREAD_NO_SLEEPING();
			CTR4(KTR_GEOM, "g_up biodone bp %p provider %s off "
			    "%jd len %ld", bp, bp->bio_to->name,
			    bp->bio_offset, bp->bio_length);
			biodone(bp);
			THREAD_SLEEPING_OK();
			continue;
		}
		CTR0(KTR_GEOM, "g_up going to sleep");
		msleep(&g_wait_up, &g_bio_run_up.bio_queue_lock,
		    PRIBIO | PDROP, "-", 0);
	}
}

void *
g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error)
{
	struct bio *bp;
	void *ptr;
	int errorc;

	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
	    length <= MAXPHYS, ("g_read_data(): invalid length %jd",
	    (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_READ;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	ptr = g_malloc(length, M_WAITOK);
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	errorc = biowait(bp, "gread");
	if (error != NULL)
		*error = errorc;
	g_destroy_bio(bp);
	if (errorc) {
		g_free(ptr);
		ptr = NULL;
	}
	return (ptr);
}

int
g_write_data(struct g_consumer *cp, off_t offset, void *ptr, off_t length)
{
	struct bio *bp;
	int error;

	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
	    length <= MAXPHYS, ("g_write_data(): invalid length %jd",
	    (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_WRITE;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "gwrite");
	g_destroy_bio(bp);
	return (error);
}
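
/*
 * Illustrative sketch (not part of the original source): g_read_data()
 * and g_write_data() above are the simple synchronous helpers built on
 * g_io_request()/biowait().  Reading and rewriting one sector of
 * metadata typically looks like this; the buffer g_read_data() returns
 * must be released with g_free().
 */
#if 0
	u_char *buf;
	int error;

	buf = g_read_data(cp, 0, cp->provider->sectorsize, &error);
	if (buf == NULL)
		return (error);
	/* ... inspect or modify buf ... */
	error = g_write_data(cp, 0, buf, cp->provider->sectorsize);
	g_free(buf);
#endif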

int
g_delete_data(struct g_consumer *cp, off_t offset, off_t length)
{
	struct bio *bp;
	int error;

	KASSERT(length > 0 && length >= cp->provider->sectorsize,
	    ("g_delete_data(): invalid length %jd", (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_DELETE;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	bp->bio_data = NULL;
	g_io_request(bp, cp);
	error = biowait(bp, "gdelete");
	g_destroy_bio(bp);
	return (error);
}

void
g_print_bio(struct bio *bp)
{
	const char *pname, *cmd = NULL;

	if (bp->bio_to != NULL)
		pname = bp->bio_to->name;
	else
		pname = "[unknown]";

	switch (bp->bio_cmd) {
	case BIO_GETATTR:
		cmd = "GETATTR";
		printf("%s[%s(attr=%s)]", pname, cmd, bp->bio_attribute);
		return;
	case BIO_FLUSH:
		cmd = "FLUSH";
		printf("%s[%s]", pname, cmd);
		return;
	case BIO_ZONE: {
		char *subcmd = NULL;
		cmd = "ZONE";
		switch (bp->bio_zone.zone_cmd) {
		case DISK_ZONE_OPEN:
			subcmd = "OPEN";
			break;
		case DISK_ZONE_CLOSE:
			subcmd = "CLOSE";
			break;
		case DISK_ZONE_FINISH:
			subcmd = "FINISH";
			break;
		case DISK_ZONE_RWP:
			subcmd = "RWP";
			break;
		case DISK_ZONE_REPORT_ZONES:
			subcmd = "REPORT ZONES";
			break;
		case DISK_ZONE_GET_PARAMS:
			subcmd = "GET PARAMS";
			break;
		default:
			subcmd = "UNKNOWN";
			break;
		}
		printf("%s[%s,%s]", pname, cmd, subcmd);
		return;
	}
	case BIO_READ:
		cmd = "READ";
		break;
	case BIO_WRITE:
		cmd = "WRITE";
		break;
	case BIO_DELETE:
		cmd = "DELETE";
		break;
	default:
		cmd = "UNKNOWN";
		printf("%s[%s()]", pname, cmd);
		return;
	}
	printf("%s[%s(offset=%jd, length=%jd)]", pname, cmd,
	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);
}