FreeBSD/Linux Kernel Cross Reference
sys/geom/geom_io.c
/*-
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bio.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/stack.h>

#include <sys/errno.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
#include <sys/devicestat.h>

#include <vm/uma.h>

#include <machine/atomic.h>

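/*
 * The three queues below carry all I/O through GEOM: the "down" queue
 * holds requests on their way from consumers to providers, the "up"
 * queue holds completed requests on their way back, and the "task"
 * queue holds deferred work run in the context of the g_up thread.
 * "pace" throttles the g_down thread after an ENOMEM completion, and
 * "biozone" is the UMA zone from which all struct bio allocations in
 * this file are made.
 */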
static struct g_bioq g_bio_run_down;
static struct g_bioq g_bio_run_up;
static struct g_bioq g_bio_run_task;

static u_int pace;
static uma_zone_t biozone;

static void
g_bioq_lock(struct g_bioq *bq)
{

	mtx_lock(&bq->bio_queue_lock);
}

static void
g_bioq_unlock(struct g_bioq *bq)
{

	mtx_unlock(&bq->bio_queue_lock);
}

#if 0
static void
g_bioq_destroy(struct g_bioq *bq)
{

	mtx_destroy(&bq->bio_queue_lock);
}
#endif

static void
g_bioq_init(struct g_bioq *bq)
{

	TAILQ_INIT(&bq->bio_queue);
	mtx_init(&bq->bio_queue_lock, "bio queue", NULL, MTX_DEF);
}

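/*
 * Remove and return the first bio on the queue, or NULL if the queue
 * is empty.  The caller must hold the queue lock.
 */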
static struct bio *
g_bioq_first(struct g_bioq *bq)
{
	struct bio *bp;

	bp = TAILQ_FIRST(&bq->bio_queue);
	if (bp != NULL) {
		KASSERT((bp->bio_flags & BIO_ONQUEUE),
		    ("Bio not on queue bp=%p target %p", bp, bq));
		bp->bio_flags &= ~BIO_ONQUEUE;
		TAILQ_REMOVE(&bq->bio_queue, bp, bio_queue);
		bq->bio_queue_length--;
	}
	return (bp);
}

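/*
 * Allocate a zeroed bio.  g_new_bio() uses M_NOWAIT and may return
 * NULL under memory pressure; g_alloc_bio() uses M_WAITOK and always
 * returns a bio, but may sleep and therefore cannot be used in
 * non-sleepable contexts such as the g_up and g_down threads.
 */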
struct bio *
g_new_bio(void)
{
	struct bio *bp;

	bp = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
#ifdef KTR
	if (KTR_COMPILE & KTR_GEOM) {
		struct stack st;

		CTR1(KTR_GEOM, "g_new_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return (bp);
}

struct bio *
g_alloc_bio(void)
{
	struct bio *bp;

	bp = uma_zalloc(biozone, M_WAITOK | M_ZERO);
#ifdef KTR
	if (KTR_COMPILE & KTR_GEOM) {
		struct stack st;

		CTR1(KTR_GEOM, "g_alloc_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return (bp);
}

void
g_destroy_bio(struct bio *bp)
{
#ifdef KTR
	if (KTR_COMPILE & KTR_GEOM) {
		struct stack st;

		CTR1(KTR_GEOM, "g_destroy_bio(): %p", bp);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	uma_zfree(biozone, bp);
}

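/*
 * Clone a bio so a request can be passed on to the next level down.
 * The child inherits the command, offset, length, data pointer and
 * attribute, and the parent's bio_children count is bumped so that
 * completion can be propagated once all children are done.
 * g_clone_bio() may return NULL (M_NOWAIT); g_duplicate_bio() is the
 * sleeping variant that cannot fail.
 */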
struct bio *
g_clone_bio(struct bio *bp)
{
	struct bio *bp2;

	bp2 = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
	if (bp2 != NULL) {
		bp2->bio_parent = bp;
		bp2->bio_cmd = bp->bio_cmd;
		bp2->bio_length = bp->bio_length;
		bp2->bio_offset = bp->bio_offset;
		bp2->bio_data = bp->bio_data;
		bp2->bio_attribute = bp->bio_attribute;
		bp->bio_children++;
	}
#ifdef KTR
	if (KTR_COMPILE & KTR_GEOM) {
		struct stack st;

		CTR2(KTR_GEOM, "g_clone_bio(%p): %p", bp, bp2);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return (bp2);
}

struct bio *
g_duplicate_bio(struct bio *bp)
{
	struct bio *bp2;

	bp2 = uma_zalloc(biozone, M_WAITOK | M_ZERO);
	bp2->bio_parent = bp;
	bp2->bio_cmd = bp->bio_cmd;
	bp2->bio_length = bp->bio_length;
	bp2->bio_offset = bp->bio_offset;
	bp2->bio_data = bp->bio_data;
	bp2->bio_attribute = bp->bio_attribute;
	bp->bio_children++;
#ifdef KTR
	if (KTR_COMPILE & KTR_GEOM) {
		struct stack st;

		CTR2(KTR_GEOM, "g_duplicate_bio(%p): %p", bp, bp2);
		stack_save(&st);
		CTRSTACK(KTR_GEOM, &st, 3, 0);
	}
#endif
	return (bp2);
}

void
g_io_init(void)
{

	g_bioq_init(&g_bio_run_down);
	g_bioq_init(&g_bio_run_up);
	g_bioq_init(&g_bio_run_task);
	biozone = uma_zcreate("g_bio", sizeof (struct bio),
	    NULL, NULL,
	    NULL, NULL,
	    0, 0);
}

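/*
 * Issue a synchronous BIO_GETATTR request and wait for the result.
 * "*len" is the buffer size on entry and the number of bytes actually
 * delivered on return.  A minimal hypothetical use, assuming an
 * attribute such as "GEOM::ident":
 *
 *	len = sizeof(buf);
 *	error = g_io_getattr("GEOM::ident", cp, &len, buf);
 */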
int
g_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_getattr(%s)", attr);
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_GETATTR;
	bp->bio_done = NULL;
	bp->bio_attribute = attr;
	bp->bio_length = *len;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "ggetattr");
	*len = bp->bio_completed;
	g_destroy_bio(bp);
	return (error);
}

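/*
 * Issue a synchronous BIO_FLUSH request and wait for it to complete,
 * asking the provider to commit any cached writes to stable storage.
 */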
int
g_io_flush(struct g_consumer *cp)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_flush(%s)", cp->provider->name);
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_FLUSH;
	bp->bio_done = NULL;
	bp->bio_attribute = NULL;
	bp->bio_offset = cp->provider->mediasize;
	bp->bio_length = 0;
	bp->bio_data = NULL;
	g_io_request(bp, cp);
	error = biowait(bp, "gflush");
	g_destroy_bio(bp);
	return (error);
}

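/*
 * Sanity-check a request before it is handed to the provider: the
 * consumer must hold the appropriate access counts, the provider must
 * not be marked for error, and read/write/delete requests must be
 * sector-aligned and within the bounds of the media.
 */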
static int
g_io_check(struct bio *bp)
{
	struct g_consumer *cp;
	struct g_provider *pp;

	cp = bp->bio_from;
	pp = bp->bio_to;

	/* Fail if the access counters don't allow the operation. */
	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_GETATTR:
		if (cp->acr == 0)
			return (EPERM);
		break;
	case BIO_WRITE:
	case BIO_DELETE:
	case BIO_FLUSH:
		if (cp->acw == 0)
			return (EPERM);
		break;
	default:
		return (EPERM);
	}
	/* If the provider is marked for error, don't disturb it. */
	if (pp->error)
		return (pp->error);

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		/* Zero sectorsize or mediasize is probably a lack of media. */
		if (pp->sectorsize == 0 || pp->mediasize == 0)
			return (ENXIO);
		/* Reject I/O not on a sector boundary. */
		if (bp->bio_offset % pp->sectorsize)
			return (EINVAL);
		/* Reject I/O not an integral number of sectors long. */
		if (bp->bio_length % pp->sectorsize)
			return (EINVAL);
		/* Reject requests before or past the end of media. */
		if (bp->bio_offset < 0)
			return (EIO);
		if (bp->bio_offset > pp->mediasize)
			return (EIO);
		break;
	default:
		break;
	}
	return (0);
}

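/*
 * Accept a request from a consumer and schedule it for the g_down
 * thread.  The bio is stamped with a start time for statistics,
 * placed on the down queue, and g_down is woken up if the queue was
 * previously empty.
 */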
void
g_io_request(struct bio *bp, struct g_consumer *cp)
{
	struct g_provider *pp;
	int first;

	KASSERT(cp != NULL, ("NULL cp in g_io_request"));
	KASSERT(bp != NULL, ("NULL bp in g_io_request"));
	pp = cp->provider;
	KASSERT(pp != NULL, ("consumer not attached in g_io_request"));
#ifdef DIAGNOSTIC
	KASSERT(bp->bio_driver1 == NULL,
	    ("bio_driver1 used by the consumer (geom %s)", cp->geom->name));
	KASSERT(bp->bio_driver2 == NULL,
	    ("bio_driver2 used by the consumer (geom %s)", cp->geom->name));
	KASSERT(bp->bio_pflags == 0,
	    ("bio_pflags used by the consumer (geom %s)", cp->geom->name));
	/*
	 * Remember the consumer's private fields, so we can detect if they
	 * were modified by the provider.
	 */
	bp->_bio_caller1 = bp->bio_caller1;
	bp->_bio_caller2 = bp->bio_caller2;
	bp->_bio_cflags = bp->bio_cflags;
#endif

	if (bp->bio_cmd & (BIO_READ|BIO_WRITE|BIO_GETATTR)) {
		KASSERT(bp->bio_data != NULL,
		    ("NULL bp->data in g_io_request(cmd=%hhu)", bp->bio_cmd));
	}
	if (bp->bio_cmd & (BIO_DELETE|BIO_FLUSH)) {
		KASSERT(bp->bio_data == NULL,
		    ("non-NULL bp->data in g_io_request(cmd=%hhu)",
		    bp->bio_cmd));
	}
	if (bp->bio_cmd & (BIO_READ|BIO_WRITE|BIO_DELETE)) {
		KASSERT(bp->bio_offset % cp->provider->sectorsize == 0,
		    ("wrong offset %jd for sectorsize %u",
		    bp->bio_offset, cp->provider->sectorsize));
		KASSERT(bp->bio_length % cp->provider->sectorsize == 0,
		    ("wrong length %jd for sectorsize %u",
		    bp->bio_length, cp->provider->sectorsize));
	}

	g_trace(G_T_BIO, "bio_request(%p) from %p(%s) to %p(%s) cmd %d",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd);

	bp->bio_from = cp;
	bp->bio_to = pp;
	bp->bio_error = 0;
	bp->bio_completed = 0;

	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p", bp));
	bp->bio_flags |= BIO_ONQUEUE;

	if (g_collectstats)
		binuptime(&bp->bio_t0);
	else
		getbinuptime(&bp->bio_t0);

	/*
	 * The statistics collection is lockless, but we cannot update one
	 * instance of the statistics from more than one thread at a time,
	 * so grab the lock first.
	 */
	g_bioq_lock(&g_bio_run_down);
	if (g_collectstats & 1)
		devstat_start_transaction(pp->stat, &bp->bio_t0);
	if (g_collectstats & 2)
		devstat_start_transaction(cp->stat, &bp->bio_t0);

	pp->nstart++;
	cp->nstart++;
	first = TAILQ_EMPTY(&g_bio_run_down.bio_queue);
	TAILQ_INSERT_TAIL(&g_bio_run_down.bio_queue, bp, bio_queue);
	g_bio_run_down.bio_queue_length++;
	g_bioq_unlock(&g_bio_run_down);

	/* Pass it on down. */
	if (first)
		wakeup(&g_wait_down);
}

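/*
 * Report completion of a request back towards the consumer.  The
 * statistics are finalized and the bio is put on the up queue for the
 * g_up thread to run biodone() on.  A request that failed with ENOMEM
 * is instead resubmitted, and the g_down thread is asked to pace
 * itself briefly.
 */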
void
g_io_deliver(struct bio *bp, int error)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	int first;

	KASSERT(bp != NULL, ("NULL bp in g_io_deliver"));
	pp = bp->bio_to;
	KASSERT(pp != NULL, ("NULL bio_to in g_io_deliver"));
#ifdef DIAGNOSTIC
	KASSERT(bp->bio_caller1 == bp->_bio_caller1,
	    ("bio_caller1 used by the provider %s", pp->name));
	KASSERT(bp->bio_caller2 == bp->_bio_caller2,
	    ("bio_caller2 used by the provider %s", pp->name));
	KASSERT(bp->bio_cflags == bp->_bio_cflags,
	    ("bio_cflags used by the provider %s", pp->name));
#endif
	cp = bp->bio_from;
	if (cp == NULL) {
		bp->bio_error = error;
		bp->bio_done(bp);
		return;
	}
	KASSERT(cp != NULL, ("NULL bio_from in g_io_deliver"));
	KASSERT(cp->geom != NULL, ("NULL bio_from->geom in g_io_deliver"));
	KASSERT(bp->bio_completed >= 0, ("bio_completed can't be less than 0"));
	KASSERT(bp->bio_completed <= bp->bio_length,
	    ("bio_completed can't be greater than bio_length"));

	g_trace(G_T_BIO,
	    "g_io_deliver(%p) from %p(%s) to %p(%s) cmd %d error %d off %jd len %jd",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd, error,
	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);

	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p", bp));

	/*
	 * XXX: The next two assignments don't belong here.
	 */
	bp->bio_bcount = bp->bio_length;
	bp->bio_resid = bp->bio_bcount - bp->bio_completed;

	/*
	 * The statistics collection is lockless, but we cannot update one
	 * instance of the statistics from more than one thread at a time,
	 * so grab the lock first.
	 */
	g_bioq_lock(&g_bio_run_up);
	if (g_collectstats & 1)
		devstat_end_transaction_bio(pp->stat, bp);
	if (g_collectstats & 2)
		devstat_end_transaction_bio(cp->stat, bp);

	cp->nend++;
	pp->nend++;
	if (error != ENOMEM) {
		bp->bio_error = error;
		first = TAILQ_EMPTY(&g_bio_run_up.bio_queue);
		TAILQ_INSERT_TAIL(&g_bio_run_up.bio_queue, bp, bio_queue);
		bp->bio_flags |= BIO_ONQUEUE;
		g_bio_run_up.bio_queue_length++;
		g_bioq_unlock(&g_bio_run_up);
		if (first)
			wakeup(&g_wait_up);
		return;
	}
	g_bioq_unlock(&g_bio_run_up);

	if (bootverbose)
		printf("ENOMEM %p on %p(%s)\n", bp, pp, pp->name);
	bp->bio_children = 0;
	bp->bio_inbed = 0;
	g_io_request(bp, cp);
	pace++;
	return;
}

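/*
 * Main loop of the g_down thread: pull requests off the down queue,
 * validate and possibly truncate them, then hand them to the
 * provider's start routine.  The start routine is not allowed to
 * sleep.
 */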
void
g_io_schedule_down(struct thread *tp __unused)
{
	struct bio *bp;
	off_t excess;
	int error;

	for (;;) {
		g_bioq_lock(&g_bio_run_down);
		bp = g_bioq_first(&g_bio_run_down);
		if (bp == NULL) {
			CTR0(KTR_GEOM, "g_down going to sleep");
			msleep(&g_wait_down, &g_bio_run_down.bio_queue_lock,
			    PRIBIO | PDROP, "-", 0);
			continue;
		}
		CTR0(KTR_GEOM, "g_down has work to do");
		g_bioq_unlock(&g_bio_run_down);
		if (pace > 0) {
			CTR1(KTR_GEOM, "g_down pacing self (pace %d)", pace);
			pause("g_down", hz/10);
			pace--;
		}
		error = g_io_check(bp);
		if (error) {
			CTR3(KTR_GEOM, "g_down g_io_check on bp %p provider "
			    "%s returned %d", bp, bp->bio_to->name, error);
			g_io_deliver(bp, error);
			continue;
		}
		CTR2(KTR_GEOM, "g_down processing bp %p provider %s", bp,
		    bp->bio_to->name);
		switch (bp->bio_cmd) {
		case BIO_READ:
		case BIO_WRITE:
		case BIO_DELETE:
			/* Truncate requests to the end of the provider's media. */
			/*
			 * XXX: What if we truncate because of offset being
			 * bad, not length?
			 */
			excess = bp->bio_offset + bp->bio_length;
			if (excess > bp->bio_to->mediasize) {
				excess -= bp->bio_to->mediasize;
				bp->bio_length -= excess;
				if (excess > 0)
					CTR3(KTR_GEOM, "g_down truncated bio "
					    "%p provider %s by %d", bp,
					    bp->bio_to->name, excess);
			}
			/* Deliver zero-length transfers right here. */
			if (bp->bio_length == 0) {
				g_io_deliver(bp, 0);
				CTR2(KTR_GEOM, "g_down terminated 0-length "
				    "bp %p provider %s", bp, bp->bio_to->name);
				continue;
			}
			break;
		default:
			break;
		}
		THREAD_NO_SLEEPING();
		CTR4(KTR_GEOM, "g_down starting bp %p provider %s off %ld "
		    "len %ld", bp, bp->bio_to->name, bp->bio_offset,
		    bp->bio_length);
		bp->bio_to->geom->start(bp);
		THREAD_SLEEPING_OK();
	}
}

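/*
 * Queue a bio for deferred execution of "func" in the context of the
 * g_up thread, using the task queue that shares the up queue's lock.
 */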
void
bio_taskqueue(struct bio *bp, bio_task_t *func, void *arg)
{
	bp->bio_task = func;
	bp->bio_task_arg = arg;
	/*
	 * The taskqueue is actually just a second queue off the "up"
	 * queue, so we use the same lock.
	 */
	g_bioq_lock(&g_bio_run_up);
	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p target taskq", bp));
	bp->bio_flags |= BIO_ONQUEUE;
	TAILQ_INSERT_TAIL(&g_bio_run_task.bio_queue, bp, bio_queue);
	g_bio_run_task.bio_queue_length++;
	wakeup(&g_wait_up);
	g_bioq_unlock(&g_bio_run_up);
}

void
g_io_schedule_up(struct thread *tp __unused)
{
	struct bio *bp;

	for (;;) {
		g_bioq_lock(&g_bio_run_up);
		bp = g_bioq_first(&g_bio_run_task);
		if (bp != NULL) {
			g_bioq_unlock(&g_bio_run_up);
			THREAD_NO_SLEEPING();
			CTR1(KTR_GEOM, "g_up processing task bp %p", bp);
			bp->bio_task(bp->bio_task_arg);
			THREAD_SLEEPING_OK();
			continue;
		}
		bp = g_bioq_first(&g_bio_run_up);
		if (bp != NULL) {
			g_bioq_unlock(&g_bio_run_up);
			THREAD_NO_SLEEPING();
			CTR4(KTR_GEOM, "g_up biodone bp %p provider %s off "
			    "%ld len %ld", bp, bp->bio_to->name,
			    bp->bio_offset, bp->bio_length);
			biodone(bp);
			THREAD_SLEEPING_OK();
			continue;
		}
		CTR0(KTR_GEOM, "g_up going to sleep");
		msleep(&g_wait_up, &g_bio_run_up.bio_queue_lock,
		    PRIBIO | PDROP, "-", 0);
	}
}

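/*
 * Synchronously read "length" bytes at "offset" into a freshly
 * allocated buffer.  On success the buffer is returned and must be
 * released with g_free(); on failure NULL is returned and, if
 * requested, the error is stored in "*error".  A minimal hypothetical
 * use:
 *
 *	buf = g_read_data(cp, 0, cp->provider->sectorsize, &error);
 *	if (buf == NULL)
 *		return (error);
 *	...
 *	g_free(buf);
 */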
void *
g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error)
{
	struct bio *bp;
	void *ptr;
	int errorc;

	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
	    length <= MAXPHYS, ("g_read_data(): invalid length %jd",
	    (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_READ;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	ptr = g_malloc(length, M_WAITOK);
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	errorc = biowait(bp, "gread");
	if (error != NULL)
		*error = errorc;
	g_destroy_bio(bp);
	if (errorc) {
		g_free(ptr);
		ptr = NULL;
	}
	return (ptr);
}

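/*
 * Synchronously write "length" bytes from "ptr" at "offset" and
 * return the error status.  As with g_read_data(), the length must be
 * at least one sector and no more than MAXPHYS.
 */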
int
g_write_data(struct g_consumer *cp, off_t offset, void *ptr, off_t length)
{
	struct bio *bp;
	int error;

	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
	    length <= MAXPHYS, ("g_write_data(): invalid length %jd",
	    (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_WRITE;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "gwrite");
	g_destroy_bio(bp);
	return (error);
}

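/*
 * Synchronously issue a BIO_DELETE for the given range, telling the
 * provider that the data is no longer needed.  Unlike g_read_data()
 * and g_write_data(), the length is not capped at MAXPHYS since no
 * data buffer is involved.
 */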
int
g_delete_data(struct g_consumer *cp, off_t offset, off_t length)
{
	struct bio *bp;
	int error;

	KASSERT(length > 0 && length >= cp->provider->sectorsize,
	    ("g_delete_data(): invalid length %jd", (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_DELETE;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	bp->bio_data = NULL;
	g_io_request(bp, cp);
	error = biowait(bp, "gdelete");
	g_destroy_bio(bp);
	return (error);
}

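/*
 * Print a one-line description of a bio for debugging purposes.
 */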
void
g_print_bio(struct bio *bp)
{
	const char *pname, *cmd = NULL;

	if (bp->bio_to != NULL)
		pname = bp->bio_to->name;
	else
		pname = "[unknown]";

	switch (bp->bio_cmd) {
	case BIO_GETATTR:
		cmd = "GETATTR";
		printf("%s[%s(attr=%s)]", pname, cmd, bp->bio_attribute);
		return;
	case BIO_FLUSH:
		cmd = "FLUSH";
		printf("%s[%s]", pname, cmd);
		return;
	case BIO_READ:
		cmd = "READ";
		/* FALLTHROUGH */
	case BIO_WRITE:
		if (cmd == NULL)
			cmd = "WRITE";
		/* FALLTHROUGH */
	case BIO_DELETE:
		if (cmd == NULL)
			cmd = "DELETE";
		printf("%s[%s(offset=%jd, length=%jd)]", pname, cmd,
		    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);
		return;
	default:
		cmd = "UNKNOWN";
		printf("%s[%s()]", pname, cmd);
		return;
	}
	/* NOTREACHED */
}