FreeBSD/Linux Kernel Cross Reference
sys/geom/geom_io.c
/*-
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bio.h>
#include <sys/ktr.h>
#include <sys/proc.h>

#include <sys/errno.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
#include <sys/devicestat.h>

#include <vm/uma.h>

#include <machine/atomic.h>

static struct g_bioq g_bio_run_down;
static struct g_bioq g_bio_run_up;
static struct g_bioq g_bio_run_task;

static u_int pace;
static uma_zone_t biozone;

static void
g_bioq_lock(struct g_bioq *bq)
{

	mtx_lock(&bq->bio_queue_lock);
}

static void
g_bioq_unlock(struct g_bioq *bq)
{

	mtx_unlock(&bq->bio_queue_lock);
}

#if 0
static void
g_bioq_destroy(struct g_bioq *bq)
{

	mtx_destroy(&bq->bio_queue_lock);
}
#endif

static void
g_bioq_init(struct g_bioq *bq)
{

	TAILQ_INIT(&bq->bio_queue);
	mtx_init(&bq->bio_queue_lock, "bio queue", NULL, MTX_DEF);
}

static struct bio *
g_bioq_first(struct g_bioq *bq)
{
	struct bio *bp;

	bp = TAILQ_FIRST(&bq->bio_queue);
	if (bp != NULL) {
		KASSERT((bp->bio_flags & BIO_ONQUEUE),
		    ("Bio not on queue bp=%p target %p", bp, bq));
		bp->bio_flags &= ~BIO_ONQUEUE;
		TAILQ_REMOVE(&bq->bio_queue, bp, bio_queue);
		bq->bio_queue_length--;
	}
	return (bp);
}

struct bio *
g_new_bio(void)
{
	struct bio *bp;

	bp = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
	return (bp);
}

struct bio *
g_alloc_bio(void)
{
	struct bio *bp;

	bp = uma_zalloc(biozone, M_WAITOK | M_ZERO);
	return (bp);
}

void
g_destroy_bio(struct bio *bp)
{

	uma_zfree(biozone, bp);
}

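/*
 * Editorial note: the two allocators above differ only in failure
 * semantics.  g_new_bio() uses M_NOWAIT and may return NULL, making it
 * safe in contexts that must not sleep; g_alloc_bio() uses M_WAITOK,
 * may sleep, and never returns NULL.  A minimal sketch of the
 * non-sleeping pattern (the error handling shown is hypothetical):
 *
 *	struct bio *bp;
 *
 *	bp = g_new_bio();
 *	if (bp == NULL)
 *		return (ENOMEM);
 *	... fill in and issue bp; whoever completes it eventually
 *	... frees it with g_destroy_bio().
 */
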
struct bio *
g_clone_bio(struct bio *bp)
{
	struct bio *bp2;

	bp2 = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
	if (bp2 != NULL) {
		bp2->bio_parent = bp;
		bp2->bio_cmd = bp->bio_cmd;
		bp2->bio_length = bp->bio_length;
		bp2->bio_offset = bp->bio_offset;
		bp2->bio_data = bp->bio_data;
		bp2->bio_attribute = bp->bio_attribute;
		bp->bio_children++;
	}
	return (bp2);
}

struct bio *
g_duplicate_bio(struct bio *bp)
{
	struct bio *bp2;

	bp2 = uma_zalloc(biozone, M_WAITOK | M_ZERO);
	bp2->bio_parent = bp;
	bp2->bio_cmd = bp->bio_cmd;
	bp2->bio_length = bp->bio_length;
	bp2->bio_offset = bp->bio_offset;
	bp2->bio_data = bp->bio_data;
	bp2->bio_attribute = bp->bio_attribute;
	bp->bio_children++;
	return (bp2);
}

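/*
 * Editorial note: g_clone_bio() is the usual way for a filtering GEOM
 * class to forward a request downward; bio_children and bio_inbed let
 * g_std_done() complete the parent once every clone has finished.  A
 * sketch of a pass-through start method (the class name is
 * hypothetical):
 *
 *	static void
 *	g_example_start(struct bio *bp)
 *	{
 *		struct bio *cbp;
 *
 *		cbp = g_clone_bio(bp);
 *		if (cbp == NULL) {
 *			g_io_deliver(bp, ENOMEM);
 *			return;
 *		}
 *		cbp->bio_done = g_std_done;
 *		g_io_request(cbp, LIST_FIRST(&bp->bio_to->geom->consumer));
 *	}
 *
 * g_duplicate_bio() is the sleeping variant for callers that cannot
 * tolerate an allocation failure.
 */
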
void
g_io_init(void)
{

	g_bioq_init(&g_bio_run_down);
	g_bioq_init(&g_bio_run_up);
	g_bioq_init(&g_bio_run_task);
	biozone = uma_zcreate("g_bio", sizeof (struct bio),
	    NULL, NULL, NULL, NULL, 0, 0);
}

int
g_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_getattr(%s)", attr);
	bp = g_alloc_bio();
	bp->bio_cmd = BIO_GETATTR;
	bp->bio_done = NULL;
	bp->bio_attribute = attr;
	bp->bio_length = *len;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "ggetattr");
	*len = bp->bio_completed;
	g_destroy_bio(bp);
	return (error);
}

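/*
 * Editorial note: attributes are queried by name; *len carries the
 * buffer size in and the number of bytes completed out.  A sketch,
 * assuming an attached consumer cp with read access:
 *
 *	u_int fwsectors;
 *	int error, len;
 *
 *	len = sizeof(fwsectors);
 *	error = g_io_getattr("GEOM::fwsectors", cp, &len, &fwsectors);
 *	if (error == 0)
 *		printf("firmware sectors: %u\n", fwsectors);
 */
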
static int
g_io_check(struct bio *bp)
{
	struct g_consumer *cp;
	struct g_provider *pp;

	cp = bp->bio_from;
	pp = bp->bio_to;

	/* Fail if the access counters don't allow the operation. */
	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_GETATTR:
		if (cp->acr == 0)
			return (EPERM);
		break;
	case BIO_WRITE:
	case BIO_DELETE:
		if (cp->acw == 0)
			return (EPERM);
		break;
	default:
		return (EPERM);
	}
	/* If the provider is marked for error, don't disturb it. */
	if (pp->error)
		return (pp->error);

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		/* A zero sectorsize probably means there is no media. */
		if (pp->sectorsize == 0)
			return (ENXIO);
		/* Reject I/O not on a sector boundary. */
		if (bp->bio_offset % pp->sectorsize)
			return (EINVAL);
		/* Reject I/O not an integral number of sectors long. */
		if (bp->bio_length % pp->sectorsize)
			return (EINVAL);
		/* Reject requests before or past the end of media. */
		if (bp->bio_offset < 0)
			return (EIO);
		if (bp->bio_offset > pp->mediasize)
			return (EIO);
		break;
	default:
		break;
	}
	return (0);
}

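/*
 * Editorial worked example: with pp->sectorsize == 512 and
 * pp->mediasize == 1048576, a request with bio_offset == 4096 and
 * bio_length == 1024 passes every check; bio_offset == 100 fails the
 * alignment test (100 % 512 != 0, EINVAL), and bio_offset == 2097152
 * fails the bounds test (past mediasize, EIO).
 */
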
void
g_io_request(struct bio *bp, struct g_consumer *cp)
{
	struct g_provider *pp;

	KASSERT(cp != NULL, ("NULL cp in g_io_request"));
	KASSERT(bp != NULL, ("NULL bp in g_io_request"));
	KASSERT(bp->bio_data != NULL, ("NULL bp->data in g_io_request"));
	pp = cp->provider;
	KASSERT(pp != NULL, ("consumer not attached in g_io_request"));

	if (bp->bio_cmd & (BIO_READ|BIO_WRITE|BIO_DELETE)) {
		KASSERT(bp->bio_offset % cp->provider->sectorsize == 0,
		    ("wrong offset %jd for sectorsize %u",
		    bp->bio_offset, cp->provider->sectorsize));
		KASSERT(bp->bio_length % cp->provider->sectorsize == 0,
		    ("wrong length %jd for sectorsize %u",
		    bp->bio_length, cp->provider->sectorsize));
	}

	g_trace(G_T_BIO, "bio_request(%p) from %p(%s) to %p(%s) cmd %d",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd);

	bp->bio_from = cp;
	bp->bio_to = pp;
	bp->bio_error = 0;
	bp->bio_completed = 0;

	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p", bp));
	bp->bio_flags |= BIO_ONQUEUE;

	binuptime(&bp->bio_t0);

	/*
	 * The statistics collection is lockless, but a given devstat
	 * instance must not be updated from more than one thread at a
	 * time, so grab the queue lock first.
	 */
	g_bioq_lock(&g_bio_run_down);
	if (g_collectstats & 1)
		devstat_start_transaction(pp->stat, &bp->bio_t0);
	if (g_collectstats & 2)
		devstat_start_transaction(cp->stat, &bp->bio_t0);

	pp->nstart++;
	cp->nstart++;
	TAILQ_INSERT_TAIL(&g_bio_run_down.bio_queue, bp, bio_queue);
	g_bio_run_down.bio_queue_length++;
	g_bioq_unlock(&g_bio_run_down);

	/* Pass it on down. */
	wakeup(&g_wait_down);
}

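/*
 * Editorial note: g_io_request() is asynchronous; completion is
 * signalled by the g_up thread calling bp->bio_done, or by biodone()'s
 * default wakeup (on which biowait() sleeps) when bio_done is NULL.  A
 * sketch of the callback style (names are hypothetical, error paths
 * elided):
 *
 *	static void
 *	g_example_done(struct bio *bp)
 *	{
 *
 *		if (bp->bio_error != 0)
 *			printf("example I/O error: %d\n", bp->bio_error);
 *		g_destroy_bio(bp);
 *	}
 *
 *	bp = g_alloc_bio();
 *	bp->bio_cmd = BIO_READ;
 *	bp->bio_offset = 0;
 *	bp->bio_length = cp->provider->sectorsize;
 *	bp->bio_data = buf;
 *	bp->bio_done = g_example_done;
 *	g_io_request(bp, cp);
 */
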
void
g_io_deliver(struct bio *bp, int error)
{
	struct g_consumer *cp;
	struct g_provider *pp;

	KASSERT(bp != NULL, ("NULL bp in g_io_deliver"));
	pp = bp->bio_to;
	KASSERT(pp != NULL, ("NULL bio_to in g_io_deliver"));
	cp = bp->bio_from;
	if (cp == NULL) {
		bp->bio_error = error;
		bp->bio_done(bp);
		return;
	}
	KASSERT(cp != NULL, ("NULL bio_from in g_io_deliver"));
	KASSERT(cp->geom != NULL, ("NULL bio_from->geom in g_io_deliver"));
	KASSERT(bp->bio_completed >= 0, ("bio_completed can't be less than 0"));
	KASSERT(bp->bio_completed <= bp->bio_length,
	    ("bio_completed can't be greater than bio_length"));

	g_trace(G_T_BIO,
	    "g_io_deliver(%p) from %p(%s) to %p(%s) cmd %d error %d off %jd len %jd",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd, error,
	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);

	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p", bp));

	/*
	 * XXX: the next two assignments don't belong here.
	 */
	bp->bio_bcount = bp->bio_length;
	bp->bio_resid = bp->bio_bcount - bp->bio_completed;

	/*
	 * The statistics collection is lockless, but a given devstat
	 * instance must not be updated from more than one thread at a
	 * time, so grab the queue lock first.
	 */
	g_bioq_lock(&g_bio_run_up);
	if (g_collectstats & 1)
		devstat_end_transaction_bio(pp->stat, bp);
	if (g_collectstats & 2)
		devstat_end_transaction_bio(cp->stat, bp);

	cp->nend++;
	pp->nend++;
	if (error != ENOMEM) {
		bp->bio_error = error;
		TAILQ_INSERT_TAIL(&g_bio_run_up.bio_queue, bp, bio_queue);
		bp->bio_flags |= BIO_ONQUEUE;
		g_bio_run_up.bio_queue_length++;
		g_bioq_unlock(&g_bio_run_up);
		wakeup(&g_wait_up);
		return;
	}
	g_bioq_unlock(&g_bio_run_up);

	if (bootverbose)
		printf("ENOMEM %p on %p(%s)\n", bp, pp, pp->name);
	bp->bio_children = 0;
	bp->bio_inbed = 0;
	g_io_request(bp, cp);
	pace++;
	return;
}

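/*
 * Editorial note: g_io_deliver() is the provider side of the contract;
 * every bio handed to a geom's start method must eventually be answered
 * through it.  Delivering ENOMEM does not complete the bio: as the code
 * above shows, the request is reissued and the g_down thread paces
 * itself.  A sketch of a leaf provider's start method (the backing
 * array is hypothetical):
 *
 *	static void
 *	g_example_start(struct bio *bp)
 *	{
 *
 *		switch (bp->bio_cmd) {
 *		case BIO_READ:
 *			memcpy(bp->bio_data, backing + bp->bio_offset,
 *			    bp->bio_length);
 *			bp->bio_completed = bp->bio_length;
 *			g_io_deliver(bp, 0);
 *			break;
 *		default:
 *			g_io_deliver(bp, EOPNOTSUPP);
 *			break;
 *		}
 *	}
 */
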
void
g_io_schedule_down(struct thread *tp __unused)
{
	struct bio *bp;
	off_t excess;
	int error;

	for (;;) {
		g_bioq_lock(&g_bio_run_down);
		bp = g_bioq_first(&g_bio_run_down);
		if (bp == NULL) {
			CTR0(KTR_GEOM, "g_down going to sleep");
			msleep(&g_wait_down, &g_bio_run_down.bio_queue_lock,
			    PRIBIO | PDROP, "-", hz/10);
			continue;
		}
		CTR0(KTR_GEOM, "g_down has work to do");
		g_bioq_unlock(&g_bio_run_down);
		if (pace > 0) {
			CTR1(KTR_GEOM, "g_down pacing self (pace %d)", pace);
			msleep(&error, NULL, PRIBIO, "g_down", hz/10);
			pace--;
		}
		error = g_io_check(bp);
		if (error) {
			CTR3(KTR_GEOM, "g_down g_io_check on bp %p provider "
			    "%s returned %d", bp, bp->bio_to->name, error);
			g_io_deliver(bp, error);
			continue;
		}
		CTR2(KTR_GEOM, "g_down processing bp %p provider %s", bp,
		    bp->bio_to->name);
		switch (bp->bio_cmd) {
		case BIO_READ:
		case BIO_WRITE:
		case BIO_DELETE:
			/* Truncate requests to the end of the provider's media. */
			/*
			 * XXX: What if we truncate because of offset being
			 * bad, not length?
			 */
			excess = bp->bio_offset + bp->bio_length;
			if (excess > bp->bio_to->mediasize) {
				excess -= bp->bio_to->mediasize;
				bp->bio_length -= excess;
				if (excess > 0)
					CTR3(KTR_GEOM, "g_down truncated bio "
					    "%p provider %s by %d", bp,
					    bp->bio_to->name, excess);
			}
			/* Deliver zero-length transfers right here. */
			if (bp->bio_length == 0) {
				g_io_deliver(bp, 0);
				CTR2(KTR_GEOM, "g_down terminated 0-length "
				    "bp %p provider %s", bp, bp->bio_to->name);
				continue;
			}
			break;
		default:
			break;
		}
		THREAD_NO_SLEEPING();
		CTR4(KTR_GEOM, "g_down starting bp %p provider %s off %ld "
		    "len %ld", bp, bp->bio_to->name, bp->bio_offset,
		    bp->bio_length);
		bp->bio_to->geom->start(bp);
		THREAD_SLEEPING_OK();
	}
}

void
bio_taskqueue(struct bio *bp, bio_task_t *func, void *arg)
{
	bp->bio_task = func;
	bp->bio_task_arg = arg;
	/*
	 * The taskqueue is actually just a second queue off the "up"
	 * queue, so we use the same lock.
	 */
	g_bioq_lock(&g_bio_run_up);
	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p target taskq", bp));
	bp->bio_flags |= BIO_ONQUEUE;
	TAILQ_INSERT_TAIL(&g_bio_run_task.bio_queue, bp, bio_queue);
	g_bio_run_task.bio_queue_length++;
	wakeup(&g_wait_up);
	g_bioq_unlock(&g_bio_run_up);
}

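/*
 * Editorial note: bio_taskqueue() arranges for the g_up thread to call
 * func(arg) for this bio instead of running the normal biodone() path.
 * A sketch (the handler is hypothetical):
 *
 *	static void
 *	g_example_task(void *arg)
 *	{
 *		struct bio *bp;
 *
 *		bp = arg;
 *		... completion work that must run in g_up's context ...
 *	}
 *
 *	bio_taskqueue(bp, g_example_task, bp);
 */
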
void
g_io_schedule_up(struct thread *tp __unused)
{
	struct bio *bp;

	for (;;) {
		g_bioq_lock(&g_bio_run_up);
		bp = g_bioq_first(&g_bio_run_task);
		if (bp != NULL) {
			g_bioq_unlock(&g_bio_run_up);
			THREAD_NO_SLEEPING();
			CTR1(KTR_GEOM, "g_up processing task bp %p", bp);
			bp->bio_task(bp->bio_task_arg);
			THREAD_SLEEPING_OK();
			continue;
		}
		bp = g_bioq_first(&g_bio_run_up);
		if (bp != NULL) {
			g_bioq_unlock(&g_bio_run_up);
			THREAD_NO_SLEEPING();
			CTR4(KTR_GEOM, "g_up biodone bp %p provider %s off "
			    "%ld len %ld", bp, bp->bio_to->name,
			    bp->bio_offset, bp->bio_length);
			biodone(bp);
			THREAD_SLEEPING_OK();
			continue;
		}
		CTR0(KTR_GEOM, "g_up going to sleep");
		msleep(&g_wait_up, &g_bio_run_up.bio_queue_lock,
		    PRIBIO | PDROP, "-", hz/10);
	}
}

void *
g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error)
{
	struct bio *bp;
	void *ptr;
	int errorc;

	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
	    length <= MAXPHYS, ("g_read_data(): invalid length %jd",
	    (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_READ;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	ptr = g_malloc(length, M_WAITOK);
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	errorc = biowait(bp, "gread");
	if (error != NULL)
		*error = errorc;
	g_destroy_bio(bp);
	if (errorc) {
		g_free(ptr);
		ptr = NULL;
	}
	return (ptr);
}

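/*
 * Editorial note: g_read_data() is the usual way for a class's taste
 * method to inspect on-disk metadata.  The buffer is allocated here
 * and the caller must release it with g_free().  A sketch reading the
 * last sector of a provider pp through consumer cp:
 *
 *	u_char *buf;
 *	int error;
 *
 *	buf = g_read_data(cp, pp->mediasize - pp->sectorsize,
 *	    pp->sectorsize, &error);
 *	if (buf == NULL)
 *		return (error);
 *	... examine the sector ...
 *	g_free(buf);
 */
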
int
g_write_data(struct g_consumer *cp, off_t offset, void *ptr, off_t length)
{
	struct bio *bp;
	int error;

	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
	    length <= MAXPHYS, ("g_write_data(): invalid length %jd",
	    (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_WRITE;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "gwrite");
	g_destroy_bio(bp);
	return (error);
}

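/*
 * Editorial note: the write-side counterpart; the caller supplies and
 * keeps ownership of the buffer, and offset/length must satisfy the
 * same sector-multiple constraints asserted above, e.g.:
 *
 *	error = g_write_data(cp, pp->mediasize - pp->sectorsize,
 *	    buf, pp->sectorsize);
 */
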
void
g_print_bio(struct bio *bp)
{
	const char *pname, *cmd = NULL;

	if (bp->bio_to != NULL)
		pname = bp->bio_to->name;
	else
		pname = "[unknown]";

	switch (bp->bio_cmd) {
	case BIO_GETATTR:
		cmd = "GETATTR";
		printf("%s[%s(attr=%s)]", pname, cmd, bp->bio_attribute);
		return;
	case BIO_READ:
		cmd = "READ";
		/* FALLTHROUGH */
	case BIO_WRITE:
		if (cmd == NULL)
			cmd = "WRITE";
		/* FALLTHROUGH */
	case BIO_DELETE:
		if (cmd == NULL)
			cmd = "DELETE";
		printf("%s[%s(offset=%jd, length=%jd)]", pname, cmd,
		    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);
		return;
	default:
		cmd = "UNKNOWN";
		printf("%s[%s()]", pname, cmd);
		return;
	}
	/* NOTREACHED */
}