FreeBSD/Linux Kernel Cross Reference
sys/geom/geom_io.c
1 /*-
2 * Copyright (c) 2002 Poul-Henning Kamp
3 * Copyright (c) 2002 Networks Associates Technology, Inc.
4 * All rights reserved.
5 *
6 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
7 * and NAI Labs, the Security Research Division of Network Associates, Inc.
8 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
9 * DARPA CHATS research program.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. The names of the authors may not be used to endorse or promote
20 * products derived from this software without specific prior written
21 * permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 */
35
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD: releng/6.1/sys/geom/geom_io.c 158179 2006-04-30 16:44:43Z cvs2svn $");
38
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/malloc.h>
43 #include <sys/bio.h>
44 #include <sys/ktr.h>
45 #include <sys/proc.h>
46
47 #include <sys/errno.h>
48 #include <geom/geom.h>
49 #include <geom/geom_int.h>
50 #include <sys/devicestat.h>
51
52 #include <vm/uma.h>
53
/* Request queue feeding g_down and completion queue feeding g_up. */
static struct g_bioq g_bio_run_down;
static struct g_bioq g_bio_run_up;
/* Hand-off queue for bio_taskqueue() callbacks, served by g_up. */
static struct g_bioq g_bio_run_task;

/* Bumped by g_io_deliver() on ENOMEM; makes g_io_schedule_down() throttle. */
static u_int pace;
/* UMA zone from which every struct bio in this file is allocated. */
static uma_zone_t biozone;
60
61 #include <machine/atomic.h>
62
/* Acquire the mutex protecting bq's queue and length counter. */
static void
g_bioq_lock(struct g_bioq *bq)
{

	mtx_lock(&bq->bio_queue_lock);
}
69
/* Release the mutex taken by g_bioq_lock(). */
static void
g_bioq_unlock(struct g_bioq *bq)
{

	mtx_unlock(&bq->bio_queue_lock);
}
76
#if 0
/* Currently unused: tear down the mutex of a bio queue. */
static void
g_bioq_destroy(struct g_bioq *bq)
{

	mtx_destroy(&bq->bio_queue_lock);
}
#endif
85
/* Initialize a bio queue: empty tailq plus its MTX_DEF mutex. */
static void
g_bioq_init(struct g_bioq *bq)
{

	TAILQ_INIT(&bq->bio_queue);
	mtx_init(&bq->bio_queue_lock, "bio queue", NULL, MTX_DEF);
}
93
/*
 * Remove and return the first bio on bq, or NULL if the queue is
 * empty.  All callers in this file hold bq's bio_queue_lock around
 * this call.
 */
static struct bio *
g_bioq_first(struct g_bioq *bq)
{
	struct bio *bp;

	bp = TAILQ_FIRST(&bq->bio_queue);
	if (bp != NULL) {
		KASSERT((bp->bio_flags & BIO_ONQUEUE),
		    ("Bio not on queue bp=%p target %p", bp, bq));
		/* Clear the marker so the bio may later be queued again. */
		bp->bio_flags &= ~BIO_ONQUEUE;
		TAILQ_REMOVE(&bq->bio_queue, bp, bio_queue);
		bq->bio_queue_length--;
	}
	return (bp);
}
109
110 struct bio *
111 g_new_bio(void)
112 {
113 struct bio *bp;
114
115 bp = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
116 return (bp);
117 }
118
119 struct bio *
120 g_alloc_bio(void)
121 {
122 struct bio *bp;
123
124 bp = uma_zalloc(biozone, M_WAITOK | M_ZERO);
125 return (bp);
126 }
127
/*
 * Return a bio obtained from g_new_bio()/g_alloc_bio()/g_clone_bio()
 * to the bio zone.
 */
void
g_destroy_bio(struct bio *bp)
{

	uma_zfree(biozone, bp);
}
134
135 struct bio *
136 g_clone_bio(struct bio *bp)
137 {
138 struct bio *bp2;
139
140 bp2 = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
141 if (bp2 != NULL) {
142 bp2->bio_parent = bp;
143 bp2->bio_cmd = bp->bio_cmd;
144 bp2->bio_length = bp->bio_length;
145 bp2->bio_offset = bp->bio_offset;
146 bp2->bio_data = bp->bio_data;
147 bp2->bio_attribute = bp->bio_attribute;
148 bp->bio_children++;
149 }
150 return(bp2);
151 }
152
153 void
154 g_io_init()
155 {
156
157 g_bioq_init(&g_bio_run_down);
158 g_bioq_init(&g_bio_run_up);
159 g_bioq_init(&g_bio_run_task);
160 biozone = uma_zcreate("g_bio", sizeof (struct bio),
161 NULL, NULL,
162 NULL, NULL,
163 0, 0);
164 }
165
/*
 * Issue a synchronous BIO_GETATTR request for attribute "attr" on
 * consumer cp.  On entry *len is the size of the buffer at ptr; on
 * return it holds the number of bytes the provider completed.
 * Returns the bio error code (0 on success).  May sleep.
 */
int
g_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr)
{
	struct bio *bp;
	int error;

	g_trace(G_T_BIO, "bio_getattr(%s)", attr);
	bp = g_alloc_bio();		/* M_WAITOK: cannot fail */
	bp->bio_cmd = BIO_GETATTR;
	bp->bio_done = NULL;
	bp->bio_attribute = attr;
	bp->bio_length = *len;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "ggetattr");
	/*
	 * NOTE(review): narrows bio_completed into an int *len; assumes
	 * attribute replies are small enough not to truncate -- confirm.
	 */
	*len = bp->bio_completed;
	g_destroy_bio(bp);
	return (error);
}
185
/*
 * Sanity-check a bio heading down to its provider: verify the
 * consumer holds the required access counts for the command, that the
 * provider is not marked in error, and that data transfers are
 * sector-aligned and start within the media.  Returns 0 if the bio
 * may proceed, otherwise an errno to fail it with.
 */
static int
g_io_check(struct bio *bp)
{
	struct g_consumer *cp;
	struct g_provider *pp;

	cp = bp->bio_from;
	pp = bp->bio_to;

	/* Fail if access counters dont allow the operation */
	switch(bp->bio_cmd) {
	case BIO_READ:
	case BIO_GETATTR:
		if (cp->acr == 0)
			return (EPERM);
		break;
	case BIO_WRITE:
	case BIO_DELETE:
		if (cp->acw == 0)
			return (EPERM);
		break;
	default:
		/* Unknown commands are rejected outright. */
		return (EPERM);
	}
	/* if provider is marked for error, don't disturb. */
	if (pp->error)
		return (pp->error);

	switch(bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		/* Zero sectorsize is a probably lack of media */
		if (pp->sectorsize == 0)
			return (ENXIO);
		/* Reject I/O not on sector boundary */
		if (bp->bio_offset % pp->sectorsize)
			return (EINVAL);
		/* Reject I/O not integral sector long */
		if (bp->bio_length % pp->sectorsize)
			return (EINVAL);
		/* Reject requests before or past the end of media. */
		if (bp->bio_offset < 0)
			return (EIO);
		/*
		 * NOTE(review): offset == mediasize passes this check; such
		 * a request is truncated to zero length and completed by
		 * g_io_schedule_down().
		 */
		if (bp->bio_offset > pp->mediasize)
			return (EIO);
		break;
	default:
		break;
	}
	return (0);
}
238
/*
 * Issue bp on consumer cp: fill in the routing fields, start the
 * devstat clock, enqueue the bio on the down queue and wake the
 * g_down thread, which will validate it and call the provider geom's
 * start method.  Completion is signalled asynchronously through
 * bp->bio_done / biowait().
 */
void
g_io_request(struct bio *bp, struct g_consumer *cp)
{
	struct g_provider *pp;

	KASSERT(cp != NULL, ("NULL cp in g_io_request"));
	KASSERT(bp != NULL, ("NULL bp in g_io_request"));
	KASSERT(bp->bio_data != NULL, ("NULL bp->data in g_io_request"));
	pp = cp->provider;
	KASSERT(pp != NULL, ("consumer not attached in g_io_request"));

	/* Data transfers must be aligned to the provider's sectorsize. */
	if (bp->bio_cmd & (BIO_READ|BIO_WRITE|BIO_DELETE)) {
		KASSERT(bp->bio_offset % cp->provider->sectorsize == 0,
		    ("wrong offset %jd for sectorsize %u",
		    bp->bio_offset, cp->provider->sectorsize));
		KASSERT(bp->bio_length % cp->provider->sectorsize == 0,
		    ("wrong length %jd for sectorsize %u",
		    bp->bio_length, cp->provider->sectorsize));
	}

	g_trace(G_T_BIO, "bio_request(%p) from %p(%s) to %p(%s) cmd %d",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd);

	bp->bio_from = cp;
	bp->bio_to = pp;
	bp->bio_error = 0;
	bp->bio_completed = 0;

	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p", bp));
	bp->bio_flags |= BIO_ONQUEUE;

	/* Timestamp consumed by devstat_start_transaction() below. */
	binuptime(&bp->bio_t0);

	/*
	 * The statistics collection is lockless, as such, but we
	 * can not update one instance of the statistics from more
	 * than one thread at a time, so grab the lock first.
	 */
	g_bioq_lock(&g_bio_run_down);
	/* g_collectstats bit 0: provider stats; bit 1: consumer stats. */
	if (g_collectstats & 1)
		devstat_start_transaction(pp->stat, &bp->bio_t0);
	if (g_collectstats & 2)
		devstat_start_transaction(cp->stat, &bp->bio_t0);

	pp->nstart++;
	cp->nstart++;
	TAILQ_INSERT_TAIL(&g_bio_run_down.bio_queue, bp, bio_queue);
	g_bio_run_down.bio_queue_length++;
	g_bioq_unlock(&g_bio_run_down);

	/* Pass it on down. */
	wakeup(&g_wait_down);
}
293
/*
 * Complete bp with the given error and hand it back up: update the
 * devstat records, queue the bio on the up queue and wake the g_up
 * thread, which will call biodone().  A bio with no consumer
 * (bio_from == NULL) is completed directly via its bio_done method.
 * ENOMEM is never delivered: the request is re-issued from scratch
 * and the g_down thread is told to pace itself.
 */
void
g_io_deliver(struct bio *bp, int error)
{
	struct g_consumer *cp;
	struct g_provider *pp;

	KASSERT(bp != NULL, ("NULL bp in g_io_deliver"));
	pp = bp->bio_to;
	KASSERT(pp != NULL, ("NULL bio_to in g_io_deliver"));
	cp = bp->bio_from;
	if (cp == NULL) {
		/* No consumer: complete synchronously in this thread. */
		bp->bio_error = error;
		bp->bio_done(bp);
		return;
	}
	KASSERT(cp != NULL, ("NULL bio_from in g_io_deliver"));
	KASSERT(cp->geom != NULL, ("NULL bio_from->geom in g_io_deliver"));
	KASSERT(bp->bio_completed >= 0, ("bio_completed can't be less than 0"));
	KASSERT(bp->bio_completed <= bp->bio_length,
	    ("bio_completed can't be greater than bio_length"));

	g_trace(G_T_BIO,
"g_io_deliver(%p) from %p(%s) to %p(%s) cmd %d error %d off %jd len %jd",
	    bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd, error,
	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);

	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p", bp));

	/*
	 * XXX: next two doesn't belong here
	 */
	bp->bio_bcount = bp->bio_length;
	bp->bio_resid = bp->bio_bcount - bp->bio_completed;

	/*
	 * The statistics collection is lockless, as such, but we
	 * can not update one instance of the statistics from more
	 * than one thread at a time, so grab the lock first.
	 */
	g_bioq_lock(&g_bio_run_up);
	if (g_collectstats & 1)
		devstat_end_transaction_bio(pp->stat, bp);
	if (g_collectstats & 2)
		devstat_end_transaction_bio(cp->stat, bp);

	cp->nend++;
	pp->nend++;
	if (error != ENOMEM) {
		bp->bio_error = error;
		TAILQ_INSERT_TAIL(&g_bio_run_up.bio_queue, bp, bio_queue);
		bp->bio_flags |= BIO_ONQUEUE;
		g_bio_run_up.bio_queue_length++;
		g_bioq_unlock(&g_bio_run_up);
		wakeup(&g_wait_up);
		return;
	}
	g_bioq_unlock(&g_bio_run_up);

	/* ENOMEM: reset the bio and retry; ask g_down to slow down. */
	if (bootverbose)
		printf("ENOMEM %p on %p(%s)\n", bp, pp, pp->name);
	bp->bio_children = 0;
	bp->bio_inbed = 0;
	g_io_request(bp, cp);
	pace++;
	return;
}
361
/*
 * Main loop of the g_down kernel thread; never returns.  Pulls bios
 * off the down queue, validates them with g_io_check(), truncates
 * reads/writes/deletes that extend past the end of the media, and
 * hands them to the destination geom's start method.  While "pace" is
 * non-zero (set by g_io_deliver() on ENOMEM) the thread pauses
 * briefly before each request.
 */
void
g_io_schedule_down(struct thread *tp __unused)
{
	struct bio *bp;
	off_t excess;
	int error;

	for(;;) {
		g_bioq_lock(&g_bio_run_down);
		bp = g_bioq_first(&g_bio_run_down);
		if (bp == NULL) {
			CTR0(KTR_GEOM, "g_down going to sleep");
			/* PDROP: msleep releases the queue lock for us. */
			msleep(&g_wait_down, &g_bio_run_down.bio_queue_lock,
			    PRIBIO | PDROP, "-", hz/10);
			continue;
		}
		CTR0(KTR_GEOM, "g_down has work to do");
		g_bioq_unlock(&g_bio_run_down);
		if (pace > 0) {
			CTR1(KTR_GEOM, "g_down pacing self (pace %d)", pace);
			/*
			 * Sleep on a stack address nobody ever wakes: this
			 * is purely a hz/10 timed pause.
			 */
			msleep(&error, NULL, PRIBIO, "g_down", hz/10);
			pace--;
		}
		error = g_io_check(bp);
		if (error) {
			CTR3(KTR_GEOM, "g_down g_io_check on bp %p provider "
			    "%s returned %d", bp, bp->bio_to->name, error);
			g_io_deliver(bp, error);
			continue;
		}
		CTR2(KTR_GEOM, "g_down processing bp %p provider %s", bp,
		    bp->bio_to->name);
		switch (bp->bio_cmd) {
		case BIO_READ:
		case BIO_WRITE:
		case BIO_DELETE:
			/* Truncate requests to the end of providers media. */
			/*
			 * XXX: What if we truncate because of offset being
			 * bad, not length?
			 */
			excess = bp->bio_offset + bp->bio_length;
			if (excess > bp->bio_to->mediasize) {
				excess -= bp->bio_to->mediasize;
				bp->bio_length -= excess;
				/*
				 * NOTE(review): "%d" with an off_t argument
				 * looks like a format/argument mismatch; the
				 * traced value may be garbled -- confirm
				 * against the CTR macro semantics.
				 */
				if (excess > 0)
					CTR3(KTR_GEOM, "g_down truncated bio "
					    "%p provider %s by %d", bp,
					    bp->bio_to->name, excess);
			}
			/* Deliver zero length transfers right here. */
			if (bp->bio_length == 0) {
				g_io_deliver(bp, 0);
				CTR2(KTR_GEOM, "g_down terminated 0-length "
				    "bp %p provider %s", bp, bp->bio_to->name);
				continue;
			}
			break;
		default:
			break;
		}
		/* Start methods must not sleep; enforce while calling down. */
		THREAD_NO_SLEEPING();
		CTR4(KTR_GEOM, "g_down starting bp %p provider %s off %ld "
		    "len %ld", bp, bp->bio_to->name, bp->bio_offset,
		    bp->bio_length);
		bp->bio_to->geom->start(bp);
		THREAD_SLEEPING_OK();
	}
}
431
/*
 * Queue bp so that func(arg) is executed in the g_up thread.  The
 * task queue shares the up queue's lock and the g_wait_up wakeup
 * channel, and is served ahead of regular completions by
 * g_io_schedule_up().
 */
void
bio_taskqueue(struct bio *bp, bio_task_t *func, void *arg)
{
	bp->bio_task = func;
	bp->bio_task_arg = arg;
	/*
	 * The taskqueue is actually just a second queue off the "up"
	 * queue, so we use the same lock.
	 */
	g_bioq_lock(&g_bio_run_up);
	KASSERT(!(bp->bio_flags & BIO_ONQUEUE),
	    ("Bio already on queue bp=%p target taskq", bp));
	bp->bio_flags |= BIO_ONQUEUE;
	TAILQ_INSERT_TAIL(&g_bio_run_task.bio_queue, bp, bio_queue);
	g_bio_run_task.bio_queue_length++;
	wakeup(&g_wait_up);
	g_bioq_unlock(&g_bio_run_up);
}
450
451
/*
 * Main loop of the g_up kernel thread; never returns.  Serves the
 * task queue first (callbacks posted by bio_taskqueue()), then
 * regular completions via biodone().  Both queues are protected by
 * the up queue's lock and share the g_wait_up wakeup channel.
 */
void
g_io_schedule_up(struct thread *tp __unused)
{
	struct bio *bp;
	for(;;) {
		g_bioq_lock(&g_bio_run_up);
		bp = g_bioq_first(&g_bio_run_task);
		if (bp != NULL) {
			g_bioq_unlock(&g_bio_run_up);
			/* Up-path handlers must not sleep. */
			THREAD_NO_SLEEPING();
			CTR1(KTR_GEOM, "g_up processing task bp %p", bp);
			bp->bio_task(bp->bio_task_arg);
			THREAD_SLEEPING_OK();
			continue;
		}
		bp = g_bioq_first(&g_bio_run_up);
		if (bp != NULL) {
			g_bioq_unlock(&g_bio_run_up);
			THREAD_NO_SLEEPING();
			CTR4(KTR_GEOM, "g_up biodone bp %p provider %s off "
			    "%ld len %ld", bp, bp->bio_to->name,
			    bp->bio_offset, bp->bio_length);
			biodone(bp);
			THREAD_SLEEPING_OK();
			continue;
		}
		CTR0(KTR_GEOM, "g_up going to sleep");
		/* PDROP: msleep releases the queue lock for us. */
		msleep(&g_wait_up, &g_bio_run_up.bio_queue_lock,
		    PRIBIO | PDROP, "-", hz/10);
	}
}
483
/*
 * Synchronously read "length" bytes at "offset" from consumer cp.
 * Returns a g_malloc()'ed buffer which the caller must g_free(), or
 * NULL on failure; if "error" is non-NULL the bio error code is
 * stored there.  May sleep.  length must be at least the provider's
 * sectorsize and at most MAXPHYS (g_io_request() further asserts
 * sector alignment).
 */
void *
g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error)
{
	struct bio *bp;
	void *ptr;
	int errorc;

	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
	    length <= MAXPHYS, ("g_read_data(): invalid length %jd",
	    (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_READ;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	ptr = g_malloc(length, M_WAITOK);
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	errorc = biowait(bp, "gread");
	if (error != NULL)
		*error = errorc;
	g_destroy_bio(bp);
	if (errorc) {
		/* On error, release the buffer and hand back NULL. */
		g_free(ptr);
		ptr = NULL;
	}
	return (ptr);
}
513
/*
 * Synchronously write "length" bytes from ptr at "offset" on consumer
 * cp.  Returns the bio error code (0 on success).  May sleep.  length
 * must be at least the provider's sectorsize and at most MAXPHYS
 * (g_io_request() further asserts sector alignment).
 */
int
g_write_data(struct g_consumer *cp, off_t offset, void *ptr, off_t length)
{
	struct bio *bp;
	int error;

	KASSERT(length > 0 && length >= cp->provider->sectorsize &&
	    length <= MAXPHYS, ("g_write_data(): invalid length %jd",
	    (intmax_t)length));

	bp = g_alloc_bio();
	bp->bio_cmd = BIO_WRITE;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = length;
	bp->bio_data = ptr;
	g_io_request(bp, cp);
	error = biowait(bp, "gwrite");
	g_destroy_bio(bp);
	return (error);
}
535
536 void
537 g_print_bio(struct bio *bp)
538 {
539 const char *pname, *cmd = NULL;
540
541 if (bp->bio_to != NULL)
542 pname = bp->bio_to->name;
543 else
544 pname = "[unknown]";
545
546 switch (bp->bio_cmd) {
547 case BIO_GETATTR:
548 cmd = "GETATTR";
549 printf("%s[%s(attr=%s)]", pname, cmd, bp->bio_attribute);
550 return;
551 case BIO_READ:
552 cmd = "READ";
553 case BIO_WRITE:
554 if (cmd == NULL)
555 cmd = "WRITE";
556 case BIO_DELETE:
557 if (cmd == NULL)
558 cmd = "DELETE";
559 printf("%s[%s(offset=%jd, length=%jd)]", pname, cmd,
560 (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);
561 return;
562 default:
563 cmd = "UNKNOWN";
564 printf("%s[%s()]", pname, cmd);
565 return;
566 }
567 /* NOTREACHED */
568 }
Cache object: 55d68dbee31fbcb4d7979aecd372f0eb
|