FreeBSD/Linux Kernel Cross Reference
sys/geom/geom_io.c
1 /*-
2 * Copyright (c) 2002 Poul-Henning Kamp
3 * Copyright (c) 2002 Networks Associates Technology, Inc.
4 * All rights reserved.
5 *
6 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
7 * and NAI Labs, the Security Research Division of Network Associates, Inc.
8 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
9 * DARPA CHATS research program.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. The names of the authors may not be used to endorse or promote
20 * products derived from this software without specific prior written
21 * permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 */
35
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD: releng/5.2/sys/geom/geom_io.c 125692 2004-02-11 08:31:23Z scottl $");
38
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/malloc.h>
43 #include <sys/bio.h>
44
45 #include <sys/errno.h>
46 #include <geom/geom.h>
47 #include <geom/geom_int.h>
48 #include <sys/devicestat.h>
49
50 #include <vm/uma.h>
51
52 static struct g_bioq g_bio_run_down;
53 static struct g_bioq g_bio_run_up;
54 static struct g_bioq g_bio_run_task;
55
56 static u_int pace;
57 static uma_zone_t biozone;
58
59 #include <machine/atomic.h>
60
61 static void
62 g_bioq_lock(struct g_bioq *bq)
63 {
64
65 mtx_lock(&bq->bio_queue_lock);
66 }
67
68 static void
69 g_bioq_unlock(struct g_bioq *bq)
70 {
71
72 mtx_unlock(&bq->bio_queue_lock);
73 }
74
75 #if 0
76 static void
77 g_bioq_destroy(struct g_bioq *bq)
78 {
79
80 mtx_destroy(&bq->bio_queue_lock);
81 }
82 #endif
83
84 static void
85 g_bioq_init(struct g_bioq *bq)
86 {
87
88 TAILQ_INIT(&bq->bio_queue);
89 mtx_init(&bq->bio_queue_lock, "bio queue", NULL, MTX_DEF);
90 }
91
92 static struct bio *
93 g_bioq_first(struct g_bioq *bq)
94 {
95 struct bio *bp;
96
97 bp = TAILQ_FIRST(&bq->bio_queue);
98 if (bp != NULL) {
99 TAILQ_REMOVE(&bq->bio_queue, bp, bio_queue);
100 bq->bio_queue_length--;
101 }
102 return (bp);
103 }
104
105 static void
106 g_bioq_enqueue_tail(struct bio *bp, struct g_bioq *rq)
107 {
108
109 g_bioq_lock(rq);
110 TAILQ_INSERT_TAIL(&rq->bio_queue, bp, bio_queue);
111 rq->bio_queue_length++;
112 g_bioq_unlock(rq);
113 }
114
115 struct bio *
116 g_new_bio(void)
117 {
118 struct bio *bp;
119
120 bp = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
121 return (bp);
122 }
123
124 void
125 g_destroy_bio(struct bio *bp)
126 {
127
128 uma_zfree(biozone, bp);
129 }
130
131 struct bio *
132 g_clone_bio(struct bio *bp)
133 {
134 struct bio *bp2;
135
136 bp2 = uma_zalloc(biozone, M_NOWAIT | M_ZERO);
137 if (bp2 != NULL) {
138 bp2->bio_parent = bp;
139 bp2->bio_cmd = bp->bio_cmd;
140 bp2->bio_length = bp->bio_length;
141 bp2->bio_offset = bp->bio_offset;
142 bp2->bio_data = bp->bio_data;
143 bp2->bio_attribute = bp->bio_attribute;
144 bp->bio_children++;
145 }
146 return(bp2);
147 }
148
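
A typical consumer of g_clone_bio() is the start method of a transform class, which clones each incoming request, retargets the clone at its own consumer and forwards it with g_io_request(); the stock completion handler g_std_done() then propagates status back to the parent bio. The sketch below only illustrates that pattern; the g_example_* names are hypothetical and not part of this file.

static void
g_example_start(struct bio *bp)
{
        struct g_consumer *cp;
        struct bio *bp2;

        cp = LIST_FIRST(&bp->bio_to->geom->consumer);
        bp2 = g_clone_bio(bp);
        if (bp2 == NULL) {
                /* g_clone_bio() allocates with M_NOWAIT, so it can fail. */
                g_io_deliver(bp, ENOMEM);
                return;
        }
        bp2->bio_done = g_std_done;     /* complete the parent when the clone finishes */
        /* A real class might adjust bp2->bio_offset here, e.g. to skip metadata. */
        g_io_request(bp2, cp);
}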
149 void
150 g_io_init()
151 {
152
153 g_bioq_init(&g_bio_run_down);
154 g_bioq_init(&g_bio_run_up);
155 g_bioq_init(&g_bio_run_task);
156 biozone = uma_zcreate("g_bio", sizeof (struct bio),
157 NULL, NULL,
158 NULL, NULL,
159 0, 0);
160 }
161
162 int
163 g_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr)
164 {
165 struct bio *bp;
166 int error;
167
168 g_trace(G_T_BIO, "bio_getattr(%s)", attr);
169 bp = g_new_bio();
170 bp->bio_cmd = BIO_GETATTR;
171 bp->bio_done = NULL;
172 bp->bio_attribute = attr;
173 bp->bio_length = *len;
174 bp->bio_data = ptr;
175 g_io_request(bp, cp);
176 error = biowait(bp, "ggetattr");
177 *len = bp->bio_completed;
178 g_destroy_bio(bp);
179 return (error);
180 }
181
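
Because g_io_getattr() sleeps in biowait(), it may only be used from a context that is allowed to sleep (not from the g_up or g_down threads). A usage sketch, assuming an open consumer cp and the "GEOM::fwsectors" attribute that disk providers commonly answer:

        u_int fwsectors;
        int error, len;

        len = sizeof(fwsectors);
        error = g_io_getattr("GEOM::fwsectors", cp, &len, &fwsectors);
        if (error == 0)
                printf("provider reports %u sectors per track\n", fwsectors);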
182 static int
183 g_io_check(struct bio *bp)
184 {
185 struct g_consumer *cp;
186 struct g_provider *pp;
187
188 cp = bp->bio_from;
189 pp = bp->bio_to;
190
191 	/* Fail if access counters don't allow the operation */
192 switch(bp->bio_cmd) {
193 case BIO_READ:
194 case BIO_GETATTR:
195 if (cp->acr == 0)
196 return (EPERM);
197 break;
198 case BIO_WRITE:
199 case BIO_DELETE:
200 if (cp->acw == 0)
201 return (EPERM);
202 break;
203 default:
204 return (EPERM);
205 }
206 /* if provider is marked for error, don't disturb. */
207 if (pp->error)
208 return (pp->error);
209
210 switch(bp->bio_cmd) {
211 case BIO_READ:
212 case BIO_WRITE:
213 case BIO_DELETE:
214 		/* Zero sectorsize probably indicates lack of media */
215 if (pp->sectorsize == 0)
216 return (ENXIO);
217 /* Reject I/O not on sector boundary */
218 if (bp->bio_offset % pp->sectorsize)
219 return (EINVAL);
220 		/* Reject I/O that is not an integral number of sectors long */
221 if (bp->bio_length % pp->sectorsize)
222 return (EINVAL);
223 /* Reject requests before or past the end of media. */
224 if (bp->bio_offset < 0)
225 return (EIO);
226 if (bp->bio_offset > pp->mediasize)
227 return (EIO);
228 break;
229 default:
230 break;
231 }
232 return (0);
233 }
234
235 void
236 g_io_request(struct bio *bp, struct g_consumer *cp)
237 {
238 struct g_provider *pp;
239
240 KASSERT(cp != NULL, ("NULL cp in g_io_request"));
241 KASSERT(bp != NULL, ("NULL bp in g_io_request"));
242 KASSERT(bp->bio_data != NULL, ("NULL bp->data in g_io_request"));
243 pp = cp->provider;
244 KASSERT(pp != NULL, ("consumer not attached in g_io_request"));
245
246 bp->bio_from = cp;
247 bp->bio_to = pp;
248 bp->bio_error = 0;
249 bp->bio_completed = 0;
250
251 if (g_collectstats) {
252 devstat_start_transaction_bio(cp->stat, bp);
253 devstat_start_transaction_bio(pp->stat, bp);
254 }
255 cp->nstart++;
256 pp->nstart++;
257
258 /* Pass it on down. */
259 g_trace(G_T_BIO, "bio_request(%p) from %p(%s) to %p(%s) cmd %d",
260 bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd);
261 g_bioq_enqueue_tail(bp, &g_bio_run_down);
262 wakeup(&g_wait_down);
263 }
264
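
g_io_request() is purely asynchronous: it queues the bio for the g_down thread and returns immediately. A caller that does not want to block sets bio_done and finishes the request in that callback, which the g_up thread runs via biodone(). The example_* names below are hypothetical; this is only a sketch of the calling convention.

static void
example_read_done(struct bio *bp)
{

        if (bp->bio_error != 0)
                printf("example read failed: %d\n", bp->bio_error);
        g_free(bp->bio_data);
        g_destroy_bio(bp);
}

static void
example_read_async(struct g_consumer *cp, off_t offset, off_t length)
{
        struct bio *bp;

        bp = g_new_bio();
        if (bp == NULL)
                return;                 /* allocation is M_NOWAIT and can fail */
        bp->bio_cmd = BIO_READ;
        bp->bio_offset = offset;
        bp->bio_length = length;
        bp->bio_data = g_malloc(length, M_WAITOK);
        bp->bio_done = example_read_done;
        g_io_request(bp, cp);
}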
265 void
266 g_io_deliver(struct bio *bp, int error)
267 {
268 struct g_consumer *cp;
269 struct g_provider *pp;
270
271 KASSERT(bp != NULL, ("NULL bp in g_io_deliver"));
272 pp = bp->bio_to;
273 KASSERT(pp != NULL, ("NULL bio_to in g_io_deliver"));
274 cp = bp->bio_from;
275 if (cp == NULL) {
276 bp->bio_error = error;
277 bp->bio_done(bp);
278 return;
279 }
280 KASSERT(cp != NULL, ("NULL bio_from in g_io_deliver"));
281 KASSERT(cp->geom != NULL, ("NULL bio_from->geom in g_io_deliver"));
282
283 g_trace(G_T_BIO,
284 "g_io_deliver(%p) from %p(%s) to %p(%s) cmd %d error %d off %jd len %jd",
285 bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd, error,
286 (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length);
287
288 bp->bio_bcount = bp->bio_length;
289 if (g_collectstats) {
290 bp->bio_resid = bp->bio_bcount - bp->bio_completed;
291 devstat_end_transaction_bio(cp->stat, bp);
292 devstat_end_transaction_bio(pp->stat, bp);
293 }
294 cp->nend++;
295 pp->nend++;
296
297 if (error == ENOMEM) {
298 if (bootverbose)
299 printf("ENOMEM %p on %p(%s)\n", bp, pp, pp->name);
300 g_io_request(bp, cp);
301 pace++;
302 return;
303 }
304 bp->bio_error = error;
305 g_bioq_enqueue_tail(bp, &g_bio_run_up);
306 wakeup(&g_wait_up);
307 }
308
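
On the provider side, g_io_deliver() is what a geom's start method calls once a request has been carried out (or refused). A minimal sketch of such a start routine follows; the "EXAMPLE::version" attribute is hypothetical, and g_handleattr_int() answers a matching BIO_GETATTR and calls g_io_deliver() itself.

static void
example_provider_start(struct bio *bp)
{

        switch (bp->bio_cmd) {
        case BIO_GETATTR:
                if (g_handleattr_int(bp, "EXAMPLE::version", 1))
                        return;         /* already delivered by g_handleattr_int() */
                g_io_deliver(bp, EOPNOTSUPP);
                break;
        case BIO_READ:
        case BIO_WRITE:
        case BIO_DELETE:
                /* ... perform the transfer ... */
                bp->bio_completed = bp->bio_length;
                g_io_deliver(bp, 0);
                break;
        default:
                g_io_deliver(bp, EOPNOTSUPP);
                break;
        }
}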
309 void
310 g_io_schedule_down(struct thread *tp __unused)
311 {
312 struct bio *bp;
313 off_t excess;
314 int error;
315 struct mtx mymutex;
316
317 bzero(&mymutex, sizeof mymutex);
318 mtx_init(&mymutex, "g_xdown", NULL, MTX_DEF);
319
320 for(;;) {
321 g_bioq_lock(&g_bio_run_down);
322 bp = g_bioq_first(&g_bio_run_down);
323 if (bp == NULL) {
324 msleep(&g_wait_down, &g_bio_run_down.bio_queue_lock,
325 PRIBIO | PDROP, "-", hz/10);
326 continue;
327 }
328 g_bioq_unlock(&g_bio_run_down);
329 if (pace > 0) {
330 msleep(&error, NULL, PRIBIO, "g_down", hz/10);
331 pace--;
332 }
333 error = g_io_check(bp);
334 if (error) {
335 g_io_deliver(bp, error);
336 continue;
337 }
338 switch (bp->bio_cmd) {
339 case BIO_READ:
340 case BIO_WRITE:
341 case BIO_DELETE:
342 			/* Truncate requests to the end of the provider's media. */
343 excess = bp->bio_offset + bp->bio_length;
344 if (excess > bp->bio_to->mediasize) {
345 excess -= bp->bio_to->mediasize;
346 bp->bio_length -= excess;
347 }
348 /* Deliver zero length transfers right here. */
349 if (bp->bio_length == 0) {
350 g_io_deliver(bp, 0);
351 continue;
352 }
353 break;
354 default:
355 break;
356 }
357 mtx_lock(&mymutex);
358 bp->bio_to->geom->start(bp);
359 mtx_unlock(&mymutex);
360 }
361 }
362
363 void
364 bio_taskqueue(struct bio *bp, bio_task_t *func, void *arg)
365 {
366 bp->bio_task = func;
367 bp->bio_task_arg = arg;
368 /*
369 * The taskqueue is actually just a second queue off the "up"
370 * queue, so we use the same lock.
371 */
372 g_bioq_lock(&g_bio_run_up);
373 TAILQ_INSERT_TAIL(&g_bio_run_task.bio_queue, bp, bio_queue);
374 g_bio_run_task.bio_queue_length++;
375 wakeup(&g_wait_up);
376 g_bioq_unlock(&g_bio_run_up);
377 }
378
379
380 void
381 g_io_schedule_up(struct thread *tp __unused)
382 {
383 struct bio *bp;
384 struct mtx mymutex;
385
386 bzero(&mymutex, sizeof mymutex);
387 mtx_init(&mymutex, "g_xup", NULL, MTX_DEF);
388 for(;;) {
389 g_bioq_lock(&g_bio_run_up);
390 bp = g_bioq_first(&g_bio_run_task);
391 if (bp != NULL) {
392 g_bioq_unlock(&g_bio_run_up);
393 mtx_lock(&mymutex);
394 bp->bio_task(bp->bio_task_arg);
395 mtx_unlock(&mymutex);
396 continue;
397 }
398 bp = g_bioq_first(&g_bio_run_up);
399 if (bp != NULL) {
400 g_bioq_unlock(&g_bio_run_up);
401 mtx_lock(&mymutex);
402 biodone(bp);
403 mtx_unlock(&mymutex);
404 continue;
405 }
406 msleep(&g_wait_up, &g_bio_run_up.bio_queue_lock,
407 PRIBIO | PDROP, "-", hz/10);
408 }
409 }
410
411 void *
412 g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error)
413 {
414 struct bio *bp;
415 void *ptr;
416 int errorc;
417
418 KASSERT(length >= 512 && length <= DFLTPHYS,
419 ("g_read_data(): invalid length %jd", (intmax_t)length));
420
421 bp = g_new_bio();
422 bp->bio_cmd = BIO_READ;
423 bp->bio_done = NULL;
424 bp->bio_offset = offset;
425 bp->bio_length = length;
426 ptr = g_malloc(length, M_WAITOK);
427 bp->bio_data = ptr;
428 g_io_request(bp, cp);
429 errorc = biowait(bp, "gread");
430 if (error != NULL)
431 *error = errorc;
432 g_destroy_bio(bp);
433 if (errorc) {
434 g_free(ptr);
435 ptr = NULL;
436 }
437 return (ptr);
438 }
439
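
A usage sketch: read the first sector of the provider attached to consumer cp. The length must lie between 512 bytes and DFLTPHYS, and the buffer is allocated by g_read_data() and must be released with g_free(). The variable names are illustrative only.

        u_char *sector;
        int error;

        sector = g_read_data(cp, 0, cp->provider->sectorsize, &error);
        if (sector != NULL) {
                /* ... inspect the sector contents ... */
                g_free(sector);
        }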
440 int
441 g_write_data(struct g_consumer *cp, off_t offset, void *ptr, off_t length)
442 {
443 struct bio *bp;
444 int error;
445
446 KASSERT(length >= 512 && length <= DFLTPHYS,
447 ("g_write_data(): invalid length %jd", (intmax_t)length));
448
449 bp = g_new_bio();
450 bp->bio_cmd = BIO_WRITE;
451 bp->bio_done = NULL;
452 bp->bio_offset = offset;
453 bp->bio_length = length;
454 bp->bio_data = ptr;
455 g_io_request(bp, cp);
456 error = biowait(bp, "gwrite");
457 g_destroy_bio(bp);
458 return (error);
459 }
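
The write side is symmetric except that the caller owns the buffer: g_write_data() waits for completion in biowait() and returns the error, so the buffer may be reused or freed as soon as the call returns. An illustrative sketch, assuming the provider's sector size for the length:

        void *buf;
        int error;

        buf = g_malloc(cp->provider->sectorsize, M_WAITOK | M_ZERO);
        /* ... fill in the data to be written ... */
        error = g_write_data(cp, 0, buf, cp->provider->sectorsize);
        g_free(buf);
        if (error != 0)
                printf("example write failed: %d\n", error);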