/*-
 * Copyright (c) 2006 Ruslan Ermilov <ru@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
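
/*
 * GEOM_CACHE is a simple read cache for GEOM providers.  It keeps a
 * hash of fixed-size blocks (sc_bsize bytes each) read from the
 * underlying provider and serves small reads from that cache; writes
 * pass straight through and invalidate any overlapping cached blocks.
 * A periodic callout expires idle entries.
 */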

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/9.1/sys/geom/cache/g_cache.c 223921 2011-07-11 05:22:31Z ae $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/time.h>
#include <vm/uma.h>
#include <geom/geom.h>
#include <geom/cache/g_cache.h>

FEATURE(geom_cache, "GEOM cache module");

static MALLOC_DEFINE(M_GCACHE, "gcache_data", "GEOM_CACHE Data");

SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, cache, CTLFLAG_RW, 0, "GEOM_CACHE stuff");
static u_int g_cache_debug = 0;
SYSCTL_UINT(_kern_geom_cache, OID_AUTO, debug, CTLFLAG_RW, &g_cache_debug, 0,
    "Debug level");
static u_int g_cache_enable = 1;
SYSCTL_UINT(_kern_geom_cache, OID_AUTO, enable, CTLFLAG_RW, &g_cache_enable, 0,
    "Enable read caching");
static u_int g_cache_timeout = 10;
SYSCTL_UINT(_kern_geom_cache, OID_AUTO, timeout, CTLFLAG_RW, &g_cache_timeout,
    0, "Cache maintenance interval (seconds)");
static u_int g_cache_idletime = 5;
SYSCTL_UINT(_kern_geom_cache, OID_AUTO, idletime, CTLFLAG_RW, &g_cache_idletime,
    0, "Idle time before an entry may be reused (seconds)");
static u_int g_cache_used_lo = 5;
static u_int g_cache_used_hi = 20;
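
/*
 * Sysctl handler for the used-list watermarks: both are percentages
 * of the cache size and used_lo may never exceed used_hi.
 */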
static int
sysctl_handle_pct(SYSCTL_HANDLER_ARGS)
{
	u_int val = *(u_int *)arg1;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val > 100)		/* val is unsigned; only the upper bound needs checking. */
		return (EINVAL);
	if ((arg1 == &g_cache_used_lo && val > g_cache_used_hi) ||
	    (arg1 == &g_cache_used_hi && g_cache_used_lo > val))
		return (EINVAL);
	*(u_int *)arg1 = val;
	return (0);
}
SYSCTL_PROC(_kern_geom_cache, OID_AUTO, used_lo, CTLTYPE_UINT|CTLFLAG_RW,
    &g_cache_used_lo, 0, sysctl_handle_pct, "IU",
    "Low watermark of used entries (percent of size)");
SYSCTL_PROC(_kern_geom_cache, OID_AUTO, used_hi, CTLTYPE_UINT|CTLFLAG_RW,
    &g_cache_used_hi, 0, sysctl_handle_pct, "IU",
    "High watermark of used entries (percent of size)");

static int g_cache_destroy(struct g_cache_softc *sc, boolean_t force);
static g_ctl_destroy_geom_t g_cache_destroy_geom;

static g_taste_t g_cache_taste;
static g_ctl_req_t g_cache_config;
static g_dumpconf_t g_cache_dumpconf;

struct g_class g_cache_class = {
	.name = G_CACHE_CLASS_NAME,
	.version = G_VERSION,
	.ctlreq = g_cache_config,
	.taste = g_cache_taste,
	.destroy_geom = g_cache_destroy_geom
};

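/*
 * Convert between byte offsets on the cached provider and cache block
 * numbers; sc_bshift is log2 of the cache block size sc_bsize.
 */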
#define	OFF2BNO(off, sc)	((off) >> (sc)->sc_bshift)
#define	BNO2OFF(bno, sc)	((bno) << (sc)->sc_bshift)

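/*
 * Allocate a cache entry with sc_mtx held: recycle the oldest
 * reusable ("used") entry if there is one, otherwise allocate a new
 * descriptor and a data buffer from the per-device UMA zone, subject
 * to the sc_maxent limit.
 */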
static struct g_cache_desc *
g_cache_alloc(struct g_cache_softc *sc)
{
	struct g_cache_desc *dp;

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	if (!TAILQ_EMPTY(&sc->sc_usedlist)) {
		dp = TAILQ_FIRST(&sc->sc_usedlist);
		TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
		sc->sc_nused--;
		dp->d_flags = 0;
		LIST_REMOVE(dp, d_next);
		return (dp);
	}
	if (sc->sc_nent > sc->sc_maxent) {
		sc->sc_cachefull++;
		return (NULL);
	}
	dp = malloc(sizeof(*dp), M_GCACHE, M_NOWAIT | M_ZERO);
	if (dp == NULL)
		return (NULL);
	dp->d_data = uma_zalloc(sc->sc_zone, M_NOWAIT);
	if (dp->d_data == NULL) {
		free(dp, M_GCACHE);
		return (NULL);
	}
	sc->sc_nent++;
	return (dp);
}

static void
g_cache_free(struct g_cache_softc *sc, struct g_cache_desc *dp)
{

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	uma_zfree(sc->sc_zone, dp->d_data);
	free(dp, M_GCACHE);
	sc->sc_nent--;
}

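/*
 * Trim the used list down to the used_lo watermark (a percentage of
 * sc_maxent), freeing the least recently used entries first.
 */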
static void
g_cache_free_used(struct g_cache_softc *sc)
{
	struct g_cache_desc *dp;
	u_int n;

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	n = g_cache_used_lo * sc->sc_maxent / 100;
	while (sc->sc_nused > n) {
		KASSERT(!TAILQ_EMPTY(&sc->sc_usedlist), ("used list empty"));
		dp = TAILQ_FIRST(&sc->sc_usedlist);
		TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
		sc->sc_nused--;
		LIST_REMOVE(dp, d_next);
		g_cache_free(sc, dp);
	}
}

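/*
 * Copy the part of a cached block that overlaps bp into bp's buffer
 * and complete the bio once all of its data has been delivered.
 * Entries that have been read through to the end of the block are
 * placed at the tail of the used (reclaimable) list.
 */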
static void
g_cache_deliver(struct g_cache_softc *sc, struct bio *bp,
    struct g_cache_desc *dp, int error)
{
	off_t off1, off, len;

	mtx_assert(&sc->sc_mtx, MA_OWNED);
	KASSERT(OFF2BNO(bp->bio_offset, sc) <= dp->d_bno, ("wrong entry"));
	KASSERT(OFF2BNO(bp->bio_offset + bp->bio_length - 1, sc) >=
	    dp->d_bno, ("wrong entry"));

	off1 = BNO2OFF(dp->d_bno, sc);
	off = MAX(bp->bio_offset, off1);
	len = MIN(bp->bio_offset + bp->bio_length, off1 + sc->sc_bsize) - off;

	if (bp->bio_error == 0)
		bp->bio_error = error;
	if (bp->bio_error == 0) {
		bcopy(dp->d_data + (off - off1),
		    bp->bio_data + (off - bp->bio_offset), len);
	}
	bp->bio_completed += len;
	KASSERT(bp->bio_completed <= bp->bio_length, ("extra data"));
	if (bp->bio_completed == bp->bio_length) {
		if (bp->bio_error != 0)
			bp->bio_completed = 0;
		g_io_deliver(bp, bp->bio_error);
	}

	if (dp->d_flags & D_FLAG_USED) {
		TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
		TAILQ_INSERT_TAIL(&sc->sc_usedlist, dp, d_used);
	} else if (OFF2BNO(off + len, sc) > dp->d_bno) {
		TAILQ_INSERT_TAIL(&sc->sc_usedlist, dp, d_used);
		sc->sc_nused++;
		dp->d_flags |= D_FLAG_USED;
	}
	dp->d_atime = time_uptime;
}

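/*
 * Done routine for cache-fill reads sent to the backing provider:
 * deliver the freshly read block to every bio waiting on the entry,
 * then free the entry if it was invalidated by a write or the read
 * failed.
 */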
static void
g_cache_done(struct bio *bp)
{
	struct g_cache_softc *sc;
	struct g_cache_desc *dp;
	struct bio *bp2, *tmpbp;

	sc = bp->bio_from->geom->softc;
	KASSERT(G_CACHE_DESC1(bp) == sc, ("corrupt bio_caller in g_cache_done()"));
	dp = G_CACHE_DESC2(bp);
	mtx_lock(&sc->sc_mtx);
	bp2 = dp->d_biolist;
	while (bp2 != NULL) {
		KASSERT(G_CACHE_NEXT_BIO1(bp2) == sc, ("corrupt bio_driver in g_cache_done()"));
		tmpbp = G_CACHE_NEXT_BIO2(bp2);
		g_cache_deliver(sc, bp2, dp, bp->bio_error);
		bp2 = tmpbp;
	}
	dp->d_biolist = NULL;
	if (dp->d_flags & D_FLAG_INVALID) {
		sc->sc_invalid--;
		g_cache_free(sc, dp);
	} else if (bp->bio_error) {
		LIST_REMOVE(dp, d_next);
		if (dp->d_flags & D_FLAG_USED) {
			TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
			sc->sc_nused--;
		}
		g_cache_free(sc, dp);
	}
	mtx_unlock(&sc->sc_mtx);
	g_destroy_bio(bp);
}

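/*
 * Find the cache entry for block bno, if any, in its hash bucket.
 */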
static struct g_cache_desc *
g_cache_lookup(struct g_cache_softc *sc, off_t bno)
{
	struct g_cache_desc *dp;

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	LIST_FOREACH(dp, &sc->sc_desclist[G_CACHE_BUCKET(bno)], d_next)
		if (dp->d_bno == bno)
			return (dp);
	return (NULL);
}

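/*
 * Serve (the next block of) a read bio from the cache.  On a hit the
 * data is copied out immediately, or the bio is queued on the entry
 * if a fill is already in flight.  On a miss a new entry is allocated
 * and a block-sized read is cloned off and sent to the backing
 * provider; g_cache_done() finishes the delivery.
 */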
static int
g_cache_read(struct g_cache_softc *sc, struct bio *bp)
{
	struct bio *cbp;
	struct g_cache_desc *dp;

	mtx_lock(&sc->sc_mtx);
	dp = g_cache_lookup(sc,
	    OFF2BNO(bp->bio_offset + bp->bio_completed, sc));
	if (dp != NULL) {
		/* Add to waiters list or deliver. */
		sc->sc_cachehits++;
		if (dp->d_biolist != NULL) {
			G_CACHE_NEXT_BIO1(bp) = sc;
			G_CACHE_NEXT_BIO2(bp) = dp->d_biolist;
			dp->d_biolist = bp;
		} else
			g_cache_deliver(sc, bp, dp, 0);
		mtx_unlock(&sc->sc_mtx);
		return (0);
	}

	/* Cache miss.  Allocate entry and schedule bio. */
	sc->sc_cachemisses++;
	dp = g_cache_alloc(sc);
	if (dp == NULL) {
		mtx_unlock(&sc->sc_mtx);
		return (ENOMEM);
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_cache_free(sc, dp);
		mtx_unlock(&sc->sc_mtx);
		return (ENOMEM);
	}

	dp->d_bno = OFF2BNO(bp->bio_offset + bp->bio_completed, sc);
	G_CACHE_NEXT_BIO1(bp) = sc;
	G_CACHE_NEXT_BIO2(bp) = NULL;
	dp->d_biolist = bp;
	LIST_INSERT_HEAD(&sc->sc_desclist[G_CACHE_BUCKET(dp->d_bno)],
	    dp, d_next);
	mtx_unlock(&sc->sc_mtx);

	G_CACHE_DESC1(cbp) = sc;
	G_CACHE_DESC2(cbp) = dp;
	cbp->bio_done = g_cache_done;
	cbp->bio_offset = BNO2OFF(dp->d_bno, sc);
	cbp->bio_data = dp->d_data;
	cbp->bio_length = sc->sc_bsize;
	g_io_request(cbp, LIST_FIRST(&bp->bio_to->geom->consumer));
	return (0);
}

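/*
 * Invalidate all cache entries overlapping a write.  Entries with a
 * fill still in flight are only flagged; g_cache_done() frees them.
 */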
static void
g_cache_invalidate(struct g_cache_softc *sc, struct bio *bp)
{
	struct g_cache_desc *dp;
	off_t bno, lim;

	mtx_lock(&sc->sc_mtx);
	bno = OFF2BNO(bp->bio_offset, sc);
	lim = OFF2BNO(bp->bio_offset + bp->bio_length - 1, sc);
	do {
		if ((dp = g_cache_lookup(sc, bno)) != NULL) {
			LIST_REMOVE(dp, d_next);
			if (dp->d_flags & D_FLAG_USED) {
				TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
				sc->sc_nused--;
			}
			if (dp->d_biolist == NULL)
				g_cache_free(sc, dp);
			else {
				dp->d_flags = D_FLAG_INVALID;
				sc->sc_invalid++;
			}
		}
		bno++;
	} while (bno <= lim);
	mtx_unlock(&sc->sc_mtx);
}

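/*
 * GEOM start routine.  Only reads that end below sc_tail and span at
 * most two cache blocks are served from the cache; anything else is
 * cloned and passed straight down to the backing provider.  Writes
 * invalidate overlapping cache entries before being passed down.
 */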
static void
g_cache_start(struct bio *bp)
{
	struct g_cache_softc *sc;
	struct g_geom *gp;
	struct g_cache_desc *dp;
	struct bio *cbp;

	gp = bp->bio_to->geom;
	sc = gp->softc;
	G_CACHE_LOGREQ(bp, "Request received.");
	switch (bp->bio_cmd) {
	case BIO_READ:
		sc->sc_reads++;
		sc->sc_readbytes += bp->bio_length;
		if (!g_cache_enable)
			break;
		if (bp->bio_offset + bp->bio_length > sc->sc_tail)
			break;
		if (OFF2BNO(bp->bio_offset, sc) ==
		    OFF2BNO(bp->bio_offset + bp->bio_length - 1, sc)) {
			/* Request fits in a single cache block. */
			sc->sc_cachereads++;
			sc->sc_cachereadbytes += bp->bio_length;
			if (g_cache_read(sc, bp) == 0)
				return;
			sc->sc_cachereads--;
			sc->sc_cachereadbytes -= bp->bio_length;
			break;
		} else if (OFF2BNO(bp->bio_offset, sc) + 1 ==
		    OFF2BNO(bp->bio_offset + bp->bio_length - 1, sc)) {
			/*
			 * Request spans two blocks: deliver the first
			 * from the cache if it is resident, then fetch
			 * the second through g_cache_read().
			 */
			mtx_lock(&sc->sc_mtx);
			dp = g_cache_lookup(sc, OFF2BNO(bp->bio_offset, sc));
			if (dp == NULL || dp->d_biolist != NULL) {
				mtx_unlock(&sc->sc_mtx);
				break;
			}
			sc->sc_cachereads++;
			sc->sc_cachereadbytes += bp->bio_length;
			g_cache_deliver(sc, bp, dp, 0);
			mtx_unlock(&sc->sc_mtx);
			if (g_cache_read(sc, bp) == 0)
				return;
			sc->sc_cachereads--;
			sc->sc_cachereadbytes -= bp->bio_length;
			break;
		}
		break;
	case BIO_WRITE:
		sc->sc_writes++;
		sc->sc_wrotebytes += bp->bio_length;
		g_cache_invalidate(sc, bp);
		break;
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	cbp->bio_done = g_std_done;
	G_CACHE_LOGREQ(cbp, "Sending request.");
	g_io_request(cbp, LIST_FIRST(&gp->consumer));
}

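/*
 * Periodic housekeeping, rescheduled every g_cache_timeout seconds
 * and run with sc_mtx held (the callout is mutex-protected): entries
 * idle for at least g_cache_idletime seconds become reusable, and the
 * used list is trimmed once it exceeds the used_hi watermark.
 */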
static void
g_cache_go(void *arg)
{
	struct g_cache_softc *sc = arg;
	struct g_cache_desc *dp;
	int i;

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	/* Forcibly mark as used any entry that has been idle long enough. */
	for (i = 0; i < G_CACHE_BUCKETS; i++) {
		LIST_FOREACH(dp, &sc->sc_desclist[i], d_next) {
			if (dp->d_flags & D_FLAG_USED ||
			    dp->d_biolist != NULL ||
			    time_uptime - dp->d_atime < g_cache_idletime)
				continue;
			TAILQ_INSERT_TAIL(&sc->sc_usedlist, dp, d_used);
			sc->sc_nused++;
			dp->d_flags |= D_FLAG_USED;
		}
	}

	/* Keep the number of used entries low. */
	if (sc->sc_nused > g_cache_used_hi * sc->sc_maxent / 100)
		g_cache_free_used(sc);

	callout_reset(&sc->sc_callout, g_cache_timeout * hz, g_cache_go, sc);
}

static int
g_cache_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	int error;

	gp = pp->geom;
	cp = LIST_FIRST(&gp->consumer);
	error = g_access(cp, dr, dw, de);

	return (error);
}

static void
g_cache_orphan(struct g_consumer *cp)
{

	g_topology_assert();
	g_cache_destroy(cp->geom->softc, 1);
}

static struct g_cache_softc *
g_cache_find_device(struct g_class *mp, const char *name)
{
	struct g_geom *gp;

	LIST_FOREACH(gp, &mp->geom, geom) {
		if (strcmp(gp->name, name) == 0)
			return (gp->softc);
	}
	return (NULL);
}

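/*
 * Create a cache device on top of provider pp.  The block size must
 * be a power of two, no larger than MAXPHYS and a multiple of the
 * provider's sector size; the cache size is at least 100 entries.
 * For automatic devices the last sector of pp holds the metadata, so
 * it is subtracted from the new provider's media size.
 */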
static struct g_geom *
g_cache_create(struct g_class *mp, struct g_provider *pp,
    const struct g_cache_metadata *md, u_int type)
{
	struct g_cache_softc *sc;
	struct g_geom *gp;
	struct g_provider *newpp;
	struct g_consumer *cp;
	u_int bshift;
	int i;

	g_topology_assert();

	gp = NULL;
	newpp = NULL;
	cp = NULL;

	G_CACHE_DEBUG(1, "Creating device %s.", md->md_name);

	/* Cache size is minimum 100. */
	if (md->md_size < 100) {
		G_CACHE_DEBUG(0, "Invalid size for device %s.", md->md_name);
		return (NULL);
	}

	/* Block size restrictions. */
	bshift = ffs(md->md_bsize) - 1;
	if (md->md_bsize == 0 || md->md_bsize > MAXPHYS ||
	    md->md_bsize != 1 << bshift ||
	    (md->md_bsize % pp->sectorsize) != 0) {
		G_CACHE_DEBUG(0, "Invalid blocksize for provider %s.", pp->name);
		return (NULL);
	}

	/* Check for duplicate unit. */
	if (g_cache_find_device(mp, (const char *)&md->md_name) != NULL) {
		G_CACHE_DEBUG(0, "Provider %s already exists.", md->md_name);
		return (NULL);
	}

	gp = g_new_geomf(mp, md->md_name);
	sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO);
	sc->sc_type = type;
	sc->sc_bshift = bshift;
	sc->sc_bsize = 1 << bshift;
	sc->sc_zone = uma_zcreate("gcache", sc->sc_bsize, NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	mtx_init(&sc->sc_mtx, "GEOM CACHE mutex", NULL, MTX_DEF);
	for (i = 0; i < G_CACHE_BUCKETS; i++)
		LIST_INIT(&sc->sc_desclist[i]);
	TAILQ_INIT(&sc->sc_usedlist);
	sc->sc_maxent = md->md_size;
	callout_init_mtx(&sc->sc_callout, &sc->sc_mtx, 0);
	gp->softc = sc;
	sc->sc_geom = gp;
	gp->start = g_cache_start;
	gp->orphan = g_cache_orphan;
	gp->access = g_cache_access;
	gp->dumpconf = g_cache_dumpconf;

	newpp = g_new_providerf(gp, "cache/%s", gp->name);
	newpp->sectorsize = pp->sectorsize;
	newpp->mediasize = pp->mediasize;
	if (type == G_CACHE_TYPE_AUTOMATIC)
		newpp->mediasize -= pp->sectorsize;
	sc->sc_tail = BNO2OFF(OFF2BNO(newpp->mediasize, sc), sc);

	cp = g_new_consumer(gp);
	if (g_attach(cp, pp) != 0) {
		G_CACHE_DEBUG(0, "Cannot attach to provider %s.", pp->name);
		g_destroy_consumer(cp);
		g_destroy_provider(newpp);
		mtx_destroy(&sc->sc_mtx);
		g_free(sc);
		g_destroy_geom(gp);
		return (NULL);
	}

	g_error_provider(newpp, 0);
	G_CACHE_DEBUG(0, "Device %s created.", gp->name);
	callout_reset(&sc->sc_callout, g_cache_timeout * hz, g_cache_go, sc);
	return (gp);
}

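/*
 * Destroy a cache device: stop the housekeeping callout, free every
 * cache entry and the UMA zone, and wither the geom.  A device that
 * is still open is only destroyed when force is set.
 */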
static int
g_cache_destroy(struct g_cache_softc *sc, boolean_t force)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_cache_desc *dp, *dp2;
	int i;

	g_topology_assert();
	if (sc == NULL)
		return (ENXIO);
	gp = sc->sc_geom;
	pp = LIST_FIRST(&gp->provider);
	if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		if (force) {
			G_CACHE_DEBUG(0, "Device %s is still open, so it "
			    "can't be safely removed.", pp->name);
		} else {
			G_CACHE_DEBUG(1, "Device %s is still open (r%dw%de%d).",
			    pp->name, pp->acr, pp->acw, pp->ace);
			return (EBUSY);
		}
	} else {
		G_CACHE_DEBUG(0, "Device %s removed.", gp->name);
	}
	callout_drain(&sc->sc_callout);
	mtx_lock(&sc->sc_mtx);
	for (i = 0; i < G_CACHE_BUCKETS; i++) {
		dp = LIST_FIRST(&sc->sc_desclist[i]);
		while (dp != NULL) {
			dp2 = LIST_NEXT(dp, d_next);
			g_cache_free(sc, dp);
			dp = dp2;
		}
	}
	mtx_unlock(&sc->sc_mtx);
	mtx_destroy(&sc->sc_mtx);
	uma_zdestroy(sc->sc_zone);
	g_free(sc);
	gp->softc = NULL;
	g_wither_geom(gp, ENXIO);

	return (0);
}

static int
g_cache_destroy_geom(struct gctl_req *req, struct g_class *mp, struct g_geom *gp)
{

	return (g_cache_destroy(gp->softc, 0));
}

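/*
 * Read and decode the metadata stored in the last sector of the
 * backing provider.
 */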
static int
g_cache_read_metadata(struct g_consumer *cp, struct g_cache_metadata *md)
{
	struct g_provider *pp;
	u_char *buf;
	int error;

	g_topology_assert();

	error = g_access(cp, 1, 0, 0);
	if (error != 0)
		return (error);
	pp = cp->provider;
	g_topology_unlock();
	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
	    &error);
	g_topology_lock();
	g_access(cp, -1, 0, 0);
	if (buf == NULL)
		return (error);

	/* Decode metadata. */
	cache_metadata_decode(buf, md);
	g_free(buf);

	return (0);
}

static int
g_cache_write_metadata(struct g_consumer *cp, struct g_cache_metadata *md)
{
	struct g_provider *pp;
	u_char *buf;
	int error;

	g_topology_assert();

	error = g_access(cp, 0, 1, 0);
	if (error != 0)
		return (error);
	pp = cp->provider;
	buf = malloc((size_t)pp->sectorsize, M_GCACHE, M_WAITOK | M_ZERO);
	cache_metadata_encode(md, buf);
	g_topology_unlock();
	error = g_write_data(cp, pp->mediasize - pp->sectorsize, buf, pp->sectorsize);
	g_topology_lock();
	g_access(cp, 0, -1, 0);
	free(buf, M_GCACHE);

	return (error);
}

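/*
 * Taste routine: look for valid gcache metadata in the last sector of
 * the provider and auto-create the cache device described there.
 */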
static struct g_geom *
g_cache_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_cache_metadata md;
	struct g_consumer *cp;
	struct g_geom *gp;
	int error;

	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
	g_topology_assert();

	G_CACHE_DEBUG(3, "Tasting %s.", pp->name);

	gp = g_new_geomf(mp, "cache:taste");
	gp->start = g_cache_start;
	gp->orphan = g_cache_orphan;
	gp->access = g_cache_access;
	cp = g_new_consumer(gp);
	g_attach(cp, pp);
	error = g_cache_read_metadata(cp, &md);
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	if (error != 0)
		return (NULL);

	if (strcmp(md.md_magic, G_CACHE_MAGIC) != 0)
		return (NULL);
	if (md.md_version > G_CACHE_VERSION) {
		printf("geom_cache.ko module is too old to handle %s.\n",
		    pp->name);
		return (NULL);
	}
	if (md.md_provsize != pp->mediasize)
		return (NULL);

	gp = g_cache_create(mp, pp, &md, G_CACHE_TYPE_AUTOMATIC);
	if (gp == NULL) {
		G_CACHE_DEBUG(0, "Can't create %s.", md.md_name);
		return (NULL);
	}
	return (gp);
}

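/*
 * Handle the "create" verb: build metadata from the request
 * parameters and create a manual (non-persistent) cache device.
 * arg0 is the device name and arg1 the backing provider; "size" is
 * the number of cache entries (at least 100) and "blocksize" the
 * cache block size in bytes.  An illustrative invocation via the
 * standard gcache(8) utility (names and exact option semantics per
 * gcache(8)) might look like:
 *
 *	gcache create -b 65536 -s 100 mycache /dev/ada0s1
 *
 * which would expose the cached provider as /dev/cache/mycache.
 */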
static void
g_cache_ctl_create(struct gctl_req *req, struct g_class *mp)
{
	struct g_cache_metadata md;
	struct g_provider *pp;
	struct g_geom *gp;
	intmax_t *bsize, *size;
	const char *name;
	int *nargs;

	g_topology_assert();

	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
	if (nargs == NULL) {
		gctl_error(req, "No '%s' argument", "nargs");
		return;
	}
	if (*nargs != 2) {
		gctl_error(req, "Invalid number of arguments.");
		return;
	}

	strlcpy(md.md_magic, G_CACHE_MAGIC, sizeof(md.md_magic));
	md.md_version = G_CACHE_VERSION;
	name = gctl_get_asciiparam(req, "arg0");
	if (name == NULL) {
		gctl_error(req, "No 'arg0' argument");
		return;
	}
	strlcpy(md.md_name, name, sizeof(md.md_name));

	size = gctl_get_paraml(req, "size", sizeof(*size));
	if (size == NULL) {
		gctl_error(req, "No '%s' argument", "size");
		return;
	}
	if ((u_int)*size < 100) {
		gctl_error(req, "Invalid '%s' argument", "size");
		return;
	}
	md.md_size = (u_int)*size;

	bsize = gctl_get_paraml(req, "blocksize", sizeof(*bsize));
	if (bsize == NULL) {
		gctl_error(req, "No '%s' argument", "blocksize");
		return;
	}
	if (*bsize < 0) {
		gctl_error(req, "Invalid '%s' argument", "blocksize");
		return;
	}
	md.md_bsize = (u_int)*bsize;

	/* This field is not important here. */
	md.md_provsize = 0;

	name = gctl_get_asciiparam(req, "arg1");
	if (name == NULL) {
		gctl_error(req, "No 'arg1' argument");
		return;
	}
	if (strncmp(name, "/dev/", strlen("/dev/")) == 0)
		name += strlen("/dev/");
	pp = g_provider_by_name(name);
	if (pp == NULL) {
		G_CACHE_DEBUG(1, "Provider %s is invalid.", name);
		gctl_error(req, "Provider %s is invalid.", name);
		return;
	}
	gp = g_cache_create(mp, pp, &md, G_CACHE_TYPE_MANUAL);
	if (gp == NULL) {
		gctl_error(req, "Can't create %s.", md.md_name);
		return;
	}
}

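/*
 * Handle the "configure" verb: resize the cache of an existing device
 * and, for automatic devices, write the new parameters back to the
 * on-disk metadata.  The block size of a running cache is not
 * changed, only recorded in the metadata.
 */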
static void
g_cache_ctl_configure(struct gctl_req *req, struct g_class *mp)
{
	struct g_cache_metadata md;
	struct g_cache_softc *sc;
	struct g_consumer *cp;
	intmax_t *bsize, *size;
	const char *name;
	int error, *nargs;

	g_topology_assert();

	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
	if (nargs == NULL) {
		gctl_error(req, "No '%s' argument", "nargs");
		return;
	}
	if (*nargs != 1) {
		gctl_error(req, "Missing device.");
		return;
	}

	name = gctl_get_asciiparam(req, "arg0");
	if (name == NULL) {
		gctl_error(req, "No 'arg0' argument");
		return;
	}
	sc = g_cache_find_device(mp, name);
	if (sc == NULL) {
		G_CACHE_DEBUG(1, "Device %s is invalid.", name);
		gctl_error(req, "Device %s is invalid.", name);
		return;
	}

	size = gctl_get_paraml(req, "size", sizeof(*size));
	if (size == NULL) {
		gctl_error(req, "No '%s' argument", "size");
		return;
	}
	if ((u_int)*size != 0 && (u_int)*size < 100) {
		gctl_error(req, "Invalid '%s' argument", "size");
		return;
	}
	if ((u_int)*size != 0)
		sc->sc_maxent = (u_int)*size;

	bsize = gctl_get_paraml(req, "blocksize", sizeof(*bsize));
	if (bsize == NULL) {
		gctl_error(req, "No '%s' argument", "blocksize");
		return;
	}
	if (*bsize < 0) {
		gctl_error(req, "Invalid '%s' argument", "blocksize");
		return;
	}

	if (sc->sc_type != G_CACHE_TYPE_AUTOMATIC)
		return;

	strlcpy(md.md_name, name, sizeof(md.md_name));
	strlcpy(md.md_magic, G_CACHE_MAGIC, sizeof(md.md_magic));
	md.md_version = G_CACHE_VERSION;
	if ((u_int)*size != 0)
		md.md_size = (u_int)*size;
	else
		md.md_size = sc->sc_maxent;
	if ((u_int)*bsize != 0)
		md.md_bsize = (u_int)*bsize;
	else
		md.md_bsize = sc->sc_bsize;
	cp = LIST_FIRST(&sc->sc_geom->consumer);
	md.md_provsize = cp->provider->mediasize;
	error = g_cache_write_metadata(cp, &md);
	if (error == 0)
		G_CACHE_DEBUG(2, "Metadata on %s updated.", cp->provider->name);
	else
		G_CACHE_DEBUG(0, "Cannot update metadata on %s (error=%d).",
		    cp->provider->name, error);
}

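/*
 * Handle the "destroy" and "stop" verbs for one or more devices.
 */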
static void
g_cache_ctl_destroy(struct gctl_req *req, struct g_class *mp)
{
	int *nargs, *force, error, i;
	struct g_cache_softc *sc;
	const char *name;
	char param[16];

	g_topology_assert();

	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
	if (nargs == NULL) {
		gctl_error(req, "No '%s' argument", "nargs");
		return;
	}
	if (*nargs <= 0) {
		gctl_error(req, "Missing device(s).");
		return;
	}
	force = gctl_get_paraml(req, "force", sizeof(*force));
	if (force == NULL) {
		gctl_error(req, "No 'force' argument");
		return;
	}

	for (i = 0; i < *nargs; i++) {
		snprintf(param, sizeof(param), "arg%d", i);
		name = gctl_get_asciiparam(req, param);
		if (name == NULL) {
			gctl_error(req, "No 'arg%d' argument", i);
			return;
		}
		sc = g_cache_find_device(mp, name);
		if (sc == NULL) {
			G_CACHE_DEBUG(1, "Device %s is invalid.", name);
			gctl_error(req, "Device %s is invalid.", name);
			return;
		}
		error = g_cache_destroy(sc, *force);
		if (error != 0) {
			gctl_error(req, "Cannot destroy device %s (error=%d).",
			    sc->sc_name, error);
			return;
		}
	}
}

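/*
 * Handle the "reset" verb: clear the per-device statistics counters.
 */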
static void
g_cache_ctl_reset(struct gctl_req *req, struct g_class *mp)
{
	struct g_cache_softc *sc;
	const char *name;
	char param[16];
	int i, *nargs;

	g_topology_assert();

	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
	if (nargs == NULL) {
		gctl_error(req, "No '%s' argument", "nargs");
		return;
	}
	if (*nargs <= 0) {
		gctl_error(req, "Missing device(s).");
		return;
	}

	for (i = 0; i < *nargs; i++) {
		snprintf(param, sizeof(param), "arg%d", i);
		name = gctl_get_asciiparam(req, param);
		if (name == NULL) {
			gctl_error(req, "No 'arg%d' argument", i);
			return;
		}
		sc = g_cache_find_device(mp, name);
		if (sc == NULL) {
			G_CACHE_DEBUG(1, "Device %s is invalid.", name);
			gctl_error(req, "Device %s is invalid.", name);
			return;
		}
		sc->sc_reads = 0;
		sc->sc_readbytes = 0;
		sc->sc_cachereads = 0;
		sc->sc_cachereadbytes = 0;
		sc->sc_cachehits = 0;
		sc->sc_cachemisses = 0;
		sc->sc_cachefull = 0;
		sc->sc_writes = 0;
		sc->sc_wrotebytes = 0;
	}
}

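/*
 * Control request entry point: check that the userland utility and
 * the kernel module agree on the metadata version, then dispatch the
 * verb to its handler.
 */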
static void
g_cache_config(struct gctl_req *req, struct g_class *mp, const char *verb)
{
	uint32_t *version;

	g_topology_assert();

	version = gctl_get_paraml(req, "version", sizeof(*version));
	if (version == NULL) {
		gctl_error(req, "No '%s' argument.", "version");
		return;
	}
	if (*version != G_CACHE_VERSION) {
		gctl_error(req, "Userland and kernel parts are out of sync.");
		return;
	}

	if (strcmp(verb, "create") == 0) {
		g_cache_ctl_create(req, mp);
		return;
	} else if (strcmp(verb, "configure") == 0) {
		g_cache_ctl_configure(req, mp);
		return;
	} else if (strcmp(verb, "destroy") == 0 ||
	    strcmp(verb, "stop") == 0) {
		g_cache_ctl_destroy(req, mp);
		return;
	} else if (strcmp(verb, "reset") == 0) {
		g_cache_ctl_reset(req, mp);
		return;
	}

	gctl_error(req, "Unknown verb.");
}

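/*
 * Dump the device's configuration and statistics into the XML geom
 * confxml tree (as shown by the userland "list"/"status" commands).
 */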
static void
g_cache_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_cache_softc *sc;

	if (pp != NULL || cp != NULL)
		return;
	sc = gp->softc;
	sbuf_printf(sb, "%s<Size>%u</Size>\n", indent, sc->sc_maxent);
	sbuf_printf(sb, "%s<BlockSize>%u</BlockSize>\n", indent, sc->sc_bsize);
	sbuf_printf(sb, "%s<TailOffset>%ju</TailOffset>\n", indent,
	    (uintmax_t)sc->sc_tail);
	sbuf_printf(sb, "%s<Entries>%u</Entries>\n", indent, sc->sc_nent);
	sbuf_printf(sb, "%s<UsedEntries>%u</UsedEntries>\n", indent,
	    sc->sc_nused);
	sbuf_printf(sb, "%s<InvalidEntries>%u</InvalidEntries>\n", indent,
	    sc->sc_invalid);
	sbuf_printf(sb, "%s<Reads>%ju</Reads>\n", indent, sc->sc_reads);
	sbuf_printf(sb, "%s<ReadBytes>%ju</ReadBytes>\n", indent,
	    sc->sc_readbytes);
	sbuf_printf(sb, "%s<CacheReads>%ju</CacheReads>\n", indent,
	    sc->sc_cachereads);
	sbuf_printf(sb, "%s<CacheReadBytes>%ju</CacheReadBytes>\n", indent,
	    sc->sc_cachereadbytes);
	sbuf_printf(sb, "%s<CacheHits>%ju</CacheHits>\n", indent,
	    sc->sc_cachehits);
	sbuf_printf(sb, "%s<CacheMisses>%ju</CacheMisses>\n", indent,
	    sc->sc_cachemisses);
	sbuf_printf(sb, "%s<CacheFull>%ju</CacheFull>\n", indent,
	    sc->sc_cachefull);
	sbuf_printf(sb, "%s<Writes>%ju</Writes>\n", indent, sc->sc_writes);
	sbuf_printf(sb, "%s<WroteBytes>%ju</WroteBytes>\n", indent,
	    sc->sc_wrotebytes);
}

DECLARE_GEOM_CLASS(g_cache_class, g_cache);