/*-
 * Copyright (c) 2006 Ruslan Ermilov <ru@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/time.h>
#include <vm/uma.h>
#include <geom/geom.h>
#include <geom/cache/g_cache.h>

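/*
 * GEOM_CACHE turns a provider into a simple read cache: reads that fit
 * in one or two cache-sized blocks and do not extend past sc_tail are
 * served from (or loaded into) a table of fixed-size blocks, while
 * writes pass straight through and invalidate any overlapping blocks.
 * Devices are typically managed with gcache(8); an illustrative session
 * (syntax is a sketch, see gcache(8) for the authoritative form) might
 * look like:
 *
 *	gcache create -b 65536 -s 100 mycache /dev/ada0s1
 *	newfs /dev/cache/mycache
 */
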
static MALLOC_DEFINE(M_GCACHE, "gcache_data", "GEOM_CACHE Data");

SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, cache, CTLFLAG_RW, 0, "GEOM_CACHE stuff");
static u_int g_cache_debug = 0;
SYSCTL_UINT(_kern_geom_cache, OID_AUTO, debug, CTLFLAG_RW, &g_cache_debug, 0,
    "Debug level");
static u_int g_cache_enable = 1;
SYSCTL_UINT(_kern_geom_cache, OID_AUTO, enable, CTLFLAG_RW, &g_cache_enable, 0,
    "Enable read caching");
static u_int g_cache_timeout = 10;
SYSCTL_UINT(_kern_geom_cache, OID_AUTO, timeout, CTLFLAG_RW, &g_cache_timeout,
    0, "Interval between cache cleanup runs (in seconds)");
static u_int g_cache_idletime = 5;
SYSCTL_UINT(_kern_geom_cache, OID_AUTO, idletime, CTLFLAG_RW, &g_cache_idletime,
    0, "Idle time (in seconds) before an entry becomes reclaimable");
static u_int g_cache_used_lo = 5;
static u_int g_cache_used_hi = 20;
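
/*
 * Validate the used-list watermark sysctls: values are percentages in
 * the range 0..100, and used_lo may not exceed used_hi.
 */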
static int
sysctl_handle_pct(SYSCTL_HANDLER_ARGS)
{
	u_int val = *(u_int *)arg1;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val > 100)
		return (EINVAL);
	if ((arg1 == &g_cache_used_lo && val > g_cache_used_hi) ||
	    (arg1 == &g_cache_used_hi && g_cache_used_lo > val))
		return (EINVAL);
	*(u_int *)arg1 = val;
	return (0);
}
SYSCTL_PROC(_kern_geom_cache, OID_AUTO, used_lo, CTLTYPE_UINT|CTLFLAG_RW,
    &g_cache_used_lo, 0, sysctl_handle_pct, "IU",
    "Percentage of used entries to trim down to");
SYSCTL_PROC(_kern_geom_cache, OID_AUTO, used_hi, CTLTYPE_UINT|CTLFLAG_RW,
    &g_cache_used_hi, 0, sysctl_handle_pct, "IU",
    "Percentage of used entries that triggers trimming");

static int g_cache_destroy(struct g_cache_softc *sc, boolean_t force);
static g_ctl_destroy_geom_t g_cache_destroy_geom;

static g_taste_t g_cache_taste;
static g_ctl_req_t g_cache_config;
static g_dumpconf_t g_cache_dumpconf;

struct g_class g_cache_class = {
	.name = G_CACHE_CLASS_NAME,
	.version = G_VERSION,
	.ctlreq = g_cache_config,
	.taste = g_cache_taste,
	.destroy_geom = g_cache_destroy_geom
};

#define	OFF2BNO(off, sc)	((off) >> (sc)->sc_bshift)
#define	BNO2OFF(bno, sc)	((bno) << (sc)->sc_bshift)
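
/*
 * Example: with sc_bshift = 16 (64 KB blocks), OFF2BNO(0x18000, sc) == 1
 * and BNO2OFF(1, sc) == 0x10000, i.e. an offset maps to the number of
 * the block containing it and a block number maps back to that block's
 * starting offset.
 */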
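/*
 * Allocate a cache entry: recycle the least recently used entry when
 * one is available, otherwise allocate a fresh descriptor and data
 * block, up to sc_maxent entries.  Runs with sc_mtx held, hence the
 * M_NOWAIT allocations.
 */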
static struct g_cache_desc *
g_cache_alloc(struct g_cache_softc *sc)
{
	struct g_cache_desc *dp;

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	if (!TAILQ_EMPTY(&sc->sc_usedlist)) {
		dp = TAILQ_FIRST(&sc->sc_usedlist);
		TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
		sc->sc_nused--;
		dp->d_flags = 0;
		LIST_REMOVE(dp, d_next);
		return (dp);
	}
	if (sc->sc_nent > sc->sc_maxent) {
		sc->sc_cachefull++;
		return (NULL);
	}
	dp = malloc(sizeof(*dp), M_GCACHE, M_NOWAIT | M_ZERO);
	if (dp == NULL)
		return (NULL);
	dp->d_data = uma_zalloc(sc->sc_zone, M_NOWAIT);
	if (dp->d_data == NULL) {
		free(dp, M_GCACHE);
		return (NULL);
	}
	sc->sc_nent++;
	return (dp);
}

static void
g_cache_free(struct g_cache_softc *sc, struct g_cache_desc *dp)
{

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	uma_zfree(sc->sc_zone, dp->d_data);
	free(dp, M_GCACHE);
	sc->sc_nent--;
}

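/*
 * Trim the used list down to the g_cache_used_lo watermark, freeing
 * entries from the LRU end.
 */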
static void
g_cache_free_used(struct g_cache_softc *sc)
{
	struct g_cache_desc *dp;
	u_int n;

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	n = g_cache_used_lo * sc->sc_maxent / 100;
	while (sc->sc_nused > n) {
		KASSERT(!TAILQ_EMPTY(&sc->sc_usedlist), ("used list empty"));
		dp = TAILQ_FIRST(&sc->sc_usedlist);
		TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
		sc->sc_nused--;
		LIST_REMOVE(dp, d_next);
		g_cache_free(sc, dp);
	}
}

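/*
 * Copy the part of the cache block dp that overlaps bp into the bio's
 * buffer and complete the bio once all of the blocks it spans have been
 * delivered.  An entry that has been read through to its end joins the
 * used (reclaimable) list; an entry already on it moves to the MRU end.
 */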
static void
g_cache_deliver(struct g_cache_softc *sc, struct bio *bp,
    struct g_cache_desc *dp, int error)
{
	off_t off1, off, len;

	mtx_assert(&sc->sc_mtx, MA_OWNED);
	KASSERT(OFF2BNO(bp->bio_offset, sc) <= dp->d_bno, ("wrong entry"));
	KASSERT(OFF2BNO(bp->bio_offset + bp->bio_length - 1, sc) >=
	    dp->d_bno, ("wrong entry"));

	off1 = BNO2OFF(dp->d_bno, sc);
	off = MAX(bp->bio_offset, off1);
	len = MIN(bp->bio_offset + bp->bio_length, off1 + sc->sc_bsize) - off;

	if (bp->bio_error == 0)
		bp->bio_error = error;
	if (bp->bio_error == 0) {
		bcopy(dp->d_data + (off - off1),
		    bp->bio_data + (off - bp->bio_offset), len);
	}
	bp->bio_completed += len;
	KASSERT(bp->bio_completed <= bp->bio_length, ("extra data"));
	if (bp->bio_completed == bp->bio_length) {
		if (bp->bio_error != 0)
			bp->bio_completed = 0;
		g_io_deliver(bp, bp->bio_error);
	}

	if (dp->d_flags & D_FLAG_USED) {
		TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
		TAILQ_INSERT_TAIL(&sc->sc_usedlist, dp, d_used);
	} else if (OFF2BNO(off + len, sc) > dp->d_bno) {
		TAILQ_INSERT_TAIL(&sc->sc_usedlist, dp, d_used);
		sc->sc_nused++;
		dp->d_flags |= D_FLAG_USED;
	}
	dp->d_atime = time_uptime;
}

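/*
 * Completion handler for the cloned read that fills a cache block:
 * deliver the data to every bio waiting on the entry, then free the
 * entry if it was invalidated while the read was in flight or if the
 * read failed.
 */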
static void
g_cache_done(struct bio *bp)
{
	struct g_cache_softc *sc;
	struct g_cache_desc *dp;
	struct bio *bp2, *tmpbp;

	sc = bp->bio_from->geom->softc;
	KASSERT(G_CACHE_DESC1(bp) == sc,
	    ("corrupt bio_caller in g_cache_done()"));
	dp = G_CACHE_DESC2(bp);
	mtx_lock(&sc->sc_mtx);
	bp2 = dp->d_biolist;
	while (bp2 != NULL) {
		KASSERT(G_CACHE_NEXT_BIO1(bp2) == sc,
		    ("corrupt bio_driver in g_cache_done()"));
		tmpbp = G_CACHE_NEXT_BIO2(bp2);
		g_cache_deliver(sc, bp2, dp, bp->bio_error);
		bp2 = tmpbp;
	}
	dp->d_biolist = NULL;
	if (dp->d_flags & D_FLAG_INVALID) {
		sc->sc_invalid--;
		g_cache_free(sc, dp);
	} else if (bp->bio_error) {
		LIST_REMOVE(dp, d_next);
		if (dp->d_flags & D_FLAG_USED) {
			TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
			sc->sc_nused--;
		}
		g_cache_free(sc, dp);
	}
	mtx_unlock(&sc->sc_mtx);
	g_destroy_bio(bp);
}

static struct g_cache_desc *
g_cache_lookup(struct g_cache_softc *sc, off_t bno)
{
	struct g_cache_desc *dp;

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	LIST_FOREACH(dp, &sc->sc_desclist[G_CACHE_BUCKET(bno)], d_next)
		if (dp->d_bno == bno)
			return (dp);
	return (NULL);
}

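/*
 * Serve one cache block of a read.  On a hit the bio is delivered at
 * once or queued behind an in-flight read of the same block; on a miss
 * a new entry is allocated and a clone of the bio fetches the whole
 * block from the consumer.  Returns 0 if the bio was consumed, ENOMEM
 * otherwise.
 */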
static int
g_cache_read(struct g_cache_softc *sc, struct bio *bp)
{
	struct bio *cbp;
	struct g_cache_desc *dp;

	mtx_lock(&sc->sc_mtx);
	dp = g_cache_lookup(sc,
	    OFF2BNO(bp->bio_offset + bp->bio_completed, sc));
	if (dp != NULL) {
		/* Add to waiters list or deliver. */
		sc->sc_cachehits++;
		if (dp->d_biolist != NULL) {
			G_CACHE_NEXT_BIO1(bp) = sc;
			G_CACHE_NEXT_BIO2(bp) = dp->d_biolist;
			dp->d_biolist = bp;
		} else
			g_cache_deliver(sc, bp, dp, 0);
		mtx_unlock(&sc->sc_mtx);
		return (0);
	}

	/* Cache miss.  Allocate entry and schedule bio. */
	sc->sc_cachemisses++;
	dp = g_cache_alloc(sc);
	if (dp == NULL) {
		mtx_unlock(&sc->sc_mtx);
		return (ENOMEM);
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_cache_free(sc, dp);
		mtx_unlock(&sc->sc_mtx);
		return (ENOMEM);
	}

	dp->d_bno = OFF2BNO(bp->bio_offset + bp->bio_completed, sc);
	G_CACHE_NEXT_BIO1(bp) = sc;
	G_CACHE_NEXT_BIO2(bp) = NULL;
	dp->d_biolist = bp;
	LIST_INSERT_HEAD(&sc->sc_desclist[G_CACHE_BUCKET(dp->d_bno)],
	    dp, d_next);
	mtx_unlock(&sc->sc_mtx);

	G_CACHE_DESC1(cbp) = sc;
	G_CACHE_DESC2(cbp) = dp;
	cbp->bio_done = g_cache_done;
	cbp->bio_offset = BNO2OFF(dp->d_bno, sc);
	cbp->bio_data = dp->d_data;
	cbp->bio_length = sc->sc_bsize;
	g_io_request(cbp, LIST_FIRST(&bp->bio_to->geom->consumer));
	return (0);
}

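/*
 * Drop every cache block overlapping a write: idle entries are freed
 * immediately, entries with a read in flight are marked invalid and
 * freed later in g_cache_done().
 */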
static void
g_cache_invalidate(struct g_cache_softc *sc, struct bio *bp)
{
	struct g_cache_desc *dp;
	off_t bno, lim;

	mtx_lock(&sc->sc_mtx);
	bno = OFF2BNO(bp->bio_offset, sc);
	lim = OFF2BNO(bp->bio_offset + bp->bio_length - 1, sc);
	do {
		if ((dp = g_cache_lookup(sc, bno)) != NULL) {
			LIST_REMOVE(dp, d_next);
			if (dp->d_flags & D_FLAG_USED) {
				TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
				sc->sc_nused--;
			}
			if (dp->d_biolist == NULL)
				g_cache_free(sc, dp);
			else {
				dp->d_flags = D_FLAG_INVALID;
				sc->sc_invalid++;
			}
		}
		bno++;
	} while (bno <= lim);
	mtx_unlock(&sc->sc_mtx);
}

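/*
 * Request dispatcher.  Only reads are cached: a read must not extend
 * past sc_tail and may span at most two cache blocks (a two-block read
 * is served only when its first block is already resident).  Writes
 * invalidate overlapping blocks and, like any request not fully served
 * above, are cloned and passed down to the consumer.
 */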
static void
g_cache_start(struct bio *bp)
{
	struct g_cache_softc *sc;
	struct g_geom *gp;
	struct g_cache_desc *dp;
	struct bio *cbp;

	gp = bp->bio_to->geom;
	sc = gp->softc;
	G_CACHE_LOGREQ(bp, "Request received.");
	switch (bp->bio_cmd) {
	case BIO_READ:
		sc->sc_reads++;
		sc->sc_readbytes += bp->bio_length;
		if (!g_cache_enable)
			break;
		if (bp->bio_offset + bp->bio_length > sc->sc_tail)
			break;
		if (OFF2BNO(bp->bio_offset, sc) ==
		    OFF2BNO(bp->bio_offset + bp->bio_length - 1, sc)) {
			/* Read fits within a single cache block. */
			sc->sc_cachereads++;
			sc->sc_cachereadbytes += bp->bio_length;
			if (g_cache_read(sc, bp) == 0)
				return;
			sc->sc_cachereads--;
			sc->sc_cachereadbytes -= bp->bio_length;
			break;
		} else if (OFF2BNO(bp->bio_offset, sc) + 1 ==
		    OFF2BNO(bp->bio_offset + bp->bio_length - 1, sc)) {
			/*
			 * Read spans two blocks; serve it from the cache
			 * only if the first block is already resident.
			 */
			mtx_lock(&sc->sc_mtx);
			dp = g_cache_lookup(sc, OFF2BNO(bp->bio_offset, sc));
			if (dp == NULL || dp->d_biolist != NULL) {
				mtx_unlock(&sc->sc_mtx);
				break;
			}
			sc->sc_cachereads++;
			sc->sc_cachereadbytes += bp->bio_length;
			g_cache_deliver(sc, bp, dp, 0);
			mtx_unlock(&sc->sc_mtx);
			if (g_cache_read(sc, bp) == 0)
				return;
			sc->sc_cachereads--;
			sc->sc_cachereadbytes -= bp->bio_length;
			break;
		}
		break;
	case BIO_WRITE:
		sc->sc_writes++;
		sc->sc_wrotebytes += bp->bio_length;
		g_cache_invalidate(sc, bp);
		break;
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	cbp->bio_done = g_std_done;
	G_CACHE_LOGREQ(cbp, "Sending request.");
	g_io_request(cbp, LIST_FIRST(&gp->consumer));
}

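/*
 * Periodic housekeeping, rescheduled every g_cache_timeout seconds and
 * running with sc_mtx held (the callout was initialized with
 * callout_init_mtx()): entries idle for g_cache_idletime seconds are
 * marked reclaimable, and the used list is trimmed once it exceeds the
 * g_cache_used_hi watermark.
 */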
static void
g_cache_go(void *arg)
{
	struct g_cache_softc *sc = arg;
	struct g_cache_desc *dp;
	int i;

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	/* Forcibly mark idle ready entries as used. */
	for (i = 0; i < G_CACHE_BUCKETS; i++) {
		LIST_FOREACH(dp, &sc->sc_desclist[i], d_next) {
			if (dp->d_flags & D_FLAG_USED ||
			    dp->d_biolist != NULL ||
			    time_uptime - dp->d_atime < g_cache_idletime)
				continue;
			TAILQ_INSERT_TAIL(&sc->sc_usedlist, dp, d_used);
			sc->sc_nused++;
			dp->d_flags |= D_FLAG_USED;
		}
	}

	/* Keep the number of used entries low. */
	if (sc->sc_nused > g_cache_used_hi * sc->sc_maxent / 100)
		g_cache_free_used(sc);

	callout_reset(&sc->sc_callout, g_cache_timeout * hz, g_cache_go, sc);
}

static int
g_cache_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	int error;

	gp = pp->geom;
	cp = LIST_FIRST(&gp->consumer);
	error = g_access(cp, dr, dw, de);

	return (error);
}

static void
g_cache_orphan(struct g_consumer *cp)
{

	g_topology_assert();
	g_cache_destroy(cp->geom->softc, 1);
}

static struct g_cache_softc *
g_cache_find_device(struct g_class *mp, const char *name)
{
	struct g_geom *gp;

	LIST_FOREACH(gp, &mp->geom, geom) {
		if (strcmp(gp->name, name) == 0)
			return (gp->softc);
	}
	return (NULL);
}

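/*
 * Create the cache geom and its "cache/<name>" provider on top of pp.
 * For automatically tasted devices the last sector holds the metadata,
 * so the new provider advertises one sector less; sc_tail is the media
 * size rounded down to a block boundary and bounds cacheable reads.
 */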
static struct g_geom *
g_cache_create(struct g_class *mp, struct g_provider *pp,
    const struct g_cache_metadata *md, u_int type)
{
	struct g_cache_softc *sc;
	struct g_geom *gp;
	struct g_provider *newpp;
	struct g_consumer *cp;
	u_int bshift;
	int i;

	g_topology_assert();

	gp = NULL;
	newpp = NULL;
	cp = NULL;

	G_CACHE_DEBUG(1, "Creating device %s.", md->md_name);

	/* Cache size is minimum 100 entries. */
	if (md->md_size < 100) {
		G_CACHE_DEBUG(0, "Invalid size for device %s.", md->md_name);
		return (NULL);
	}

	/*
	 * Block size must be a power of two, no larger than MAXPHYS,
	 * and a multiple of the provider's sector size.
	 */
	bshift = ffs(md->md_bsize) - 1;
	if (md->md_bsize == 0 || md->md_bsize > MAXPHYS ||
	    md->md_bsize != 1 << bshift ||
	    (md->md_bsize % pp->sectorsize) != 0) {
		G_CACHE_DEBUG(0, "Invalid blocksize for provider %s.", pp->name);
		return (NULL);
	}

	/* Check for duplicate unit. */
	if (g_cache_find_device(mp, md->md_name) != NULL) {
		G_CACHE_DEBUG(0, "Provider %s already exists.", md->md_name);
		return (NULL);
	}

	gp = g_new_geomf(mp, "%s", md->md_name);
	sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO);
	sc->sc_type = type;
	sc->sc_bshift = bshift;
	sc->sc_bsize = 1 << bshift;
	sc->sc_zone = uma_zcreate("gcache", sc->sc_bsize, NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	mtx_init(&sc->sc_mtx, "GEOM CACHE mutex", NULL, MTX_DEF);
	for (i = 0; i < G_CACHE_BUCKETS; i++)
		LIST_INIT(&sc->sc_desclist[i]);
	TAILQ_INIT(&sc->sc_usedlist);
	sc->sc_maxent = md->md_size;
	callout_init_mtx(&sc->sc_callout, &sc->sc_mtx, 0);
	gp->softc = sc;
	sc->sc_geom = gp;
	gp->start = g_cache_start;
	gp->orphan = g_cache_orphan;
	gp->access = g_cache_access;
	gp->dumpconf = g_cache_dumpconf;

	newpp = g_new_providerf(gp, "cache/%s", gp->name);
	newpp->sectorsize = pp->sectorsize;
	newpp->mediasize = pp->mediasize;
	if (type == G_CACHE_TYPE_AUTOMATIC)
		newpp->mediasize -= pp->sectorsize;
	sc->sc_tail = BNO2OFF(OFF2BNO(newpp->mediasize, sc), sc);

	cp = g_new_consumer(gp);
	if (g_attach(cp, pp) != 0) {
		G_CACHE_DEBUG(0, "Cannot attach to provider %s.", pp->name);
		g_destroy_consumer(cp);
		g_destroy_provider(newpp);
		mtx_destroy(&sc->sc_mtx);
		g_free(sc);
		g_destroy_geom(gp);
		return (NULL);
	}

	g_error_provider(newpp, 0);
	G_CACHE_DEBUG(0, "Device %s created.", gp->name);
	callout_reset(&sc->sc_callout, g_cache_timeout * hz, g_cache_go, sc);
	return (gp);
}

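/*
 * Tear the device down.  An open provider blocks destruction unless
 * force is set; afterwards the callout is drained, all cache entries
 * are freed, and the geom is withered.
 */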
static int
g_cache_destroy(struct g_cache_softc *sc, boolean_t force)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_cache_desc *dp, *dp2;
	int i;

	g_topology_assert();
	if (sc == NULL)
		return (ENXIO);
	gp = sc->sc_geom;
	pp = LIST_FIRST(&gp->provider);
	if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		if (force) {
			G_CACHE_DEBUG(0, "Device %s is still open, so it "
			    "cannot be removed cleanly.", pp->name);
		} else {
			G_CACHE_DEBUG(1, "Device %s is still open (r%dw%de%d).",
			    pp->name, pp->acr, pp->acw, pp->ace);
			return (EBUSY);
		}
	} else {
		G_CACHE_DEBUG(0, "Device %s removed.", gp->name);
	}
	callout_drain(&sc->sc_callout);
	mtx_lock(&sc->sc_mtx);
	for (i = 0; i < G_CACHE_BUCKETS; i++) {
		dp = LIST_FIRST(&sc->sc_desclist[i]);
		while (dp != NULL) {
			dp2 = LIST_NEXT(dp, d_next);
			g_cache_free(sc, dp);
			dp = dp2;
		}
	}
	mtx_unlock(&sc->sc_mtx);
	mtx_destroy(&sc->sc_mtx);
	uma_zdestroy(sc->sc_zone);
	g_free(sc);
	gp->softc = NULL;
	g_wither_geom(gp, ENXIO);

	return (0);
}

static int
g_cache_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp)
{

	return (g_cache_destroy(gp->softc, 0));
}

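/*
 * Read and decode the metadata stored in the provider's last sector,
 * dropping the topology lock around the actual I/O.
 */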
static int
g_cache_read_metadata(struct g_consumer *cp, struct g_cache_metadata *md)
{
	struct g_provider *pp;
	u_char *buf;
	int error;

	g_topology_assert();

	error = g_access(cp, 1, 0, 0);
	if (error != 0)
		return (error);
	pp = cp->provider;
	g_topology_unlock();
	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
	    &error);
	g_topology_lock();
	g_access(cp, -1, 0, 0);
	if (buf == NULL)
		return (error);

	/* Decode metadata. */
	cache_metadata_decode(buf, md);
	g_free(buf);

	return (0);
}

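/*
 * Encode the metadata and write it back to the provider's last sector.
 */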
static int
g_cache_write_metadata(struct g_consumer *cp, struct g_cache_metadata *md)
{
	struct g_provider *pp;
	u_char *buf;
	int error;

	g_topology_assert();

	error = g_access(cp, 0, 1, 0);
	if (error != 0)
		return (error);
	pp = cp->provider;
	buf = malloc((size_t)pp->sectorsize, M_GCACHE, M_WAITOK | M_ZERO);
	cache_metadata_encode(md, buf);
	g_topology_unlock();
	error = g_write_data(cp, pp->mediasize - pp->sectorsize, buf,
	    pp->sectorsize);
	g_topology_lock();
	g_access(cp, 0, -1, 0);
	free(buf, M_GCACHE);

	return (error);
}

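/*
 * Taste routine: read the metadata through a throw-away consumer and
 * auto-create the device if the magic matches, the version is
 * compatible, and the recorded provider size is current.
 */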
static struct g_geom *
g_cache_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_cache_metadata md;
	struct g_consumer *cp;
	struct g_geom *gp;
	int error;

	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
	g_topology_assert();

	G_CACHE_DEBUG(3, "Tasting %s.", pp->name);

	gp = g_new_geomf(mp, "cache:taste");
	gp->start = g_cache_start;
	gp->orphan = g_cache_orphan;
	gp->access = g_cache_access;
	cp = g_new_consumer(gp);
	g_attach(cp, pp);
	error = g_cache_read_metadata(cp, &md);
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	if (error != 0)
		return (NULL);

	if (strcmp(md.md_magic, G_CACHE_MAGIC) != 0)
		return (NULL);
	if (md.md_version > G_CACHE_VERSION) {
		printf("geom_cache.ko module is too old to handle %s.\n",
		    pp->name);
		return (NULL);
	}
	if (md.md_provsize != pp->mediasize)
		return (NULL);

	gp = g_cache_create(mp, pp, &md, G_CACHE_TYPE_AUTOMATIC);
	if (gp == NULL) {
		G_CACHE_DEBUG(0, "Can't create %s.", md.md_name);
		return (NULL);
	}
	return (gp);
}

static void
g_cache_ctl_create(struct gctl_req *req, struct g_class *mp)
{
	struct g_cache_metadata md;
	struct g_provider *pp;
	struct g_geom *gp;
	intmax_t *bsize, *size;
	const char *name;
	int *nargs;

	g_topology_assert();

	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
	if (nargs == NULL) {
		gctl_error(req, "No '%s' argument", "nargs");
		return;
	}
	if (*nargs != 2) {
		gctl_error(req, "Invalid number of arguments.");
		return;
	}

	strlcpy(md.md_magic, G_CACHE_MAGIC, sizeof(md.md_magic));
	md.md_version = G_CACHE_VERSION;
	name = gctl_get_asciiparam(req, "arg0");
	if (name == NULL) {
		gctl_error(req, "No 'arg0' argument");
		return;
	}
	strlcpy(md.md_name, name, sizeof(md.md_name));

	size = gctl_get_paraml(req, "size", sizeof(*size));
	if (size == NULL) {
		gctl_error(req, "No '%s' argument", "size");
		return;
	}
	if ((u_int)*size < 100) {
		gctl_error(req, "Invalid '%s' argument", "size");
		return;
	}
	md.md_size = (u_int)*size;

	bsize = gctl_get_paraml(req, "blocksize", sizeof(*bsize));
	if (bsize == NULL) {
		gctl_error(req, "No '%s' argument", "blocksize");
		return;
	}
	if (*bsize < 0) {
		gctl_error(req, "Invalid '%s' argument", "blocksize");
		return;
	}
	md.md_bsize = (u_int)*bsize;

	/* This field is not important here. */
	md.md_provsize = 0;

	name = gctl_get_asciiparam(req, "arg1");
	if (name == NULL) {
		gctl_error(req, "No 'arg1' argument");
		return;
	}
	if (strncmp(name, "/dev/", strlen("/dev/")) == 0)
		name += strlen("/dev/");
	pp = g_provider_by_name(name);
	if (pp == NULL) {
		G_CACHE_DEBUG(1, "Provider %s is invalid.", name);
		gctl_error(req, "Provider %s is invalid.", name);
		return;
	}
	gp = g_cache_create(mp, pp, &md, G_CACHE_TYPE_MANUAL);
	if (gp == NULL) {
		gctl_error(req, "Can't create %s.", md.md_name);
		return;
	}
}

static void
g_cache_ctl_configure(struct gctl_req *req, struct g_class *mp)
{
	struct g_cache_metadata md;
	struct g_cache_softc *sc;
	struct g_consumer *cp;
	intmax_t *bsize, *size;
	const char *name;
	int error, *nargs;

	g_topology_assert();

	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
	if (nargs == NULL) {
		gctl_error(req, "No '%s' argument", "nargs");
		return;
	}
	if (*nargs != 1) {
		gctl_error(req, "Missing device.");
		return;
	}

	name = gctl_get_asciiparam(req, "arg0");
	if (name == NULL) {
		gctl_error(req, "No 'arg0' argument");
		return;
	}
	sc = g_cache_find_device(mp, name);
	if (sc == NULL) {
		G_CACHE_DEBUG(1, "Device %s is invalid.", name);
		gctl_error(req, "Device %s is invalid.", name);
		return;
	}

	size = gctl_get_paraml(req, "size", sizeof(*size));
	if (size == NULL) {
		gctl_error(req, "No '%s' argument", "size");
		return;
	}
	if ((u_int)*size != 0 && (u_int)*size < 100) {
		gctl_error(req, "Invalid '%s' argument", "size");
		return;
	}
	if ((u_int)*size != 0)
		sc->sc_maxent = (u_int)*size;

	bsize = gctl_get_paraml(req, "blocksize", sizeof(*bsize));
	if (bsize == NULL) {
		gctl_error(req, "No '%s' argument", "blocksize");
		return;
	}
	if (*bsize < 0) {
		gctl_error(req, "Invalid '%s' argument", "blocksize");
		return;
	}

	/* Only automatic devices carry on-disk metadata to update. */
	if (sc->sc_type != G_CACHE_TYPE_AUTOMATIC)
		return;

	strlcpy(md.md_name, name, sizeof(md.md_name));
	strlcpy(md.md_magic, G_CACHE_MAGIC, sizeof(md.md_magic));
	md.md_version = G_CACHE_VERSION;
	if ((u_int)*size != 0)
		md.md_size = (u_int)*size;
	else
		md.md_size = sc->sc_maxent;
	if ((u_int)*bsize != 0)
		md.md_bsize = (u_int)*bsize;
	else
		md.md_bsize = sc->sc_bsize;
	cp = LIST_FIRST(&sc->sc_geom->consumer);
	md.md_provsize = cp->provider->mediasize;
	error = g_cache_write_metadata(cp, &md);
	if (error == 0)
		G_CACHE_DEBUG(2, "Metadata on %s updated.", cp->provider->name);
	else
		G_CACHE_DEBUG(0, "Cannot update metadata on %s (error=%d).",
		    cp->provider->name, error);
}

static void
g_cache_ctl_destroy(struct gctl_req *req, struct g_class *mp)
{
	int *nargs, *force, error, i;
	struct g_cache_softc *sc;
	const char *name;
	char param[16];

	g_topology_assert();

	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
	if (nargs == NULL) {
		gctl_error(req, "No '%s' argument", "nargs");
		return;
	}
	if (*nargs <= 0) {
		gctl_error(req, "Missing device(s).");
		return;
	}
	force = gctl_get_paraml(req, "force", sizeof(*force));
	if (force == NULL) {
		gctl_error(req, "No 'force' argument");
		return;
	}

	for (i = 0; i < *nargs; i++) {
		snprintf(param, sizeof(param), "arg%d", i);
		name = gctl_get_asciiparam(req, param);
		if (name == NULL) {
			gctl_error(req, "No 'arg%d' argument", i);
			return;
		}
		sc = g_cache_find_device(mp, name);
		if (sc == NULL) {
			G_CACHE_DEBUG(1, "Device %s is invalid.", name);
			gctl_error(req, "Device %s is invalid.", name);
			return;
		}
		error = g_cache_destroy(sc, *force);
		if (error != 0) {
			gctl_error(req, "Cannot destroy device %s (error=%d).",
			    sc->sc_name, error);
			return;
		}
	}
}

static void
g_cache_ctl_reset(struct gctl_req *req, struct g_class *mp)
{
	struct g_cache_softc *sc;
	const char *name;
	char param[16];
	int i, *nargs;

	g_topology_assert();

	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
	if (nargs == NULL) {
		gctl_error(req, "No '%s' argument", "nargs");
		return;
	}
	if (*nargs <= 0) {
		gctl_error(req, "Missing device(s).");
		return;
	}

	for (i = 0; i < *nargs; i++) {
		snprintf(param, sizeof(param), "arg%d", i);
		name = gctl_get_asciiparam(req, param);
		if (name == NULL) {
			gctl_error(req, "No 'arg%d' argument", i);
			return;
		}
		sc = g_cache_find_device(mp, name);
		if (sc == NULL) {
			G_CACHE_DEBUG(1, "Device %s is invalid.", name);
			gctl_error(req, "Device %s is invalid.", name);
			return;
		}
		/* Zero the statistics counters. */
		sc->sc_reads = 0;
		sc->sc_readbytes = 0;
		sc->sc_cachereads = 0;
		sc->sc_cachereadbytes = 0;
		sc->sc_cachehits = 0;
		sc->sc_cachemisses = 0;
		sc->sc_cachefull = 0;
		sc->sc_writes = 0;
		sc->sc_wrotebytes = 0;
	}
}

static void
g_cache_config(struct gctl_req *req, struct g_class *mp, const char *verb)
{
	uint32_t *version;

	g_topology_assert();

	version = gctl_get_paraml(req, "version", sizeof(*version));
	if (version == NULL) {
		gctl_error(req, "No '%s' argument.", "version");
		return;
	}
	if (*version != G_CACHE_VERSION) {
		gctl_error(req, "Userland and kernel parts are out of sync.");
		return;
	}

	if (strcmp(verb, "create") == 0) {
		g_cache_ctl_create(req, mp);
		return;
	} else if (strcmp(verb, "configure") == 0) {
		g_cache_ctl_configure(req, mp);
		return;
	} else if (strcmp(verb, "destroy") == 0 ||
	    strcmp(verb, "stop") == 0) {
		g_cache_ctl_destroy(req, mp);
		return;
	} else if (strcmp(verb, "reset") == 0) {
		g_cache_ctl_reset(req, mp);
		return;
	}

	gctl_error(req, "Unknown verb.");
}

static void
g_cache_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_cache_softc *sc;

	if (pp != NULL || cp != NULL)
		return;
	sc = gp->softc;
	sbuf_printf(sb, "%s<Size>%u</Size>\n", indent, sc->sc_maxent);
	sbuf_printf(sb, "%s<BlockSize>%u</BlockSize>\n", indent, sc->sc_bsize);
	sbuf_printf(sb, "%s<TailOffset>%ju</TailOffset>\n", indent,
	    (uintmax_t)sc->sc_tail);
	sbuf_printf(sb, "%s<Entries>%u</Entries>\n", indent, sc->sc_nent);
	sbuf_printf(sb, "%s<UsedEntries>%u</UsedEntries>\n", indent,
	    sc->sc_nused);
	sbuf_printf(sb, "%s<InvalidEntries>%u</InvalidEntries>\n", indent,
	    sc->sc_invalid);
	sbuf_printf(sb, "%s<Reads>%ju</Reads>\n", indent, sc->sc_reads);
	sbuf_printf(sb, "%s<ReadBytes>%ju</ReadBytes>\n", indent,
	    sc->sc_readbytes);
	sbuf_printf(sb, "%s<CacheReads>%ju</CacheReads>\n", indent,
	    sc->sc_cachereads);
	sbuf_printf(sb, "%s<CacheReadBytes>%ju</CacheReadBytes>\n", indent,
	    sc->sc_cachereadbytes);
	sbuf_printf(sb, "%s<CacheHits>%ju</CacheHits>\n", indent,
	    sc->sc_cachehits);
	sbuf_printf(sb, "%s<CacheMisses>%ju</CacheMisses>\n", indent,
	    sc->sc_cachemisses);
	sbuf_printf(sb, "%s<CacheFull>%ju</CacheFull>\n", indent,
	    sc->sc_cachefull);
	sbuf_printf(sb, "%s<Writes>%ju</Writes>\n", indent, sc->sc_writes);
	sbuf_printf(sb, "%s<WroteBytes>%ju</WroteBytes>\n", indent,
	    sc->sc_wrotebytes);
}

DECLARE_GEOM_CLASS(g_cache_class, g_cache);