/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2006 Ruslan Ermilov <ru@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/time.h>
#include <vm/uma.h>
#include <geom/geom.h>
#include <geom/geom_dbg.h>
#include <geom/cache/g_cache.h>

FEATURE(geom_cache, "GEOM cache module");

static MALLOC_DEFINE(M_GCACHE, "gcache_data", "GEOM_CACHE Data");

SYSCTL_DECL(_kern_geom);
static SYSCTL_NODE(_kern_geom, OID_AUTO, cache, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "GEOM_CACHE stuff");
static u_int g_cache_debug = 0;
SYSCTL_UINT(_kern_geom_cache, OID_AUTO, debug, CTLFLAG_RW, &g_cache_debug, 0,
    "Debug level");
static u_int g_cache_enable = 1;
SYSCTL_UINT(_kern_geom_cache, OID_AUTO, enable, CTLFLAG_RW, &g_cache_enable, 0,
    "");
static u_int g_cache_timeout = 10;
SYSCTL_UINT(_kern_geom_cache, OID_AUTO, timeout, CTLFLAG_RW, &g_cache_timeout,
    0, "");
static u_int g_cache_idletime = 5;
SYSCTL_UINT(_kern_geom_cache, OID_AUTO, idletime, CTLFLAG_RW, &g_cache_idletime,
    0, "");
static u_int g_cache_used_lo = 5;
static u_int g_cache_used_hi = 20;
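
/*
 * Handler for the used_lo/used_hi sysctls: accept only percentages in
 * the range 0..100 and reject updates that would leave used_lo greater
 * than used_hi.
 */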
static int
sysctl_handle_pct(SYSCTL_HANDLER_ARGS)
{
	u_int val = *(u_int *)arg1;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val > 100)
		return (EINVAL);
	if ((arg1 == &g_cache_used_lo && val > g_cache_used_hi) ||
	    (arg1 == &g_cache_used_hi && g_cache_used_lo > val))
		return (EINVAL);
	*(u_int *)arg1 = val;
	return (0);
}
SYSCTL_PROC(_kern_geom_cache, OID_AUTO, used_lo,
    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, &g_cache_used_lo, 0,
    sysctl_handle_pct, "IU",
    "");
SYSCTL_PROC(_kern_geom_cache, OID_AUTO, used_hi,
    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, &g_cache_used_hi, 0,
    sysctl_handle_pct, "IU",
    "");

static int g_cache_destroy(struct g_cache_softc *sc, boolean_t force);
static g_ctl_destroy_geom_t g_cache_destroy_geom;

static g_taste_t g_cache_taste;
static g_ctl_req_t g_cache_config;
static g_dumpconf_t g_cache_dumpconf;

struct g_class g_cache_class = {
	.name = G_CACHE_CLASS_NAME,
	.version = G_VERSION,
	.ctlreq = g_cache_config,
	.taste = g_cache_taste,
	.destroy_geom = g_cache_destroy_geom
};

#define	OFF2BNO(off, sc)	((off) >> (sc)->sc_bshift)
#define	BNO2OFF(bno, sc)	((bno) << (sc)->sc_bshift)

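/*
 * Allocate a cache entry: reuse the oldest entry on the used list if one
 * is available, otherwise allocate a new descriptor and a data buffer
 * from the UMA zone, up to sc_maxent entries.
 */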
static struct g_cache_desc *
g_cache_alloc(struct g_cache_softc *sc)
{
	struct g_cache_desc *dp;

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	if (!TAILQ_EMPTY(&sc->sc_usedlist)) {
		dp = TAILQ_FIRST(&sc->sc_usedlist);
		TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
		sc->sc_nused--;
		dp->d_flags = 0;
		LIST_REMOVE(dp, d_next);
		return (dp);
	}
	if (sc->sc_nent > sc->sc_maxent) {
		sc->sc_cachefull++;
		return (NULL);
	}
	dp = malloc(sizeof(*dp), M_GCACHE, M_NOWAIT | M_ZERO);
	if (dp == NULL)
		return (NULL);
	dp->d_data = uma_zalloc(sc->sc_zone, M_NOWAIT);
	if (dp->d_data == NULL) {
		free(dp, M_GCACHE);
		return (NULL);
	}
	sc->sc_nent++;
	return (dp);
}

static void
g_cache_free(struct g_cache_softc *sc, struct g_cache_desc *dp)
{

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	uma_zfree(sc->sc_zone, dp->d_data);
	free(dp, M_GCACHE);
	sc->sc_nent--;
}

static void
g_cache_free_used(struct g_cache_softc *sc)
{
	struct g_cache_desc *dp;
	u_int n;

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	n = g_cache_used_lo * sc->sc_maxent / 100;
	while (sc->sc_nused > n) {
		KASSERT(!TAILQ_EMPTY(&sc->sc_usedlist), ("used list empty"));
		dp = TAILQ_FIRST(&sc->sc_usedlist);
		TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
		sc->sc_nused--;
		LIST_REMOVE(dp, d_next);
		g_cache_free(sc, dp);
	}
}

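/*
 * Deliver the overlapping part of a cached block to bp.  The bio is
 * completed once all of its data has been copied; the descriptor's
 * access time is refreshed and its position on the used list updated.
 */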
static void
g_cache_deliver(struct g_cache_softc *sc, struct bio *bp,
    struct g_cache_desc *dp, int error)
{
	off_t off1, off, len;

	mtx_assert(&sc->sc_mtx, MA_OWNED);
	KASSERT(OFF2BNO(bp->bio_offset, sc) <= dp->d_bno, ("wrong entry"));
	KASSERT(OFF2BNO(bp->bio_offset + bp->bio_length - 1, sc) >=
	    dp->d_bno, ("wrong entry"));

	off1 = BNO2OFF(dp->d_bno, sc);
	off = MAX(bp->bio_offset, off1);
	len = MIN(bp->bio_offset + bp->bio_length, off1 + sc->sc_bsize) - off;

	if (bp->bio_error == 0)
		bp->bio_error = error;
	if (bp->bio_error == 0) {
		bcopy(dp->d_data + (off - off1),
		    bp->bio_data + (off - bp->bio_offset), len);
	}
	bp->bio_completed += len;
	KASSERT(bp->bio_completed <= bp->bio_length, ("extra data"));
	if (bp->bio_completed == bp->bio_length) {
		if (bp->bio_error != 0)
			bp->bio_completed = 0;
		g_io_deliver(bp, bp->bio_error);
	}

	if (dp->d_flags & D_FLAG_USED) {
		TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
		TAILQ_INSERT_TAIL(&sc->sc_usedlist, dp, d_used);
	} else if (OFF2BNO(off + len, sc) > dp->d_bno) {
		TAILQ_INSERT_TAIL(&sc->sc_usedlist, dp, d_used);
		sc->sc_nused++;
		dp->d_flags |= D_FLAG_USED;
	}
	dp->d_atime = time_uptime;
}

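/*
 * Completion handler for a cache-fill read: hand the freshly read block
 * to every bio waiting on the descriptor, then drop the descriptor if it
 * was invalidated in the meantime or if the read failed.
 */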
static void
g_cache_done(struct bio *bp)
{
	struct g_cache_softc *sc;
	struct g_cache_desc *dp;
	struct bio *bp2, *tmpbp;

	sc = bp->bio_from->geom->softc;
	KASSERT(G_CACHE_DESC1(bp) == sc, ("corrupt bio_caller in g_cache_done()"));
	dp = G_CACHE_DESC2(bp);
	mtx_lock(&sc->sc_mtx);
	bp2 = dp->d_biolist;
	while (bp2 != NULL) {
		KASSERT(G_CACHE_NEXT_BIO1(bp2) == sc, ("corrupt bio_driver in g_cache_done()"));
		tmpbp = G_CACHE_NEXT_BIO2(bp2);
		g_cache_deliver(sc, bp2, dp, bp->bio_error);
		bp2 = tmpbp;
	}
	dp->d_biolist = NULL;
	if (dp->d_flags & D_FLAG_INVALID) {
		sc->sc_invalid--;
		g_cache_free(sc, dp);
	} else if (bp->bio_error) {
		LIST_REMOVE(dp, d_next);
		if (dp->d_flags & D_FLAG_USED) {
			TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
			sc->sc_nused--;
		}
		g_cache_free(sc, dp);
	}
	mtx_unlock(&sc->sc_mtx);
	g_destroy_bio(bp);
}

static struct g_cache_desc *
g_cache_lookup(struct g_cache_softc *sc, off_t bno)
{
	struct g_cache_desc *dp;

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	LIST_FOREACH(dp, &sc->sc_desclist[G_CACHE_BUCKET(bno)], d_next)
		if (dp->d_bno == bno)
			return (dp);
	return (NULL);
}

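/*
 * Satisfy (part of) a read from the cache.  On a hit the data is either
 * delivered immediately or the bio is queued on the in-flight descriptor;
 * on a miss a new descriptor is allocated and a full-block read is sent
 * down to the consumer.
 */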
static int
g_cache_read(struct g_cache_softc *sc, struct bio *bp)
{
	struct bio *cbp;
	struct g_cache_desc *dp;

	mtx_lock(&sc->sc_mtx);
	dp = g_cache_lookup(sc,
	    OFF2BNO(bp->bio_offset + bp->bio_completed, sc));
	if (dp != NULL) {
		/* Add to waiters list or deliver. */
		sc->sc_cachehits++;
		if (dp->d_biolist != NULL) {
			G_CACHE_NEXT_BIO1(bp) = sc;
			G_CACHE_NEXT_BIO2(bp) = dp->d_biolist;
			dp->d_biolist = bp;
		} else
			g_cache_deliver(sc, bp, dp, 0);
		mtx_unlock(&sc->sc_mtx);
		return (0);
	}

	/* Cache miss. Allocate entry and schedule bio. */
	sc->sc_cachemisses++;
	dp = g_cache_alloc(sc);
	if (dp == NULL) {
		mtx_unlock(&sc->sc_mtx);
		return (ENOMEM);
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_cache_free(sc, dp);
		mtx_unlock(&sc->sc_mtx);
		return (ENOMEM);
	}

	dp->d_bno = OFF2BNO(bp->bio_offset + bp->bio_completed, sc);
	G_CACHE_NEXT_BIO1(bp) = sc;
	G_CACHE_NEXT_BIO2(bp) = NULL;
	dp->d_biolist = bp;
	LIST_INSERT_HEAD(&sc->sc_desclist[G_CACHE_BUCKET(dp->d_bno)],
	    dp, d_next);
	mtx_unlock(&sc->sc_mtx);

	G_CACHE_DESC1(cbp) = sc;
	G_CACHE_DESC2(cbp) = dp;
	cbp->bio_done = g_cache_done;
	cbp->bio_offset = BNO2OFF(dp->d_bno, sc);
	cbp->bio_data = dp->d_data;
	cbp->bio_length = sc->sc_bsize;
	g_io_request(cbp, LIST_FIRST(&bp->bio_to->geom->consumer));
	return (0);
}

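/*
 * Invalidate every cached block touched by a write.  Entries with a read
 * still in flight are only marked invalid and are freed later in
 * g_cache_done().
 */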
static void
g_cache_invalidate(struct g_cache_softc *sc, struct bio *bp)
{
	struct g_cache_desc *dp;
	off_t bno, lim;

	mtx_lock(&sc->sc_mtx);
	bno = OFF2BNO(bp->bio_offset, sc);
	lim = OFF2BNO(bp->bio_offset + bp->bio_length - 1, sc);
	do {
		if ((dp = g_cache_lookup(sc, bno)) != NULL) {
			LIST_REMOVE(dp, d_next);
			if (dp->d_flags & D_FLAG_USED) {
				TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
				sc->sc_nused--;
			}
			if (dp->d_biolist == NULL)
				g_cache_free(sc, dp);
			else {
				dp->d_flags = D_FLAG_INVALID;
				sc->sc_invalid++;
			}
		}
		bno++;
	} while (bno <= lim);
	mtx_unlock(&sc->sc_mtx);
}

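/*
 * I/O dispatch routine.  Reads that fit entirely in one or two cache
 * blocks are serviced through the cache; writes invalidate overlapping
 * blocks.  Other requests are passed straight down to the underlying
 * provider.
 */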
static void
g_cache_start(struct bio *bp)
{
	struct g_cache_softc *sc;
	struct g_geom *gp;
	struct g_cache_desc *dp;
	struct bio *cbp;

	gp = bp->bio_to->geom;
	sc = gp->softc;
	G_CACHE_LOGREQ(bp, "Request received.");
	switch (bp->bio_cmd) {
	case BIO_READ:
		sc->sc_reads++;
		sc->sc_readbytes += bp->bio_length;
		if (!g_cache_enable)
			break;
		if (bp->bio_offset + bp->bio_length > sc->sc_tail)
			break;
		if (OFF2BNO(bp->bio_offset, sc) ==
		    OFF2BNO(bp->bio_offset + bp->bio_length - 1, sc)) {
			sc->sc_cachereads++;
			sc->sc_cachereadbytes += bp->bio_length;
			if (g_cache_read(sc, bp) == 0)
				return;
			sc->sc_cachereads--;
			sc->sc_cachereadbytes -= bp->bio_length;
			break;
		} else if (OFF2BNO(bp->bio_offset, sc) + 1 ==
		    OFF2BNO(bp->bio_offset + bp->bio_length - 1, sc)) {
			mtx_lock(&sc->sc_mtx);
			dp = g_cache_lookup(sc, OFF2BNO(bp->bio_offset, sc));
			if (dp == NULL || dp->d_biolist != NULL) {
				mtx_unlock(&sc->sc_mtx);
				break;
			}
			sc->sc_cachereads++;
			sc->sc_cachereadbytes += bp->bio_length;
			g_cache_deliver(sc, bp, dp, 0);
			mtx_unlock(&sc->sc_mtx);
			if (g_cache_read(sc, bp) == 0)
				return;
			sc->sc_cachereads--;
			sc->sc_cachereadbytes -= bp->bio_length;
			break;
		}
		break;
	case BIO_WRITE:
		sc->sc_writes++;
		sc->sc_wrotebytes += bp->bio_length;
		g_cache_invalidate(sc, bp);
		break;
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	cbp->bio_done = g_std_done;
	G_CACHE_LOGREQ(cbp, "Sending request.");
	g_io_request(cbp, LIST_FIRST(&gp->consumer));
}

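/*
 * Periodic callout: move idle entries onto the used list and trim the
 * used list back towards the used_lo watermark once it grows past
 * used_hi.
 */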
static void
g_cache_go(void *arg)
{
	struct g_cache_softc *sc = arg;
	struct g_cache_desc *dp;
	int i;

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	/* Forcibly mark idle ready entries as used. */
	for (i = 0; i < G_CACHE_BUCKETS; i++) {
		LIST_FOREACH(dp, &sc->sc_desclist[i], d_next) {
			if (dp->d_flags & D_FLAG_USED ||
			    dp->d_biolist != NULL ||
			    time_uptime - dp->d_atime < g_cache_idletime)
				continue;
			TAILQ_INSERT_TAIL(&sc->sc_usedlist, dp, d_used);
			sc->sc_nused++;
			dp->d_flags |= D_FLAG_USED;
		}
	}

	/* Keep the number of used entries low. */
	if (sc->sc_nused > g_cache_used_hi * sc->sc_maxent / 100)
		g_cache_free_used(sc);

	callout_reset(&sc->sc_callout, g_cache_timeout * hz, g_cache_go, sc);
}

static int
g_cache_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	int error;

	gp = pp->geom;
	cp = LIST_FIRST(&gp->consumer);
	error = g_access(cp, dr, dw, de);

	return (error);
}

static void
g_cache_orphan(struct g_consumer *cp)
{

	g_topology_assert();
	g_cache_destroy(cp->geom->softc, 1);
}

static struct g_cache_softc *
g_cache_find_device(struct g_class *mp, const char *name)
{
	struct g_geom *gp;

	LIST_FOREACH(gp, &mp->geom, geom) {
		if (strcmp(gp->name, name) == 0)
			return (gp->softc);
	}
	return (NULL);
}

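/*
 * Create a new cache geom on top of pp.  md supplies the cache size (in
 * blocks) and the block size; for automatic (tasted) devices the final
 * sector holding the metadata is excluded from the new provider's
 * media size.
 */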
static struct g_geom *
g_cache_create(struct g_class *mp, struct g_provider *pp,
    const struct g_cache_metadata *md, u_int type)
{
	struct g_cache_softc *sc;
	struct g_geom *gp;
	struct g_provider *newpp;
	struct g_consumer *cp;
	u_int bshift;
	int i;

	g_topology_assert();

	gp = NULL;
	newpp = NULL;
	cp = NULL;

	G_CACHE_DEBUG(1, "Creating device %s.", md->md_name);

	/* Cache size is minimum 100. */
	if (md->md_size < 100) {
		G_CACHE_DEBUG(0, "Invalid size for device %s.", md->md_name);
		return (NULL);
	}

	/* Block size restrictions. */
	bshift = ffs(md->md_bsize) - 1;
	if (md->md_bsize == 0 || md->md_bsize > maxphys ||
	    md->md_bsize != 1 << bshift ||
	    (md->md_bsize % pp->sectorsize) != 0) {
		G_CACHE_DEBUG(0, "Invalid blocksize for provider %s.", pp->name);
		return (NULL);
	}

	/* Check for duplicate unit. */
	if (g_cache_find_device(mp, (const char *)&md->md_name) != NULL) {
		G_CACHE_DEBUG(0, "Provider %s already exists.", md->md_name);
		return (NULL);
	}

	gp = g_new_geomf(mp, "%s", md->md_name);
	sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO);
	sc->sc_type = type;
	sc->sc_bshift = bshift;
	sc->sc_bsize = 1 << bshift;
	sc->sc_zone = uma_zcreate("gcache", sc->sc_bsize, NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	mtx_init(&sc->sc_mtx, "GEOM CACHE mutex", NULL, MTX_DEF);
	for (i = 0; i < G_CACHE_BUCKETS; i++)
		LIST_INIT(&sc->sc_desclist[i]);
	TAILQ_INIT(&sc->sc_usedlist);
	sc->sc_maxent = md->md_size;
	callout_init_mtx(&sc->sc_callout, &sc->sc_mtx, 0);
	gp->softc = sc;
	sc->sc_geom = gp;
	gp->start = g_cache_start;
	gp->orphan = g_cache_orphan;
	gp->access = g_cache_access;
	gp->dumpconf = g_cache_dumpconf;

	newpp = g_new_providerf(gp, "cache/%s", gp->name);
	newpp->sectorsize = pp->sectorsize;
	newpp->mediasize = pp->mediasize;
	if (type == G_CACHE_TYPE_AUTOMATIC)
		newpp->mediasize -= pp->sectorsize;
	sc->sc_tail = BNO2OFF(OFF2BNO(newpp->mediasize, sc), sc);

	cp = g_new_consumer(gp);
	if (g_attach(cp, pp) != 0) {
		G_CACHE_DEBUG(0, "Cannot attach to provider %s.", pp->name);
		g_destroy_consumer(cp);
		g_destroy_provider(newpp);
		mtx_destroy(&sc->sc_mtx);
		g_free(sc);
		g_destroy_geom(gp);
		return (NULL);
	}

	g_error_provider(newpp, 0);
	G_CACHE_DEBUG(0, "Device %s created.", gp->name);
	callout_reset(&sc->sc_callout, g_cache_timeout * hz, g_cache_go, sc);
	return (gp);
}

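/*
 * Tear down a cache geom: stop the callout, free all descriptors and the
 * UMA zone, and wither the geom.  Refuses to destroy an open device
 * unless force is set.
 */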
static int
g_cache_destroy(struct g_cache_softc *sc, boolean_t force)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_cache_desc *dp, *dp2;
	int i;

	g_topology_assert();
	if (sc == NULL)
		return (ENXIO);
	gp = sc->sc_geom;
	pp = LIST_FIRST(&gp->provider);
	if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		if (force) {
			G_CACHE_DEBUG(0, "Device %s is still open, so it "
			    "can't be definitely removed.", pp->name);
		} else {
			G_CACHE_DEBUG(1, "Device %s is still open (r%dw%de%d).",
			    pp->name, pp->acr, pp->acw, pp->ace);
			return (EBUSY);
		}
	} else {
		G_CACHE_DEBUG(0, "Device %s removed.", gp->name);
	}
	callout_drain(&sc->sc_callout);
	mtx_lock(&sc->sc_mtx);
	for (i = 0; i < G_CACHE_BUCKETS; i++) {
		dp = LIST_FIRST(&sc->sc_desclist[i]);
		while (dp != NULL) {
			dp2 = LIST_NEXT(dp, d_next);
			g_cache_free(sc, dp);
			dp = dp2;
		}
	}
	mtx_unlock(&sc->sc_mtx);
	mtx_destroy(&sc->sc_mtx);
	uma_zdestroy(sc->sc_zone);
	g_free(sc);
	gp->softc = NULL;
	g_wither_geom(gp, ENXIO);

	return (0);
}

static int
g_cache_destroy_geom(struct gctl_req *req, struct g_class *mp, struct g_geom *gp)
{

	return (g_cache_destroy(gp->softc, 0));
}

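/*
 * Read and decode the metadata stored in the provider's last sector.
 */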
static int
g_cache_read_metadata(struct g_consumer *cp, struct g_cache_metadata *md)
{
	struct g_provider *pp;
	u_char *buf;
	int error;

	g_topology_assert();

	error = g_access(cp, 1, 0, 0);
	if (error != 0)
		return (error);
	pp = cp->provider;
	g_topology_unlock();
	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
	    &error);
	g_topology_lock();
	g_access(cp, -1, 0, 0);
	if (buf == NULL)
		return (error);

	/* Decode metadata. */
	cache_metadata_decode(buf, md);
	g_free(buf);

	return (0);
}

static int
g_cache_write_metadata(struct g_consumer *cp, struct g_cache_metadata *md)
{
	struct g_provider *pp;
	u_char *buf;
	int error;

	g_topology_assert();

	error = g_access(cp, 0, 1, 0);
	if (error != 0)
		return (error);
	pp = cp->provider;
	buf = malloc((size_t)pp->sectorsize, M_GCACHE, M_WAITOK | M_ZERO);
	cache_metadata_encode(md, buf);
	g_topology_unlock();
	error = g_write_data(cp, pp->mediasize - pp->sectorsize, buf, pp->sectorsize);
	g_topology_lock();
	g_access(cp, 0, -1, 0);
	free(buf, M_GCACHE);

	return (error);
}

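/*
 * Taste callback: look for valid metadata in the last sector of pp and,
 * if found, automatically create a cache device on top of it.
 */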
static struct g_geom *
g_cache_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_cache_metadata md;
	struct g_consumer *cp;
	struct g_geom *gp;
	int error;

	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
	g_topology_assert();

	G_CACHE_DEBUG(3, "Tasting %s.", pp->name);

	gp = g_new_geomf(mp, "cache:taste");
	gp->start = g_cache_start;
	gp->orphan = g_cache_orphan;
	gp->access = g_cache_access;
	cp = g_new_consumer(gp);
	cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
	error = g_attach(cp, pp);
	if (error == 0) {
		error = g_cache_read_metadata(cp, &md);
		g_detach(cp);
	}
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	if (error != 0)
		return (NULL);

	if (strcmp(md.md_magic, G_CACHE_MAGIC) != 0)
		return (NULL);
	if (md.md_version > G_CACHE_VERSION) {
		printf("geom_cache.ko module is too old to handle %s.\n",
		    pp->name);
		return (NULL);
	}
	if (md.md_provsize != pp->mediasize)
		return (NULL);

	gp = g_cache_create(mp, pp, &md, G_CACHE_TYPE_AUTOMATIC);
	if (gp == NULL) {
		G_CACHE_DEBUG(0, "Can't create %s.", md.md_name);
		return (NULL);
	}
	return (gp);
}

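/*
 * "create" verb: build a manual cache device (no on-disk metadata) from
 * the name, size, and blocksize arguments supplied by the userland
 * control utility.
 */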
static void
g_cache_ctl_create(struct gctl_req *req, struct g_class *mp)
{
	struct g_cache_metadata md;
	struct g_provider *pp;
	struct g_geom *gp;
	intmax_t *bsize, *size;
	const char *name;
	int *nargs;

	g_topology_assert();

	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
	if (nargs == NULL) {
		gctl_error(req, "No '%s' argument", "nargs");
		return;
	}
	if (*nargs != 2) {
		gctl_error(req, "Invalid number of arguments.");
		return;
	}

	strlcpy(md.md_magic, G_CACHE_MAGIC, sizeof(md.md_magic));
	md.md_version = G_CACHE_VERSION;
	name = gctl_get_asciiparam(req, "arg0");
	if (name == NULL) {
		gctl_error(req, "No 'arg0' argument");
		return;
	}
	strlcpy(md.md_name, name, sizeof(md.md_name));

	size = gctl_get_paraml(req, "size", sizeof(*size));
	if (size == NULL) {
		gctl_error(req, "No '%s' argument", "size");
		return;
	}
	if ((u_int)*size < 100) {
		gctl_error(req, "Invalid '%s' argument", "size");
		return;
	}
	md.md_size = (u_int)*size;

	bsize = gctl_get_paraml(req, "blocksize", sizeof(*bsize));
	if (bsize == NULL) {
		gctl_error(req, "No '%s' argument", "blocksize");
		return;
	}
	if (*bsize < 0) {
		gctl_error(req, "Invalid '%s' argument", "blocksize");
		return;
	}
	md.md_bsize = (u_int)*bsize;

	/* This field is not important here. */
	md.md_provsize = 0;

	pp = gctl_get_provider(req, "arg1");
	if (pp == NULL)
		return;
	gp = g_cache_create(mp, pp, &md, G_CACHE_TYPE_MANUAL);
	if (gp == NULL) {
		gctl_error(req, "Can't create %s.", md.md_name);
		return;
	}
}

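/*
 * "configure" verb: adjust the entry limit of an existing device and,
 * for automatic devices, rewrite the on-disk metadata with the new
 * size and blocksize.
 */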
static void
g_cache_ctl_configure(struct gctl_req *req, struct g_class *mp)
{
	struct g_cache_metadata md;
	struct g_cache_softc *sc;
	struct g_consumer *cp;
	intmax_t *bsize, *size;
	const char *name;
	int error, *nargs;

	g_topology_assert();

	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
	if (nargs == NULL) {
		gctl_error(req, "No '%s' argument", "nargs");
		return;
	}
	if (*nargs != 1) {
		gctl_error(req, "Missing device.");
		return;
	}

	name = gctl_get_asciiparam(req, "arg0");
	if (name == NULL) {
		gctl_error(req, "No 'arg0' argument");
		return;
	}
	sc = g_cache_find_device(mp, name);
	if (sc == NULL) {
		G_CACHE_DEBUG(1, "Device %s is invalid.", name);
		gctl_error(req, "Device %s is invalid.", name);
		return;
	}

	size = gctl_get_paraml(req, "size", sizeof(*size));
	if (size == NULL) {
		gctl_error(req, "No '%s' argument", "size");
		return;
	}
	if ((u_int)*size != 0 && (u_int)*size < 100) {
		gctl_error(req, "Invalid '%s' argument", "size");
		return;
	}
	if ((u_int)*size != 0)
		sc->sc_maxent = (u_int)*size;

	bsize = gctl_get_paraml(req, "blocksize", sizeof(*bsize));
	if (bsize == NULL) {
		gctl_error(req, "No '%s' argument", "blocksize");
		return;
	}
	if (*bsize < 0) {
		gctl_error(req, "Invalid '%s' argument", "blocksize");
		return;
	}

	if (sc->sc_type != G_CACHE_TYPE_AUTOMATIC)
		return;

	strlcpy(md.md_name, name, sizeof(md.md_name));
	strlcpy(md.md_magic, G_CACHE_MAGIC, sizeof(md.md_magic));
	md.md_version = G_CACHE_VERSION;
	if ((u_int)*size != 0)
		md.md_size = (u_int)*size;
	else
		md.md_size = sc->sc_maxent;
	if ((u_int)*bsize != 0)
		md.md_bsize = (u_int)*bsize;
	else
		md.md_bsize = sc->sc_bsize;
	cp = LIST_FIRST(&sc->sc_geom->consumer);
	md.md_provsize = cp->provider->mediasize;
	error = g_cache_write_metadata(cp, &md);
	if (error == 0)
		G_CACHE_DEBUG(2, "Metadata on %s updated.", cp->provider->name);
	else
		G_CACHE_DEBUG(0, "Cannot update metadata on %s (error=%d).",
		    cp->provider->name, error);
}

static void
g_cache_ctl_destroy(struct gctl_req *req, struct g_class *mp)
{
	int *nargs, *force, error, i;
	struct g_cache_softc *sc;
	const char *name;
	char param[16];

	g_topology_assert();

	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
	if (nargs == NULL) {
		gctl_error(req, "No '%s' argument", "nargs");
		return;
	}
	if (*nargs <= 0) {
		gctl_error(req, "Missing device(s).");
		return;
	}
	force = gctl_get_paraml(req, "force", sizeof(*force));
	if (force == NULL) {
		gctl_error(req, "No 'force' argument");
		return;
	}

	for (i = 0; i < *nargs; i++) {
		snprintf(param, sizeof(param), "arg%d", i);
		name = gctl_get_asciiparam(req, param);
		if (name == NULL) {
			gctl_error(req, "No 'arg%d' argument", i);
			return;
		}
		sc = g_cache_find_device(mp, name);
		if (sc == NULL) {
			G_CACHE_DEBUG(1, "Device %s is invalid.", name);
			gctl_error(req, "Device %s is invalid.", name);
			return;
		}
		error = g_cache_destroy(sc, *force);
		if (error != 0) {
			gctl_error(req, "Cannot destroy device %s (error=%d).",
			    sc->sc_name, error);
			return;
		}
	}
}

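/*
 * "reset" verb: zero the per-device statistics counters.
 */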
static void
g_cache_ctl_reset(struct gctl_req *req, struct g_class *mp)
{
	struct g_cache_softc *sc;
	const char *name;
	char param[16];
	int i, *nargs;

	g_topology_assert();

	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
	if (nargs == NULL) {
		gctl_error(req, "No '%s' argument", "nargs");
		return;
	}
	if (*nargs <= 0) {
		gctl_error(req, "Missing device(s).");
		return;
	}

	for (i = 0; i < *nargs; i++) {
		snprintf(param, sizeof(param), "arg%d", i);
		name = gctl_get_asciiparam(req, param);
		if (name == NULL) {
			gctl_error(req, "No 'arg%d' argument", i);
			return;
		}
		sc = g_cache_find_device(mp, name);
		if (sc == NULL) {
			G_CACHE_DEBUG(1, "Device %s is invalid.", name);
			gctl_error(req, "Device %s is invalid.", name);
			return;
		}
		sc->sc_reads = 0;
		sc->sc_readbytes = 0;
		sc->sc_cachereads = 0;
		sc->sc_cachereadbytes = 0;
		sc->sc_cachehits = 0;
		sc->sc_cachemisses = 0;
		sc->sc_cachefull = 0;
		sc->sc_writes = 0;
		sc->sc_wrotebytes = 0;
	}
}

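/*
 * Control request dispatcher: verify the userland/kernel interface
 * version and hand the request to the verb-specific handler.
 */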
static void
g_cache_config(struct gctl_req *req, struct g_class *mp, const char *verb)
{
	uint32_t *version;

	g_topology_assert();

	version = gctl_get_paraml(req, "version", sizeof(*version));
	if (version == NULL) {
		gctl_error(req, "No '%s' argument.", "version");
		return;
	}
	if (*version != G_CACHE_VERSION) {
		gctl_error(req, "Userland and kernel parts are out of sync.");
		return;
	}

	if (strcmp(verb, "create") == 0) {
		g_cache_ctl_create(req, mp);
		return;
	} else if (strcmp(verb, "configure") == 0) {
		g_cache_ctl_configure(req, mp);
		return;
	} else if (strcmp(verb, "destroy") == 0 ||
	    strcmp(verb, "stop") == 0) {
		g_cache_ctl_destroy(req, mp);
		return;
	} else if (strcmp(verb, "reset") == 0) {
		g_cache_ctl_reset(req, mp);
		return;
	}

	gctl_error(req, "Unknown verb.");
}

static void
g_cache_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_cache_softc *sc;

	if (pp != NULL || cp != NULL)
		return;
	sc = gp->softc;
	sbuf_printf(sb, "%s<Size>%u</Size>\n", indent, sc->sc_maxent);
	sbuf_printf(sb, "%s<BlockSize>%u</BlockSize>\n", indent, sc->sc_bsize);
	sbuf_printf(sb, "%s<TailOffset>%ju</TailOffset>\n", indent,
	    (uintmax_t)sc->sc_tail);
	sbuf_printf(sb, "%s<Entries>%u</Entries>\n", indent, sc->sc_nent);
	sbuf_printf(sb, "%s<UsedEntries>%u</UsedEntries>\n", indent,
	    sc->sc_nused);
	sbuf_printf(sb, "%s<InvalidEntries>%u</InvalidEntries>\n", indent,
	    sc->sc_invalid);
	sbuf_printf(sb, "%s<Reads>%ju</Reads>\n", indent, sc->sc_reads);
	sbuf_printf(sb, "%s<ReadBytes>%ju</ReadBytes>\n", indent,
	    sc->sc_readbytes);
	sbuf_printf(sb, "%s<CacheReads>%ju</CacheReads>\n", indent,
	    sc->sc_cachereads);
	sbuf_printf(sb, "%s<CacheReadBytes>%ju</CacheReadBytes>\n", indent,
	    sc->sc_cachereadbytes);
	sbuf_printf(sb, "%s<CacheHits>%ju</CacheHits>\n", indent,
	    sc->sc_cachehits);
	sbuf_printf(sb, "%s<CacheMisses>%ju</CacheMisses>\n", indent,
	    sc->sc_cachemisses);
	sbuf_printf(sb, "%s<CacheFull>%ju</CacheFull>\n", indent,
	    sc->sc_cachefull);
	sbuf_printf(sb, "%s<Writes>%ju</Writes>\n", indent, sc->sc_writes);
	sbuf_printf(sb, "%s<WroteBytes>%ju</WroteBytes>\n", indent,
	    sc->sc_wrotebytes);
}

DECLARE_GEOM_CLASS(g_cache_class, g_cache);
MODULE_VERSION(geom_cache, 0);