sys/geom/gate/g_gate.c
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * Copyright (c) 2009-2010 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Pawel Jakub Dawidek
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/fcntl.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/limits.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/signalvar.h>
#include <sys/time.h>
#include <machine/atomic.h>

#include <geom/geom.h>
#include <geom/gate/g_gate.h>

FEATURE(geom_gate, "GEOM Gate module");

static MALLOC_DEFINE(M_GATE, "gg_data", "GEOM Gate Data");

SYSCTL_DECL(_kern_geom);
static SYSCTL_NODE(_kern_geom, OID_AUTO, gate, CTLFLAG_RW, 0,
    "GEOM_GATE configuration");
static int g_gate_debug = 0;
SYSCTL_INT(_kern_geom_gate, OID_AUTO, debug, CTLFLAG_RWTUN, &g_gate_debug, 0,
    "Debug level");
static u_int g_gate_maxunits = 256;
SYSCTL_UINT(_kern_geom_gate, OID_AUTO, maxunits, CTLFLAG_RDTUN,
    &g_gate_maxunits, 0, "Maximum number of ggate devices");

struct g_class g_gate_class = {
        .name = G_GATE_CLASS_NAME,
        .version = G_VERSION,
};

static struct cdev *status_dev;
static d_ioctl_t g_gate_ioctl;
static struct cdevsw g_gate_cdevsw = {
        .d_version = D_VERSION,
        .d_ioctl = g_gate_ioctl,
        .d_name = G_GATE_CTL_NAME
};

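/*
 * Table of created devices indexed by unit number; the table and
 * g_gate_nunits are protected by g_gate_units_lock.
 */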
static struct g_gate_softc **g_gate_units;
static u_int g_gate_nunits;
static struct mtx g_gate_units_lock;

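/*
 * Drop the read consumer: give back its read access count, detach it from
 * its provider and destroy it.  Runs with the topology lock held, either
 * called directly or as a posted event handler.
 */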
static void
g_gate_detach(void *arg, int flags __unused)
{
        struct g_consumer *cp = arg;

        g_topology_assert();
        G_GATE_DEBUG(1, "Destroying read consumer on provider %s orphan.",
            cp->provider->name);
        (void)g_access(cp, -1, 0, 0);
        g_detach(cp);
        g_destroy_consumer(cp);
}

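/*
 * Tear down a gate device: mark it for destruction, cancel all queued bios
 * with ENXIO, wait for the last reference to drain and free the softc.
 * Called with the topology lock and g_gate_units_lock held; the latter is
 * dropped before returning.
 */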
static int
g_gate_destroy(struct g_gate_softc *sc, boolean_t force)
{
        struct bio_queue_head queue;
        struct g_provider *pp;
        struct g_consumer *cp;
        struct g_geom *gp;
        struct bio *bp;

        g_topology_assert();
        mtx_assert(&g_gate_units_lock, MA_OWNED);
        pp = sc->sc_provider;
        if (!force && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
                mtx_unlock(&g_gate_units_lock);
                return (EBUSY);
        }
        mtx_unlock(&g_gate_units_lock);
        mtx_lock(&sc->sc_queue_mtx);
        if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0)
                sc->sc_flags |= G_GATE_FLAG_DESTROY;
        wakeup(sc);
        mtx_unlock(&sc->sc_queue_mtx);
        gp = pp->geom;
        g_wither_provider(pp, ENXIO);
        callout_drain(&sc->sc_callout);
        bioq_init(&queue);
        mtx_lock(&sc->sc_queue_mtx);
        while ((bp = bioq_takefirst(&sc->sc_inqueue)) != NULL) {
                sc->sc_queue_count--;
                bioq_insert_tail(&queue, bp);
        }
        while ((bp = bioq_takefirst(&sc->sc_outqueue)) != NULL) {
                sc->sc_queue_count--;
                bioq_insert_tail(&queue, bp);
        }
        mtx_unlock(&sc->sc_queue_mtx);
        g_topology_unlock();
        while ((bp = bioq_takefirst(&queue)) != NULL) {
                G_GATE_LOGREQ(1, bp, "Request canceled.");
                g_io_deliver(bp, ENXIO);
        }
        mtx_lock(&g_gate_units_lock);
        /* One reference is ours. */
        sc->sc_ref--;
        while (sc->sc_ref > 0)
                msleep(&sc->sc_ref, &g_gate_units_lock, 0, "gg:destroy", 0);
        g_gate_units[sc->sc_unit] = NULL;
        KASSERT(g_gate_nunits > 0, ("negative g_gate_nunits?"));
        g_gate_nunits--;
        mtx_unlock(&g_gate_units_lock);
        mtx_destroy(&sc->sc_queue_mtx);
        mtx_destroy(&sc->sc_read_mtx);
        g_topology_lock();
        if ((cp = sc->sc_readcons) != NULL) {
                sc->sc_readcons = NULL;
                (void)g_access(cp, -1, 0, 0);
                g_detach(cp);
                g_destroy_consumer(cp);
        }
        G_GATE_DEBUG(1, "Device %s destroyed.", gp->name);
        gp->softc = NULL;
        g_wither_geom(gp, ENXIO);
        sc->sc_provider = NULL;
        free(sc, M_GATE);
        return (0);
}

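/*
 * GEOM access method: refuse new openings once the device is being
 * destroyed and enforce the write-only flag for readers.
 */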
static int
g_gate_access(struct g_provider *pp, int dr, int dw, int de)
{
        struct g_gate_softc *sc;

        if (dr <= 0 && dw <= 0 && de <= 0)
                return (0);
        sc = pp->geom->softc;
        if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
                return (ENXIO);
        /* XXX: Hack to allow read-only mounts. */
#if 0
        if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0 && dw > 0)
                return (EPERM);
#endif
        if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0 && dr > 0)
                return (EPERM);
        return (0);
}

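/*
 * Hand a bio to the userland daemon: tag it with a sequence number, put it
 * on the incoming queue and wake up a sleeping G_GATE_CMD_START ioctl.
 * Requests are refused with ENOMEM once the queue limit is exceeded.
 */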
static void
g_gate_queue_io(struct bio *bp)
{
        struct g_gate_softc *sc;

        sc = bp->bio_to->geom->softc;
        if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
                g_io_deliver(bp, ENXIO);
                return;
        }

        mtx_lock(&sc->sc_queue_mtx);

        if (sc->sc_queue_size > 0 && sc->sc_queue_count > sc->sc_queue_size) {
                mtx_unlock(&sc->sc_queue_mtx);
                G_GATE_LOGREQ(1, bp, "Queue full, request canceled.");
                g_io_deliver(bp, ENOMEM);
                return;
        }

        bp->bio_driver1 = (void *)sc->sc_seq;
        sc->sc_seq++;
        sc->sc_queue_count++;

        bioq_insert_tail(&sc->sc_inqueue, bp);
        wakeup(sc);

        mtx_unlock(&sc->sc_queue_mtx);
}

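/*
 * Completion handler for direct reads sent to the read provider.  On
 * success the parent bio is delivered; on error the read is retried via
 * the userland daemon.  The last in-flight read on a consumer that is no
 * longer current schedules its destruction.
 */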
static void
g_gate_done(struct bio *cbp)
{
        struct g_gate_softc *sc;
        struct bio *pbp;
        struct g_consumer *cp;

        cp = cbp->bio_from;
        pbp = cbp->bio_parent;
        if (cbp->bio_error == 0) {
                pbp->bio_completed = cbp->bio_completed;
                g_destroy_bio(cbp);
                pbp->bio_inbed++;
                g_io_deliver(pbp, 0);
        } else {
                /* If direct read failed, pass it through userland daemon. */
                g_destroy_bio(cbp);
                pbp->bio_children--;
                g_gate_queue_io(pbp);
        }

        sc = cp->geom->softc;
        mtx_lock(&sc->sc_read_mtx);
        if (--cp->index == 0 && sc->sc_readcons != cp)
                g_post_event(g_gate_detach, cp, M_NOWAIT, NULL);
        mtx_unlock(&sc->sc_read_mtx);
}

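/*
 * GEOM start method.  Reads are cloned and sent directly to the read
 * provider when one is configured; writes, deletes and flushes are
 * rejected on read-only devices.  Everything else that is supported ends
 * up on the queue for the userland daemon.
 */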
static void
g_gate_start(struct bio *pbp)
{
        struct g_gate_softc *sc;
        struct g_consumer *cp;
        struct bio *cbp;

        sc = pbp->bio_to->geom->softc;
        if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
                g_io_deliver(pbp, ENXIO);
                return;
        }
        G_GATE_LOGREQ(2, pbp, "Request received.");
        switch (pbp->bio_cmd) {
        case BIO_READ:
                if (sc->sc_readcons == NULL)
                        break;
                cbp = g_clone_bio(pbp);
                if (cbp == NULL) {
                        g_io_deliver(pbp, ENOMEM);
                        return;
                }
                mtx_lock(&sc->sc_read_mtx);
                if ((cp = sc->sc_readcons) == NULL) {
                        mtx_unlock(&sc->sc_read_mtx);
                        g_destroy_bio(cbp);
                        pbp->bio_children--;
                        break;
                }
                cp->index++;
                cbp->bio_offset = pbp->bio_offset + sc->sc_readoffset;
                mtx_unlock(&sc->sc_read_mtx);
                cbp->bio_done = g_gate_done;
                g_io_request(cbp, cp);
                return;
        case BIO_DELETE:
        case BIO_WRITE:
        case BIO_FLUSH:
                /* XXX: Hack to allow read-only mounts. */
                if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
                        g_io_deliver(pbp, EPERM);
                        return;
                }
                break;
        case BIO_GETATTR:
        default:
                G_GATE_LOGREQ(2, pbp, "Ignoring request.");
                g_io_deliver(pbp, EOPNOTSUPP);
                return;
        }

        g_gate_queue_io(pbp);
}

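/*
 * Look up a unit by number, or by provider name when unit is
 * G_GATE_NAME_GIVEN, and take a reference on it.  Returns NULL when no
 * matching device exists.
 */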
static struct g_gate_softc *
g_gate_hold(int unit, const char *name)
{
        struct g_gate_softc *sc = NULL;

        mtx_lock(&g_gate_units_lock);
        if (unit >= 0 && unit < g_gate_maxunits)
                sc = g_gate_units[unit];
        else if (unit == G_GATE_NAME_GIVEN) {
                KASSERT(name != NULL, ("name is NULL"));
                for (unit = 0; unit < g_gate_maxunits; unit++) {
                        if (g_gate_units[unit] == NULL)
                                continue;
                        if (strcmp(name,
                            g_gate_units[unit]->sc_provider->name) != 0) {
                                continue;
                        }
                        sc = g_gate_units[unit];
                        break;
                }
        }
        if (sc != NULL)
                sc->sc_ref++;
        mtx_unlock(&g_gate_units_lock);
        return (sc);
}

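/*
 * Drop a reference taken by g_gate_hold() and wake up g_gate_destroy()
 * when the last reference on a dying device goes away.
 */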
static void
g_gate_release(struct g_gate_softc *sc)
{

        g_topology_assert_not();
        mtx_lock(&g_gate_units_lock);
        sc->sc_ref--;
        KASSERT(sc->sc_ref >= 0, ("Negative sc_ref for %s.", sc->sc_name));
        if (sc->sc_ref == 0 && (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
                wakeup(&sc->sc_ref);
        mtx_unlock(&g_gate_units_lock);
}

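/*
 * Pick a unit number: validate an explicitly requested unit, or scan for
 * the first free slot when the requested unit is negative.  Returns -1
 * and sets *errorp on failure.
 */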
static int
g_gate_getunit(int unit, int *errorp)
{

        mtx_assert(&g_gate_units_lock, MA_OWNED);
        if (unit >= 0) {
                if (unit >= g_gate_maxunits)
                        *errorp = EINVAL;
                else if (g_gate_units[unit] == NULL)
                        return (unit);
                else
                        *errorp = EEXIST;
        } else {
                for (unit = 0; unit < g_gate_maxunits; unit++) {
                        if (g_gate_units[unit] == NULL)
                                return (unit);
                }
                *errorp = ENFILE;
        }
        return (-1);
}

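/*
 * Watchdog callout: fail with EIO any request that has been sitting on
 * the queues for more than five seconds, then re-arm itself every
 * sc_timeout seconds unless the device is being destroyed.
 */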
static void
g_gate_guard(void *arg)
{
        struct bio_queue_head queue;
        struct g_gate_softc *sc;
        struct bintime curtime;
        struct bio *bp, *bp2;

        sc = arg;
        binuptime(&curtime);
        g_gate_hold(sc->sc_unit, NULL);
        bioq_init(&queue);
        mtx_lock(&sc->sc_queue_mtx);
        TAILQ_FOREACH_SAFE(bp, &sc->sc_inqueue.queue, bio_queue, bp2) {
                if (curtime.sec - bp->bio_t0.sec < 5)
                        continue;
                bioq_remove(&sc->sc_inqueue, bp);
                sc->sc_queue_count--;
                bioq_insert_tail(&queue, bp);
        }
        TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, bp2) {
                if (curtime.sec - bp->bio_t0.sec < 5)
                        continue;
                bioq_remove(&sc->sc_outqueue, bp);
                sc->sc_queue_count--;
                bioq_insert_tail(&queue, bp);
        }
        mtx_unlock(&sc->sc_queue_mtx);
        while ((bp = bioq_takefirst(&queue)) != NULL) {
                G_GATE_LOGREQ(1, bp, "Request timeout.");
                g_io_deliver(bp, EIO);
        }
        if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0) {
                callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
                    g_gate_guard, sc);
        }
        g_gate_release(sc);
}

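/*
 * GEOM orphan method for the read consumer: forget about the consumer and
 * detach it once no reads are in flight.
 */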
static void
g_gate_orphan(struct g_consumer *cp)
{
        struct g_gate_softc *sc;
        struct g_geom *gp;
        int done;

        g_topology_assert();
        gp = cp->geom;
        sc = gp->softc;
        mtx_lock(&sc->sc_read_mtx);
        if (sc->sc_readcons == cp)
                sc->sc_readcons = NULL;
        done = (cp->index == 0);
        mtx_unlock(&sc->sc_read_mtx);
        if (done)
                g_gate_detach(cp, 0);
}

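/*
 * Export device state as XML into the GEOM configuration tree.
 */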
static void
g_gate_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
        struct g_gate_softc *sc;

        sc = gp->softc;
        if (sc == NULL || pp != NULL || cp != NULL)
                return;
        sc = g_gate_hold(sc->sc_unit, NULL);
        if (sc == NULL)
                return;
        if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
                sbuf_printf(sb, "%s<access>%s</access>\n", indent, "read-only");
        } else if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0) {
                sbuf_printf(sb, "%s<access>%s</access>\n", indent,
                    "write-only");
        } else {
                sbuf_printf(sb, "%s<access>%s</access>\n", indent,
                    "read-write");
        }
        if (sc->sc_readcons != NULL) {
                sbuf_printf(sb, "%s<read_offset>%jd</read_offset>\n",
                    indent, (intmax_t)sc->sc_readoffset);
                sbuf_printf(sb, "%s<read_provider>%s</read_provider>\n",
                    indent, sc->sc_readcons->provider->name);
        }
        sbuf_printf(sb, "%s<timeout>%u</timeout>\n", indent, sc->sc_timeout);
        sbuf_printf(sb, "%s<info>%s</info>\n", indent, sc->sc_info);
        sbuf_printf(sb, "%s<queue_count>%u</queue_count>\n", indent,
            sc->sc_queue_count);
        sbuf_printf(sb, "%s<queue_size>%u</queue_size>\n", indent,
            sc->sc_queue_size);
        sbuf_printf(sb, "%s<ref>%u</ref>\n", indent, sc->sc_ref);
        sbuf_printf(sb, "%s<unit>%d</unit>\n", indent, sc->sc_unit);
        g_topology_unlock();
        g_gate_release(sc);
        g_topology_lock();
}

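/*
 * Create a new gate device from a G_GATE_CMD_CREATE request: validate the
 * geometry and flags, allocate a unit, optionally attach to a backing
 * provider for direct reads, and publish the new provider.
 */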
static int
g_gate_create(struct g_gate_ctl_create *ggio)
{
        struct g_gate_softc *sc;
        struct g_geom *gp;
        struct g_provider *pp, *ropp;
        struct g_consumer *cp;
        char name[NAME_MAX];
        int error = 0, unit;

        if (ggio->gctl_mediasize <= 0) {
                G_GATE_DEBUG(1, "Invalid media size.");
                return (EINVAL);
        }
        if (ggio->gctl_sectorsize <= 0) {
                G_GATE_DEBUG(1, "Invalid sector size.");
                return (EINVAL);
        }
        if (!powerof2(ggio->gctl_sectorsize)) {
                G_GATE_DEBUG(1, "Invalid sector size.");
                return (EINVAL);
        }
        if ((ggio->gctl_mediasize % ggio->gctl_sectorsize) != 0) {
                G_GATE_DEBUG(1, "Invalid media size.");
                return (EINVAL);
        }
        if ((ggio->gctl_flags & G_GATE_FLAG_READONLY) != 0 &&
            (ggio->gctl_flags & G_GATE_FLAG_WRITEONLY) != 0) {
                G_GATE_DEBUG(1, "Invalid flags.");
                return (EINVAL);
        }
        if (ggio->gctl_unit != G_GATE_UNIT_AUTO &&
            ggio->gctl_unit != G_GATE_NAME_GIVEN &&
            ggio->gctl_unit < 0) {
                G_GATE_DEBUG(1, "Invalid unit number.");
                return (EINVAL);
        }
        if (ggio->gctl_unit == G_GATE_NAME_GIVEN &&
            ggio->gctl_name[0] == '\0') {
                G_GATE_DEBUG(1, "No device name.");
                return (EINVAL);
        }

        sc = malloc(sizeof(*sc), M_GATE, M_WAITOK | M_ZERO);
        sc->sc_flags = (ggio->gctl_flags & G_GATE_USERFLAGS);
        strlcpy(sc->sc_info, ggio->gctl_info, sizeof(sc->sc_info));
        sc->sc_seq = 1;
        bioq_init(&sc->sc_inqueue);
        bioq_init(&sc->sc_outqueue);
        mtx_init(&sc->sc_queue_mtx, "gg:queue", NULL, MTX_DEF);
        mtx_init(&sc->sc_read_mtx, "gg:read", NULL, MTX_DEF);
        sc->sc_queue_count = 0;
        sc->sc_queue_size = ggio->gctl_maxcount;
        if (sc->sc_queue_size > G_GATE_MAX_QUEUE_SIZE)
                sc->sc_queue_size = G_GATE_MAX_QUEUE_SIZE;
        sc->sc_timeout = ggio->gctl_timeout;
        callout_init(&sc->sc_callout, 1);

        mtx_lock(&g_gate_units_lock);
        sc->sc_unit = g_gate_getunit(ggio->gctl_unit, &error);
        if (sc->sc_unit < 0)
                goto fail1;
        if (ggio->gctl_unit == G_GATE_NAME_GIVEN)
                snprintf(name, sizeof(name), "%s", ggio->gctl_name);
        else {
                snprintf(name, sizeof(name), "%s%d", G_GATE_PROVIDER_NAME,
                    sc->sc_unit);
        }
        /* Check for name collision. */
        for (unit = 0; unit < g_gate_maxunits; unit++) {
                if (g_gate_units[unit] == NULL)
                        continue;
                if (strcmp(name, g_gate_units[unit]->sc_name) != 0)
                        continue;
                error = EEXIST;
                goto fail1;
        }
        sc->sc_name = name;
        g_gate_units[sc->sc_unit] = sc;
        g_gate_nunits++;
        mtx_unlock(&g_gate_units_lock);

        g_topology_lock();

        if (ggio->gctl_readprov[0] == '\0') {
                ropp = NULL;
        } else {
                ropp = g_provider_by_name(ggio->gctl_readprov);
                if (ropp == NULL) {
                        G_GATE_DEBUG(1, "Provider %s doesn't exist.",
                            ggio->gctl_readprov);
                        error = EINVAL;
                        goto fail2;
                }
                if ((ggio->gctl_readoffset % ggio->gctl_sectorsize) != 0) {
                        G_GATE_DEBUG(1, "Invalid read offset.");
                        error = EINVAL;
                        goto fail2;
                }
                if (ggio->gctl_mediasize + ggio->gctl_readoffset >
                    ropp->mediasize) {
                        G_GATE_DEBUG(1, "Invalid read offset or media size.");
                        error = EINVAL;
                        goto fail2;
                }
        }

        gp = g_new_geomf(&g_gate_class, "%s", name);
        gp->start = g_gate_start;
        gp->access = g_gate_access;
        gp->orphan = g_gate_orphan;
        gp->dumpconf = g_gate_dumpconf;
        gp->softc = sc;

        if (ropp != NULL) {
                cp = g_new_consumer(gp);
                cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
                error = g_attach(cp, ropp);
                if (error != 0) {
                        G_GATE_DEBUG(1, "Unable to attach to %s.", ropp->name);
                        goto fail3;
                }
                error = g_access(cp, 1, 0, 0);
                if (error != 0) {
                        G_GATE_DEBUG(1, "Unable to access %s.", ropp->name);
                        g_detach(cp);
                        goto fail3;
                }
                sc->sc_readcons = cp;
                sc->sc_readoffset = ggio->gctl_readoffset;
        }

        ggio->gctl_unit = sc->sc_unit;

        pp = g_new_providerf(gp, "%s", name);
        pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
        pp->mediasize = ggio->gctl_mediasize;
        pp->sectorsize = ggio->gctl_sectorsize;
        sc->sc_provider = pp;
        g_error_provider(pp, 0);

        g_topology_unlock();
        mtx_lock(&g_gate_units_lock);
        sc->sc_name = sc->sc_provider->name;
        mtx_unlock(&g_gate_units_lock);
        G_GATE_DEBUG(1, "Device %s created.", gp->name);

        if (sc->sc_timeout > 0) {
                callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
                    g_gate_guard, sc);
        }
        return (0);
fail3:
        g_destroy_consumer(cp);
        g_destroy_geom(gp);
fail2:
        g_topology_unlock();
        mtx_lock(&g_gate_units_lock);
        g_gate_units[sc->sc_unit] = NULL;
        KASSERT(g_gate_nunits > 0, ("negative g_gate_nunits?"));
        g_gate_nunits--;
fail1:
        mtx_unlock(&g_gate_units_lock);
        mtx_destroy(&sc->sc_queue_mtx);
        mtx_destroy(&sc->sc_read_mtx);
        free(sc, M_GATE);
        return (error);
}

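/*
 * Handle G_GATE_CMD_MODIFY: resize the provider, update the info string,
 * or swap the read provider and read offset on a live device.
 */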
static int
g_gate_modify(struct g_gate_softc *sc, struct g_gate_ctl_modify *ggio)
{
        struct g_provider *pp;
        struct g_consumer *cp;
        int done, error;

        if ((ggio->gctl_modify & GG_MODIFY_MEDIASIZE) != 0) {
                if (ggio->gctl_mediasize <= 0) {
                        G_GATE_DEBUG(1, "Invalid media size.");
                        return (EINVAL);
                }
                pp = sc->sc_provider;
                if ((ggio->gctl_mediasize % pp->sectorsize) != 0) {
                        G_GATE_DEBUG(1, "Invalid media size.");
                        return (EINVAL);
                }
                g_resize_provider(pp, ggio->gctl_mediasize);
                return (0);
        }

        if ((ggio->gctl_modify & GG_MODIFY_INFO) != 0)
                (void)strlcpy(sc->sc_info, ggio->gctl_info, sizeof(sc->sc_info));

        cp = NULL;

        if ((ggio->gctl_modify & GG_MODIFY_READPROV) != 0) {
                g_topology_lock();
                mtx_lock(&sc->sc_read_mtx);
                if ((cp = sc->sc_readcons) != NULL) {
                        sc->sc_readcons = NULL;
                        done = (cp->index == 0);
                        mtx_unlock(&sc->sc_read_mtx);
                        if (done)
                                g_gate_detach(cp, 0);
                } else
                        mtx_unlock(&sc->sc_read_mtx);
                if (ggio->gctl_readprov[0] != '\0') {
                        pp = g_provider_by_name(ggio->gctl_readprov);
                        if (pp == NULL) {
                                g_topology_unlock();
                                G_GATE_DEBUG(1, "Provider %s doesn't exist.",
                                    ggio->gctl_readprov);
                                return (EINVAL);
                        }
                        cp = g_new_consumer(sc->sc_provider->geom);
                        cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
                        error = g_attach(cp, pp);
                        if (error != 0) {
                                G_GATE_DEBUG(1, "Unable to attach to %s.",
                                    pp->name);
                        } else {
                                error = g_access(cp, 1, 0, 0);
                                if (error != 0) {
                                        G_GATE_DEBUG(1, "Unable to access %s.",
                                            pp->name);
                                        g_detach(cp);
                                }
                        }
                        if (error != 0) {
                                g_destroy_consumer(cp);
                                g_topology_unlock();
                                return (error);
                        }
                }
        } else {
                cp = sc->sc_readcons;
        }

        if ((ggio->gctl_modify & GG_MODIFY_READOFFSET) != 0) {
                if (cp == NULL) {
                        G_GATE_DEBUG(1, "No read provider.");
                        return (EINVAL);
                }
                pp = sc->sc_provider;
                if ((ggio->gctl_readoffset % pp->sectorsize) != 0) {
                        G_GATE_DEBUG(1, "Invalid read offset.");
                        return (EINVAL);
                }
                if (pp->mediasize + ggio->gctl_readoffset >
                    cp->provider->mediasize) {
                        G_GATE_DEBUG(1, "Invalid read offset or media size.");
                        return (EINVAL);
                }
                sc->sc_readoffset = ggio->gctl_readoffset;
        }

        if ((ggio->gctl_modify & GG_MODIFY_READPROV) != 0) {
                sc->sc_readcons = cp;
                g_topology_unlock();
        }

        return (0);
}

#define G_GATE_CHECK_VERSION(ggio) do {                                 \
        if ((ggio)->gctl_version != G_GATE_VERSION) {                   \
                printf("Version mismatch %d != %d.\n",                  \
                    (ggio)->gctl_version, G_GATE_VERSION);              \
                return (EINVAL);                                        \
        }                                                               \
} while (0)

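/*
 * Control device ioctl handler; this is how the userland ggate daemon
 * creates, modifies and destroys devices and how it fetches and completes
 * I/O requests.
 */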
static int
g_gate_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
{
        struct g_gate_softc *sc;
        struct bio *bp;
        int error = 0;

        G_GATE_DEBUG(4, "ioctl(%s, %lx, %p, %x, %p)", devtoname(dev), cmd,
            addr, flags, td);

        switch (cmd) {
        case G_GATE_CMD_CREATE:
            {
                struct g_gate_ctl_create *ggio = (void *)addr;

                G_GATE_CHECK_VERSION(ggio);
                error = g_gate_create(ggio);
                /*
                 * Reset the TDP_GEOM flag.  There are certainly pending
                 * events, because we just created a new provider and other
                 * classes want to taste it, but we cannot answer I/O
                 * requests until this ioctl returns.
                 */
                td->td_pflags &= ~TDP_GEOM;
                return (error);
            }
        case G_GATE_CMD_MODIFY:
            {
                struct g_gate_ctl_modify *ggio = (void *)addr;

                G_GATE_CHECK_VERSION(ggio);
                sc = g_gate_hold(ggio->gctl_unit, NULL);
                if (sc == NULL)
                        return (ENXIO);
                error = g_gate_modify(sc, ggio);
                g_gate_release(sc);
                return (error);
            }
        case G_GATE_CMD_DESTROY:
            {
                struct g_gate_ctl_destroy *ggio = (void *)addr;

                G_GATE_CHECK_VERSION(ggio);
                sc = g_gate_hold(ggio->gctl_unit, ggio->gctl_name);
                if (sc == NULL)
                        return (ENXIO);
                g_topology_lock();
                mtx_lock(&g_gate_units_lock);
                error = g_gate_destroy(sc, ggio->gctl_force);
                g_topology_unlock();
                if (error != 0)
                        g_gate_release(sc);
                return (error);
            }
        case G_GATE_CMD_CANCEL:
            {
                struct g_gate_ctl_cancel *ggio = (void *)addr;
                struct bio *tbp, *lbp;

                G_GATE_CHECK_VERSION(ggio);
                sc = g_gate_hold(ggio->gctl_unit, ggio->gctl_name);
                if (sc == NULL)
                        return (ENXIO);
                lbp = NULL;
                mtx_lock(&sc->sc_queue_mtx);
                TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, tbp) {
                        if (ggio->gctl_seq == 0 ||
                            ggio->gctl_seq == (uintptr_t)bp->bio_driver1) {
                                G_GATE_LOGREQ(1, bp, "Request canceled.");
                                bioq_remove(&sc->sc_outqueue, bp);
                                /*
                                 * Be sure to put requests back onto incoming
                                 * queue in the proper order.
                                 */
                                if (lbp == NULL)
                                        bioq_insert_head(&sc->sc_inqueue, bp);
                                else {
                                        TAILQ_INSERT_AFTER(&sc->sc_inqueue.queue,
                                            lbp, bp, bio_queue);
                                }
                                lbp = bp;
                                /*
                                 * If only one request was canceled, leave now.
                                 */
                                if (ggio->gctl_seq != 0)
                                        break;
                        }
                }
                if (ggio->gctl_unit == G_GATE_NAME_GIVEN)
                        ggio->gctl_unit = sc->sc_unit;
                mtx_unlock(&sc->sc_queue_mtx);
                g_gate_release(sc);
                return (error);
            }
        case G_GATE_CMD_START:
            {
                struct g_gate_ctl_io *ggio = (void *)addr;

                G_GATE_CHECK_VERSION(ggio);
                sc = g_gate_hold(ggio->gctl_unit, NULL);
                if (sc == NULL)
                        return (ENXIO);
                error = 0;
                for (;;) {
                        mtx_lock(&sc->sc_queue_mtx);
                        bp = bioq_first(&sc->sc_inqueue);
                        if (bp != NULL)
                                break;
                        if ((sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
                                ggio->gctl_error = ECANCELED;
                                mtx_unlock(&sc->sc_queue_mtx);
                                goto start_end;
                        }
                        if (msleep(sc, &sc->sc_queue_mtx,
                            PPAUSE | PDROP | PCATCH, "ggwait", 0) != 0) {
                                ggio->gctl_error = ECANCELED;
                                goto start_end;
                        }
                }
                ggio->gctl_cmd = bp->bio_cmd;
                if (bp->bio_cmd == BIO_WRITE &&
                    bp->bio_length > ggio->gctl_length) {
                        mtx_unlock(&sc->sc_queue_mtx);
                        ggio->gctl_length = bp->bio_length;
                        ggio->gctl_error = ENOMEM;
                        goto start_end;
                }
                bioq_remove(&sc->sc_inqueue, bp);
                bioq_insert_tail(&sc->sc_outqueue, bp);
                mtx_unlock(&sc->sc_queue_mtx);

                ggio->gctl_seq = (uintptr_t)bp->bio_driver1;
                ggio->gctl_offset = bp->bio_offset;
                ggio->gctl_length = bp->bio_length;

                switch (bp->bio_cmd) {
                case BIO_READ:
                case BIO_DELETE:
                case BIO_FLUSH:
                        break;
                case BIO_WRITE:
                        error = copyout(bp->bio_data, ggio->gctl_data,
                            bp->bio_length);
                        if (error != 0) {
                                mtx_lock(&sc->sc_queue_mtx);
                                bioq_remove(&sc->sc_outqueue, bp);
                                bioq_insert_head(&sc->sc_inqueue, bp);
                                mtx_unlock(&sc->sc_queue_mtx);
                                goto start_end;
                        }
                        break;
                }
start_end:
                g_gate_release(sc);
                return (error);
            }
        case G_GATE_CMD_DONE:
            {
                struct g_gate_ctl_io *ggio = (void *)addr;

                G_GATE_CHECK_VERSION(ggio);
                sc = g_gate_hold(ggio->gctl_unit, NULL);
                if (sc == NULL)
                        return (ENOENT);
                error = 0;
                mtx_lock(&sc->sc_queue_mtx);
                TAILQ_FOREACH(bp, &sc->sc_outqueue.queue, bio_queue) {
                        if (ggio->gctl_seq == (uintptr_t)bp->bio_driver1)
                                break;
                }
                if (bp != NULL) {
                        bioq_remove(&sc->sc_outqueue, bp);
                        sc->sc_queue_count--;
                }
                mtx_unlock(&sc->sc_queue_mtx);
                if (bp == NULL) {
                        /*
                         * Request was probably canceled.
                         */
                        goto done_end;
                }
                if (ggio->gctl_error == EAGAIN) {
                        bp->bio_error = 0;
                        G_GATE_LOGREQ(1, bp, "Request desisted.");
                        mtx_lock(&sc->sc_queue_mtx);
                        sc->sc_queue_count++;
                        bioq_insert_head(&sc->sc_inqueue, bp);
                        wakeup(sc);
                        mtx_unlock(&sc->sc_queue_mtx);
                } else {
                        bp->bio_error = ggio->gctl_error;
                        if (bp->bio_error == 0) {
                                bp->bio_completed = bp->bio_length;
                                switch (bp->bio_cmd) {
                                case BIO_READ:
                                        error = copyin(ggio->gctl_data,
                                            bp->bio_data, bp->bio_length);
                                        if (error != 0)
                                                bp->bio_error = error;
                                        break;
                                case BIO_DELETE:
                                case BIO_WRITE:
                                case BIO_FLUSH:
                                        break;
                                }
                        }
                        G_GATE_LOGREQ(2, bp, "Request done.");
                        g_io_deliver(bp, bp->bio_error);
                }
done_end:
                g_gate_release(sc);
                return (error);
            }
        }
        return (ENOIOCTL);
}

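/*
 * Create the control device node used by the ggate utilities.
 */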
static void
g_gate_device(void)
{

        status_dev = make_dev(&g_gate_cdevsw, 0x0, UID_ROOT, GID_WHEEL, 0600,
            G_GATE_CTL_NAME);
}

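/*
 * Module event handler: set up the unit table and control device on load,
 * and refuse to unload while any ggate device still exists.
 */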
static int
g_gate_modevent(module_t mod, int type, void *data)
{
        int error = 0;

        switch (type) {
        case MOD_LOAD:
                mtx_init(&g_gate_units_lock, "gg_units_lock", NULL, MTX_DEF);
                g_gate_units = malloc(g_gate_maxunits * sizeof(g_gate_units[0]),
                    M_GATE, M_WAITOK | M_ZERO);
                g_gate_nunits = 0;
                g_gate_device();
                break;
        case MOD_UNLOAD:
                mtx_lock(&g_gate_units_lock);
                if (g_gate_nunits > 0) {
                        mtx_unlock(&g_gate_units_lock);
                        error = EBUSY;
                        break;
                }
                mtx_unlock(&g_gate_units_lock);
                mtx_destroy(&g_gate_units_lock);
                if (status_dev != NULL)
                        destroy_dev(status_dev);
                free(g_gate_units, M_GATE);
                break;
        default:
                return (EOPNOTSUPP);
        }

        return (error);
}

static moduledata_t g_gate_module = {
        G_GATE_MOD_NAME,
        g_gate_modevent,
        NULL
};
DECLARE_MODULE(geom_gate, g_gate_module, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
DECLARE_GEOM_CLASS(g_gate_class, g_gate);
MODULE_VERSION(geom_gate, 0);