sys/geom/gate/g_gate.c
/*-
 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * Copyright (c) 2009-2010 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Pawel Jakub Dawidek
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/fcntl.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/limits.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/signalvar.h>
#include <sys/time.h>
#include <machine/atomic.h>

#include <geom/geom.h>
#include <geom/gate/g_gate.h>

FEATURE(geom_gate, "GEOM Gate module");

static MALLOC_DEFINE(M_GATE, "gg_data", "GEOM Gate Data");

SYSCTL_DECL(_kern_geom);
static SYSCTL_NODE(_kern_geom, OID_AUTO, gate, CTLFLAG_RW, 0,
    "GEOM_GATE configuration");
static int g_gate_debug = 0;
SYSCTL_INT(_kern_geom_gate, OID_AUTO, debug, CTLFLAG_RWTUN, &g_gate_debug, 0,
    "Debug level");
static u_int g_gate_maxunits = 256;
SYSCTL_UINT(_kern_geom_gate, OID_AUTO, maxunits, CTLFLAG_RDTUN,
    &g_gate_maxunits, 0, "Maximum number of ggate devices");

struct g_class g_gate_class = {
	.name = G_GATE_CLASS_NAME,
	.version = G_VERSION,
};

static struct cdev *status_dev;
static d_ioctl_t g_gate_ioctl;
static struct cdevsw g_gate_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	g_gate_ioctl,
	.d_name =	G_GATE_CTL_NAME
};

static struct g_gate_softc **g_gate_units;
static u_int g_gate_nunits;
static struct mtx g_gate_units_lock;

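/*
 * Detach and destroy the read consumer, releasing our read access
 * first.  Runs with the topology lock held, either called directly or
 * scheduled as a GEOM event from g_gate_done().
 */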
static void
g_gate_detach(void *arg, int flags __unused)
{
	struct g_consumer *cp = arg;

	g_topology_assert();
	G_GATE_DEBUG(1, "Destroying read consumer on provider %s orphan.",
	    cp->provider->name);
	(void)g_access(cp, -1, 0, 0);
	g_detach(cp);
	g_destroy_consumer(cp);
}

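/*
 * Tear a device down: mark it for destruction, cancel all queued
 * requests, wait for the remaining references to be dropped and free
 * all resources.  Called with the topology and units locks held; the
 * units lock is always dropped before returning.
 */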
static int
g_gate_destroy(struct g_gate_softc *sc, boolean_t force)
{
	struct bio_queue_head queue;
	struct g_provider *pp;
	struct g_consumer *cp;
	struct g_geom *gp;
	struct bio *bp;

	g_topology_assert();
	mtx_assert(&g_gate_units_lock, MA_OWNED);
	pp = sc->sc_provider;
	if (!force && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		mtx_unlock(&g_gate_units_lock);
		return (EBUSY);
	}
	mtx_unlock(&g_gate_units_lock);
	mtx_lock(&sc->sc_queue_mtx);
	if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0)
		sc->sc_flags |= G_GATE_FLAG_DESTROY;
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
	gp = pp->geom;
	g_wither_provider(pp, ENXIO);
	callout_drain(&sc->sc_callout);
	bioq_init(&queue);
	mtx_lock(&sc->sc_queue_mtx);
	while ((bp = bioq_takefirst(&sc->sc_inqueue)) != NULL) {
		sc->sc_queue_count--;
		bioq_insert_tail(&queue, bp);
	}
	while ((bp = bioq_takefirst(&sc->sc_outqueue)) != NULL) {
		sc->sc_queue_count--;
		bioq_insert_tail(&queue, bp);
	}
	mtx_unlock(&sc->sc_queue_mtx);
	g_topology_unlock();
	while ((bp = bioq_takefirst(&queue)) != NULL) {
		G_GATE_LOGREQ(1, bp, "Request canceled.");
		g_io_deliver(bp, ENXIO);
	}
	mtx_lock(&g_gate_units_lock);
	/* One reference is ours. */
	sc->sc_ref--;
	while (sc->sc_ref > 0)
		msleep(&sc->sc_ref, &g_gate_units_lock, 0, "gg:destroy", 0);
	g_gate_units[sc->sc_unit] = NULL;
	KASSERT(g_gate_nunits > 0, ("negative g_gate_nunits?"));
	g_gate_nunits--;
	mtx_unlock(&g_gate_units_lock);
	mtx_destroy(&sc->sc_queue_mtx);
	mtx_destroy(&sc->sc_read_mtx);
	g_topology_lock();
	if ((cp = sc->sc_readcons) != NULL) {
		sc->sc_readcons = NULL;
		(void)g_access(cp, -1, 0, 0);
		g_detach(cp);
		g_destroy_consumer(cp);
	}
	G_GATE_DEBUG(1, "Device %s destroyed.", gp->name);
	gp->softc = NULL;
	g_wither_geom(gp, ENXIO);
	sc->sc_provider = NULL;
	free(sc, M_GATE);
	return (0);
}

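/*
 * Access method.  New openings are refused once the device is marked
 * for destruction; the write-only flag is enforced here, while the
 * read-only check is disabled (see the hack below) so that read-only
 * mounts keep working.
 */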
static int
g_gate_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_gate_softc *sc;

	if (dr <= 0 && dw <= 0 && de <= 0)
		return (0);
	sc = pp->geom->softc;
	if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
		return (ENXIO);
	/* XXX: Hack to allow read-only mounts. */
#if 0
	if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0 && dw > 0)
		return (EPERM);
#endif
	if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0 && dr > 0)
		return (EPERM);
	return (0);
}

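/*
 * Queue a request on the incoming queue for the userland daemon to
 * pick up, tagging it with a sequence number, unless the queue is
 * already full, in which case the request is canceled with ENOMEM.
 */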
static void
g_gate_queue_io(struct bio *bp)
{
	struct g_gate_softc *sc;

	sc = bp->bio_to->geom->softc;
	if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
		g_io_deliver(bp, ENXIO);
		return;
	}

	mtx_lock(&sc->sc_queue_mtx);

	if (sc->sc_queue_size > 0 && sc->sc_queue_count > sc->sc_queue_size) {
		mtx_unlock(&sc->sc_queue_mtx);
		G_GATE_LOGREQ(1, bp, "Queue full, request canceled.");
		g_io_deliver(bp, ENOMEM);
		return;
	}

	bp->bio_driver1 = (void *)sc->sc_seq;
	sc->sc_seq++;
	sc->sc_queue_count++;

	bioq_insert_tail(&sc->sc_inqueue, bp);
	wakeup(sc);

	mtx_unlock(&sc->sc_queue_mtx);
}

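/*
 * Completion callback for reads sent directly to the read provider.
 * On success the parent request is completed; on failure it is handed
 * to the userland daemon instead.
 */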
static void
g_gate_done(struct bio *cbp)
{
	struct g_gate_softc *sc;
	struct bio *pbp;
	struct g_consumer *cp;

	cp = cbp->bio_from;
	pbp = cbp->bio_parent;
	if (cbp->bio_error == 0) {
		pbp->bio_completed = cbp->bio_completed;
		g_destroy_bio(cbp);
		pbp->bio_inbed++;
		g_io_deliver(pbp, 0);
	} else {
		/* If direct read failed, pass it through userland daemon. */
		g_destroy_bio(cbp);
		pbp->bio_children--;
		g_gate_queue_io(pbp);
	}

	sc = cp->geom->softc;
	mtx_lock(&sc->sc_read_mtx);
	if (--cp->index == 0 && sc->sc_readcons != cp)
		g_post_event(g_gate_detach, cp, M_NOWAIT, NULL);
	mtx_unlock(&sc->sc_read_mtx);
}

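/*
 * Start method.  Reads may be satisfied directly from the optional
 * read provider; everything else, and reads when no read provider is
 * configured, is queued for the userland daemon.
 */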
static void
g_gate_start(struct bio *pbp)
{
	struct g_gate_softc *sc;
	struct g_consumer *cp;
	struct bio *cbp;

	sc = pbp->bio_to->geom->softc;
	if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
		g_io_deliver(pbp, ENXIO);
		return;
	}
	G_GATE_LOGREQ(2, pbp, "Request received.");
	switch (pbp->bio_cmd) {
	case BIO_READ:
		if (sc->sc_readcons == NULL)
			break;
		cbp = g_clone_bio(pbp);
		if (cbp == NULL) {
			g_io_deliver(pbp, ENOMEM);
			return;
		}
		mtx_lock(&sc->sc_read_mtx);
		if ((cp = sc->sc_readcons) == NULL) {
			mtx_unlock(&sc->sc_read_mtx);
			g_destroy_bio(cbp);
			pbp->bio_children--;
			break;
		}
		cp->index++;
		cbp->bio_offset = pbp->bio_offset + sc->sc_readoffset;
		mtx_unlock(&sc->sc_read_mtx);
		cbp->bio_done = g_gate_done;
		g_io_request(cbp, cp);
		return;
	case BIO_DELETE:
	case BIO_WRITE:
	case BIO_FLUSH:
		/* XXX: Hack to allow read-only mounts. */
		if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
			g_io_deliver(pbp, EPERM);
			return;
		}
		break;
	case BIO_GETATTR:
	default:
		G_GATE_LOGREQ(2, pbp, "Ignoring request.");
		g_io_deliver(pbp, EOPNOTSUPP);
		return;
	}

	g_gate_queue_io(pbp);
}

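/*
 * Find a device by unit number, or by provider name when unit is
 * G_GATE_NAME_GIVEN, and acquire a reference on it.
 */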
static struct g_gate_softc *
g_gate_hold(int unit, const char *name)
{
	struct g_gate_softc *sc = NULL;

	mtx_lock(&g_gate_units_lock);
	if (unit >= 0 && unit < g_gate_maxunits)
		sc = g_gate_units[unit];
	else if (unit == G_GATE_NAME_GIVEN) {
		KASSERT(name != NULL, ("name is NULL"));
		for (unit = 0; unit < g_gate_maxunits; unit++) {
			if (g_gate_units[unit] == NULL)
				continue;
			if (strcmp(name,
			    g_gate_units[unit]->sc_provider->name) != 0) {
				continue;
			}
			sc = g_gate_units[unit];
			break;
		}
	}
	if (sc != NULL)
		sc->sc_ref++;
	mtx_unlock(&g_gate_units_lock);
	return (sc);
}

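/*
 * Drop a reference and, if the device is being destroyed, wake up
 * g_gate_destroy() when the last reference goes away.
 */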
static void
g_gate_release(struct g_gate_softc *sc)
{

	g_topology_assert_not();
	mtx_lock(&g_gate_units_lock);
	sc->sc_ref--;
	KASSERT(sc->sc_ref >= 0, ("Negative sc_ref for %s.", sc->sc_name));
	if (sc->sc_ref == 0 && (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
		wakeup(&sc->sc_ref);
	mtx_unlock(&g_gate_units_lock);
}

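/*
 * Return the requested unit number if it is available, or the first
 * free unit for a negative argument.  Returns -1 and sets *errorp on
 * failure.
 */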
static int
g_gate_getunit(int unit, int *errorp)
{

	mtx_assert(&g_gate_units_lock, MA_OWNED);
	if (unit >= 0) {
		if (unit >= g_gate_maxunits)
			*errorp = EINVAL;
		else if (g_gate_units[unit] == NULL)
			return (unit);
		else
			*errorp = EEXIST;
	} else {
		for (unit = 0; unit < g_gate_maxunits; unit++) {
			if (g_gate_units[unit] == NULL)
				return (unit);
		}
		*errorp = ENFILE;
	}
	return (-1);
}

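/*
 * Watchdog callout, rescheduled every sc_timeout seconds: cancel with
 * EIO any request that has been sitting on a queue for too long.
 */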
static void
g_gate_guard(void *arg)
{
	struct bio_queue_head queue;
	struct g_gate_softc *sc;
	struct bintime curtime;
	struct bio *bp, *bp2;

	sc = arg;
	binuptime(&curtime);
	g_gate_hold(sc->sc_unit, NULL);
	bioq_init(&queue);
	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_FOREACH_SAFE(bp, &sc->sc_inqueue.queue, bio_queue, bp2) {
		if (curtime.sec - bp->bio_t0.sec < 5)
			continue;
		bioq_remove(&sc->sc_inqueue, bp);
		sc->sc_queue_count--;
		bioq_insert_tail(&queue, bp);
	}
	TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, bp2) {
		if (curtime.sec - bp->bio_t0.sec < 5)
			continue;
		bioq_remove(&sc->sc_outqueue, bp);
		sc->sc_queue_count--;
		bioq_insert_tail(&queue, bp);
	}
	mtx_unlock(&sc->sc_queue_mtx);
	while ((bp = bioq_takefirst(&queue)) != NULL) {
		G_GATE_LOGREQ(1, bp, "Request timeout.");
		g_io_deliver(bp, EIO);
	}
	if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0) {
		callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
		    g_gate_guard, sc);
	}
	g_gate_release(sc);
}

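/*
 * Orphan method for the read consumer.  Forget the consumer right
 * away; it is destroyed here only if no reads are in flight through
 * it, otherwise g_gate_done() destroys it when the last one completes.
 */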
static void
g_gate_orphan(struct g_consumer *cp)
{
	struct g_gate_softc *sc;
	struct g_geom *gp;
	int done;

	g_topology_assert();
	gp = cp->geom;
	sc = gp->softc;
	mtx_lock(&sc->sc_read_mtx);
	if (sc->sc_readcons == cp)
		sc->sc_readcons = NULL;
	done = (cp->index == 0);
	mtx_unlock(&sc->sc_read_mtx);
	if (done)
		g_gate_detach(cp, 0);
}

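/*
 * Dump device state (access mode, read provider and offset, timeout,
 * queue usage) as XML for the GEOM configuration output.
 */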
static void
g_gate_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_gate_softc *sc;

	sc = gp->softc;
	if (sc == NULL || pp != NULL || cp != NULL)
		return;
	sc = g_gate_hold(sc->sc_unit, NULL);
	if (sc == NULL)
		return;
	if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent, "read-only");
	} else if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0) {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent,
		    "write-only");
	} else {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent,
		    "read-write");
	}
	if (sc->sc_readcons != NULL) {
		sbuf_printf(sb, "%s<read_offset>%jd</read_offset>\n",
		    indent, (intmax_t)sc->sc_readoffset);
		sbuf_printf(sb, "%s<read_provider>%s</read_provider>\n",
		    indent, sc->sc_readcons->provider->name);
	}
	sbuf_printf(sb, "%s<timeout>%u</timeout>\n", indent, sc->sc_timeout);
	sbuf_printf(sb, "%s<info>%s</info>\n", indent, sc->sc_info);
	sbuf_printf(sb, "%s<queue_count>%u</queue_count>\n", indent,
	    sc->sc_queue_count);
	sbuf_printf(sb, "%s<queue_size>%u</queue_size>\n", indent,
	    sc->sc_queue_size);
	sbuf_printf(sb, "%s<ref>%u</ref>\n", indent, sc->sc_ref);
	sbuf_printf(sb, "%s<unit>%d</unit>\n", indent, sc->sc_unit);
	g_topology_unlock();
	g_gate_release(sc);
	g_topology_lock();
}

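/*
 * Handle G_GATE_CMD_CREATE: validate the arguments, allocate a unit,
 * optionally attach to a read provider, and create the geom and
 * provider for the new device.
 */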
static int
g_gate_create(struct g_gate_ctl_create *ggio)
{
	struct g_gate_softc *sc;
	struct g_geom *gp;
	struct g_provider *pp, *ropp;
	struct g_consumer *cp;
	char name[NAME_MAX];
	int error = 0, unit;

	if (ggio->gctl_mediasize <= 0) {
		G_GATE_DEBUG(1, "Invalid media size.");
		return (EINVAL);
	}
	if (ggio->gctl_sectorsize <= 0) {
		G_GATE_DEBUG(1, "Invalid sector size.");
		return (EINVAL);
	}
	if (!powerof2(ggio->gctl_sectorsize)) {
		G_GATE_DEBUG(1, "Invalid sector size.");
		return (EINVAL);
	}
	if ((ggio->gctl_mediasize % ggio->gctl_sectorsize) != 0) {
		G_GATE_DEBUG(1, "Invalid media size.");
		return (EINVAL);
	}
	if ((ggio->gctl_flags & G_GATE_FLAG_READONLY) != 0 &&
	    (ggio->gctl_flags & G_GATE_FLAG_WRITEONLY) != 0) {
		G_GATE_DEBUG(1, "Invalid flags.");
		return (EINVAL);
	}
	if (ggio->gctl_unit != G_GATE_UNIT_AUTO &&
	    ggio->gctl_unit != G_GATE_NAME_GIVEN &&
	    ggio->gctl_unit < 0) {
		G_GATE_DEBUG(1, "Invalid unit number.");
		return (EINVAL);
	}
	if (ggio->gctl_unit == G_GATE_NAME_GIVEN &&
	    ggio->gctl_name[0] == '\0') {
		G_GATE_DEBUG(1, "No device name.");
		return (EINVAL);
	}

	sc = malloc(sizeof(*sc), M_GATE, M_WAITOK | M_ZERO);
	sc->sc_flags = (ggio->gctl_flags & G_GATE_USERFLAGS);
	strlcpy(sc->sc_info, ggio->gctl_info, sizeof(sc->sc_info));
	sc->sc_seq = 1;
	bioq_init(&sc->sc_inqueue);
	bioq_init(&sc->sc_outqueue);
	mtx_init(&sc->sc_queue_mtx, "gg:queue", NULL, MTX_DEF);
	mtx_init(&sc->sc_read_mtx, "gg:read", NULL, MTX_DEF);
	sc->sc_queue_count = 0;
	sc->sc_queue_size = ggio->gctl_maxcount;
	if (sc->sc_queue_size > G_GATE_MAX_QUEUE_SIZE)
		sc->sc_queue_size = G_GATE_MAX_QUEUE_SIZE;
	sc->sc_timeout = ggio->gctl_timeout;
	callout_init(&sc->sc_callout, 1);

	mtx_lock(&g_gate_units_lock);
	sc->sc_unit = g_gate_getunit(ggio->gctl_unit, &error);
	if (sc->sc_unit < 0)
		goto fail1;
	if (ggio->gctl_unit == G_GATE_NAME_GIVEN)
		snprintf(name, sizeof(name), "%s", ggio->gctl_name);
	else {
		snprintf(name, sizeof(name), "%s%d", G_GATE_PROVIDER_NAME,
		    sc->sc_unit);
	}
	/* Check for name collision. */
	for (unit = 0; unit < g_gate_maxunits; unit++) {
		if (g_gate_units[unit] == NULL)
			continue;
		if (strcmp(name, g_gate_units[unit]->sc_name) != 0)
			continue;
		error = EEXIST;
		goto fail1;
	}
	sc->sc_name = name;
	g_gate_units[sc->sc_unit] = sc;
	g_gate_nunits++;
	mtx_unlock(&g_gate_units_lock);

	g_topology_lock();

	if (ggio->gctl_readprov[0] == '\0') {
		ropp = NULL;
	} else {
		ropp = g_provider_by_name(ggio->gctl_readprov);
		if (ropp == NULL) {
			G_GATE_DEBUG(1, "Provider %s doesn't exist.",
			    ggio->gctl_readprov);
			error = EINVAL;
			goto fail2;
		}
		if ((ggio->gctl_readoffset % ggio->gctl_sectorsize) != 0) {
			G_GATE_DEBUG(1, "Invalid read offset.");
			error = EINVAL;
			goto fail2;
		}
		if (ggio->gctl_mediasize + ggio->gctl_readoffset >
		    ropp->mediasize) {
			G_GATE_DEBUG(1, "Invalid read offset or media size.");
			error = EINVAL;
			goto fail2;
		}
	}

	gp = g_new_geomf(&g_gate_class, "%s", name);
	gp->start = g_gate_start;
	gp->access = g_gate_access;
	gp->orphan = g_gate_orphan;
	gp->dumpconf = g_gate_dumpconf;
	gp->softc = sc;

	if (ropp != NULL) {
		cp = g_new_consumer(gp);
		cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
		error = g_attach(cp, ropp);
		if (error != 0) {
			G_GATE_DEBUG(1, "Unable to attach to %s.", ropp->name);
			goto fail3;
		}
		error = g_access(cp, 1, 0, 0);
		if (error != 0) {
			G_GATE_DEBUG(1, "Unable to access %s.", ropp->name);
			g_detach(cp);
			goto fail3;
		}
		sc->sc_readcons = cp;
		sc->sc_readoffset = ggio->gctl_readoffset;
	}

	ggio->gctl_unit = sc->sc_unit;

	pp = g_new_providerf(gp, "%s", name);
	pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
	pp->mediasize = ggio->gctl_mediasize;
	pp->sectorsize = ggio->gctl_sectorsize;
	sc->sc_provider = pp;
	g_error_provider(pp, 0);

	g_topology_unlock();
	mtx_lock(&g_gate_units_lock);
	sc->sc_name = sc->sc_provider->name;
	mtx_unlock(&g_gate_units_lock);
	G_GATE_DEBUG(1, "Device %s created.", gp->name);

	if (sc->sc_timeout > 0) {
		callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
		    g_gate_guard, sc);
	}
	return (0);
fail3:
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
fail2:
	g_topology_unlock();
	mtx_lock(&g_gate_units_lock);
	g_gate_units[sc->sc_unit] = NULL;
	KASSERT(g_gate_nunits > 0, ("negative g_gate_nunits?"));
	g_gate_nunits--;
fail1:
	mtx_unlock(&g_gate_units_lock);
	mtx_destroy(&sc->sc_queue_mtx);
	mtx_destroy(&sc->sc_read_mtx);
	free(sc, M_GATE);
	return (error);
}

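/*
 * Handle G_GATE_CMD_MODIFY: update the info string, the read provider
 * and/or the read offset.  Changing the media size is not implemented
 * yet.
 */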
static int
g_gate_modify(struct g_gate_softc *sc, struct g_gate_ctl_modify *ggio)
{
	struct g_provider *pp;
	struct g_consumer *cp;
	int done, error;

	if ((ggio->gctl_modify & GG_MODIFY_MEDIASIZE) != 0) {
		if (ggio->gctl_mediasize <= 0) {
			G_GATE_DEBUG(1, "Invalid media size.");
			return (EINVAL);
		}
		pp = sc->sc_provider;
		if ((ggio->gctl_mediasize % pp->sectorsize) != 0) {
			G_GATE_DEBUG(1, "Invalid media size.");
			return (EINVAL);
		}
		/* TODO */
		return (EOPNOTSUPP);
	}

	if ((ggio->gctl_modify & GG_MODIFY_INFO) != 0)
		(void)strlcpy(sc->sc_info, ggio->gctl_info, sizeof(sc->sc_info));

	cp = NULL;

	if ((ggio->gctl_modify & GG_MODIFY_READPROV) != 0) {
		g_topology_lock();
		mtx_lock(&sc->sc_read_mtx);
		if ((cp = sc->sc_readcons) != NULL) {
			sc->sc_readcons = NULL;
			done = (cp->index == 0);
			mtx_unlock(&sc->sc_read_mtx);
			if (done)
				g_gate_detach(cp, 0);
		} else
			mtx_unlock(&sc->sc_read_mtx);
		if (ggio->gctl_readprov[0] != '\0') {
			pp = g_provider_by_name(ggio->gctl_readprov);
			if (pp == NULL) {
				g_topology_unlock();
				G_GATE_DEBUG(1, "Provider %s doesn't exist.",
				    ggio->gctl_readprov);
				return (EINVAL);
			}
			cp = g_new_consumer(sc->sc_provider->geom);
			cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
			error = g_attach(cp, pp);
			if (error != 0) {
				G_GATE_DEBUG(1, "Unable to attach to %s.",
				    pp->name);
			} else {
				error = g_access(cp, 1, 0, 0);
				if (error != 0) {
					G_GATE_DEBUG(1, "Unable to access %s.",
					    pp->name);
					g_detach(cp);
				}
			}
			if (error != 0) {
				g_destroy_consumer(cp);
				g_topology_unlock();
				return (error);
			}
		}
	} else {
		cp = sc->sc_readcons;
	}

	if ((ggio->gctl_modify & GG_MODIFY_READOFFSET) != 0) {
		if (cp == NULL) {
			G_GATE_DEBUG(1, "No read provider.");
			return (EINVAL);
		}
		pp = sc->sc_provider;
		if ((ggio->gctl_readoffset % pp->sectorsize) != 0) {
			G_GATE_DEBUG(1, "Invalid read offset.");
			return (EINVAL);
		}
		if (pp->mediasize + ggio->gctl_readoffset >
		    cp->provider->mediasize) {
			G_GATE_DEBUG(1, "Invalid read offset or media size.");
			return (EINVAL);
		}
		sc->sc_readoffset = ggio->gctl_readoffset;
	}

	if ((ggio->gctl_modify & GG_MODIFY_READPROV) != 0) {
		sc->sc_readcons = cp;
		g_topology_unlock();
	}

	return (0);
}

#define	G_GATE_CHECK_VERSION(ggio)	do {				\
	if ((ggio)->gctl_version != G_GATE_VERSION) {			\
		printf("Version mismatch %d != %d.\n",			\
		    (ggio)->gctl_version, G_GATE_VERSION);		\
		return (EINVAL);					\
	}								\
} while (0)
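
/*
 * Control device ioctl handler.  This is the interface the userland
 * ggate daemons use to create, modify and destroy devices, and to
 * fetch (G_GATE_CMD_START) and complete (G_GATE_CMD_DONE) queued I/O
 * requests.
 */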
static int
g_gate_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
{
	struct g_gate_softc *sc;
	struct bio *bp;
	int error = 0;

	G_GATE_DEBUG(4, "ioctl(%s, %lx, %p, %x, %p)", devtoname(dev), cmd, addr,
	    flags, td);

	switch (cmd) {
	case G_GATE_CMD_CREATE:
	    {
		struct g_gate_ctl_create *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		error = g_gate_create(ggio);
		/*
		 * Reset TDP_GEOM flag.
		 * There are pending events for sure, because we just created
		 * new provider and other classes want to taste it, but we
		 * cannot answer on I/O requests until we're here.
		 */
		td->td_pflags &= ~TDP_GEOM;
		return (error);
	    }
	case G_GATE_CMD_MODIFY:
	    {
		struct g_gate_ctl_modify *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, NULL);
		if (sc == NULL)
			return (ENXIO);
		error = g_gate_modify(sc, ggio);
		g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_DESTROY:
	    {
		struct g_gate_ctl_destroy *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, ggio->gctl_name);
		if (sc == NULL)
			return (ENXIO);
		g_topology_lock();
		mtx_lock(&g_gate_units_lock);
		error = g_gate_destroy(sc, ggio->gctl_force);
		g_topology_unlock();
		if (error != 0)
			g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_CANCEL:
	    {
		struct g_gate_ctl_cancel *ggio = (void *)addr;
		struct bio *tbp, *lbp;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, ggio->gctl_name);
		if (sc == NULL)
			return (ENXIO);
		lbp = NULL;
		mtx_lock(&sc->sc_queue_mtx);
		TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, tbp) {
			if (ggio->gctl_seq == 0 ||
			    ggio->gctl_seq == (uintptr_t)bp->bio_driver1) {
				G_GATE_LOGREQ(1, bp, "Request canceled.");
				bioq_remove(&sc->sc_outqueue, bp);
				/*
				 * Be sure to put requests back onto incoming
				 * queue in the proper order.
				 */
				if (lbp == NULL)
					bioq_insert_head(&sc->sc_inqueue, bp);
				else {
					TAILQ_INSERT_AFTER(&sc->sc_inqueue.queue,
					    lbp, bp, bio_queue);
				}
				lbp = bp;
				/*
				 * If only one request was canceled, leave now.
				 */
				if (ggio->gctl_seq != 0)
					break;
			}
		}
		if (ggio->gctl_unit == G_GATE_NAME_GIVEN)
			ggio->gctl_unit = sc->sc_unit;
		mtx_unlock(&sc->sc_queue_mtx);
		g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_START:
	    {
		struct g_gate_ctl_io *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, NULL);
		if (sc == NULL)
			return (ENXIO);
		error = 0;
		for (;;) {
			mtx_lock(&sc->sc_queue_mtx);
			bp = bioq_first(&sc->sc_inqueue);
			if (bp != NULL)
				break;
			if ((sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
				ggio->gctl_error = ECANCELED;
				mtx_unlock(&sc->sc_queue_mtx);
				goto start_end;
			}
			if (msleep(sc, &sc->sc_queue_mtx,
			    PPAUSE | PDROP | PCATCH, "ggwait", 0) != 0) {
				ggio->gctl_error = ECANCELED;
				goto start_end;
			}
		}
		ggio->gctl_cmd = bp->bio_cmd;
		if (bp->bio_cmd == BIO_WRITE &&
		    bp->bio_length > ggio->gctl_length) {
			mtx_unlock(&sc->sc_queue_mtx);
			ggio->gctl_length = bp->bio_length;
			ggio->gctl_error = ENOMEM;
			goto start_end;
		}
		bioq_remove(&sc->sc_inqueue, bp);
		bioq_insert_tail(&sc->sc_outqueue, bp);
		mtx_unlock(&sc->sc_queue_mtx);

		ggio->gctl_seq = (uintptr_t)bp->bio_driver1;
		ggio->gctl_offset = bp->bio_offset;
		ggio->gctl_length = bp->bio_length;

		switch (bp->bio_cmd) {
		case BIO_READ:
		case BIO_DELETE:
		case BIO_FLUSH:
			break;
		case BIO_WRITE:
			error = copyout(bp->bio_data, ggio->gctl_data,
			    bp->bio_length);
			if (error != 0) {
				mtx_lock(&sc->sc_queue_mtx);
				bioq_remove(&sc->sc_outqueue, bp);
				bioq_insert_head(&sc->sc_inqueue, bp);
				mtx_unlock(&sc->sc_queue_mtx);
				goto start_end;
			}
			break;
		}
start_end:
		g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_DONE:
	    {
		struct g_gate_ctl_io *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, NULL);
		if (sc == NULL)
			return (ENOENT);
		error = 0;
		mtx_lock(&sc->sc_queue_mtx);
		TAILQ_FOREACH(bp, &sc->sc_outqueue.queue, bio_queue) {
			if (ggio->gctl_seq == (uintptr_t)bp->bio_driver1)
				break;
		}
		if (bp != NULL) {
			bioq_remove(&sc->sc_outqueue, bp);
			sc->sc_queue_count--;
		}
		mtx_unlock(&sc->sc_queue_mtx);
		if (bp == NULL) {
			/*
			 * Request was probably canceled.
			 */
			goto done_end;
		}
		if (ggio->gctl_error == EAGAIN) {
			bp->bio_error = 0;
			G_GATE_LOGREQ(1, bp, "Request desisted.");
			mtx_lock(&sc->sc_queue_mtx);
			sc->sc_queue_count++;
			bioq_insert_head(&sc->sc_inqueue, bp);
			wakeup(sc);
			mtx_unlock(&sc->sc_queue_mtx);
		} else {
			bp->bio_error = ggio->gctl_error;
			if (bp->bio_error == 0) {
				bp->bio_completed = bp->bio_length;
				switch (bp->bio_cmd) {
				case BIO_READ:
					error = copyin(ggio->gctl_data,
					    bp->bio_data, bp->bio_length);
					if (error != 0)
						bp->bio_error = error;
					break;
				case BIO_DELETE:
				case BIO_WRITE:
				case BIO_FLUSH:
					break;
				}
			}
			G_GATE_LOGREQ(2, bp, "Request done.");
			g_io_deliver(bp, bp->bio_error);
		}
done_end:
		g_gate_release(sc);
		return (error);
	    }
	}
	return (ENOIOCTL);
}

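/*
 * Userland view (illustrative sketch, not part of this file): a minimal
 * sequence for creating a ggate device through the control ioctls
 * handled above.  The struct and macro names come from
 * <geom/gate/g_gate.h>; the helper name, the 1GB media size and the
 * queue depth are arbitrary, and error handling is omitted.
 *
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <geom/gate/g_gate.h>
 *
 *	int
 *	ggate_create_unit(void)		// hypothetical helper
 *	{
 *		struct g_gate_ctl_create ggio;
 *		int fd;
 *
 *		fd = open("/dev/" G_GATE_CTL_NAME, O_RDWR);
 *		if (fd == -1)
 *			return (-1);
 *		memset(&ggio, 0, sizeof(ggio));
 *		ggio.gctl_version = G_GATE_VERSION;
 *		ggio.gctl_mediasize = 1024 * 1024 * 1024;
 *		ggio.gctl_sectorsize = 512;
 *		ggio.gctl_unit = G_GATE_UNIT_AUTO;
 *		ggio.gctl_maxcount = 64;
 *		ggio.gctl_timeout = 0;
 *		if (ioctl(fd, G_GATE_CMD_CREATE, &ggio) == -1)
 *			return (-1);
 *		return (ggio.gctl_unit);	// unit chosen by the kernel
 *	}
 *
 * The daemon would then loop issuing G_GATE_CMD_START to fetch requests
 * and G_GATE_CMD_DONE to complete them, as implemented in g_gate_ioctl()
 * above.
 */
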
static void
g_gate_device(void)
{

	status_dev = make_dev(&g_gate_cdevsw, 0x0, UID_ROOT, GID_WHEEL, 0600,
	    G_GATE_CTL_NAME);
}

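/*
 * Module event handler: allocate the unit table and create the control
 * device on load; tear both down on unload, which is refused while any
 * device exists.
 */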
static int
g_gate_modevent(module_t mod, int type, void *data)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		mtx_init(&g_gate_units_lock, "gg_units_lock", NULL, MTX_DEF);
		g_gate_units = malloc(g_gate_maxunits * sizeof(g_gate_units[0]),
		    M_GATE, M_WAITOK | M_ZERO);
		g_gate_nunits = 0;
		g_gate_device();
		break;
	case MOD_UNLOAD:
		mtx_lock(&g_gate_units_lock);
		if (g_gate_nunits > 0) {
			mtx_unlock(&g_gate_units_lock);
			error = EBUSY;
			break;
		}
		mtx_unlock(&g_gate_units_lock);
		mtx_destroy(&g_gate_units_lock);
		if (status_dev != NULL)
			destroy_dev(status_dev);
		free(g_gate_units, M_GATE);
		break;
	default:
		return (EOPNOTSUPP);
	}

	return (error);
}

static moduledata_t g_gate_module = {
	G_GATE_MOD_NAME,
	g_gate_modevent,
	NULL
};
DECLARE_MODULE(geom_gate, g_gate_module, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
DECLARE_GEOM_CLASS(g_gate_class, g_gate);
MODULE_VERSION(geom_gate, 0);