FreeBSD/Linux Kernel Cross Reference
sys/geom/gate/g_gate.c
/*-
 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * Copyright (c) 2009-2010 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Pawel Jakub Dawidek
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/fcntl.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/limits.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/signalvar.h>
#include <sys/time.h>
#include <machine/atomic.h>

#include <geom/geom.h>
#include <geom/gate/g_gate.h>

static MALLOC_DEFINE(M_GATE, "gg_data", "GEOM Gate Data");

SYSCTL_DECL(_kern_geom);
static SYSCTL_NODE(_kern_geom, OID_AUTO, gate, CTLFLAG_RW, 0,
    "GEOM_GATE configuration");
static int g_gate_debug = 0;
TUNABLE_INT("kern.geom.gate.debug", &g_gate_debug);
SYSCTL_INT(_kern_geom_gate, OID_AUTO, debug, CTLFLAG_RW, &g_gate_debug, 0,
    "Debug level");
static u_int g_gate_maxunits = 256;
TUNABLE_INT("kern.geom.gate.maxunits", &g_gate_maxunits);
SYSCTL_UINT(_kern_geom_gate, OID_AUTO, maxunits, CTLFLAG_RDTUN,
    &g_gate_maxunits, 0, "Maximum number of ggate devices");

struct g_class g_gate_class = {
        .name = G_GATE_CLASS_NAME,
        .version = G_VERSION,
};

static struct cdev *status_dev;
static d_ioctl_t g_gate_ioctl;
static struct cdevsw g_gate_cdevsw = {
        .d_version = D_VERSION,
        .d_ioctl = g_gate_ioctl,
        .d_name = G_GATE_CTL_NAME
};

static struct g_gate_softc **g_gate_units;
static u_int g_gate_nunits;
static struct mtx g_gate_units_lock;

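/*
 * Destroy a gate device.  Fails with EBUSY when the provider is still
 * open and force was not requested.  Otherwise the device is marked for
 * destruction, all queued requests are canceled and the geom withers
 * once the last reference is dropped.  Called with the topology lock and
 * g_gate_units_lock held.
 */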
static int
g_gate_destroy(struct g_gate_softc *sc, boolean_t force)
{
        struct g_provider *pp;
        struct g_consumer *cp;
        struct g_geom *gp;
        struct bio *bp;

        g_topology_assert();
        mtx_assert(&g_gate_units_lock, MA_OWNED);
        pp = sc->sc_provider;
        if (!force && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
                mtx_unlock(&g_gate_units_lock);
                return (EBUSY);
        }
        mtx_unlock(&g_gate_units_lock);
        mtx_lock(&sc->sc_queue_mtx);
        if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0)
                sc->sc_flags |= G_GATE_FLAG_DESTROY;
        wakeup(sc);
        mtx_unlock(&sc->sc_queue_mtx);
        gp = pp->geom;
        pp->flags |= G_PF_WITHER;
        g_orphan_provider(pp, ENXIO);
        callout_drain(&sc->sc_callout);
        mtx_lock(&sc->sc_queue_mtx);
        while ((bp = bioq_first(&sc->sc_inqueue)) != NULL) {
                bioq_remove(&sc->sc_inqueue, bp);
                sc->sc_queue_count--;
                G_GATE_LOGREQ(1, bp, "Request canceled.");
                g_io_deliver(bp, ENXIO);
        }
        while ((bp = bioq_first(&sc->sc_outqueue)) != NULL) {
                bioq_remove(&sc->sc_outqueue, bp);
                sc->sc_queue_count--;
                G_GATE_LOGREQ(1, bp, "Request canceled.");
                g_io_deliver(bp, ENXIO);
        }
        mtx_unlock(&sc->sc_queue_mtx);
        g_topology_unlock();
        mtx_lock(&g_gate_units_lock);
        /* One reference is ours. */
        sc->sc_ref--;
        while (sc->sc_ref > 0)
                msleep(&sc->sc_ref, &g_gate_units_lock, 0, "gg:destroy", 0);
        g_gate_units[sc->sc_unit] = NULL;
        KASSERT(g_gate_nunits > 0, ("negative g_gate_nunits?"));
        g_gate_nunits--;
        mtx_unlock(&g_gate_units_lock);
        mtx_destroy(&sc->sc_queue_mtx);
        g_topology_lock();
        if ((cp = sc->sc_readcons) != NULL) {
                sc->sc_readcons = NULL;
                (void)g_access(cp, -1, 0, 0);
                g_detach(cp);
                g_destroy_consumer(cp);
        }
        G_GATE_DEBUG(1, "Device %s destroyed.", gp->name);
        gp->softc = NULL;
        g_wither_geom(gp, ENXIO);
        sc->sc_provider = NULL;
        free(sc, M_GATE);
        return (0);
}

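/*
 * Access method: refuse new openings once the device is being destroyed
 * and enforce the write-only flag.  The read-only check is disabled here
 * and enforced in g_gate_start() instead, so read-only mounts still work.
 */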
static int
g_gate_access(struct g_provider *pp, int dr, int dw, int de)
{
        struct g_gate_softc *sc;

        if (dr <= 0 && dw <= 0 && de <= 0)
                return (0);
        sc = pp->geom->softc;
        if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
                return (ENXIO);
        /* XXX: Hack to allow read-only mounts. */
#if 0
        if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0 && dw > 0)
                return (EPERM);
#endif
        if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0 && dr > 0)
                return (EPERM);
        return (0);
}

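/*
 * Queue a bio on the incoming queue for the userland daemon, tagging it
 * with a sequence number and waking up any thread sleeping in
 * G_GATE_CMD_START.  Fails with ENOMEM when the queue limit is exceeded.
 */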
static void
g_gate_queue_io(struct bio *bp)
{
        struct g_gate_softc *sc;

        sc = bp->bio_to->geom->softc;
        if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
                g_io_deliver(bp, ENXIO);
                return;
        }

        mtx_lock(&sc->sc_queue_mtx);

        if (sc->sc_queue_size > 0 && sc->sc_queue_count > sc->sc_queue_size) {
                mtx_unlock(&sc->sc_queue_mtx);
                G_GATE_LOGREQ(1, bp, "Queue full, request canceled.");
                g_io_deliver(bp, ENOMEM);
                return;
        }

        bp->bio_driver1 = (void *)sc->sc_seq;
        sc->sc_seq++;
        sc->sc_queue_count++;

        bioq_insert_tail(&sc->sc_inqueue, bp);
        wakeup(sc);

        mtx_unlock(&sc->sc_queue_mtx);
}

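/*
 * Completion callback for direct reads sent to the read consumer.  On
 * success the parent bio is completed directly; on failure the request
 * falls back to the userland daemon.
 */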
static void
g_gate_done(struct bio *cbp)
{
        struct bio *pbp;

        pbp = cbp->bio_parent;
        if (cbp->bio_error == 0) {
                pbp->bio_completed = cbp->bio_completed;
                g_destroy_bio(cbp);
                pbp->bio_inbed++;
                g_io_deliver(pbp, 0);
        } else {
                /* If direct read failed, pass it through userland daemon. */
                g_destroy_bio(cbp);
                pbp->bio_children--;
                g_gate_queue_io(pbp);
        }
}

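/*
 * Start method: route BIO_READ to the read consumer when one is
 * configured, reject writes on read-only devices, and hand everything
 * else to the userland daemon via the incoming queue.
 */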
static void
g_gate_start(struct bio *pbp)
{
        struct g_gate_softc *sc;

        sc = pbp->bio_to->geom->softc;
        if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
                g_io_deliver(pbp, ENXIO);
                return;
        }
        G_GATE_LOGREQ(2, pbp, "Request received.");
        switch (pbp->bio_cmd) {
        case BIO_READ:
                if (sc->sc_readcons != NULL) {
                        struct bio *cbp;

                        cbp = g_clone_bio(pbp);
                        if (cbp == NULL) {
                                g_io_deliver(pbp, ENOMEM);
                                return;
                        }
                        cbp->bio_done = g_gate_done;
                        cbp->bio_offset = pbp->bio_offset + sc->sc_readoffset;
                        cbp->bio_to = sc->sc_readcons->provider;
                        g_io_request(cbp, sc->sc_readcons);
                        return;
                }
                break;
        case BIO_DELETE:
        case BIO_WRITE:
        case BIO_FLUSH:
                /* XXX: Hack to allow read-only mounts. */
                if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
                        g_io_deliver(pbp, EPERM);
                        return;
                }
                break;
        case BIO_GETATTR:
        default:
                G_GATE_LOGREQ(2, pbp, "Ignoring request.");
                g_io_deliver(pbp, EOPNOTSUPP);
                return;
        }

        g_gate_queue_io(pbp);
}

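/*
 * Look up a softc by unit number, or by provider name when unit is
 * G_GATE_NAME_GIVEN, and take a reference on it.  Returns NULL if no
 * matching device exists.
 */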
static struct g_gate_softc *
g_gate_hold(int unit, const char *name)
{
        struct g_gate_softc *sc = NULL;

        mtx_lock(&g_gate_units_lock);
        if (unit >= 0 && unit < g_gate_maxunits)
                sc = g_gate_units[unit];
        else if (unit == G_GATE_NAME_GIVEN) {
                KASSERT(name != NULL, ("name is NULL"));
                for (unit = 0; unit < g_gate_maxunits; unit++) {
                        if (g_gate_units[unit] == NULL)
                                continue;
                        if (strcmp(name,
                            g_gate_units[unit]->sc_provider->name) != 0) {
                                continue;
                        }
                        sc = g_gate_units[unit];
                        break;
                }
        }
        if (sc != NULL)
                sc->sc_ref++;
        mtx_unlock(&g_gate_units_lock);
        return (sc);
}

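/*
 * Drop a reference taken by g_gate_hold() and wake up g_gate_destroy()
 * when the last reference on a dying device goes away.
 */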
static void
g_gate_release(struct g_gate_softc *sc)
{

        g_topology_assert_not();
        mtx_lock(&g_gate_units_lock);
        sc->sc_ref--;
        KASSERT(sc->sc_ref >= 0, ("Negative sc_ref for %s.", sc->sc_name));
        if (sc->sc_ref == 0 && (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
                wakeup(&sc->sc_ref);
        mtx_unlock(&g_gate_units_lock);
}

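/*
 * Return the requested unit number if it is free, or the first free unit
 * when a negative number was requested.  On failure -1 is returned and
 * *errorp is set (EINVAL, EEXIST or ENFILE).
 */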
static int
g_gate_getunit(int unit, int *errorp)
{

        mtx_assert(&g_gate_units_lock, MA_OWNED);
        if (unit >= 0) {
                if (unit >= g_gate_maxunits)
                        *errorp = EINVAL;
                else if (g_gate_units[unit] == NULL)
                        return (unit);
                else
                        *errorp = EEXIST;
        } else {
                for (unit = 0; unit < g_gate_maxunits; unit++) {
                        if (g_gate_units[unit] == NULL)
                                return (unit);
                }
                *errorp = ENFILE;
        }
        return (-1);
}

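/*
 * Watchdog callout: fail with EIO any request that has been sitting on
 * the incoming or outgoing queue for five seconds or more, then re-arm
 * itself unless the device is being destroyed.
 */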
static void
g_gate_guard(void *arg)
{
        struct g_gate_softc *sc;
        struct bintime curtime;
        struct bio *bp, *bp2;

        sc = arg;
        binuptime(&curtime);
        g_gate_hold(sc->sc_unit, NULL);
        mtx_lock(&sc->sc_queue_mtx);
        TAILQ_FOREACH_SAFE(bp, &sc->sc_inqueue.queue, bio_queue, bp2) {
                if (curtime.sec - bp->bio_t0.sec < 5)
                        continue;
                bioq_remove(&sc->sc_inqueue, bp);
                sc->sc_queue_count--;
                G_GATE_LOGREQ(1, bp, "Request timeout.");
                g_io_deliver(bp, EIO);
        }
        TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, bp2) {
                if (curtime.sec - bp->bio_t0.sec < 5)
                        continue;
                bioq_remove(&sc->sc_outqueue, bp);
                sc->sc_queue_count--;
                G_GATE_LOGREQ(1, bp, "Request timeout.");
                g_io_deliver(bp, EIO);
        }
        mtx_unlock(&sc->sc_queue_mtx);
        if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0) {
                callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
                    g_gate_guard, sc);
        }
        g_gate_release(sc);
}

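/*
 * Orphan method: the underlying read provider went away, so drop and
 * destroy the read consumer.  Reads will be served by the userland
 * daemon from now on.
 */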
static void
g_gate_orphan(struct g_consumer *cp)
{
        struct g_gate_softc *sc;
        struct g_geom *gp;

        g_topology_assert();
        gp = cp->geom;
        sc = gp->softc;
        if (sc == NULL)
                return;
        KASSERT(cp == sc->sc_readcons, ("cp=%p sc_readcons=%p", cp,
            sc->sc_readcons));
        sc->sc_readcons = NULL;
        G_GATE_DEBUG(1, "Destroying read consumer on provider %s orphan.",
            cp->provider->name);
        (void)g_access(cp, -1, 0, 0);
        g_detach(cp);
        g_destroy_consumer(cp);
}

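/*
 * Dumpconf method: export device state (access mode, read provider and
 * offset, timeout, queue counters, references) into the GEOM
 * configuration XML.
 */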
static void
g_gate_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
        struct g_gate_softc *sc;

        sc = gp->softc;
        if (sc == NULL || pp != NULL || cp != NULL)
                return;
        sc = g_gate_hold(sc->sc_unit, NULL);
        if (sc == NULL)
                return;
        if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
                sbuf_printf(sb, "%s<access>%s</access>\n", indent, "read-only");
        } else if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0) {
                sbuf_printf(sb, "%s<access>%s</access>\n", indent,
                    "write-only");
        } else {
                sbuf_printf(sb, "%s<access>%s</access>\n", indent,
                    "read-write");
        }
        if (sc->sc_readcons != NULL) {
                sbuf_printf(sb, "%s<read_offset>%jd</read_offset>\n",
                    indent, (intmax_t)sc->sc_readoffset);
                sbuf_printf(sb, "%s<read_provider>%s</read_provider>\n",
                    indent, sc->sc_readcons->provider->name);
        }
        sbuf_printf(sb, "%s<timeout>%u</timeout>\n", indent, sc->sc_timeout);
        sbuf_printf(sb, "%s<info>%s</info>\n", indent, sc->sc_info);
        sbuf_printf(sb, "%s<queue_count>%u</queue_count>\n", indent,
            sc->sc_queue_count);
        sbuf_printf(sb, "%s<queue_size>%u</queue_size>\n", indent,
            sc->sc_queue_size);
        sbuf_printf(sb, "%s<ref>%u</ref>\n", indent, sc->sc_ref);
        sbuf_printf(sb, "%s<unit>%d</unit>\n", indent, sc->sc_unit);
        g_topology_unlock();
        g_gate_release(sc);
        g_topology_lock();
}

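/*
 * Create a new gate device from a G_GATE_CMD_CREATE request: validate
 * the geometry and flags, allocate a unit, optionally attach a read-only
 * consumer to an existing provider, and publish the new provider.
 */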
static int
g_gate_create(struct g_gate_ctl_create *ggio)
{
        struct g_gate_softc *sc;
        struct g_geom *gp;
        struct g_provider *pp, *ropp;
        struct g_consumer *cp;
        char name[NAME_MAX];
        int error = 0, unit;

        if (ggio->gctl_mediasize <= 0) {
                G_GATE_DEBUG(1, "Invalid media size.");
                return (EINVAL);
        }
        if (ggio->gctl_sectorsize <= 0) {
                G_GATE_DEBUG(1, "Invalid sector size.");
                return (EINVAL);
        }
        if (!powerof2(ggio->gctl_sectorsize)) {
                G_GATE_DEBUG(1, "Invalid sector size.");
                return (EINVAL);
        }
        if ((ggio->gctl_mediasize % ggio->gctl_sectorsize) != 0) {
                G_GATE_DEBUG(1, "Invalid media size.");
                return (EINVAL);
        }
        if ((ggio->gctl_flags & G_GATE_FLAG_READONLY) != 0 &&
            (ggio->gctl_flags & G_GATE_FLAG_WRITEONLY) != 0) {
                G_GATE_DEBUG(1, "Invalid flags.");
                return (EINVAL);
        }
        if (ggio->gctl_unit != G_GATE_UNIT_AUTO &&
            ggio->gctl_unit != G_GATE_NAME_GIVEN &&
            ggio->gctl_unit < 0) {
                G_GATE_DEBUG(1, "Invalid unit number.");
                return (EINVAL);
        }
        if (ggio->gctl_unit == G_GATE_NAME_GIVEN &&
            ggio->gctl_name[0] == '\0') {
                G_GATE_DEBUG(1, "No device name.");
                return (EINVAL);
        }

        sc = malloc(sizeof(*sc), M_GATE, M_WAITOK | M_ZERO);
        sc->sc_flags = (ggio->gctl_flags & G_GATE_USERFLAGS);
        strlcpy(sc->sc_info, ggio->gctl_info, sizeof(sc->sc_info));
        sc->sc_seq = 1;
        bioq_init(&sc->sc_inqueue);
        bioq_init(&sc->sc_outqueue);
        mtx_init(&sc->sc_queue_mtx, "gg:queue", NULL, MTX_DEF);
        sc->sc_queue_count = 0;
        sc->sc_queue_size = ggio->gctl_maxcount;
        if (sc->sc_queue_size > G_GATE_MAX_QUEUE_SIZE)
                sc->sc_queue_size = G_GATE_MAX_QUEUE_SIZE;
        sc->sc_timeout = ggio->gctl_timeout;
        callout_init(&sc->sc_callout, CALLOUT_MPSAFE);

        mtx_lock(&g_gate_units_lock);
        sc->sc_unit = g_gate_getunit(ggio->gctl_unit, &error);
        if (sc->sc_unit < 0)
                goto fail1;
        if (ggio->gctl_unit == G_GATE_NAME_GIVEN)
                snprintf(name, sizeof(name), "%s", ggio->gctl_name);
        else {
                snprintf(name, sizeof(name), "%s%d", G_GATE_PROVIDER_NAME,
                    sc->sc_unit);
        }
        /* Check for name collision. */
        for (unit = 0; unit < g_gate_maxunits; unit++) {
                if (g_gate_units[unit] == NULL)
                        continue;
                if (strcmp(name, g_gate_units[unit]->sc_name) != 0)
                        continue;
                error = EEXIST;
                goto fail1;
        }
        sc->sc_name = name;
        g_gate_units[sc->sc_unit] = sc;
        g_gate_nunits++;
        mtx_unlock(&g_gate_units_lock);

        g_topology_lock();

        if (ggio->gctl_readprov[0] == '\0') {
                ropp = NULL;
        } else {
                ropp = g_provider_by_name(ggio->gctl_readprov);
                if (ropp == NULL) {
                        G_GATE_DEBUG(1, "Provider %s doesn't exist.",
                            ggio->gctl_readprov);
                        error = EINVAL;
                        goto fail2;
                }
                if ((ggio->gctl_readoffset % ggio->gctl_sectorsize) != 0) {
                        G_GATE_DEBUG(1, "Invalid read offset.");
                        error = EINVAL;
                        goto fail2;
                }
                if (ggio->gctl_mediasize + ggio->gctl_readoffset >
                    ropp->mediasize) {
                        G_GATE_DEBUG(1, "Invalid read offset or media size.");
                        error = EINVAL;
                        goto fail2;
                }
        }

        gp = g_new_geomf(&g_gate_class, "%s", name);
        gp->start = g_gate_start;
        gp->access = g_gate_access;
        gp->orphan = g_gate_orphan;
        gp->dumpconf = g_gate_dumpconf;
        gp->softc = sc;

        if (ropp != NULL) {
                cp = g_new_consumer(gp);
                error = g_attach(cp, ropp);
                if (error != 0) {
                        G_GATE_DEBUG(1, "Unable to attach to %s.", ropp->name);
                        goto fail3;
                }
                error = g_access(cp, 1, 0, 0);
                if (error != 0) {
                        G_GATE_DEBUG(1, "Unable to access %s.", ropp->name);
                        g_detach(cp);
                        goto fail3;
                }
                sc->sc_readcons = cp;
                sc->sc_readoffset = ggio->gctl_readoffset;
        }

        ggio->gctl_unit = sc->sc_unit;

        pp = g_new_providerf(gp, "%s", name);
        pp->mediasize = ggio->gctl_mediasize;
        pp->sectorsize = ggio->gctl_sectorsize;
        sc->sc_provider = pp;
        g_error_provider(pp, 0);

        g_topology_unlock();
        mtx_lock(&g_gate_units_lock);
        sc->sc_name = sc->sc_provider->name;
        mtx_unlock(&g_gate_units_lock);
        G_GATE_DEBUG(1, "Device %s created.", gp->name);

        if (sc->sc_timeout > 0) {
                callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
                    g_gate_guard, sc);
        }
        return (0);
fail3:
        g_destroy_consumer(cp);
        g_destroy_geom(gp);
fail2:
        g_topology_unlock();
        mtx_lock(&g_gate_units_lock);
        g_gate_units[sc->sc_unit] = NULL;
        KASSERT(g_gate_nunits > 0, ("negative g_gate_nunits?"));
        g_gate_nunits--;
fail1:
        mtx_unlock(&g_gate_units_lock);
        mtx_destroy(&sc->sc_queue_mtx);
        free(sc, M_GATE);
        return (error);
}

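/*
 * Handle G_GATE_CMD_MODIFY: update the info string, replace the read
 * provider and/or change the read offset of an existing device.
 * Resizing the media is not implemented yet.
 */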
static int
g_gate_modify(struct g_gate_softc *sc, struct g_gate_ctl_modify *ggio)
{
        struct g_provider *pp;
        struct g_consumer *cp;
        int error;

        if ((ggio->gctl_modify & GG_MODIFY_MEDIASIZE) != 0) {
                if (ggio->gctl_mediasize <= 0) {
                        G_GATE_DEBUG(1, "Invalid media size.");
                        return (EINVAL);
                }
                pp = sc->sc_provider;
                if ((ggio->gctl_mediasize % pp->sectorsize) != 0) {
                        G_GATE_DEBUG(1, "Invalid media size.");
                        return (EINVAL);
                }
                /* TODO */
                return (EOPNOTSUPP);
        }

        if ((ggio->gctl_modify & GG_MODIFY_INFO) != 0)
                (void)strlcpy(sc->sc_info, ggio->gctl_info,
                    sizeof(sc->sc_info));

        cp = NULL;

        if ((ggio->gctl_modify & GG_MODIFY_READPROV) != 0) {
                g_topology_lock();
                if (sc->sc_readcons != NULL) {
                        cp = sc->sc_readcons;
                        sc->sc_readcons = NULL;
                        (void)g_access(cp, -1, 0, 0);
                        g_detach(cp);
                        g_destroy_consumer(cp);
                        /* Don't install the destroyed consumer below. */
                        cp = NULL;
                }
                if (ggio->gctl_readprov[0] != '\0') {
                        pp = g_provider_by_name(ggio->gctl_readprov);
                        if (pp == NULL) {
                                g_topology_unlock();
                                G_GATE_DEBUG(1, "Provider %s doesn't exist.",
                                    ggio->gctl_readprov);
                                return (EINVAL);
                        }
                        cp = g_new_consumer(sc->sc_provider->geom);
                        error = g_attach(cp, pp);
                        if (error != 0) {
                                G_GATE_DEBUG(1, "Unable to attach to %s.",
                                    pp->name);
                        } else {
                                error = g_access(cp, 1, 0, 0);
                                if (error != 0) {
                                        G_GATE_DEBUG(1, "Unable to access %s.",
                                            pp->name);
                                        g_detach(cp);
                                }
                        }
                        if (error != 0) {
                                g_destroy_consumer(cp);
                                g_topology_unlock();
                                return (error);
                        }
                }
        } else {
                cp = sc->sc_readcons;
        }

        if ((ggio->gctl_modify & GG_MODIFY_READOFFSET) != 0) {
                if (cp == NULL) {
                        G_GATE_DEBUG(1, "No read provider.");
                        error = EINVAL;
                        goto fail;
                }
                pp = sc->sc_provider;
                if ((ggio->gctl_readoffset % pp->sectorsize) != 0) {
                        G_GATE_DEBUG(1, "Invalid read offset.");
                        error = EINVAL;
                        goto fail;
                }
                if (pp->mediasize + ggio->gctl_readoffset >
                    cp->provider->mediasize) {
                        G_GATE_DEBUG(1, "Invalid read offset or media size.");
                        error = EINVAL;
                        goto fail;
                }
                sc->sc_readoffset = ggio->gctl_readoffset;
        }

        if ((ggio->gctl_modify & GG_MODIFY_READPROV) != 0) {
                sc->sc_readcons = cp;
                g_topology_unlock();
        }

        return (0);
fail:
        /*
         * Unwind the GG_MODIFY_READPROV work from above: release the
         * not-yet-installed consumer (if any) and the topology lock,
         * which would otherwise leak on this error path.
         */
        if ((ggio->gctl_modify & GG_MODIFY_READPROV) != 0) {
                if (cp != NULL) {
                        (void)g_access(cp, -1, 0, 0);
                        g_detach(cp);
                        g_destroy_consumer(cp);
                }
                g_topology_unlock();
        }
        return (error);
}

#define G_GATE_CHECK_VERSION(ggio)      do {                            \
        if ((ggio)->gctl_version != G_GATE_VERSION) {                   \
                printf("Version mismatch %d != %d.\n",                  \
                    (ggio)->gctl_version, G_GATE_VERSION);              \
                return (EINVAL);                                        \
        }                                                               \
} while (0)
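/*
 * Control device ioctl handler.  A userland ggate daemon typically loops:
 * ioctl(G_GATE_CMD_START) to fetch the next request, services it, then
 * ioctl(G_GATE_CMD_DONE) to complete it (gctl_error = EAGAIN requeues
 * the request instead of completing it).
 */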
static int
g_gate_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
        struct g_gate_softc *sc;
        struct bio *bp;
        int error = 0;

        G_GATE_DEBUG(4, "ioctl(%s, %lx, %p, %x, %p)", devtoname(dev), cmd, addr,
            flags, td);

        switch (cmd) {
        case G_GATE_CMD_CREATE:
            {
                struct g_gate_ctl_create *ggio = (void *)addr;

                G_GATE_CHECK_VERSION(ggio);
                error = g_gate_create(ggio);
                /*
                 * Reset TDP_GEOM flag.
                 * There are pending events for sure, because we just created
                 * new provider and other classes want to taste it, but we
                 * cannot answer on I/O requests until we're here.
                 */
                td->td_pflags &= ~TDP_GEOM;
                return (error);
            }
        case G_GATE_CMD_MODIFY:
            {
                struct g_gate_ctl_modify *ggio = (void *)addr;

                G_GATE_CHECK_VERSION(ggio);
                sc = g_gate_hold(ggio->gctl_unit, NULL);
                if (sc == NULL)
                        return (ENXIO);
                error = g_gate_modify(sc, ggio);
                g_gate_release(sc);
                return (error);
            }
        case G_GATE_CMD_DESTROY:
            {
                struct g_gate_ctl_destroy *ggio = (void *)addr;

                G_GATE_CHECK_VERSION(ggio);
                sc = g_gate_hold(ggio->gctl_unit, ggio->gctl_name);
                if (sc == NULL)
                        return (ENXIO);
                g_topology_lock();
                mtx_lock(&g_gate_units_lock);
                error = g_gate_destroy(sc, ggio->gctl_force);
                g_topology_unlock();
                if (error != 0)
                        g_gate_release(sc);
                return (error);
            }
        case G_GATE_CMD_CANCEL:
            {
                struct g_gate_ctl_cancel *ggio = (void *)addr;
                struct bio *tbp, *lbp;

                G_GATE_CHECK_VERSION(ggio);
                sc = g_gate_hold(ggio->gctl_unit, ggio->gctl_name);
                if (sc == NULL)
                        return (ENXIO);
                lbp = NULL;
                mtx_lock(&sc->sc_queue_mtx);
                TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, tbp) {
                        if (ggio->gctl_seq == 0 ||
                            ggio->gctl_seq == (uintptr_t)bp->bio_driver1) {
                                G_GATE_LOGREQ(1, bp, "Request canceled.");
                                bioq_remove(&sc->sc_outqueue, bp);
                                /*
                                 * Be sure to put requests back onto incoming
                                 * queue in the proper order.
                                 */
                                if (lbp == NULL)
                                        bioq_insert_head(&sc->sc_inqueue, bp);
                                else {
                                        TAILQ_INSERT_AFTER(&sc->sc_inqueue.queue,
                                            lbp, bp, bio_queue);
                                }
                                lbp = bp;
                                /*
                                 * If only one request was canceled, leave now.
                                 */
                                if (ggio->gctl_seq != 0)
                                        break;
                        }
                }
                if (ggio->gctl_unit == G_GATE_NAME_GIVEN)
                        ggio->gctl_unit = sc->sc_unit;
                mtx_unlock(&sc->sc_queue_mtx);
                g_gate_release(sc);
                return (error);
            }
        case G_GATE_CMD_START:
            {
                struct g_gate_ctl_io *ggio = (void *)addr;

                G_GATE_CHECK_VERSION(ggio);
                sc = g_gate_hold(ggio->gctl_unit, NULL);
                if (sc == NULL)
                        return (ENXIO);
                error = 0;
                for (;;) {
                        mtx_lock(&sc->sc_queue_mtx);
                        bp = bioq_first(&sc->sc_inqueue);
                        if (bp != NULL)
                                break;
                        if ((sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
                                ggio->gctl_error = ECANCELED;
                                mtx_unlock(&sc->sc_queue_mtx);
                                goto start_end;
                        }
                        if (msleep(sc, &sc->sc_queue_mtx,
                            PPAUSE | PDROP | PCATCH, "ggwait", 0) != 0) {
                                ggio->gctl_error = ECANCELED;
                                goto start_end;
                        }
                }
                ggio->gctl_cmd = bp->bio_cmd;
                if ((bp->bio_cmd == BIO_DELETE || bp->bio_cmd == BIO_WRITE) &&
                    bp->bio_length > ggio->gctl_length) {
                        mtx_unlock(&sc->sc_queue_mtx);
                        ggio->gctl_length = bp->bio_length;
                        ggio->gctl_error = ENOMEM;
                        goto start_end;
                }
                bioq_remove(&sc->sc_inqueue, bp);
                bioq_insert_tail(&sc->sc_outqueue, bp);
                mtx_unlock(&sc->sc_queue_mtx);

                ggio->gctl_seq = (uintptr_t)bp->bio_driver1;
                ggio->gctl_offset = bp->bio_offset;
                ggio->gctl_length = bp->bio_length;

                switch (bp->bio_cmd) {
                case BIO_READ:
                case BIO_DELETE:
                case BIO_FLUSH:
                        break;
                case BIO_WRITE:
                        error = copyout(bp->bio_data, ggio->gctl_data,
                            bp->bio_length);
                        if (error != 0) {
                                mtx_lock(&sc->sc_queue_mtx);
                                bioq_remove(&sc->sc_outqueue, bp);
                                bioq_insert_head(&sc->sc_inqueue, bp);
                                mtx_unlock(&sc->sc_queue_mtx);
                                goto start_end;
                        }
                        break;
                }
start_end:
                g_gate_release(sc);
                return (error);
            }
        case G_GATE_CMD_DONE:
            {
                struct g_gate_ctl_io *ggio = (void *)addr;

                G_GATE_CHECK_VERSION(ggio);
                sc = g_gate_hold(ggio->gctl_unit, NULL);
                if (sc == NULL)
                        return (ENOENT);
                error = 0;
                mtx_lock(&sc->sc_queue_mtx);
                TAILQ_FOREACH(bp, &sc->sc_outqueue.queue, bio_queue) {
                        if (ggio->gctl_seq == (uintptr_t)bp->bio_driver1)
                                break;
                }
                if (bp != NULL) {
                        bioq_remove(&sc->sc_outqueue, bp);
                        sc->sc_queue_count--;
                }
                mtx_unlock(&sc->sc_queue_mtx);
                if (bp == NULL) {
                        /*
                         * Request was probably canceled.
                         */
                        goto done_end;
                }
                if (ggio->gctl_error == EAGAIN) {
                        bp->bio_error = 0;
                        G_GATE_LOGREQ(1, bp, "Request desisted.");
                        mtx_lock(&sc->sc_queue_mtx);
                        sc->sc_queue_count++;
                        bioq_insert_head(&sc->sc_inqueue, bp);
                        wakeup(sc);
                        mtx_unlock(&sc->sc_queue_mtx);
                } else {
                        bp->bio_error = ggio->gctl_error;
                        if (bp->bio_error == 0) {
                                bp->bio_completed = bp->bio_length;
                                switch (bp->bio_cmd) {
                                case BIO_READ:
                                        error = copyin(ggio->gctl_data,
                                            bp->bio_data, bp->bio_length);
                                        if (error != 0)
                                                bp->bio_error = error;
                                        break;
                                case BIO_DELETE:
                                case BIO_WRITE:
                                case BIO_FLUSH:
                                        break;
                                }
                        }
                        G_GATE_LOGREQ(2, bp, "Request done.");
                        g_io_deliver(bp, bp->bio_error);
                }
done_end:
                g_gate_release(sc);
                return (error);
            }
        }
        return (ENOIOCTL);
}

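/*
 * Create the G_GATE_CTL_NAME control device node through which the
 * userland daemons issue all gate ioctls.
 */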
static void
g_gate_device(void)
{

        status_dev = make_dev(&g_gate_cdevsw, 0x0, UID_ROOT, GID_WHEEL, 0600,
            G_GATE_CTL_NAME);
}

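/*
 * Module event handler: allocate the unit table and create the control
 * device on load; refuse to unload while any gate device exists.
 */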
static int
g_gate_modevent(module_t mod, int type, void *data)
{
        int error = 0;

        switch (type) {
        case MOD_LOAD:
                mtx_init(&g_gate_units_lock, "gg_units_lock", NULL, MTX_DEF);
                g_gate_units = malloc(g_gate_maxunits * sizeof(g_gate_units[0]),
                    M_GATE, M_WAITOK | M_ZERO);
                g_gate_nunits = 0;
                g_gate_device();
                break;
        case MOD_UNLOAD:
                mtx_lock(&g_gate_units_lock);
                if (g_gate_nunits > 0) {
                        mtx_unlock(&g_gate_units_lock);
                        error = EBUSY;
                        break;
                }
                mtx_unlock(&g_gate_units_lock);
                mtx_destroy(&g_gate_units_lock);
                if (status_dev != NULL)
                        destroy_dev(status_dev);
                free(g_gate_units, M_GATE);
                break;
        default:
                return (EOPNOTSUPP);
        }

        return (error);
}

static moduledata_t g_gate_module = {
        G_GATE_MOD_NAME,
        g_gate_modevent,
        NULL
};
DECLARE_MODULE(geom_gate, g_gate_module, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
DECLARE_GEOM_CLASS(g_gate_class, g_gate);