FreeBSD/Linux Kernel Cross Reference
sys/geom/gate/g_gate.c
1 /*-
2 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
3 * Copyright (c) 2009-2010 The FreeBSD Foundation
4 * All rights reserved.
5 *
6 * Portions of this software were developed by Pawel Jakub Dawidek
7 * under sponsorship from the FreeBSD Foundation.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD: releng/8.4/sys/geom/gate/g_gate.c 240266 2012-09-09 08:21:06Z trociny $");
33
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/bio.h>
37 #include <sys/conf.h>
38 #include <sys/kernel.h>
39 #include <sys/kthread.h>
40 #include <sys/fcntl.h>
41 #include <sys/linker.h>
42 #include <sys/lock.h>
43 #include <sys/malloc.h>
44 #include <sys/mutex.h>
45 #include <sys/proc.h>
46 #include <sys/limits.h>
47 #include <sys/queue.h>
48 #include <sys/sysctl.h>
49 #include <sys/signalvar.h>
50 #include <sys/time.h>
51 #include <machine/atomic.h>
52
53 #include <geom/geom.h>
54 #include <geom/gate/g_gate.h>
55
static MALLOC_DEFINE(M_GATE, "gg_data", "GEOM Gate Data");

SYSCTL_DECL(_kern_geom);
static SYSCTL_NODE(_kern_geom, OID_AUTO, gate, CTLFLAG_RW, 0,
    "GEOM_GATE configuration");
/* Debug verbosity for G_GATE_DEBUG()/G_GATE_LOGREQ(); tunable at runtime. */
static int g_gate_debug = 0;
TUNABLE_INT("kern.geom.gate.debug", &g_gate_debug);
SYSCTL_INT(_kern_geom_gate, OID_AUTO, debug, CTLFLAG_RW, &g_gate_debug, 0,
    "Debug level");
/* Size of the g_gate_units[] table; read-only after boot (CTLFLAG_RDTUN). */
static u_int g_gate_maxunits = 256;
TUNABLE_INT("kern.geom.gate.maxunits", &g_gate_maxunits);
SYSCTL_UINT(_kern_geom_gate, OID_AUTO, maxunits, CTLFLAG_RDTUN,
    &g_gate_maxunits, 0, "Maximum number of ggate devices");

struct g_class g_gate_class = {
	.name = G_GATE_CLASS_NAME,
	.version = G_VERSION,
};

/* Control device (/dev/ggctl) used by the userland ggate tools. */
static struct cdev *status_dev;
static d_ioctl_t g_gate_ioctl;
static struct cdevsw g_gate_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	g_gate_ioctl,
	.d_name =	G_GATE_CTL_NAME
};


/*
 * Table of active devices indexed by unit number, plus the number of
 * live entries.  Both are protected by g_gate_units_lock.
 */
static struct g_gate_softc **g_gate_units;
static u_int g_gate_nunits;
static struct mtx g_gate_units_lock;
/*
 * Tear down a ggate device.
 *
 * Called with both the topology lock and g_gate_units_lock held.
 * g_gate_units_lock is always released before return; the topology lock
 * is temporarily dropped while waiting for references to drain, but is
 * held again on return.
 *
 * Returns EBUSY if the provider is still open and force is false,
 * otherwise 0 after the softc has been freed.
 */
static int
g_gate_destroy(struct g_gate_softc *sc, boolean_t force)
{
	struct g_provider *pp;
	struct g_consumer *cp;
	struct g_geom *gp;
	struct bio *bp;

	g_topology_assert();
	mtx_assert(&g_gate_units_lock, MA_OWNED);
	pp = sc->sc_provider;
	if (!force && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		mtx_unlock(&g_gate_units_lock);
		return (EBUSY);
	}
	mtx_unlock(&g_gate_units_lock);
	mtx_lock(&sc->sc_queue_mtx);
	if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0)
		sc->sc_flags |= G_GATE_FLAG_DESTROY;
	/* Wake threads sleeping in G_GATE_CMD_START so they see the flag. */
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
	gp = pp->geom;
	pp->flags |= G_PF_WITHER;
	g_orphan_provider(pp, ENXIO);
	callout_drain(&sc->sc_callout);
	mtx_lock(&sc->sc_queue_mtx);
	/* Fail requests not yet handed to the userland daemon... */
	while ((bp = bioq_first(&sc->sc_inqueue)) != NULL) {
		bioq_remove(&sc->sc_inqueue, bp);
		sc->sc_queue_count--;
		G_GATE_LOGREQ(1, bp, "Request canceled.");
		g_io_deliver(bp, ENXIO);
	}
	/* ...and those the daemon was still processing. */
	while ((bp = bioq_first(&sc->sc_outqueue)) != NULL) {
		bioq_remove(&sc->sc_outqueue, bp);
		sc->sc_queue_count--;
		G_GATE_LOGREQ(1, bp, "Request canceled.");
		g_io_deliver(bp, ENXIO);
	}
	mtx_unlock(&sc->sc_queue_mtx);
	g_topology_unlock();
	mtx_lock(&g_gate_units_lock);
	/* One reference is ours. */
	sc->sc_ref--;
	/* Wait for every g_gate_hold() reference to be released. */
	while (sc->sc_ref > 0)
		msleep(&sc->sc_ref, &g_gate_units_lock, 0, "gg:destroy", 0);
	g_gate_units[sc->sc_unit] = NULL;
	KASSERT(g_gate_nunits > 0, ("negative g_gate_nunits?"));
	g_gate_nunits--;
	mtx_unlock(&g_gate_units_lock);
	mtx_destroy(&sc->sc_queue_mtx);
	g_topology_lock();
	/* Drop the direct-read consumer, if one was configured. */
	if ((cp = sc->sc_readcons) != NULL) {
		sc->sc_readcons = NULL;
		(void)g_access(cp, -1, 0, 0);
		g_detach(cp);
		g_destroy_consumer(cp);
	}
	G_GATE_DEBUG(1, "Device %s destroyed.", gp->name);
	gp->softc = NULL;
	g_wither_geom(gp, ENXIO);
	sc->sc_provider = NULL;
	free(sc, M_GATE);
	return (0);
}
152
153 static int
154 g_gate_access(struct g_provider *pp, int dr, int dw, int de)
155 {
156 struct g_gate_softc *sc;
157
158 if (dr <= 0 && dw <= 0 && de <= 0)
159 return (0);
160 sc = pp->geom->softc;
161 if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
162 return (ENXIO);
163 /* XXX: Hack to allow read-only mounts. */
164 #if 0
165 if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0 && dw > 0)
166 return (EPERM);
167 #endif
168 if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0 && dr > 0)
169 return (EPERM);
170 return (0);
171 }
172
/*
 * Hand a bio to the userland daemon: tag it with a sequence number and
 * append it to the incoming queue, waking any thread sleeping in
 * G_GATE_CMD_START.  Fails the bio with ENOMEM if the queue is full.
 */
static void
g_gate_queue_io(struct bio *bp)
{
	struct g_gate_softc *sc;

	sc = bp->bio_to->geom->softc;
	if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
		g_io_deliver(bp, ENXIO);
		return;
	}

	mtx_lock(&sc->sc_queue_mtx);

	if (sc->sc_queue_size > 0 && sc->sc_queue_count > sc->sc_queue_size) {
		mtx_unlock(&sc->sc_queue_mtx);
		G_GATE_LOGREQ(1, bp, "Queue full, request canceled.");
		g_io_deliver(bp, ENOMEM);
		return;
	}

	/*
	 * The sequence number (stashed in bio_driver1) is how the daemon
	 * later identifies this request in CMD_CANCEL/CMD_DONE.
	 */
	bp->bio_driver1 = (void *)sc->sc_seq;
	sc->sc_seq++;
	sc->sc_queue_count++;

	bioq_insert_tail(&sc->sc_inqueue, bp);
	wakeup(sc);

	mtx_unlock(&sc->sc_queue_mtx);
}
202
/*
 * Completion callback for reads forwarded directly to the read
 * provider (sc_readcons).  On success, complete the parent bio; on
 * failure, fall back to servicing the request via the userland daemon.
 */
static void
g_gate_done(struct bio *cbp)
{
	struct bio *pbp;

	pbp = cbp->bio_parent;
	if (cbp->bio_error == 0) {
		pbp->bio_completed = cbp->bio_completed;
		g_destroy_bio(cbp);
		pbp->bio_inbed++;
		g_io_deliver(pbp, 0);
	} else {
		/* If direct read failed, pass it through userland daemon. */
		g_destroy_bio(cbp);
		/*
		 * Forget the failed clone so the parent looks like a fresh
		 * request when it is queued for the daemon.
		 */
		pbp->bio_children--;
		g_gate_queue_io(pbp);
	}
}
221
/*
 * GEOM start method: route an incoming request either directly to the
 * configured read provider (reads only) or to the userland daemon via
 * the incoming queue.
 */
static void
g_gate_start(struct bio *pbp)
{
	struct g_gate_softc *sc;

	sc = pbp->bio_to->geom->softc;
	if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
		g_io_deliver(pbp, ENXIO);
		return;
	}
	G_GATE_LOGREQ(2, pbp, "Request received.");
	switch (pbp->bio_cmd) {
	case BIO_READ:
		if (sc->sc_readcons != NULL) {
			struct bio *cbp;

			/* Serve the read straight from the read provider. */
			cbp = g_clone_bio(pbp);
			if (cbp == NULL) {
				g_io_deliver(pbp, ENOMEM);
				return;
			}
			cbp->bio_done = g_gate_done;
			cbp->bio_offset = pbp->bio_offset + sc->sc_readoffset;
			cbp->bio_data = pbp->bio_data;
			cbp->bio_length = pbp->bio_length;
			cbp->bio_to = sc->sc_readcons->provider;
			g_io_request(cbp, sc->sc_readcons);
			return;
		}
		break;
	case BIO_DELETE:
	case BIO_WRITE:
	case BIO_FLUSH:
		/* XXX: Hack to allow read-only mounts. */
		if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
			g_io_deliver(pbp, EPERM);
			return;
		}
		break;
	case BIO_GETATTR:	/* FALLTHROUGH */
	default:
		G_GATE_LOGREQ(2, pbp, "Ignoring request.");
		g_io_deliver(pbp, EOPNOTSUPP);
		return;
	}

	/* Everything else goes to userland. */
	g_gate_queue_io(pbp);
}
270
271 static struct g_gate_softc *
272 g_gate_hold(int unit, const char *name)
273 {
274 struct g_gate_softc *sc = NULL;
275
276 mtx_lock(&g_gate_units_lock);
277 if (unit >= 0 && unit < g_gate_maxunits)
278 sc = g_gate_units[unit];
279 else if (unit == G_GATE_NAME_GIVEN) {
280 KASSERT(name != NULL, ("name is NULL"));
281 for (unit = 0; unit < g_gate_maxunits; unit++) {
282 if (g_gate_units[unit] == NULL)
283 continue;
284 if (strcmp(name,
285 g_gate_units[unit]->sc_provider->name) != 0) {
286 continue;
287 }
288 sc = g_gate_units[unit];
289 break;
290 }
291 }
292 if (sc != NULL)
293 sc->sc_ref++;
294 mtx_unlock(&g_gate_units_lock);
295 return (sc);
296 }
297
/*
 * Drop a reference taken with g_gate_hold().  If this was the last
 * reference and the device is marked for destruction, wake the thread
 * waiting in g_gate_destroy().
 */
static void
g_gate_release(struct g_gate_softc *sc)
{

	g_topology_assert_not();
	mtx_lock(&g_gate_units_lock);
	sc->sc_ref--;
	KASSERT(sc->sc_ref >= 0, ("Negative sc_ref for %s.", sc->sc_name));
	if (sc->sc_ref == 0 && (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
		wakeup(&sc->sc_ref);
	mtx_unlock(&g_gate_units_lock);
}
310
311 static int
312 g_gate_getunit(int unit, int *errorp)
313 {
314
315 mtx_assert(&g_gate_units_lock, MA_OWNED);
316 if (unit >= 0) {
317 if (unit >= g_gate_maxunits)
318 *errorp = EINVAL;
319 else if (g_gate_units[unit] == NULL)
320 return (unit);
321 else
322 *errorp = EEXIST;
323 } else {
324 for (unit = 0; unit < g_gate_maxunits; unit++) {
325 if (g_gate_units[unit] == NULL)
326 return (unit);
327 }
328 *errorp = ENFILE;
329 }
330 return (-1);
331 }
332
333 static void
334 g_gate_guard(void *arg)
335 {
336 struct g_gate_softc *sc;
337 struct bintime curtime;
338 struct bio *bp, *bp2;
339
340 sc = arg;
341 binuptime(&curtime);
342 g_gate_hold(sc->sc_unit, NULL);
343 mtx_lock(&sc->sc_queue_mtx);
344 TAILQ_FOREACH_SAFE(bp, &sc->sc_inqueue.queue, bio_queue, bp2) {
345 if (curtime.sec - bp->bio_t0.sec < 5)
346 continue;
347 bioq_remove(&sc->sc_inqueue, bp);
348 sc->sc_queue_count--;
349 G_GATE_LOGREQ(1, bp, "Request timeout.");
350 g_io_deliver(bp, EIO);
351 }
352 TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, bp2) {
353 if (curtime.sec - bp->bio_t0.sec < 5)
354 continue;
355 bioq_remove(&sc->sc_outqueue, bp);
356 sc->sc_queue_count--;
357 G_GATE_LOGREQ(1, bp, "Request timeout.");
358 g_io_deliver(bp, EIO);
359 }
360 mtx_unlock(&sc->sc_queue_mtx);
361 if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0) {
362 callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
363 g_gate_guard, sc);
364 }
365 g_gate_release(sc);
366 }
367
/*
 * Orphan method for the direct-read consumer: the underlying read
 * provider went away, so detach and destroy our consumer.  Subsequent
 * reads are routed to the userland daemon (sc_readcons == NULL).
 */
static void
g_gate_orphan(struct g_consumer *cp)
{
	struct g_gate_softc *sc;
	struct g_geom *gp;

	g_topology_assert();
	gp = cp->geom;
	sc = gp->softc;
	if (sc == NULL)
		return;
	/* The only consumer this geom ever creates is sc_readcons. */
	KASSERT(cp == sc->sc_readcons, ("cp=%p sc_readcons=%p", cp,
	    sc->sc_readcons));
	sc->sc_readcons = NULL;
	G_GATE_DEBUG(1, "Destroying read consumer on provider %s orphan.",
	    cp->provider->name);
	(void)g_access(cp, -1, 0, 0);
	g_detach(cp);
	g_destroy_consumer(cp);
}
388
/*
 * Dump per-device configuration as XML (kern.geom.confxml and
 * friends).  Only geom-level information is emitted; provider- and
 * consumer-level calls return immediately.
 */
static void
g_gate_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_gate_softc *sc;

	sc = gp->softc;
	if (sc == NULL || pp != NULL || cp != NULL)
		return;
	/* Take a reference so the softc cannot be destroyed under us. */
	sc = g_gate_hold(sc->sc_unit, NULL);
	if (sc == NULL)
		return;
	if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent, "read-only");
	} else if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0) {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent,
		    "write-only");
	} else {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent,
		    "read-write");
	}
	if (sc->sc_readcons != NULL) {
		sbuf_printf(sb, "%s<read_offset>%jd</read_offset>\n",
		    indent, (intmax_t)sc->sc_readoffset);
		sbuf_printf(sb, "%s<read_provider>%s</read_provider>\n",
		    indent, sc->sc_readcons->provider->name);
	}
	sbuf_printf(sb, "%s<timeout>%u</timeout>\n", indent, sc->sc_timeout);
	sbuf_printf(sb, "%s<info>%s</info>\n", indent, sc->sc_info);
	sbuf_printf(sb, "%s<queue_count>%u</queue_count>\n", indent,
	    sc->sc_queue_count);
	sbuf_printf(sb, "%s<queue_size>%u</queue_size>\n", indent,
	    sc->sc_queue_size);
	sbuf_printf(sb, "%s<ref>%u</ref>\n", indent, sc->sc_ref);
	sbuf_printf(sb, "%s<unit>%d</unit>\n", indent, sc->sc_unit);
	/* g_gate_release() asserts the topology lock is NOT held. */
	g_topology_unlock();
	g_gate_release(sc);
	g_topology_lock();
}
428
/*
 * Handle G_GATE_CMD_CREATE: validate the request, reserve a unit,
 * optionally attach a direct-read provider, and publish the new
 * ggate provider.  Returns 0 on success or an errno; on failure all
 * partially constructed state is unwound via the fail labels.
 */
static int
g_gate_create(struct g_gate_ctl_create *ggio)
{
	struct g_gate_softc *sc;
	struct g_geom *gp;
	struct g_provider *pp, *ropp;
	struct g_consumer *cp;
	char name[NAME_MAX];
	int error = 0, unit;

	if (ggio->gctl_mediasize <= 0) {
		G_GATE_DEBUG(1, "Invalid media size.");
		return (EINVAL);
	}
	if (ggio->gctl_sectorsize <= 0) {
		G_GATE_DEBUG(1, "Invalid sector size.");
		return (EINVAL);
	}
	if (!powerof2(ggio->gctl_sectorsize)) {
		G_GATE_DEBUG(1, "Invalid sector size.");
		return (EINVAL);
	}
	if ((ggio->gctl_mediasize % ggio->gctl_sectorsize) != 0) {
		G_GATE_DEBUG(1, "Invalid media size.");
		return (EINVAL);
	}
	if ((ggio->gctl_flags & G_GATE_FLAG_READONLY) != 0 &&
	    (ggio->gctl_flags & G_GATE_FLAG_WRITEONLY) != 0) {
		G_GATE_DEBUG(1, "Invalid flags.");
		return (EINVAL);
	}
	if (ggio->gctl_unit != G_GATE_UNIT_AUTO &&
	    ggio->gctl_unit != G_GATE_NAME_GIVEN &&
	    ggio->gctl_unit < 0) {
		G_GATE_DEBUG(1, "Invalid unit number.");
		return (EINVAL);
	}
	if (ggio->gctl_unit == G_GATE_NAME_GIVEN &&
	    ggio->gctl_name[0] == '\0') {
		G_GATE_DEBUG(1, "No device name.");
		return (EINVAL);
	}

	sc = malloc(sizeof(*sc), M_GATE, M_WAITOK | M_ZERO);
	sc->sc_flags = (ggio->gctl_flags & G_GATE_USERFLAGS);
	strlcpy(sc->sc_info, ggio->gctl_info, sizeof(sc->sc_info));
	sc->sc_seq = 1;
	bioq_init(&sc->sc_inqueue);
	bioq_init(&sc->sc_outqueue);
	mtx_init(&sc->sc_queue_mtx, "gg:queue", NULL, MTX_DEF);
	sc->sc_queue_count = 0;
	sc->sc_queue_size = ggio->gctl_maxcount;
	if (sc->sc_queue_size > G_GATE_MAX_QUEUE_SIZE)
		sc->sc_queue_size = G_GATE_MAX_QUEUE_SIZE;
	sc->sc_timeout = ggio->gctl_timeout;
	callout_init(&sc->sc_callout, CALLOUT_MPSAFE);

	mtx_lock(&g_gate_units_lock);
	sc->sc_unit = g_gate_getunit(ggio->gctl_unit, &error);
	if (sc->sc_unit < 0)
		goto fail1;
	if (ggio->gctl_unit == G_GATE_NAME_GIVEN)
		snprintf(name, sizeof(name), "%s", ggio->gctl_name);
	else {
		snprintf(name, sizeof(name), "%s%d", G_GATE_PROVIDER_NAME,
		    sc->sc_unit);
	}
	/* Check for name collision. */
	for (unit = 0; unit < g_gate_maxunits; unit++) {
		if (g_gate_units[unit] == NULL)
			continue;
		if (strcmp(name, g_gate_units[unit]->sc_name) != 0)
			continue;
		error = EEXIST;
		goto fail1;
	}
	/*
	 * sc_name temporarily points at the on-stack buffer; it is
	 * switched to the provider's name below, before this function
	 * returns.  Concurrent creators only read it under
	 * g_gate_units_lock (the collision scan above).
	 */
	sc->sc_name = name;
	g_gate_units[sc->sc_unit] = sc;
	g_gate_nunits++;
	mtx_unlock(&g_gate_units_lock);

	g_topology_lock();

	if (ggio->gctl_readprov[0] == '\0') {
		ropp = NULL;
	} else {
		/* Validate the optional direct-read provider. */
		ropp = g_provider_by_name(ggio->gctl_readprov);
		if (ropp == NULL) {
			G_GATE_DEBUG(1, "Provider %s doesn't exist.",
			    ggio->gctl_readprov);
			error = EINVAL;
			goto fail2;
		}
		if ((ggio->gctl_readoffset % ggio->gctl_sectorsize) != 0) {
			G_GATE_DEBUG(1, "Invalid read offset.");
			error = EINVAL;
			goto fail2;
		}
		if (ggio->gctl_mediasize + ggio->gctl_readoffset >
		    ropp->mediasize) {
			G_GATE_DEBUG(1, "Invalid read offset or media size.");
			error = EINVAL;
			goto fail2;
		}
	}

	gp = g_new_geomf(&g_gate_class, "%s", name);
	gp->start = g_gate_start;
	gp->access = g_gate_access;
	gp->orphan = g_gate_orphan;
	gp->dumpconf = g_gate_dumpconf;
	gp->softc = sc;

	if (ropp != NULL) {
		cp = g_new_consumer(gp);
		error = g_attach(cp, ropp);
		if (error != 0) {
			G_GATE_DEBUG(1, "Unable to attach to %s.", ropp->name);
			goto fail3;
		}
		error = g_access(cp, 1, 0, 0);
		if (error != 0) {
			G_GATE_DEBUG(1, "Unable to access %s.", ropp->name);
			g_detach(cp);
			goto fail3;
		}
		sc->sc_readcons = cp;
		sc->sc_readoffset = ggio->gctl_readoffset;
	}

	ggio->gctl_unit = sc->sc_unit;

	pp = g_new_providerf(gp, "%s", name);
	pp->mediasize = ggio->gctl_mediasize;
	pp->sectorsize = ggio->gctl_sectorsize;
	sc->sc_provider = pp;
	g_error_provider(pp, 0);

	g_topology_unlock();
	mtx_lock(&g_gate_units_lock);
	/* Repoint sc_name at stable storage (the provider's name). */
	sc->sc_name = sc->sc_provider->name;
	mtx_unlock(&g_gate_units_lock);
	G_GATE_DEBUG(1, "Device %s created.", gp->name);

	/* Arm the request-timeout watchdog if one was requested. */
	if (sc->sc_timeout > 0) {
		callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
		    g_gate_guard, sc);
	}
	return (0);
fail3:
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
fail2:
	g_topology_unlock();
	mtx_lock(&g_gate_units_lock);
	g_gate_units[sc->sc_unit] = NULL;
	KASSERT(g_gate_nunits > 0, ("negative g_gate_nunits?"));
	g_gate_nunits--;
fail1:
	mtx_unlock(&g_gate_units_lock);
	mtx_destroy(&sc->sc_queue_mtx);
	free(sc, M_GATE);
	return (error);
}
593
594 static int
595 g_gate_modify(struct g_gate_softc *sc, struct g_gate_ctl_modify *ggio)
596 {
597 struct g_provider *pp;
598 struct g_consumer *cp;
599 int error;
600
601 if ((ggio->gctl_modify & GG_MODIFY_MEDIASIZE) != 0) {
602 if (ggio->gctl_mediasize <= 0) {
603 G_GATE_DEBUG(1, "Invalid media size.");
604 return (EINVAL);
605 }
606 pp = sc->sc_provider;
607 if ((ggio->gctl_mediasize % pp->sectorsize) != 0) {
608 G_GATE_DEBUG(1, "Invalid media size.");
609 return (EINVAL);
610 }
611 /* TODO */
612 return (EOPNOTSUPP);
613 }
614
615 if ((ggio->gctl_modify & GG_MODIFY_INFO) != 0)
616 (void)strlcpy(sc->sc_info, ggio->gctl_info, sizeof(sc->sc_info));
617
618 cp = NULL;
619
620 if ((ggio->gctl_modify & GG_MODIFY_READPROV) != 0) {
621 g_topology_lock();
622 if (sc->sc_readcons != NULL) {
623 cp = sc->sc_readcons;
624 sc->sc_readcons = NULL;
625 (void)g_access(cp, -1, 0, 0);
626 g_detach(cp);
627 g_destroy_consumer(cp);
628 }
629 if (ggio->gctl_readprov[0] != '\0') {
630 pp = g_provider_by_name(ggio->gctl_readprov);
631 if (pp == NULL) {
632 g_topology_unlock();
633 G_GATE_DEBUG(1, "Provider %s doesn't exist.",
634 ggio->gctl_readprov);
635 return (EINVAL);
636 }
637 cp = g_new_consumer(sc->sc_provider->geom);
638 error = g_attach(cp, pp);
639 if (error != 0) {
640 G_GATE_DEBUG(1, "Unable to attach to %s.",
641 pp->name);
642 } else {
643 error = g_access(cp, 1, 0, 0);
644 if (error != 0) {
645 G_GATE_DEBUG(1, "Unable to access %s.",
646 pp->name);
647 g_detach(cp);
648 }
649 }
650 if (error != 0) {
651 g_destroy_consumer(cp);
652 g_topology_unlock();
653 return (error);
654 }
655 }
656 } else {
657 cp = sc->sc_readcons;
658 }
659
660 if ((ggio->gctl_modify & GG_MODIFY_READOFFSET) != 0) {
661 if (cp == NULL) {
662 G_GATE_DEBUG(1, "No read provider.");
663 return (EINVAL);
664 }
665 pp = sc->sc_provider;
666 if ((ggio->gctl_readoffset % pp->sectorsize) != 0) {
667 G_GATE_DEBUG(1, "Invalid read offset.");
668 return (EINVAL);
669 }
670 if (pp->mediasize + ggio->gctl_readoffset >
671 cp->provider->mediasize) {
672 G_GATE_DEBUG(1, "Invalid read offset or media size.");
673 return (EINVAL);
674 }
675 sc->sc_readoffset = ggio->gctl_readoffset;
676 }
677
678 if ((ggio->gctl_modify & GG_MODIFY_READPROV) != 0) {
679 sc->sc_readcons = cp;
680 g_topology_unlock();
681 }
682
683 return (0);
684 }
685
/*
 * Reject ioctl structures built against a different g_gate ABI version.
 * The macro argument is now parenthesized everywhere it is expanded
 * (the printf arguments previously used bare `ggio').
 */
#define	G_GATE_CHECK_VERSION(ggio)	do {				\
	if ((ggio)->gctl_version != G_GATE_VERSION) {			\
		printf("Version mismatch %d != %d.\n",			\
		    (ggio)->gctl_version, G_GATE_VERSION);		\
		return (EINVAL);					\
	}								\
} while (0)
/*
 * Control-device ioctl handler: the userland ggate daemon drives the
 * whole class through these commands (create/modify/destroy devices,
 * fetch requests with CMD_START, complete them with CMD_DONE, and
 * cancel them with CMD_CANCEL).
 */
static int
g_gate_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	struct g_gate_softc *sc;
	struct bio *bp;
	int error = 0;

	G_GATE_DEBUG(4, "ioctl(%s, %lx, %p, %x, %p)", devtoname(dev), cmd, addr,
	    flags, td);

	switch (cmd) {
	case G_GATE_CMD_CREATE:
	    {
		struct g_gate_ctl_create *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		error = g_gate_create(ggio);
		/*
		 * Reset TDP_GEOM flag.
		 * There are pending events for sure, because we just created
		 * new provider and other classes want to taste it, but we
		 * cannot answer on I/O requests until we're here.
		 */
		td->td_pflags &= ~TDP_GEOM;
		return (error);
	    }
	case G_GATE_CMD_MODIFY:
	    {
		struct g_gate_ctl_modify *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, NULL);
		if (sc == NULL)
			return (ENXIO);
		error = g_gate_modify(sc, ggio);
		g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_DESTROY:
	    {
		struct g_gate_ctl_destroy *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, ggio->gctl_name);
		if (sc == NULL)
			return (ENXIO);
		g_topology_lock();
		mtx_lock(&g_gate_units_lock);
		/*
		 * On success g_gate_destroy() consumes our reference (it
		 * frees the softc); only release it if destruction failed.
		 */
		error = g_gate_destroy(sc, ggio->gctl_force);
		g_topology_unlock();
		if (error != 0)
			g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_CANCEL:
	    {
		struct g_gate_ctl_cancel *ggio = (void *)addr;
		struct bio *tbp, *lbp;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, ggio->gctl_name);
		if (sc == NULL)
			return (ENXIO);
		/* Move matching requests from outqueue back to inqueue. */
		lbp = NULL;
		mtx_lock(&sc->sc_queue_mtx);
		TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, tbp) {
			if (ggio->gctl_seq == 0 ||
			    ggio->gctl_seq == (uintptr_t)bp->bio_driver1) {
				G_GATE_LOGREQ(1, bp, "Request canceled.");
				bioq_remove(&sc->sc_outqueue, bp);
				/*
				 * Be sure to put requests back onto incoming
				 * queue in the proper order.
				 */
				if (lbp == NULL)
					bioq_insert_head(&sc->sc_inqueue, bp);
				else {
					TAILQ_INSERT_AFTER(&sc->sc_inqueue.queue,
					    lbp, bp, bio_queue);
				}
				lbp = bp;
				/*
				 * If only one request was canceled, leave now.
				 */
				if (ggio->gctl_seq != 0)
					break;
			}
		}
		if (ggio->gctl_unit == G_GATE_NAME_GIVEN)
			ggio->gctl_unit = sc->sc_unit;
		mtx_unlock(&sc->sc_queue_mtx);
		g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_START:
	    {
		struct g_gate_ctl_io *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, NULL);
		if (sc == NULL)
			return (ENXIO);
		error = 0;
		/*
		 * Wait for a pending request; msleep() drops the mutex
		 * (PDROP) so it must be retaken on each loop iteration.
		 * The loop exits via break with sc_queue_mtx held.
		 */
		for (;;) {
			mtx_lock(&sc->sc_queue_mtx);
			bp = bioq_first(&sc->sc_inqueue);
			if (bp != NULL)
				break;
			if ((sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
				ggio->gctl_error = ECANCELED;
				mtx_unlock(&sc->sc_queue_mtx);
				goto start_end;
			}
			if (msleep(sc, &sc->sc_queue_mtx,
			    PPAUSE | PDROP | PCATCH, "ggwait", 0) != 0) {
				ggio->gctl_error = ECANCELED;
				goto start_end;
			}
		}
		ggio->gctl_cmd = bp->bio_cmd;
		/* The daemon's buffer must be able to hold the payload. */
		if ((bp->bio_cmd == BIO_DELETE || bp->bio_cmd == BIO_WRITE) &&
		    bp->bio_length > ggio->gctl_length) {
			mtx_unlock(&sc->sc_queue_mtx);
			ggio->gctl_length = bp->bio_length;
			ggio->gctl_error = ENOMEM;
			goto start_end;
		}
		/* Hand the request to the daemon: inqueue -> outqueue. */
		bioq_remove(&sc->sc_inqueue, bp);
		bioq_insert_tail(&sc->sc_outqueue, bp);
		mtx_unlock(&sc->sc_queue_mtx);

		ggio->gctl_seq = (uintptr_t)bp->bio_driver1;
		ggio->gctl_offset = bp->bio_offset;
		ggio->gctl_length = bp->bio_length;

		switch (bp->bio_cmd) {
		case BIO_READ:
		case BIO_DELETE:
		case BIO_FLUSH:
			break;
		case BIO_WRITE:
			error = copyout(bp->bio_data, ggio->gctl_data,
			    bp->bio_length);
			if (error != 0) {
				/* Put the request back for a retry. */
				mtx_lock(&sc->sc_queue_mtx);
				bioq_remove(&sc->sc_outqueue, bp);
				bioq_insert_head(&sc->sc_inqueue, bp);
				mtx_unlock(&sc->sc_queue_mtx);
				goto start_end;
			}
			break;
		}
start_end:
		g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_DONE:
	    {
		struct g_gate_ctl_io *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, NULL);
		if (sc == NULL)
			return (ENOENT);
		error = 0;
		/* Find the request the daemon is completing by sequence. */
		mtx_lock(&sc->sc_queue_mtx);
		TAILQ_FOREACH(bp, &sc->sc_outqueue.queue, bio_queue) {
			if (ggio->gctl_seq == (uintptr_t)bp->bio_driver1)
				break;
		}
		if (bp != NULL) {
			bioq_remove(&sc->sc_outqueue, bp);
			sc->sc_queue_count--;
		}
		mtx_unlock(&sc->sc_queue_mtx);
		if (bp == NULL) {
			/*
			 * Request was probably canceled.
			 */
			goto done_end;
		}
		if (ggio->gctl_error == EAGAIN) {
			/* Daemon asked to retry: requeue for CMD_START. */
			bp->bio_error = 0;
			G_GATE_LOGREQ(1, bp, "Request desisted.");
			mtx_lock(&sc->sc_queue_mtx);
			sc->sc_queue_count++;
			bioq_insert_head(&sc->sc_inqueue, bp);
			wakeup(sc);
			mtx_unlock(&sc->sc_queue_mtx);
		} else {
			bp->bio_error = ggio->gctl_error;
			if (bp->bio_error == 0) {
				bp->bio_completed = bp->bio_length;
				switch (bp->bio_cmd) {
				case BIO_READ:
					error = copyin(ggio->gctl_data,
					    bp->bio_data, bp->bio_length);
					if (error != 0)
						bp->bio_error = error;
					break;
				case BIO_DELETE:
				case BIO_WRITE:
				case BIO_FLUSH:
					break;
				}
			}
			G_GATE_LOGREQ(2, bp, "Request done.");
			g_io_deliver(bp, bp->bio_error);
		}
done_end:
		g_gate_release(sc);
		return (error);
	    }
	}
	return (ENOIOCTL);
}
909
/*
 * Create the /dev/ggctl control node the userland ggate tools talk to.
 */
static void
g_gate_device(void)
{

	status_dev = make_dev(&g_gate_cdevsw, 0x0, UID_ROOT, GID_WHEEL, 0600,
	    G_GATE_CTL_NAME);
}
917
918 static int
919 g_gate_modevent(module_t mod, int type, void *data)
920 {
921 int error = 0;
922
923 switch (type) {
924 case MOD_LOAD:
925 mtx_init(&g_gate_units_lock, "gg_units_lock", NULL, MTX_DEF);
926 g_gate_units = malloc(g_gate_maxunits * sizeof(g_gate_units[0]),
927 M_GATE, M_WAITOK | M_ZERO);
928 g_gate_nunits = 0;
929 g_gate_device();
930 break;
931 case MOD_UNLOAD:
932 mtx_lock(&g_gate_units_lock);
933 if (g_gate_nunits > 0) {
934 mtx_unlock(&g_gate_units_lock);
935 error = EBUSY;
936 break;
937 }
938 mtx_unlock(&g_gate_units_lock);
939 mtx_destroy(&g_gate_units_lock);
940 if (status_dev != 0)
941 destroy_dev(status_dev);
942 free(g_gate_units, M_GATE);
943 break;
944 default:
945 return (EOPNOTSUPP);
946 break;
947 }
948
949 return (error);
950 }
/* Module and GEOM class registration. */
static moduledata_t g_gate_module = {
	G_GATE_MOD_NAME,
	g_gate_modevent,
	NULL
};
DECLARE_MODULE(geom_gate, g_gate_module, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
DECLARE_GEOM_CLASS(g_gate_class, g_gate);
Cache object: db4ee37f23dba41d1f3eba1efe5ff4d2
|