FreeBSD/Linux Kernel Cross Reference
sys/geom/gate/g_gate.c
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
5 * Copyright (c) 2009-2010 The FreeBSD Foundation
6 * All rights reserved.
7 *
8 * Portions of this software were developed by Pawel Jakub Dawidek
9 * under sponsorship from the FreeBSD Foundation.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 */
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/bio.h>
39 #include <sys/conf.h>
40 #include <sys/kernel.h>
41 #include <sys/kthread.h>
42 #include <sys/fcntl.h>
43 #include <sys/linker.h>
44 #include <sys/lock.h>
45 #include <sys/malloc.h>
46 #include <sys/mutex.h>
47 #include <sys/proc.h>
48 #include <sys/limits.h>
49 #include <sys/queue.h>
50 #include <sys/sbuf.h>
51 #include <sys/sysctl.h>
52 #include <sys/signalvar.h>
53 #include <sys/time.h>
54 #include <machine/atomic.h>
55
56 #include <geom/geom.h>
57 #include <geom/geom_dbg.h>
58 #include <geom/gate/g_gate.h>
59
60 FEATURE(geom_gate, "GEOM Gate module");
61
62 static MALLOC_DEFINE(M_GATE, "gg_data", "GEOM Gate Data");
63
64 SYSCTL_DECL(_kern_geom);
65 static SYSCTL_NODE(_kern_geom, OID_AUTO, gate, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
66 "GEOM_GATE configuration");
67 static int g_gate_debug = 0;
68 SYSCTL_INT(_kern_geom_gate, OID_AUTO, debug, CTLFLAG_RWTUN, &g_gate_debug, 0,
69 "Debug level");
70 static u_int g_gate_maxunits = 256;
71 SYSCTL_UINT(_kern_geom_gate, OID_AUTO, maxunits, CTLFLAG_RDTUN,
72 &g_gate_maxunits, 0, "Maximum number of ggate devices");
73
74 struct g_class g_gate_class = {
75 .name = G_GATE_CLASS_NAME,
76 .version = G_VERSION,
77 };
78
79 static struct cdev *status_dev;
80 static d_ioctl_t g_gate_ioctl;
81 static struct cdevsw g_gate_cdevsw = {
82 .d_version = D_VERSION,
83 .d_ioctl = g_gate_ioctl,
84 .d_name = G_GATE_CTL_NAME
85 };
86
87 static struct g_gate_softc **g_gate_units;
88 static u_int g_gate_nunits;
89 static struct mtx g_gate_units_lock;
90
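/*
 * Event handler for dropping the read consumer: release our read access,
 * then detach and destroy the consumer.  Called with the topology lock
 * held, either directly or from the GEOM event queue.
 */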
91 static void
92 g_gate_detach(void *arg, int flags __unused)
93 {
94 struct g_consumer *cp = arg;
95
96 g_topology_assert();
97 G_GATE_DEBUG(1, "Destroying read consumer on provider %s orphan.",
98 cp->provider->name);
99 (void)g_access(cp, -1, 0, 0);
100 g_detach(cp);
101 g_destroy_consumer(cp);
102 }
103
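/*
 * Tear down a ggate device.  Unless forced, fail with EBUSY while the
 * provider is still open.  Otherwise mark the softc for destruction,
 * wither the provider, cancel every queued request with ENXIO, wait for
 * all references to drain, drop the read consumer and destroy the geom.
 * Called with the topology lock and g_gate_units_lock held; the units
 * lock is released before returning.
 */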
104 static int
105 g_gate_destroy(struct g_gate_softc *sc, boolean_t force)
106 {
107 struct bio_queue_head queue;
108 struct g_provider *pp;
109 struct g_consumer *cp;
110 struct g_geom *gp;
111 struct bio *bp;
112
113 g_topology_assert();
114 mtx_assert(&g_gate_units_lock, MA_OWNED);
115 pp = sc->sc_provider;
116 if (!force && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
117 mtx_unlock(&g_gate_units_lock);
118 return (EBUSY);
119 }
120 mtx_unlock(&g_gate_units_lock);
121 mtx_lock(&sc->sc_queue_mtx);
122 if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0)
123 sc->sc_flags |= G_GATE_FLAG_DESTROY;
124 wakeup(sc);
125 mtx_unlock(&sc->sc_queue_mtx);
126 gp = pp->geom;
127 g_wither_provider(pp, ENXIO);
128 callout_drain(&sc->sc_callout);
129 bioq_init(&queue);
130 mtx_lock(&sc->sc_queue_mtx);
131 while ((bp = bioq_takefirst(&sc->sc_inqueue)) != NULL) {
132 sc->sc_queue_count--;
133 bioq_insert_tail(&queue, bp);
134 }
135 while ((bp = bioq_takefirst(&sc->sc_outqueue)) != NULL) {
136 sc->sc_queue_count--;
137 bioq_insert_tail(&queue, bp);
138 }
139 mtx_unlock(&sc->sc_queue_mtx);
140 g_topology_unlock();
141 while ((bp = bioq_takefirst(&queue)) != NULL) {
142 G_GATE_LOGREQ(1, bp, "Request canceled.");
143 g_io_deliver(bp, ENXIO);
144 }
145 mtx_lock(&g_gate_units_lock);
146 /* One reference is ours. */
147 sc->sc_ref--;
148 while (sc->sc_ref > 0)
149 msleep(&sc->sc_ref, &g_gate_units_lock, 0, "gg:destroy", 0);
150 g_gate_units[sc->sc_unit] = NULL;
151 KASSERT(g_gate_nunits > 0, ("negative g_gate_nunits?"));
152 g_gate_nunits--;
153 mtx_unlock(&g_gate_units_lock);
154 mtx_destroy(&sc->sc_queue_mtx);
155 mtx_destroy(&sc->sc_read_mtx);
156 g_topology_lock();
157 if ((cp = sc->sc_readcons) != NULL) {
158 sc->sc_readcons = NULL;
159 (void)g_access(cp, -1, 0, 0);
160 g_detach(cp);
161 g_destroy_consumer(cp);
162 }
163 G_GATE_DEBUG(1, "Device %s destroyed.", gp->name);
164 gp->softc = NULL;
165 g_wither_geom(gp, ENXIO);
166 sc->sc_provider = NULL;
167 free(sc, M_GATE);
168 return (0);
169 }
170
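/*
 * Access method: deny any open once the device is marked for destruction
 * and deny read access on write-only devices.  Closes are always allowed.
 */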
171 static int
172 g_gate_access(struct g_provider *pp, int dr, int dw, int de)
173 {
174 struct g_gate_softc *sc;
175
176 if (dr <= 0 && dw <= 0 && de <= 0)
177 return (0);
178 sc = pp->geom->softc;
179 if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
180 return (ENXIO);
181 /* XXX: Hack to allow read-only mounts. */
182 #if 0
183 if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0 && dw > 0)
184 return (EPERM);
185 #endif
186 if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0 && dr > 0)
187 return (EPERM);
188 return (0);
189 }
190
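/*
 * Hand a bio over to the userland daemon: tag it with the next sequence
 * number, append it to the incoming queue and wake up a thread sleeping
 * in G_GATE_CMD_START.  Requests are refused with ENOMEM once the queue
 * limit is exceeded and with ENXIO once the device is being destroyed.
 */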
191 static void
192 g_gate_queue_io(struct bio *bp)
193 {
194 struct g_gate_softc *sc;
195
196 sc = bp->bio_to->geom->softc;
197 if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
198 g_io_deliver(bp, ENXIO);
199 return;
200 }
201
202 mtx_lock(&sc->sc_queue_mtx);
203
204 if (sc->sc_queue_size > 0 && sc->sc_queue_count > sc->sc_queue_size) {
205 mtx_unlock(&sc->sc_queue_mtx);
206 G_GATE_LOGREQ(1, bp, "Queue full, request canceled.");
207 g_io_deliver(bp, ENOMEM);
208 return;
209 }
210
211 bp->bio_driver1 = (void *)sc->sc_seq;
212 sc->sc_seq++;
213 sc->sc_queue_count++;
214
215 bioq_insert_tail(&sc->sc_inqueue, bp);
216 wakeup(sc);
217
218 mtx_unlock(&sc->sc_queue_mtx);
219 }
220
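/*
 * Completion handler for reads sent directly to the read provider.  On
 * success deliver the result to the parent bio; on error fall back to the
 * userland daemon by re-queueing the parent.  Finally drop the consumer's
 * outstanding-request count and schedule its destruction if it has been
 * replaced in the meantime.
 */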
221 static void
222 g_gate_done(struct bio *cbp)
223 {
224 struct g_gate_softc *sc;
225 struct bio *pbp;
226 struct g_consumer *cp;
227
228 cp = cbp->bio_from;
229 pbp = cbp->bio_parent;
230 if (cbp->bio_error == 0) {
231 pbp->bio_completed = cbp->bio_completed;
232 g_destroy_bio(cbp);
233 pbp->bio_inbed++;
234 g_io_deliver(pbp, 0);
235 } else {
236 /* If direct read failed, pass it through userland daemon. */
237 g_destroy_bio(cbp);
238 pbp->bio_children--;
239 g_gate_queue_io(pbp);
240 }
241
242 sc = cp->geom->softc;
243 mtx_lock(&sc->sc_read_mtx);
244 if (--cp->index == 0 && sc->sc_readcons != cp)
245 g_post_event(g_gate_detach, cp, M_NOWAIT, NULL);
246 mtx_unlock(&sc->sc_read_mtx);
247 }
248
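/*
 * Start method.  Reads may be satisfied directly from the optional read
 * provider; writes, deletes, flushes and speedups are rejected on
 * read-only devices; BIO_GETATTR and unknown commands are refused with
 * EOPNOTSUPP.  Anything not completed here is queued for the userland
 * daemon.
 */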
249 static void
250 g_gate_start(struct bio *pbp)
251 {
252 struct g_gate_softc *sc;
253 struct g_consumer *cp;
254 struct bio *cbp;
255
256 sc = pbp->bio_to->geom->softc;
257 if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
258 g_io_deliver(pbp, ENXIO);
259 return;
260 }
261 G_GATE_LOGREQ(2, pbp, "Request received.");
262 switch (pbp->bio_cmd) {
263 case BIO_READ:
264 if (sc->sc_readcons == NULL)
265 break;
266 cbp = g_clone_bio(pbp);
267 if (cbp == NULL) {
268 g_io_deliver(pbp, ENOMEM);
269 return;
270 }
271 mtx_lock(&sc->sc_read_mtx);
272 if ((cp = sc->sc_readcons) == NULL) {
273 mtx_unlock(&sc->sc_read_mtx);
274 g_destroy_bio(cbp);
275 pbp->bio_children--;
276 break;
277 }
278 cp->index++;
279 cbp->bio_offset = pbp->bio_offset + sc->sc_readoffset;
280 mtx_unlock(&sc->sc_read_mtx);
281 cbp->bio_done = g_gate_done;
282 g_io_request(cbp, cp);
283 return;
284 case BIO_DELETE:
285 case BIO_WRITE:
286 case BIO_FLUSH:
287 case BIO_SPEEDUP:
288 /* XXX: Hack to allow read-only mounts. */
289 if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
290 g_io_deliver(pbp, EPERM);
291 return;
292 }
293 break;
294 case BIO_GETATTR:
295 default:
296 G_GATE_LOGREQ(2, pbp, "Ignoring request.");
297 g_io_deliver(pbp, EOPNOTSUPP);
298 return;
299 }
300
301 g_gate_queue_io(pbp);
302 }
303
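/*
 * Find a softc by unit number, or by provider name when unit is
 * G_GATE_NAME_GIVEN, and take a reference on it.  Returns NULL if no
 * matching device exists.
 */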
304 static struct g_gate_softc *
305 g_gate_hold(int unit, const char *name)
306 {
307 struct g_gate_softc *sc = NULL;
308
309 mtx_lock(&g_gate_units_lock);
310 if (unit >= 0 && unit < g_gate_maxunits)
311 sc = g_gate_units[unit];
312 else if (unit == G_GATE_NAME_GIVEN) {
313 KASSERT(name != NULL, ("name is NULL"));
314 for (unit = 0; unit < g_gate_maxunits; unit++) {
315 if (g_gate_units[unit] == NULL)
316 continue;
317 if (strcmp(name,
318 g_gate_units[unit]->sc_provider->name) != 0) {
319 continue;
320 }
321 sc = g_gate_units[unit];
322 break;
323 }
324 }
325 if (sc != NULL)
326 sc->sc_ref++;
327 mtx_unlock(&g_gate_units_lock);
328 return (sc);
329 }
330
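/*
 * Drop a reference obtained with g_gate_hold().  The last reference on a
 * device marked for destruction wakes up the thread sleeping in
 * g_gate_destroy().
 */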
331 static void
332 g_gate_release(struct g_gate_softc *sc)
333 {
334
335 g_topology_assert_not();
336 mtx_lock(&g_gate_units_lock);
337 sc->sc_ref--;
338 KASSERT(sc->sc_ref >= 0, ("Negative sc_ref for %s.", sc->sc_name));
339 if (sc->sc_ref == 0 && (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
340 wakeup(&sc->sc_ref);
341 mtx_unlock(&g_gate_units_lock);
342 }
343
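/*
 * Return the requested unit number if it is valid and free, or the first
 * free unit when a negative number was passed.  On failure -1 is returned
 * and *errorp is set.  The units lock must be held.
 */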
344 static int
345 g_gate_getunit(int unit, int *errorp)
346 {
347
348 mtx_assert(&g_gate_units_lock, MA_OWNED);
349 if (unit >= 0) {
350 if (unit >= g_gate_maxunits)
351 *errorp = EINVAL;
352 else if (g_gate_units[unit] == NULL)
353 return (unit);
354 else
355 *errorp = EEXIST;
356 } else {
357 for (unit = 0; unit < g_gate_maxunits; unit++) {
358 if (g_gate_units[unit] == NULL)
359 return (unit);
360 }
361 *errorp = ENFILE;
362 }
363 return (-1);
364 }
365
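/*
 * Watchdog callout: fail every request that has been queued for five
 * seconds or more with EIO, then reschedule itself every sc_timeout
 * seconds as long as the device is not being destroyed.
 */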
366 static void
367 g_gate_guard(void *arg)
368 {
369 struct bio_queue_head queue;
370 struct g_gate_softc *sc;
371 struct bintime curtime;
372 struct bio *bp, *bp2;
373
374 sc = arg;
375 binuptime(&curtime);
376 g_gate_hold(sc->sc_unit, NULL);
377 bioq_init(&queue);
378 mtx_lock(&sc->sc_queue_mtx);
379 TAILQ_FOREACH_SAFE(bp, &sc->sc_inqueue.queue, bio_queue, bp2) {
380 if (curtime.sec - bp->bio_t0.sec < 5)
381 continue;
382 bioq_remove(&sc->sc_inqueue, bp);
383 sc->sc_queue_count--;
384 bioq_insert_tail(&queue, bp);
385 }
386 TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, bp2) {
387 if (curtime.sec - bp->bio_t0.sec < 5)
388 continue;
389 bioq_remove(&sc->sc_outqueue, bp);
390 sc->sc_queue_count--;
391 bioq_insert_tail(&queue, bp);
392 }
393 mtx_unlock(&sc->sc_queue_mtx);
394 while ((bp = bioq_takefirst(&queue)) != NULL) {
395 G_GATE_LOGREQ(1, bp, "Request timeout.");
396 g_io_deliver(bp, EIO);
397 }
398 if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0) {
399 callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
400 g_gate_guard, sc);
401 }
402 g_gate_release(sc);
403 }
404
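/*
 * Orphan method: the read provider has gone away.  Forget the consumer
 * and destroy it immediately if it has no requests in flight; otherwise
 * g_gate_done() takes care of it once the last request completes.
 */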
405 static void
406 g_gate_orphan(struct g_consumer *cp)
407 {
408 struct g_gate_softc *sc;
409 struct g_geom *gp;
410 int done;
411
412 g_topology_assert();
413 gp = cp->geom;
414 sc = gp->softc;
415 mtx_lock(&sc->sc_read_mtx);
416 if (sc->sc_readcons == cp)
417 sc->sc_readcons = NULL;
418 done = (cp->index == 0);
419 mtx_unlock(&sc->sc_read_mtx);
420 if (done)
421 g_gate_detach(cp, 0);
422 }
423
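/*
 * Dump per-device configuration (access mode, read provider, timeout and
 * queue state) into the confxml output.
 */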
424 static void
425 g_gate_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
426 struct g_consumer *cp, struct g_provider *pp)
427 {
428 struct g_gate_softc *sc;
429
430 sc = gp->softc;
431 if (sc == NULL || pp != NULL || cp != NULL)
432 return;
433 sc = g_gate_hold(sc->sc_unit, NULL);
434 if (sc == NULL)
435 return;
436 if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
437 sbuf_printf(sb, "%s<access>%s</access>\n", indent, "read-only");
438 } else if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0) {
439 sbuf_printf(sb, "%s<access>%s</access>\n", indent,
440 "write-only");
441 } else {
442 sbuf_printf(sb, "%s<access>%s</access>\n", indent,
443 "read-write");
444 }
445 if (sc->sc_readcons != NULL) {
446 sbuf_printf(sb, "%s<read_offset>%jd</read_offset>\n",
447 indent, (intmax_t)sc->sc_readoffset);
448 sbuf_printf(sb, "%s<read_provider>%s</read_provider>\n",
449 indent, sc->sc_readcons->provider->name);
450 }
451 sbuf_printf(sb, "%s<timeout>%u</timeout>\n", indent, sc->sc_timeout);
452 sbuf_printf(sb, "%s<info>%s</info>\n", indent, sc->sc_info);
453 sbuf_printf(sb, "%s<queue_count>%u</queue_count>\n", indent,
454 sc->sc_queue_count);
455 sbuf_printf(sb, "%s<queue_size>%u</queue_size>\n", indent,
456 sc->sc_queue_size);
457 sbuf_printf(sb, "%s<ref>%u</ref>\n", indent, sc->sc_ref);
458 sbuf_printf(sb, "%s<unit>%d</unit>\n", indent, sc->sc_unit);
459 g_topology_unlock();
460 g_gate_release(sc);
461 g_topology_lock();
462 }
463
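/*
 * Handle G_GATE_CMD_CREATE: validate the request, allocate and initialize
 * the softc, reserve a unit number and provider name, optionally attach a
 * read-only consumer to the backing read provider, and finally create the
 * geom and provider.
 */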
464 static int
465 g_gate_create(struct g_gate_ctl_create *ggio)
466 {
467 struct g_gate_softc *sc;
468 struct g_geom *gp;
469 struct g_provider *pp, *ropp;
470 struct g_consumer *cp;
471 char name[NAME_MAX + 1];
472 char readprov[NAME_MAX + 1];
473 int error = 0, unit;
474
475 if (ggio->gctl_mediasize <= 0) {
476 G_GATE_DEBUG(1, "Invalid media size.");
477 return (EINVAL);
478 }
479 if (ggio->gctl_sectorsize <= 0) {
480 G_GATE_DEBUG(1, "Invalid sector size.");
481 return (EINVAL);
482 }
483 if (!powerof2(ggio->gctl_sectorsize)) {
484 G_GATE_DEBUG(1, "Invalid sector size.");
485 return (EINVAL);
486 }
487 if ((ggio->gctl_mediasize % ggio->gctl_sectorsize) != 0) {
488 G_GATE_DEBUG(1, "Invalid media size.");
489 return (EINVAL);
490 }
491 if ((ggio->gctl_flags & G_GATE_FLAG_READONLY) != 0 &&
492 (ggio->gctl_flags & G_GATE_FLAG_WRITEONLY) != 0) {
493 G_GATE_DEBUG(1, "Invalid flags.");
494 return (EINVAL);
495 }
496 if (ggio->gctl_unit != G_GATE_UNIT_AUTO &&
497 ggio->gctl_unit != G_GATE_NAME_GIVEN &&
498 ggio->gctl_unit < 0) {
499 G_GATE_DEBUG(1, "Invalid unit number.");
500 return (EINVAL);
501 }
502 if (ggio->gctl_unit == G_GATE_NAME_GIVEN &&
503 ggio->gctl_name[0] == '\0') {
504 G_GATE_DEBUG(1, "No device name.");
505 return (EINVAL);
506 }
507
508 sc = malloc(sizeof(*sc), M_GATE, M_WAITOK | M_ZERO);
509 sc->sc_flags = (ggio->gctl_flags & G_GATE_USERFLAGS);
510 memset(sc->sc_info, 0, sizeof(sc->sc_info));
511 strncpy(sc->sc_info, ggio->gctl_info,
512 MIN(sizeof(sc->sc_info) - 1, sizeof(ggio->gctl_info)));
513 sc->sc_seq = 1;
514 bioq_init(&sc->sc_inqueue);
515 bioq_init(&sc->sc_outqueue);
516 mtx_init(&sc->sc_queue_mtx, "gg:queue", NULL, MTX_DEF);
517 mtx_init(&sc->sc_read_mtx, "gg:read", NULL, MTX_DEF);
518 sc->sc_queue_count = 0;
519 sc->sc_queue_size = ggio->gctl_maxcount;
520 if (sc->sc_queue_size > G_GATE_MAX_QUEUE_SIZE)
521 sc->sc_queue_size = G_GATE_MAX_QUEUE_SIZE;
522 sc->sc_timeout = ggio->gctl_timeout;
523 callout_init(&sc->sc_callout, 1);
524
525 mtx_lock(&g_gate_units_lock);
526 sc->sc_unit = g_gate_getunit(ggio->gctl_unit, &error);
527 if (sc->sc_unit < 0)
528 goto fail1;
529 if (ggio->gctl_unit == G_GATE_NAME_GIVEN) {
530 memset(name, 0, sizeof(name));
531 strncpy(name, ggio->gctl_name,
532 MIN(sizeof(name) - 1, sizeof(ggio->gctl_name)));
533 } else {
534 snprintf(name, sizeof(name), "%s%d", G_GATE_PROVIDER_NAME,
535 sc->sc_unit);
536 }
537 /* Check for name collision. */
538 for (unit = 0; unit < g_gate_maxunits; unit++) {
539 if (g_gate_units[unit] == NULL)
540 continue;
541 if (strcmp(name, g_gate_units[unit]->sc_name) != 0)
542 continue;
543 error = EEXIST;
544 goto fail1;
545 }
546 	/* 'name' is a local stack buffer; it is only assigned here */
547 	/* temporarily, and the real provider name is assigned below. */
548 sc->sc_name = name;
549 g_gate_units[sc->sc_unit] = sc;
550 g_gate_nunits++;
551 mtx_unlock(&g_gate_units_lock);
552
553 g_topology_lock();
554
555 if (ggio->gctl_readprov[0] == '\0') {
556 ropp = NULL;
557 } else {
558 memset(readprov, 0, sizeof(readprov));
559 strncpy(readprov, ggio->gctl_readprov,
560 MIN(sizeof(readprov) - 1, sizeof(ggio->gctl_readprov)));
561 ropp = g_provider_by_name(readprov);
562 if (ropp == NULL) {
563 G_GATE_DEBUG(1, "Provider %s doesn't exist.", readprov);
564 error = EINVAL;
565 goto fail2;
566 }
567 if ((ggio->gctl_readoffset % ggio->gctl_sectorsize) != 0) {
568 G_GATE_DEBUG(1, "Invalid read offset.");
569 error = EINVAL;
570 goto fail2;
571 }
572 if (ggio->gctl_mediasize + ggio->gctl_readoffset >
573 ropp->mediasize) {
574 G_GATE_DEBUG(1, "Invalid read offset or media size.");
575 error = EINVAL;
576 goto fail2;
577 }
578 }
579
580 gp = g_new_geomf(&g_gate_class, "%s", name);
581 gp->start = g_gate_start;
582 gp->access = g_gate_access;
583 gp->orphan = g_gate_orphan;
584 gp->dumpconf = g_gate_dumpconf;
585 gp->softc = sc;
586
587 if (ropp != NULL) {
588 cp = g_new_consumer(gp);
589 cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
590 error = g_attach(cp, ropp);
591 if (error != 0) {
592 G_GATE_DEBUG(1, "Unable to attach to %s.", ropp->name);
593 goto fail3;
594 }
595 error = g_access(cp, 1, 0, 0);
596 if (error != 0) {
597 G_GATE_DEBUG(1, "Unable to access %s.", ropp->name);
598 g_detach(cp);
599 goto fail3;
600 }
601 sc->sc_readcons = cp;
602 sc->sc_readoffset = ggio->gctl_readoffset;
603 }
604
605 ggio->gctl_unit = sc->sc_unit;
606
607 pp = g_new_providerf(gp, "%s", name);
608 pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
609 pp->mediasize = ggio->gctl_mediasize;
610 pp->sectorsize = ggio->gctl_sectorsize;
611 sc->sc_provider = pp;
612 g_error_provider(pp, 0);
613
614 g_topology_unlock();
615 mtx_lock(&g_gate_units_lock);
616 sc->sc_name = sc->sc_provider->name;
617 mtx_unlock(&g_gate_units_lock);
618 G_GATE_DEBUG(1, "Device %s created.", gp->name);
619
620 if (sc->sc_timeout > 0) {
621 callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
622 g_gate_guard, sc);
623 }
624 return (0);
625 fail3:
626 g_destroy_consumer(cp);
627 g_destroy_geom(gp);
628 fail2:
629 g_topology_unlock();
630 mtx_lock(&g_gate_units_lock);
631 g_gate_units[sc->sc_unit] = NULL;
632 KASSERT(g_gate_nunits > 0, ("negative g_gate_nunits?"));
633 g_gate_nunits--;
634 fail1:
635 mtx_unlock(&g_gate_units_lock);
636 mtx_destroy(&sc->sc_queue_mtx);
637 mtx_destroy(&sc->sc_read_mtx);
638 free(sc, M_GATE);
639 return (error);
640 }
641
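/*
 * Handle G_GATE_CMD_MODIFY.  A media size change is applied on its own
 * and returns immediately; otherwise the info string, read provider and
 * read offset are updated as requested by gctl_modify.
 */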
642 static int
643 g_gate_modify(struct g_gate_softc *sc, struct g_gate_ctl_modify *ggio)
644 {
645 char readprov[NAME_MAX + 1];
646 struct g_provider *pp;
647 struct g_consumer *cp;
648 int done, error;
649
650 if ((ggio->gctl_modify & GG_MODIFY_MEDIASIZE) != 0) {
651 if (ggio->gctl_mediasize <= 0) {
652 G_GATE_DEBUG(1, "Invalid media size.");
653 return (EINVAL);
654 }
655 pp = sc->sc_provider;
656 if ((ggio->gctl_mediasize % pp->sectorsize) != 0) {
657 G_GATE_DEBUG(1, "Invalid media size.");
658 return (EINVAL);
659 }
660 g_resize_provider(pp, ggio->gctl_mediasize);
661 return (0);
662 }
663
664 if ((ggio->gctl_modify & GG_MODIFY_INFO) != 0) {
665 memset(sc->sc_info, 0, sizeof(sc->sc_info));
666 strncpy(sc->sc_info, ggio->gctl_info,
667 MIN(sizeof(sc->sc_info) - 1, sizeof(ggio->gctl_info)));
668 }
669 cp = NULL;
670
671 if ((ggio->gctl_modify & GG_MODIFY_READPROV) != 0) {
672 g_topology_lock();
673 mtx_lock(&sc->sc_read_mtx);
674 if ((cp = sc->sc_readcons) != NULL) {
675 sc->sc_readcons = NULL;
676 done = (cp->index == 0);
677 mtx_unlock(&sc->sc_read_mtx);
678 if (done)
679 g_gate_detach(cp, 0);
680 } else
681 mtx_unlock(&sc->sc_read_mtx);
682 if (ggio->gctl_readprov[0] != '\0') {
683 memset(readprov, 0, sizeof(readprov));
684 strncpy(readprov, ggio->gctl_readprov,
685 MIN(sizeof(readprov) - 1,
686 sizeof(ggio->gctl_readprov)));
687 pp = g_provider_by_name(readprov);
688 if (pp == NULL) {
689 g_topology_unlock();
690 G_GATE_DEBUG(1, "Provider %s doesn't exist.",
691 readprov);
692 return (EINVAL);
693 }
694 cp = g_new_consumer(sc->sc_provider->geom);
695 cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
696 error = g_attach(cp, pp);
697 if (error != 0) {
698 G_GATE_DEBUG(1, "Unable to attach to %s.",
699 pp->name);
700 } else {
701 error = g_access(cp, 1, 0, 0);
702 if (error != 0) {
703 G_GATE_DEBUG(1, "Unable to access %s.",
704 pp->name);
705 g_detach(cp);
706 }
707 }
708 if (error != 0) {
709 g_destroy_consumer(cp);
710 g_topology_unlock();
711 return (error);
712 }
713 }
714 } else {
715 cp = sc->sc_readcons;
716 }
717
718 if ((ggio->gctl_modify & GG_MODIFY_READOFFSET) != 0) {
719 if (cp == NULL) {
720 G_GATE_DEBUG(1, "No read provider.");
721 return (EINVAL);
722 }
723 pp = sc->sc_provider;
724 if ((ggio->gctl_readoffset % pp->sectorsize) != 0) {
725 G_GATE_DEBUG(1, "Invalid read offset.");
726 return (EINVAL);
727 }
728 if (pp->mediasize + ggio->gctl_readoffset >
729 cp->provider->mediasize) {
730 G_GATE_DEBUG(1, "Invalid read offset or media size.");
731 return (EINVAL);
732 }
733 sc->sc_readoffset = ggio->gctl_readoffset;
734 }
735
736 if ((ggio->gctl_modify & GG_MODIFY_READPROV) != 0) {
737 sc->sc_readcons = cp;
738 g_topology_unlock();
739 }
740
741 return (0);
742 }
743
744 #define G_GATE_CHECK_VERSION(ggio) do { \
745 if ((ggio)->gctl_version != G_GATE_VERSION) { \
746 printf("Version mismatch %d != %d.\n", \
747 ggio->gctl_version, G_GATE_VERSION); \
748 return (EINVAL); \
749 } \
750 } while (0)
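/*
 * Control device ioctl handler: implements the create, modify, destroy
 * and cancel commands as well as G_GATE_CMD_START/G_GATE_CMD_DONE, with
 * which the userland daemon fetches queued requests and posts their
 * results.
 */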
751 static int
752 g_gate_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
753 {
754 struct g_gate_softc *sc;
755 struct bio *bp;
756 int error = 0;
757
758 G_GATE_DEBUG(4, "ioctl(%s, %lx, %p, %x, %p)", devtoname(dev), cmd, addr,
759 flags, td);
760
761 switch (cmd) {
762 case G_GATE_CMD_CREATE:
763 {
764 struct g_gate_ctl_create *ggio = (void *)addr;
765
766 G_GATE_CHECK_VERSION(ggio);
767 error = g_gate_create(ggio);
768 /*
769 * Reset TDP_GEOM flag.
770 		 * There are certainly pending events, because we have just
771 		 * created a new provider and other classes want to taste it,
772 		 * but we cannot answer I/O requests until we return from here.
773 		 */
774 td->td_pflags &= ~TDP_GEOM;
775 return (error);
776 }
777 case G_GATE_CMD_MODIFY:
778 {
779 struct g_gate_ctl_modify *ggio = (void *)addr;
780
781 G_GATE_CHECK_VERSION(ggio);
782 sc = g_gate_hold(ggio->gctl_unit, NULL);
783 if (sc == NULL)
784 return (ENXIO);
785 error = g_gate_modify(sc, ggio);
786 g_gate_release(sc);
787 return (error);
788 }
789 case G_GATE_CMD_DESTROY:
790 {
791 struct g_gate_ctl_destroy *ggio = (void *)addr;
792
793 G_GATE_CHECK_VERSION(ggio);
794 sc = g_gate_hold(ggio->gctl_unit, ggio->gctl_name);
795 if (sc == NULL)
796 return (ENXIO);
797 g_topology_lock();
798 mtx_lock(&g_gate_units_lock);
799 error = g_gate_destroy(sc, ggio->gctl_force);
800 g_topology_unlock();
801 if (error != 0)
802 g_gate_release(sc);
803 return (error);
804 }
805 case G_GATE_CMD_CANCEL:
806 {
807 struct g_gate_ctl_cancel *ggio = (void *)addr;
808 struct bio *tbp, *lbp;
809
810 G_GATE_CHECK_VERSION(ggio);
811 sc = g_gate_hold(ggio->gctl_unit, ggio->gctl_name);
812 if (sc == NULL)
813 return (ENXIO);
814 lbp = NULL;
815 mtx_lock(&sc->sc_queue_mtx);
816 TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, tbp) {
817 if (ggio->gctl_seq == 0 ||
818 ggio->gctl_seq == (uintptr_t)bp->bio_driver1) {
819 G_GATE_LOGREQ(1, bp, "Request canceled.");
820 bioq_remove(&sc->sc_outqueue, bp);
821 /*
822 * Be sure to put requests back onto incoming
823 * queue in the proper order.
824 */
825 if (lbp == NULL)
826 bioq_insert_head(&sc->sc_inqueue, bp);
827 else {
828 TAILQ_INSERT_AFTER(&sc->sc_inqueue.queue,
829 lbp, bp, bio_queue);
830 }
831 lbp = bp;
832 /*
833 * If only one request was canceled, leave now.
834 */
835 if (ggio->gctl_seq != 0)
836 break;
837 }
838 }
839 if (ggio->gctl_unit == G_GATE_NAME_GIVEN)
840 ggio->gctl_unit = sc->sc_unit;
841 mtx_unlock(&sc->sc_queue_mtx);
842 g_gate_release(sc);
843 return (error);
844 }
845 case G_GATE_CMD_START:
846 {
847 struct g_gate_ctl_io *ggio = (void *)addr;
848
849 G_GATE_CHECK_VERSION(ggio);
850 sc = g_gate_hold(ggio->gctl_unit, NULL);
851 if (sc == NULL)
852 return (ENXIO);
853 error = 0;
854 for (;;) {
855 mtx_lock(&sc->sc_queue_mtx);
856 bp = bioq_first(&sc->sc_inqueue);
857 if (bp != NULL)
858 break;
859 if ((sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
860 ggio->gctl_error = ECANCELED;
861 mtx_unlock(&sc->sc_queue_mtx);
862 goto start_end;
863 }
864 error = msleep(sc, &sc->sc_queue_mtx,
865 PPAUSE | PDROP | PCATCH, "ggwait", 0);
866 if (error != 0)
867 goto start_end;
868 }
869 ggio->gctl_cmd = bp->bio_cmd;
870 if (bp->bio_cmd == BIO_WRITE &&
871 bp->bio_length > ggio->gctl_length) {
872 mtx_unlock(&sc->sc_queue_mtx);
873 ggio->gctl_length = bp->bio_length;
874 ggio->gctl_error = ENOMEM;
875 goto start_end;
876 }
877 bioq_remove(&sc->sc_inqueue, bp);
878 bioq_insert_tail(&sc->sc_outqueue, bp);
879 mtx_unlock(&sc->sc_queue_mtx);
880
881 ggio->gctl_seq = (uintptr_t)bp->bio_driver1;
882 ggio->gctl_offset = bp->bio_offset;
883 ggio->gctl_length = bp->bio_length;
884
885 switch (bp->bio_cmd) {
886 case BIO_READ:
887 case BIO_DELETE:
888 case BIO_FLUSH:
889 case BIO_SPEEDUP:
890 break;
891 case BIO_WRITE:
892 error = copyout(bp->bio_data, ggio->gctl_data,
893 bp->bio_length);
894 if (error != 0) {
895 mtx_lock(&sc->sc_queue_mtx);
896 bioq_remove(&sc->sc_outqueue, bp);
897 bioq_insert_head(&sc->sc_inqueue, bp);
898 mtx_unlock(&sc->sc_queue_mtx);
899 goto start_end;
900 }
901 break;
902 }
903 start_end:
904 g_gate_release(sc);
905 return (error);
906 }
907 case G_GATE_CMD_DONE:
908 {
909 struct g_gate_ctl_io *ggio = (void *)addr;
910
911 G_GATE_CHECK_VERSION(ggio);
912 sc = g_gate_hold(ggio->gctl_unit, NULL);
913 if (sc == NULL)
914 return (ENOENT);
915 error = 0;
916 mtx_lock(&sc->sc_queue_mtx);
917 TAILQ_FOREACH(bp, &sc->sc_outqueue.queue, bio_queue) {
918 if (ggio->gctl_seq == (uintptr_t)bp->bio_driver1)
919 break;
920 }
921 if (bp != NULL) {
922 bioq_remove(&sc->sc_outqueue, bp);
923 sc->sc_queue_count--;
924 }
925 mtx_unlock(&sc->sc_queue_mtx);
926 if (bp == NULL) {
927 /*
928 * Request was probably canceled.
929 */
930 goto done_end;
931 }
932 if (ggio->gctl_error == EAGAIN) {
933 bp->bio_error = 0;
934 G_GATE_LOGREQ(1, bp, "Request desisted.");
935 mtx_lock(&sc->sc_queue_mtx);
936 sc->sc_queue_count++;
937 bioq_insert_head(&sc->sc_inqueue, bp);
938 wakeup(sc);
939 mtx_unlock(&sc->sc_queue_mtx);
940 } else {
941 bp->bio_error = ggio->gctl_error;
942 if (bp->bio_error == 0) {
943 bp->bio_completed = bp->bio_length;
944 switch (bp->bio_cmd) {
945 case BIO_READ:
946 error = copyin(ggio->gctl_data,
947 bp->bio_data, bp->bio_length);
948 if (error != 0)
949 bp->bio_error = error;
950 break;
951 case BIO_DELETE:
952 case BIO_WRITE:
953 case BIO_FLUSH:
954 case BIO_SPEEDUP:
955 break;
956 }
957 }
958 G_GATE_LOGREQ(2, bp, "Request done.");
959 g_io_deliver(bp, bp->bio_error);
960 }
961 done_end:
962 g_gate_release(sc);
963 return (error);
964 }
965 }
966 return (ENOIOCTL);
967 }
968
969 static void
970 g_gate_device(void)
971 {
972
973 status_dev = make_dev(&g_gate_cdevsw, 0x0, UID_ROOT, GID_WHEEL, 0600,
974 G_GATE_CTL_NAME);
975 }
976
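/*
 * Module event handler: allocate the unit table and create the control
 * device on load; refuse to unload while any ggate device still exists.
 */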
977 static int
978 g_gate_modevent(module_t mod, int type, void *data)
979 {
980 int error = 0;
981
982 switch (type) {
983 case MOD_LOAD:
984 mtx_init(&g_gate_units_lock, "gg_units_lock", NULL, MTX_DEF);
985 g_gate_units = malloc(g_gate_maxunits * sizeof(g_gate_units[0]),
986 M_GATE, M_WAITOK | M_ZERO);
987 g_gate_nunits = 0;
988 g_gate_device();
989 break;
990 case MOD_UNLOAD:
991 mtx_lock(&g_gate_units_lock);
992 if (g_gate_nunits > 0) {
993 mtx_unlock(&g_gate_units_lock);
994 error = EBUSY;
995 break;
996 }
997 mtx_unlock(&g_gate_units_lock);
998 mtx_destroy(&g_gate_units_lock);
999 if (status_dev != NULL)
1000 destroy_dev(status_dev);
1001 free(g_gate_units, M_GATE);
1002 break;
1003 default:
1004 return (EOPNOTSUPP);
1005 break;
1006 }
1007
1008 return (error);
1009 }
1010 static moduledata_t g_gate_module = {
1011 G_GATE_MOD_NAME,
1012 g_gate_modevent,
1013 NULL
1014 };
1015 DECLARE_MODULE(geom_gate, g_gate_module, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
1016 DECLARE_GEOM_CLASS(g_gate_class, g_gate);
1017 MODULE_VERSION(geom_gate, 0);