FreeBSD/Linux Kernel Cross Reference
sys/geom/gate/g_gate.c
1 /*-
2 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
3 * Copyright (c) 2009-2010 The FreeBSD Foundation
4 * All rights reserved.
5 *
6 * Portions of this software were developed by Pawel Jakub Dawidek
7 * under sponsorship from the FreeBSD Foundation.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD: releng/8.3/sys/geom/gate/g_gate.c 223602 2011-06-27 18:56:43Z trociny $");
33
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/bio.h>
37 #include <sys/conf.h>
38 #include <sys/kernel.h>
39 #include <sys/kthread.h>
40 #include <sys/fcntl.h>
41 #include <sys/linker.h>
42 #include <sys/lock.h>
43 #include <sys/malloc.h>
44 #include <sys/mutex.h>
45 #include <sys/proc.h>
46 #include <sys/limits.h>
47 #include <sys/queue.h>
48 #include <sys/sysctl.h>
49 #include <sys/signalvar.h>
50 #include <sys/time.h>
51 #include <machine/atomic.h>
52
53 #include <geom/geom.h>
54 #include <geom/gate/g_gate.h>
55
/* Allocation tag for all memory owned by this module. */
static MALLOC_DEFINE(M_GATE, "gg_data", "GEOM Gate Data");

SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, gate, CTLFLAG_RW, 0, "GEOM_GATE stuff");
/* Debug verbosity, settable via the kern.geom.gate.debug sysctl/tunable. */
static int g_gate_debug = 0;
TUNABLE_INT("kern.geom.gate.debug", &g_gate_debug);
SYSCTL_INT(_kern_geom_gate, OID_AUTO, debug, CTLFLAG_RW, &g_gate_debug, 0,
    "Debug level");
/* Size of the unit table below; boot-time tunable, read-only afterwards. */
static u_int g_gate_maxunits = 256;
TUNABLE_INT("kern.geom.gate.maxunits", &g_gate_maxunits);
SYSCTL_UINT(_kern_geom_gate, OID_AUTO, maxunits, CTLFLAG_RDTUN,
    &g_gate_maxunits, 0, "Maximum number of ggate devices");

struct g_class g_gate_class = {
	.name = G_GATE_CLASS_NAME,
	.version = G_VERSION,
};

/* Control device node (named G_GATE_CTL_NAME) used by the ggate daemons. */
static struct cdev *status_dev;
static d_ioctl_t g_gate_ioctl;
static struct cdevsw g_gate_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	g_gate_ioctl,
	.d_name =	G_GATE_CTL_NAME
};


/*
 * Unit number -> softc table, the number of live devices, and the mutex
 * protecting both (also used for sc_ref manipulation, see g_gate_hold()).
 */
static struct g_gate_softc **g_gate_units;
static u_int g_gate_nunits;
static struct mtx g_gate_units_lock;
86
/*
 * Tear down a ggate device: fail all queued I/O, wait for every held
 * reference to be dropped, free the unit slot and wither the geom.
 *
 * Must be called with the topology lock and g_gate_units_lock held.
 * The units lock is always dropped before returning; the topology lock
 * is temporarily released while sleeping on sc_ref but is held again on
 * return.  Returns EBUSY if the provider is still open and destruction
 * was not forced, 0 otherwise.
 */
static int
g_gate_destroy(struct g_gate_softc *sc, boolean_t force)
{
	struct g_provider *pp;
	struct g_geom *gp;
	struct bio *bp;

	g_topology_assert();
	mtx_assert(&g_gate_units_lock, MA_OWNED);
	pp = sc->sc_provider;
	/* Refuse to destroy an open provider unless forced. */
	if (!force && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		mtx_unlock(&g_gate_units_lock);
		return (EBUSY);
	}
	mtx_unlock(&g_gate_units_lock);
	mtx_lock(&sc->sc_queue_mtx);
	/* Mark the device as dying and wake any G_GATE_CMD_START sleeper. */
	if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0)
		sc->sc_flags |= G_GATE_FLAG_DESTROY;
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
	gp = pp->geom;
	pp->flags |= G_PF_WITHER;
	g_orphan_provider(pp, ENXIO);
	/* Make sure g_gate_guard() is gone before the softc is freed. */
	callout_drain(&sc->sc_callout);
	mtx_lock(&sc->sc_queue_mtx);
	/* Fail requests not yet handed to the userland daemon... */
	while ((bp = bioq_first(&sc->sc_inqueue)) != NULL) {
		bioq_remove(&sc->sc_inqueue, bp);
		sc->sc_queue_count--;
		G_GATE_LOGREQ(1, bp, "Request canceled.");
		g_io_deliver(bp, ENXIO);
	}
	/* ...and requests the daemon is currently processing. */
	while ((bp = bioq_first(&sc->sc_outqueue)) != NULL) {
		bioq_remove(&sc->sc_outqueue, bp);
		sc->sc_queue_count--;
		G_GATE_LOGREQ(1, bp, "Request canceled.");
		g_io_deliver(bp, ENXIO);
	}
	mtx_unlock(&sc->sc_queue_mtx);
	g_topology_unlock();
	mtx_lock(&g_gate_units_lock);
	/* One reference is ours. */
	sc->sc_ref--;
	/* Wait for everyone else to call g_gate_release(). */
	while (sc->sc_ref > 0)
		msleep(&sc->sc_ref, &g_gate_units_lock, 0, "gg:destroy", 0);
	g_gate_units[sc->sc_unit] = NULL;
	KASSERT(g_gate_nunits > 0, ("negative g_gate_nunits?"));
	g_gate_nunits--;
	mtx_unlock(&g_gate_units_lock);
	mtx_destroy(&sc->sc_queue_mtx);
	g_topology_lock();
	G_GATE_DEBUG(1, "Device %s destroyed.", gp->name);
	gp->softc = NULL;
	g_wither_geom(gp, ENXIO);
	sc->sc_provider = NULL;
	free(sc, M_GATE);
	return (0);
}
144
145 static int
146 g_gate_access(struct g_provider *pp, int dr, int dw, int de)
147 {
148 struct g_gate_softc *sc;
149
150 if (dr <= 0 && dw <= 0 && de <= 0)
151 return (0);
152 sc = pp->geom->softc;
153 if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
154 return (ENXIO);
155 /* XXX: Hack to allow read-only mounts. */
156 #if 0
157 if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0 && dw > 0)
158 return (EPERM);
159 #endif
160 if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0 && dr > 0)
161 return (EPERM);
162 return (0);
163 }
164
/*
 * GEOM start method: enqueue an incoming bio for pick-up by the userland
 * daemon (via G_GATE_CMD_START).  Requests are failed immediately when
 * the device is dying, the command is unsupported, a write-class command
 * arrives on a read-only device, or the incoming queue is full.
 */
static void
g_gate_start(struct bio *bp)
{
	struct g_gate_softc *sc;

	sc = bp->bio_to->geom->softc;
	if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
		g_io_deliver(bp, ENXIO);
		return;
	}
	G_GATE_LOGREQ(2, bp, "Request received.");
	switch (bp->bio_cmd) {
	case BIO_READ:
		break;
	case BIO_DELETE:
	case BIO_WRITE:
	case BIO_FLUSH:
		/* XXX: Hack to allow read-only mounts. */
		if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
			g_io_deliver(bp, EPERM);
			return;
		}
		break;
	case BIO_GETATTR:
	default:
		G_GATE_LOGREQ(2, bp, "Ignoring request.");
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}

	mtx_lock(&sc->sc_queue_mtx);
	/* Reject the request if the queue limit has been reached. */
	if (sc->sc_queue_size > 0 && sc->sc_queue_count > sc->sc_queue_size) {
		mtx_unlock(&sc->sc_queue_mtx);
		G_GATE_LOGREQ(1, bp, "Queue full, request canceled.");
		g_io_deliver(bp, ENOMEM);
		return;
	}

	/* Tag the bio with a sequence number used by CANCEL/DONE matching. */
	bp->bio_driver1 = (void *)sc->sc_seq;
	sc->sc_seq++;
	sc->sc_queue_count++;

	bioq_insert_tail(&sc->sc_inqueue, bp);
	/* Wake up a daemon sleeping in G_GATE_CMD_START, if any. */
	wakeup(sc);

	mtx_unlock(&sc->sc_queue_mtx);
}
212
213 static struct g_gate_softc *
214 g_gate_hold(int unit, const char *name)
215 {
216 struct g_gate_softc *sc = NULL;
217
218 mtx_lock(&g_gate_units_lock);
219 if (unit >= 0 && unit < g_gate_maxunits)
220 sc = g_gate_units[unit];
221 else if (unit == G_GATE_NAME_GIVEN) {
222 KASSERT(name != NULL, ("name is NULL"));
223 for (unit = 0; unit < g_gate_maxunits; unit++) {
224 if (g_gate_units[unit] == NULL)
225 continue;
226 if (strcmp(name,
227 g_gate_units[unit]->sc_provider->name) != 0) {
228 continue;
229 }
230 sc = g_gate_units[unit];
231 break;
232 }
233 }
234 if (sc != NULL)
235 sc->sc_ref++;
236 mtx_unlock(&g_gate_units_lock);
237 return (sc);
238 }
239
240 static void
241 g_gate_release(struct g_gate_softc *sc)
242 {
243
244 g_topology_assert_not();
245 mtx_lock(&g_gate_units_lock);
246 sc->sc_ref--;
247 KASSERT(sc->sc_ref >= 0, ("Negative sc_ref for %s.", sc->sc_name));
248 if (sc->sc_ref == 0 && (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
249 wakeup(&sc->sc_ref);
250 mtx_unlock(&g_gate_units_lock);
251 }
252
253 static int
254 g_gate_getunit(int unit, int *errorp)
255 {
256
257 mtx_assert(&g_gate_units_lock, MA_OWNED);
258 if (unit >= 0) {
259 if (unit >= g_gate_maxunits)
260 *errorp = EINVAL;
261 else if (g_gate_units[unit] == NULL)
262 return (unit);
263 else
264 *errorp = EEXIST;
265 } else {
266 for (unit = 0; unit < g_gate_maxunits; unit++) {
267 if (g_gate_units[unit] == NULL)
268 return (unit);
269 }
270 *errorp = ENFILE;
271 }
272 return (-1);
273 }
274
/*
 * Watchdog callout, armed by g_gate_create() when the device was given a
 * non-zero timeout: fail with EIO any request that has been sitting on
 * either queue for 5 seconds or more, then re-arm itself (every
 * sc_timeout seconds) unless the device is being destroyed.
 */
static void
g_gate_guard(void *arg)
{
	struct g_gate_softc *sc;
	struct bintime curtime;
	struct bio *bp, *bp2;

	sc = arg;
	binuptime(&curtime);
	/* Hold a reference so the softc cannot be freed under us. */
	g_gate_hold(sc->sc_unit, NULL);
	mtx_lock(&sc->sc_queue_mtx);
	/* Requests not yet picked up by the userland daemon. */
	TAILQ_FOREACH_SAFE(bp, &sc->sc_inqueue.queue, bio_queue, bp2) {
		if (curtime.sec - bp->bio_t0.sec < 5)
			continue;
		bioq_remove(&sc->sc_inqueue, bp);
		sc->sc_queue_count--;
		G_GATE_LOGREQ(1, bp, "Request timeout.");
		g_io_deliver(bp, EIO);
	}
	/* Requests currently being processed by the daemon. */
	TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, bp2) {
		if (curtime.sec - bp->bio_t0.sec < 5)
			continue;
		bioq_remove(&sc->sc_outqueue, bp);
		sc->sc_queue_count--;
		G_GATE_LOGREQ(1, bp, "Request timeout.");
		g_io_deliver(bp, EIO);
	}
	mtx_unlock(&sc->sc_queue_mtx);
	if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0) {
		callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
		    g_gate_guard, sc);
	}
	g_gate_release(sc);
}
309
/*
 * GEOM dumpconf method: emit the per-device XML fragment for the
 * configuration dump.  Output is produced only at the geom level
 * (pp == NULL && cp == NULL).
 */
static void
g_gate_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_gate_softc *sc;

	sc = gp->softc;
	if (sc == NULL || pp != NULL || cp != NULL)
		return;
	/* Keep the softc alive while we format it. */
	g_gate_hold(sc->sc_unit, NULL);
	if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent, "read-only");
	} else if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0) {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent,
		    "write-only");
	} else {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent,
		    "read-write");
	}
	sbuf_printf(sb, "%s<timeout>%u</timeout>\n", indent, sc->sc_timeout);
	sbuf_printf(sb, "%s<info>%s</info>\n", indent, sc->sc_info);
	sbuf_printf(sb, "%s<queue_count>%u</queue_count>\n", indent,
	    sc->sc_queue_count);
	sbuf_printf(sb, "%s<queue_size>%u</queue_size>\n", indent,
	    sc->sc_queue_size);
	sbuf_printf(sb, "%s<ref>%u</ref>\n", indent, sc->sc_ref);
	sbuf_printf(sb, "%s<unit>%d</unit>\n", indent, sc->sc_unit);
	/* g_gate_release() asserts the topology lock is not held. */
	g_topology_unlock();
	g_gate_release(sc);
	g_topology_lock();
}
341
342 static int
343 g_gate_create(struct g_gate_ctl_create *ggio)
344 {
345 struct g_gate_softc *sc;
346 struct g_geom *gp;
347 struct g_provider *pp;
348 char name[NAME_MAX];
349 int error = 0, unit;
350
351 if (ggio->gctl_mediasize == 0) {
352 G_GATE_DEBUG(1, "Invalid media size.");
353 return (EINVAL);
354 }
355 if (ggio->gctl_sectorsize > 0 && !powerof2(ggio->gctl_sectorsize)) {
356 G_GATE_DEBUG(1, "Invalid sector size.");
357 return (EINVAL);
358 }
359 if ((ggio->gctl_mediasize % ggio->gctl_sectorsize) != 0) {
360 G_GATE_DEBUG(1, "Invalid media size.");
361 return (EINVAL);
362 }
363 if ((ggio->gctl_flags & G_GATE_FLAG_READONLY) != 0 &&
364 (ggio->gctl_flags & G_GATE_FLAG_WRITEONLY) != 0) {
365 G_GATE_DEBUG(1, "Invalid flags.");
366 return (EINVAL);
367 }
368 if (ggio->gctl_unit != G_GATE_UNIT_AUTO &&
369 ggio->gctl_unit != G_GATE_NAME_GIVEN &&
370 ggio->gctl_unit < 0) {
371 G_GATE_DEBUG(1, "Invalid unit number.");
372 return (EINVAL);
373 }
374 if (ggio->gctl_unit == G_GATE_NAME_GIVEN &&
375 ggio->gctl_name[0] == '\0') {
376 G_GATE_DEBUG(1, "No device name.");
377 return (EINVAL);
378 }
379
380 sc = malloc(sizeof(*sc), M_GATE, M_WAITOK | M_ZERO);
381 sc->sc_flags = (ggio->gctl_flags & G_GATE_USERFLAGS);
382 strlcpy(sc->sc_info, ggio->gctl_info, sizeof(sc->sc_info));
383 sc->sc_seq = 1;
384 bioq_init(&sc->sc_inqueue);
385 bioq_init(&sc->sc_outqueue);
386 mtx_init(&sc->sc_queue_mtx, "gg:queue", NULL, MTX_DEF);
387 sc->sc_queue_count = 0;
388 sc->sc_queue_size = ggio->gctl_maxcount;
389 if (sc->sc_queue_size > G_GATE_MAX_QUEUE_SIZE)
390 sc->sc_queue_size = G_GATE_MAX_QUEUE_SIZE;
391 sc->sc_timeout = ggio->gctl_timeout;
392 callout_init(&sc->sc_callout, CALLOUT_MPSAFE);
393 mtx_lock(&g_gate_units_lock);
394 sc->sc_unit = g_gate_getunit(ggio->gctl_unit, &error);
395 if (sc->sc_unit < 0) {
396 mtx_unlock(&g_gate_units_lock);
397 mtx_destroy(&sc->sc_queue_mtx);
398 free(sc, M_GATE);
399 return (error);
400 }
401 if (ggio->gctl_unit == G_GATE_NAME_GIVEN)
402 snprintf(name, sizeof(name), "%s", ggio->gctl_name);
403 else {
404 snprintf(name, sizeof(name), "%s%d", G_GATE_PROVIDER_NAME,
405 sc->sc_unit);
406 }
407 /* Check for name collision. */
408 for (unit = 0; unit < g_gate_maxunits; unit++) {
409 if (g_gate_units[unit] == NULL)
410 continue;
411 if (strcmp(name, g_gate_units[unit]->sc_name) != 0)
412 continue;
413 mtx_unlock(&g_gate_units_lock);
414 mtx_destroy(&sc->sc_queue_mtx);
415 free(sc, M_GATE);
416 return (EEXIST);
417 }
418 sc->sc_name = name;
419 g_gate_units[sc->sc_unit] = sc;
420 g_gate_nunits++;
421 mtx_unlock(&g_gate_units_lock);
422
423 ggio->gctl_unit = sc->sc_unit;
424
425 g_topology_lock();
426 gp = g_new_geomf(&g_gate_class, "%s", name);
427 gp->start = g_gate_start;
428 gp->access = g_gate_access;
429 gp->dumpconf = g_gate_dumpconf;
430 gp->softc = sc;
431 pp = g_new_providerf(gp, "%s", name);
432 pp->mediasize = ggio->gctl_mediasize;
433 pp->sectorsize = ggio->gctl_sectorsize;
434 sc->sc_provider = pp;
435 g_error_provider(pp, 0);
436 g_topology_unlock();
437 mtx_lock(&g_gate_units_lock);
438 sc->sc_name = sc->sc_provider->name;
439 mtx_unlock(&g_gate_units_lock);
440 G_GATE_DEBUG(1, "Device %s created.", gp->name);
441
442 if (sc->sc_timeout > 0) {
443 callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
444 g_gate_guard, sc);
445 }
446 return (0);
447 }
448
/*
 * Verify that the control structure passed from userland was built
 * against the same G_GATE_VERSION as this kernel module; on mismatch,
 * log and bail out of the enclosing function with EINVAL.
 *
 * Note: the expansion contains a 'return' statement, so this macro may
 * only be used directly inside g_gate_ioctl().
 */
#define	G_GATE_CHECK_VERSION(ggio)	do {				\
	if ((ggio)->gctl_version != G_GATE_VERSION) {			\
		printf("Version mismatch %d != %d.\n",			\
		    (ggio)->gctl_version, G_GATE_VERSION);		\
		return (EINVAL);					\
	}								\
} while (0)
/*
 * Ioctl handler for the ggate control device; this is the entire
 * userland interface.  CREATE/DESTROY manage devices; CANCEL, START and
 * DONE implement the request hand-off cycle used by the ggate daemons:
 * START hands a queued bio out to the daemon (moving it from the
 * incoming to the outgoing queue), DONE completes it, and CANCEL moves
 * requests back to the incoming queue to be handed out again.
 */
static int
g_gate_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	struct g_gate_softc *sc;
	struct bio *bp;
	int error = 0;

	G_GATE_DEBUG(4, "ioctl(%s, %lx, %p, %x, %p)", devtoname(dev), cmd, addr,
	    flags, td);

	switch (cmd) {
	case G_GATE_CMD_CREATE:
	    {
		struct g_gate_ctl_create *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		error = g_gate_create(ggio);
		/*
		 * Reset TDP_GEOM flag.
		 * There are pending events for sure, because we just created
		 * new provider and other classes want to taste it, but we
		 * cannot answer on I/O requests until we're here.
		 */
		td->td_pflags &= ~TDP_GEOM;
		return (error);
	    }
	case G_GATE_CMD_DESTROY:
	    {
		struct g_gate_ctl_destroy *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, ggio->gctl_name);
		if (sc == NULL)
			return (ENXIO);
		g_topology_lock();
		mtx_lock(&g_gate_units_lock);
		/* On success g_gate_destroy() consumes our reference. */
		error = g_gate_destroy(sc, ggio->gctl_force);
		g_topology_unlock();
		if (error != 0)
			g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_CANCEL:
	    {
		struct g_gate_ctl_cancel *ggio = (void *)addr;
		struct bio *tbp, *lbp;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, ggio->gctl_name);
		if (sc == NULL)
			return (ENXIO);
		lbp = NULL;
		mtx_lock(&sc->sc_queue_mtx);
		/*
		 * Move the request matching gctl_seq (or every request when
		 * gctl_seq is 0) from the outgoing queue back onto the
		 * incoming queue, so it will be handed out again.
		 */
		TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, tbp) {
			if (ggio->gctl_seq == 0 ||
			    ggio->gctl_seq == (uintptr_t)bp->bio_driver1) {
				G_GATE_LOGREQ(1, bp, "Request canceled.");
				bioq_remove(&sc->sc_outqueue, bp);
				/*
				 * Be sure to put requests back onto incoming
				 * queue in the proper order.
				 */
				if (lbp == NULL)
					bioq_insert_head(&sc->sc_inqueue, bp);
				else {
					TAILQ_INSERT_AFTER(&sc->sc_inqueue.queue,
					    lbp, bp, bio_queue);
				}
				lbp = bp;
				/*
				 * If only one request was canceled, leave now.
				 */
				if (ggio->gctl_seq != 0)
					break;
			}
		}
		if (ggio->gctl_unit == G_GATE_NAME_GIVEN)
			ggio->gctl_unit = sc->sc_unit;
		mtx_unlock(&sc->sc_queue_mtx);
		g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_START:
	    {
		struct g_gate_ctl_io *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, NULL);
		if (sc == NULL)
			return (ENXIO);
		error = 0;
		/*
		 * Wait until a request appears on the incoming queue.  Note
		 * that on success the loop is left with sc_queue_mtx held.
		 */
		for (;;) {
			mtx_lock(&sc->sc_queue_mtx);
			bp = bioq_first(&sc->sc_inqueue);
			if (bp != NULL)
				break;
			if ((sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
				ggio->gctl_error = ECANCELED;
				mtx_unlock(&sc->sc_queue_mtx);
				goto start_end;
			}
			/* PDROP: the mutex is released while sleeping. */
			if (msleep(sc, &sc->sc_queue_mtx,
			    PPAUSE | PDROP | PCATCH, "ggwait", 0) != 0) {
				ggio->gctl_error = ECANCELED;
				goto start_end;
			}
		}
		ggio->gctl_cmd = bp->bio_cmd;
		/* The userland buffer is too small; ask for a bigger one. */
		if ((bp->bio_cmd == BIO_DELETE || bp->bio_cmd == BIO_WRITE) &&
		    bp->bio_length > ggio->gctl_length) {
			mtx_unlock(&sc->sc_queue_mtx);
			ggio->gctl_length = bp->bio_length;
			ggio->gctl_error = ENOMEM;
			goto start_end;
		}
		bioq_remove(&sc->sc_inqueue, bp);
		/* Park the request on the outgoing queue until CMD_DONE. */
		bioq_insert_tail(&sc->sc_outqueue, bp);
		mtx_unlock(&sc->sc_queue_mtx);

		ggio->gctl_seq = (uintptr_t)bp->bio_driver1;
		ggio->gctl_offset = bp->bio_offset;
		ggio->gctl_length = bp->bio_length;

		switch (bp->bio_cmd) {
		case BIO_READ:
		case BIO_DELETE:
		case BIO_FLUSH:
			break;
		case BIO_WRITE:
			/* Hand the data to be written out to userland. */
			error = copyout(bp->bio_data, ggio->gctl_data,
			    bp->bio_length);
			if (error != 0) {
				/* Put the request back for another try. */
				mtx_lock(&sc->sc_queue_mtx);
				bioq_remove(&sc->sc_outqueue, bp);
				bioq_insert_head(&sc->sc_inqueue, bp);
				mtx_unlock(&sc->sc_queue_mtx);
				goto start_end;
			}
			break;
		}
start_end:
		g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_DONE:
	    {
		struct g_gate_ctl_io *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, NULL);
		if (sc == NULL)
			return (ENOENT);
		error = 0;
		mtx_lock(&sc->sc_queue_mtx);
		/* Find the request this completion belongs to. */
		TAILQ_FOREACH(bp, &sc->sc_outqueue.queue, bio_queue) {
			if (ggio->gctl_seq == (uintptr_t)bp->bio_driver1)
				break;
		}
		if (bp != NULL) {
			bioq_remove(&sc->sc_outqueue, bp);
			sc->sc_queue_count--;
		}
		mtx_unlock(&sc->sc_queue_mtx);
		if (bp == NULL) {
			/*
			 * Request was probably canceled.
			 */
			goto done_end;
		}
		if (ggio->gctl_error == EAGAIN) {
			/* The daemon asks for a retry: requeue the request. */
			bp->bio_error = 0;
			G_GATE_LOGREQ(1, bp, "Request desisted.");
			mtx_lock(&sc->sc_queue_mtx);
			sc->sc_queue_count++;
			bioq_insert_head(&sc->sc_inqueue, bp);
			wakeup(sc);
			mtx_unlock(&sc->sc_queue_mtx);
		} else {
			bp->bio_error = ggio->gctl_error;
			if (bp->bio_error == 0) {
				bp->bio_completed = bp->bio_length;
				switch (bp->bio_cmd) {
				case BIO_READ:
					/* Pull the read data in from userland. */
					error = copyin(ggio->gctl_data,
					    bp->bio_data, bp->bio_length);
					if (error != 0)
						bp->bio_error = error;
					break;
				case BIO_DELETE:
				case BIO_WRITE:
				case BIO_FLUSH:
					break;
				}
			}
			G_GATE_LOGREQ(2, bp, "Request done.");
			g_io_deliver(bp, bp->bio_error);
		}
done_end:
		g_gate_release(sc);
		return (error);
	    }
	}
	return (ENOIOCTL);
}
660
/*
 * Create the control device node (named G_GATE_CTL_NAME) that the
 * userland ggate daemons open to issue the ioctl commands above.
 */
static void
g_gate_device(void)
{

	status_dev = make_dev(&g_gate_cdevsw, 0x0, UID_ROOT, GID_WHEEL, 0600,
	    G_GATE_CTL_NAME);
}
668
669 static int
670 g_gate_modevent(module_t mod, int type, void *data)
671 {
672 int error = 0;
673
674 switch (type) {
675 case MOD_LOAD:
676 mtx_init(&g_gate_units_lock, "gg_units_lock", NULL, MTX_DEF);
677 g_gate_units = malloc(g_gate_maxunits * sizeof(g_gate_units[0]),
678 M_GATE, M_WAITOK | M_ZERO);
679 g_gate_nunits = 0;
680 g_gate_device();
681 break;
682 case MOD_UNLOAD:
683 mtx_lock(&g_gate_units_lock);
684 if (g_gate_nunits > 0) {
685 mtx_unlock(&g_gate_units_lock);
686 error = EBUSY;
687 break;
688 }
689 mtx_unlock(&g_gate_units_lock);
690 mtx_destroy(&g_gate_units_lock);
691 if (status_dev != 0)
692 destroy_dev(status_dev);
693 free(g_gate_units, M_GATE);
694 break;
695 default:
696 return (EOPNOTSUPP);
697 break;
698 }
699
700 return (error);
701 }
/* Module glue: register the ggate module and its GEOM class. */
static moduledata_t g_gate_module = {
	G_GATE_MOD_NAME,
	g_gate_modevent,
	NULL
};
DECLARE_MODULE(geom_gate, g_gate_module, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
DECLARE_GEOM_CLASS(g_gate_class, g_gate);
Cache object: 3cc03c30d75fe0cd90c1259c0d661519
|