1 /*-
2 * Copyright (c) 2002 Dima Dorfman.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 * $FreeBSD: releng/8.0/sys/fs/devfs/devfs_rule.c 179926 2008-06-22 14:34:38Z gonzo $
27 */
28
29 /*
30 * DEVFS ruleset implementation.
31 *
32 * A note on terminology: To "run" a rule on a dirent is to take the
33 * prescribed action; to "apply" a rule is to check whether it matches
 * a dirent and run it if it does.
35 *
36 * A note on locking: Only foreign entry points (non-static functions)
37 * should deal with locking. Everything else assumes we already hold
38 * the required kind of lock.
39 *
40 * A note on namespace: devfs_rules_* are the non-static functions for
41 * the entire "ruleset" subsystem, devfs_rule_* are the static
42 * functions that operate on rules, and devfs_ruleset_* are the static
43 * functions that operate on rulesets. The line between the last two
44 * isn't always clear, but the guideline is still useful.
45 *
46 * A note on "special" identifiers: Ruleset 0 is the NULL, or empty,
47 * ruleset; it cannot be deleted or changed in any way. This may be
 * assumed inside the code; e.g., a ruleset of 0 may be interpreted to
49 * mean "no ruleset". The interpretation of rule 0 is
50 * command-dependent, but in no case is there a real rule with number
51 * 0.
52 *
53 * A note on errno codes: To make it easier for the userland to tell
54 * what went wrong, we sometimes use errno codes that are not entirely
55 * appropriate for the error but that would be less ambiguous than the
56 * appropriate "generic" code. For example, when we can't find a
57 * ruleset, we return ESRCH instead of ENOENT (except in
58 * DEVFSIO_{R,S}GETNEXT, where a nonexistent ruleset means "end of
59 * list", and the userland expects ENOENT to be this indicator); this
60 * way, when an operation fails, it's clear that what couldn't be
61 * found is a ruleset and not a rule (well, it's clear to those who
62 * know the convention).
63 */
64
65 #include <sys/param.h>
66 #include <sys/systm.h>
67 #include <sys/conf.h>
68 #include <sys/kernel.h>
69 #include <sys/malloc.h>
70 #include <sys/priv.h>
71 #include <sys/dirent.h>
72 #include <sys/ioccom.h>
73 #include <sys/lock.h>
74 #include <sys/sx.h>
75
76 #include <fs/devfs/devfs.h>
77 #include <fs/devfs/devfs_int.h>
78
/*
 * Kernel version of devfs_rule.  Wraps the userland-visible rule with
 * the list linkage and back-pointer the kernel needs.
 */
struct devfs_krule {
	TAILQ_ENTRY(devfs_krule) dk_list;	/* Entry in dk_ruleset->ds_rules. */
	struct devfs_ruleset *dk_ruleset;	/* Ruleset this rule belongs to. */
	struct devfs_rule dk_rule;		/* The rule as seen by userland. */
};

TAILQ_HEAD(rulehead, devfs_krule);
static MALLOC_DEFINE(M_DEVFSRULE, "DEVFS_RULE", "DEVFS rule storage");

/*
 * Structure to describe a ruleset.
 */
struct devfs_ruleset {
	TAILQ_ENTRY(devfs_ruleset) ds_list;	/* Entry in devfs_rulesets. */
	struct rulehead ds_rules;		/* Rules, kept sorted by rule number. */
	devfs_rsnum ds_number;			/* Ruleset number; never 0. */
	int ds_refcount;			/* References from DRA_INCSET rules
						   and from mount-points using us. */
};
100
static devfs_rid devfs_rid_input(devfs_rid rid, struct devfs_mount *dm);

/* Static functions operating on individual rules. */
static void devfs_rule_applyde_recursive(struct devfs_krule *dk,
		struct devfs_dirent *de);
static void devfs_rule_applydm(struct devfs_krule *dk, struct devfs_mount *dm);
static int  devfs_rule_autonumber(struct devfs_ruleset *ds, devfs_rnum *rnp);
static struct devfs_krule *devfs_rule_byid(devfs_rid rid);
static int  devfs_rule_delete(struct devfs_krule *dkp);
static struct cdev *devfs_rule_getdev(struct devfs_dirent *de);
static int  devfs_rule_input(struct devfs_rule *dr, struct devfs_mount *dm);
static int  devfs_rule_insert(struct devfs_rule *dr);
static int  devfs_rule_match(struct devfs_krule *dk, struct devfs_dirent *de);
static int  devfs_rule_matchpath(struct devfs_krule *dk,
		struct devfs_dirent *de);
static void devfs_rule_run(struct devfs_krule *dk, struct devfs_dirent *de, unsigned depth);

/* Static functions operating on whole rulesets. */
static void devfs_ruleset_applyde(struct devfs_ruleset *ds,
		struct devfs_dirent *de, unsigned depth);
static void devfs_ruleset_applydm(struct devfs_ruleset *ds,
		struct devfs_mount *dm);
static struct devfs_ruleset *devfs_ruleset_bynum(devfs_rsnum rsnum);
static struct devfs_ruleset *devfs_ruleset_create(devfs_rsnum rsnum);
static void devfs_ruleset_reap(struct devfs_ruleset *dsp);
static int  devfs_ruleset_use(devfs_rsnum rsnum, struct devfs_mount *dm);

/* Protects the global ruleset list and all rulesets/rules on it. */
static struct sx sx_rules;
SX_SYSINIT(sx_rules, &sx_rules, "DEVFS ruleset lock");

/* All existing rulesets, kept sorted by ruleset number. */
static TAILQ_HEAD(, devfs_ruleset) devfs_rulesets =
    TAILQ_HEAD_INITIALIZER(devfs_rulesets);
131
132 /*
133 * Called to apply the proper rules for 'de' before it can be
134 * exposed to the userland. This should be called with an exclusive
135 * lock on dm in case we need to run anything.
136 */
137 void
138 devfs_rules_apply(struct devfs_mount *dm, struct devfs_dirent *de)
139 {
140 struct devfs_ruleset *ds;
141
142 if (dm->dm_ruleset == 0)
143 return;
144 sx_slock(&sx_rules);
145 ds = devfs_ruleset_bynum(dm->dm_ruleset);
146 KASSERT(ds != NULL, ("mount-point has NULL ruleset"));
147 devfs_ruleset_applyde(ds, de, devfs_rule_depth);
148 sx_sunlock(&sx_rules);
149 }
150
/*
 * Rule subsystem ioctl hook.
 *
 * Dispatches the DEVFSIO_* commands.  "data" was copied in by the
 * generic ioctl layer and points at a devfs_rule, devfs_rid, or
 * devfs_rsnum depending on "cmd".  Results (for the GETNEXT
 * commands) are written back through "data" as well.
 */
int
devfs_rules_ioctl(struct devfs_mount *dm, u_long cmd, caddr_t data, struct thread *td)
{
	struct devfs_ruleset *ds;
	struct devfs_krule *dk;
	struct devfs_rule *dr;
	devfs_rsnum rsnum;
	devfs_rnum rnum;
	devfs_rid rid;
	int error;

	sx_assert(&dm->dm_lock, SX_XLOCKED);

	/*
	 * XXX: This returns an error regardless of whether we actually
	 * support the cmd or not.
	 *
	 * We could make this privileges finer grained if desired.
	 */
	error = priv_check(td, PRIV_DEVFS_RULE);
	if (error)
		return (error);

	sx_xlock(&sx_rules);

	switch (cmd) {
	case DEVFSIO_RADD:
		/* Add a new rule; a rule number of 0 means auto-number. */
		dr = (struct devfs_rule *)data;
		error = devfs_rule_input(dr, dm);
		if (error != 0)
			break;
		dk = devfs_rule_byid(dr->dr_id);
		if (dk != NULL) {
			error = EEXIST;
			break;
		}
		/* Ruleset 0 must stay empty; refuse additions to it. */
		if (rid2rsn(dr->dr_id) == 0) {
			error = EIO;
			break;
		}
		error = devfs_rule_insert(dr);
		break;
	case DEVFSIO_RAPPLY:
		/* Run a rule supplied by userland once, without storing it. */
		dr = (struct devfs_rule *)data;
		error = devfs_rule_input(dr, dm);
		if (error != 0)
			break;

		/*
		 * This is one of many possible hackish
		 * implementations.  The primary contender is an
		 * implementation where the rule we read in is
		 * temporarily inserted into some ruleset, perhaps
		 * with a hypothetical DRO_NOAUTO flag so that it
		 * doesn't get used where it isn't intended, and
		 * applied in the normal way.  This can be done in the
		 * userland (DEVFSIO_ADD, DEVFSIO_APPLYID,
		 * DEVFSIO_DEL) or in the kernel; either way it breaks
		 * some corner case assumptions in other parts of the
		 * code (not that this implementation doesn't do
		 * that).
		 */
		if (dr->dr_iacts & DRA_INCSET &&
		    devfs_ruleset_bynum(dr->dr_incset) == NULL) {
			error = ESRCH;
			break;
		}
		/* Wrap the rule in a transient krule for the apply pass. */
		dk = malloc(sizeof(*dk), M_TEMP, M_WAITOK | M_ZERO);
		memcpy(&dk->dk_rule, dr, sizeof(*dr));
		devfs_rule_applydm(dk, dm);
		free(dk, M_TEMP);
		break;
	case DEVFSIO_RAPPLYID:
		/* Run an already-stored rule, identified by rule id. */
		rid = *(devfs_rid *)data;
		rid = devfs_rid_input(rid, dm);
		dk = devfs_rule_byid(rid);
		if (dk == NULL) {
			error = ENOENT;
			break;
		}
		devfs_rule_applydm(dk, dm);
		break;
	case DEVFSIO_RDEL:
		/* Delete a stored rule by id. */
		rid = *(devfs_rid *)data;
		rid = devfs_rid_input(rid, dm);
		dk = devfs_rule_byid(rid);
		if (dk == NULL) {
			error = ENOENT;
			break;
		}
		ds = dk->dk_ruleset;
		error = devfs_rule_delete(dk);
		break;
	case DEVFSIO_RGETNEXT:
		/* Return the first rule numbered above dr_id's rule number. */
		dr = (struct devfs_rule *)data;
		error = devfs_rule_input(dr, dm);
		if (error != 0)
			break;
		/*
		 * We can't use devfs_rule_byid() here since that
		 * requires the rule specified to exist, but we want
		 * getnext(N) to work whether there is a rule N or not
		 * (specifically, getnext(0) must work, but we should
		 * never have a rule 0 since the add command
		 * interprets 0 to mean "auto-number").
		 */
		ds = devfs_ruleset_bynum(rid2rsn(dr->dr_id));
		if (ds == NULL) {
			error = ENOENT;
			break;
		}
		rnum = rid2rn(dr->dr_id);
		/* ds_rules is sorted, so the first hit is the successor. */
		TAILQ_FOREACH(dk, &ds->ds_rules, dk_list) {
			if (rid2rn(dk->dk_rule.dr_id) > rnum)
				break;
		}
		if (dk == NULL) {
			error = ENOENT;	/* End of list (see header comment). */
			break;
		}
		memcpy(dr, &dk->dk_rule, sizeof(*dr));
		break;
	case DEVFSIO_SUSE:
		/* Make a ruleset the active one for this mount. */
		rsnum = *(devfs_rsnum *)data;
		error = devfs_ruleset_use(rsnum, dm);
		break;
	case DEVFSIO_SAPPLY:
		/* Run every rule of a ruleset over the whole mount. */
		rsnum = *(devfs_rsnum *)data;
		rsnum = rid2rsn(devfs_rid_input(mkrid(rsnum, 0), dm));
		ds = devfs_ruleset_bynum(rsnum);
		if (ds == NULL) {
			error = ESRCH;
			break;
		}
		devfs_ruleset_applydm(ds, dm);
		break;
	case DEVFSIO_SGETNEXT:
		/* Return the first ruleset numbered above *data. */
		rsnum = *(devfs_rsnum *)data;
		TAILQ_FOREACH(ds, &devfs_rulesets, ds_list) {
			if (ds->ds_number > rsnum)
				break;
		}
		if (ds == NULL) {
			error = ENOENT;	/* End of list (see header comment). */
			break;
		}
		*(devfs_rsnum *)data = ds->ds_number;
		break;
	default:
		error = ENOIOCTL;
		break;
	}

	sx_xunlock(&sx_rules);
	return (error);
}
310
311 /*
312 * Adjust the rule identifier to use the ruleset of dm if one isn't
313 * explicitly specified.
314 *
315 * Note that after this operation, rid2rsn(rid) might still be 0, and
316 * that's okay; ruleset 0 is a valid ruleset, but when it's read in
317 * from the userland, it means "current ruleset for this mount-point".
318 */
319 static devfs_rid
320 devfs_rid_input(devfs_rid rid, struct devfs_mount *dm)
321 {
322
323 if (rid2rsn(rid) == 0)
324 return (mkrid(dm->dm_ruleset, rid2rn(rid)));
325 else
326 return (rid);
327 }
328
/*
 * Apply dk to de and everything under de.
 *
 * XXX: This method needs a function call for every nested
 * subdirectory in a devfs mount.  If we plan to have many of these,
 * we might eventually run out of kernel stack space.
 * XXX: a linear search could be done through the cdev list instead.
 */
static void
devfs_rule_applyde_recursive(struct devfs_krule *dk, struct devfs_dirent *de)
{
	struct devfs_dirent *de2;

	/* Depth-first: descend into children before running on de itself. */
	TAILQ_FOREACH(de2, &de->de_dlist, de_list)
		devfs_rule_applyde_recursive(dk, de2);
	devfs_rule_run(dk, de, devfs_rule_depth);
}
346
/*
 * Apply dk to all entries in dm, starting at the mount's root dirent.
 */
static void
devfs_rule_applydm(struct devfs_krule *dk, struct devfs_mount *dm)
{

	devfs_rule_applyde_recursive(dk, dm->dm_rootdir);
}
356
357 /*
358 * Automatically select a number for a new rule in ds, and write the
359 * result into rnump.
360 */
361 static int
362 devfs_rule_autonumber(struct devfs_ruleset *ds, devfs_rnum *rnump)
363 {
364 struct devfs_krule *dk;
365
366 /* Find the last rule. */
367 dk = TAILQ_LAST(&ds->ds_rules, rulehead);
368 if (dk == NULL)
369 *rnump = 100;
370 else {
371 *rnump = rid2rn(dk->dk_rule.dr_id) + 100;
372 /* Detect overflow. */
373 if (*rnump < rid2rn(dk->dk_rule.dr_id))
374 return (ERANGE);
375 }
376 KASSERT(devfs_rule_byid(mkrid(ds->ds_number, *rnump)) == NULL,
377 ("autonumbering resulted in an already existing rule"));
378 return (0);
379 }
380
381 /*
382 * Find a krule by id.
383 */
384 static struct devfs_krule *
385 devfs_rule_byid(devfs_rid rid)
386 {
387 struct devfs_ruleset *ds;
388 struct devfs_krule *dk;
389 devfs_rnum rn;
390
391 rn = rid2rn(rid);
392 ds = devfs_ruleset_bynum(rid2rsn(rid));
393 if (ds == NULL)
394 return (NULL);
395 TAILQ_FOREACH(dk, &ds->ds_rules, dk_list) {
396 if (rid2rn(dk->dk_rule.dr_id) == rn)
397 return (dk);
398 else if (rid2rn(dk->dk_rule.dr_id) > rn)
399 break;
400 }
401 return (NULL);
402 }
403
/*
 * Remove dk from any lists it may be on and free the memory
 * associated with it.  Always returns 0 (callers pass the value
 * through as an errno).
 */
static int
devfs_rule_delete(struct devfs_krule *dk)
{
	struct devfs_ruleset *ds;

	/* Drop the reference this rule held on its included ruleset. */
	if (dk->dk_rule.dr_iacts & DRA_INCSET) {
		ds = devfs_ruleset_bynum(dk->dk_rule.dr_incset);
		KASSERT(ds != NULL, ("DRA_INCSET but bad dr_incset"));
		--ds->ds_refcount;
		devfs_ruleset_reap(ds);
	}
	/* Unlink from the owning ruleset, which may now be reapable. */
	ds = dk->dk_ruleset;
	TAILQ_REMOVE(&ds->ds_rules, dk, dk_list);
	devfs_ruleset_reap(ds);
	free(dk, M_DEVFSRULE);
	return (0);
}
425
426 /*
427 * Get a struct cdev *corresponding to de so we can try to match rules based
428 * on it. If this routine returns NULL, there is no struct cdev *associated
429 * with the dirent (symlinks and directories don't have dev_ts), and
430 * the caller should assume that any critera dependent on a dev_t
431 * don't match.
432 */
433 static struct cdev *
434 devfs_rule_getdev(struct devfs_dirent *de)
435 {
436
437 if (de->de_cdp == NULL)
438 return (NULL);
439 if (de->de_cdp->cdp_flags & CDP_ACTIVE)
440 return (&de->de_cdp->cdp_c);
441 else
442 return (NULL);
443 }
444
/*
 * Do what we need to do to a rule that we just loaded from the
 * userland.  In particular, we need to check the magic, and adjust
 * the ruleset appropriately if desired.
 */
static int
devfs_rule_input(struct devfs_rule *dr, struct devfs_mount *dm)
{

	/* ERPCMISMATCH: userland and kernel disagree on the rule ABI. */
	if (dr->dr_magic != DEVFS_MAGIC)
		return (ERPCMISMATCH);
	dr->dr_id = devfs_rid_input(dr->dr_id, dm);
	return (0);
}
459
460 /*
461 * Import dr into the appropriate place in the kernel (i.e., make a
462 * krule). The value of dr is copied, so the pointer may be destroyed
463 * after this call completes.
464 */
465 static int
466 devfs_rule_insert(struct devfs_rule *dr)
467 {
468 struct devfs_ruleset *ds, *dsi;
469 struct devfs_krule *k1;
470 struct devfs_krule *dk;
471 devfs_rsnum rsnum;
472 devfs_rnum dkrn;
473 int error;
474
475 /*
476 * This stuff seems out of place here, but we want to do it as
477 * soon as possible so that if it fails, we don't have to roll
478 * back any changes we already made (e.g., ruleset creation).
479 */
480 if (dr->dr_iacts & DRA_INCSET) {
481 dsi = devfs_ruleset_bynum(dr->dr_incset);
482 if (dsi == NULL)
483 return (ESRCH);
484 } else
485 dsi = NULL;
486
487 rsnum = rid2rsn(dr->dr_id);
488 KASSERT(rsnum != 0, ("Inserting into ruleset zero"));
489
490 ds = devfs_ruleset_bynum(rsnum);
491 if (ds == NULL)
492 ds = devfs_ruleset_create(rsnum);
493 dkrn = rid2rn(dr->dr_id);
494 if (dkrn == 0) {
495 error = devfs_rule_autonumber(ds, &dkrn);
496 if (error != 0) {
497 devfs_ruleset_reap(ds);
498 return (error);
499 }
500 }
501
502 dk = malloc(sizeof(*dk), M_DEVFSRULE, M_WAITOK | M_ZERO);
503 dk->dk_ruleset = ds;
504 if (dsi != NULL)
505 ++dsi->ds_refcount;
506 /* XXX: Inspect dr? */
507 memcpy(&dk->dk_rule, dr, sizeof(*dr));
508 dk->dk_rule.dr_id = mkrid(rid2rsn(dk->dk_rule.dr_id), dkrn);
509
510 TAILQ_FOREACH(k1, &ds->ds_rules, dk_list) {
511 if (rid2rn(k1->dk_rule.dr_id) > dkrn) {
512 TAILQ_INSERT_BEFORE(k1, dk, dk_list);
513 break;
514 }
515 }
516 if (k1 == NULL)
517 TAILQ_INSERT_TAIL(&ds->ds_rules, dk, dk_list);
518 return (0);
519 }
520
/*
 * Determine whether dk matches de.  Returns 1 if dk should be run on
 * de; 0, otherwise.
 */
static int
devfs_rule_match(struct devfs_krule *dk, struct devfs_dirent *de)
{
	struct devfs_rule *dr = &dk->dk_rule;
	struct cdev *dev;
	struct cdevsw *dsw;

	dev = devfs_rule_getdev(de);
	/*
	 * At this point, if dev is NULL, we should assume that any
	 * criteria that depend on it don't match.  We should *not*
	 * just ignore them (i.e., act like they weren't specified),
	 * since that makes a rule that only has criteria dependent on
	 * the struct cdev *match all symlinks and directories.
	 *
	 * Note also that the following tests are somewhat reversed:
	 * They're actually testing to see whether the condition does
	 * *not* match, since the default is to assume the rule should
	 * be run (such as if there are no conditions).
	 */
	if (dr->dr_icond & DRC_DSWFLAGS) {
		if (dev == NULL)
			return (0);
		/* Hold the cdevsw reference only while reading d_flags. */
		dsw = dev_refthread(dev);
		if (dsw == NULL)
			return (0);
		if ((dsw->d_flags & dr->dr_dswflags) == 0) {
			dev_relthread(dev);
			return (0);
		}
		dev_relthread(dev);
	}
	if (dr->dr_icond & DRC_PATHPTRN)
		if (!devfs_rule_matchpath(dk, de))
			return (0);

	return (1);
}
563
564 /*
565 * Determine whether dk matches de on account of dr_pathptrn.
566 */
567 static int
568 devfs_rule_matchpath(struct devfs_krule *dk, struct devfs_dirent *de)
569 {
570 struct devfs_rule *dr = &dk->dk_rule;
571 char *pname;
572 struct cdev *dev;
573
574 dev = devfs_rule_getdev(de);
575 if (dev != NULL)
576 pname = dev->si_name;
577 else if (de->de_dirent->d_type == DT_LNK ||
578 de->de_dirent->d_type == DT_DIR)
579 pname = de->de_dirent->d_name;
580 else
581 return (0);
582 KASSERT(pname != NULL, ("devfs_rule_matchpath: NULL pname"));
583
584 return (fnmatch(dr->dr_pathptrn, pname, 0) == 0);
585 }
586
/*
 * Run dk on de: if the rule matches, take every action it specifies.
 * "depth" bounds DRA_INCSET recursion to avoid unbounded include
 * chains.
 */
static void
devfs_rule_run(struct devfs_krule *dk, struct devfs_dirent *de, unsigned depth)
{
	struct devfs_rule *dr = &dk->dk_rule;
	struct devfs_ruleset *ds;

	if (!devfs_rule_match(dk, de))
		return;
	/* Boolean actions: hide/unhide via the whiteout flag. */
	if (dr->dr_iacts & DRA_BACTS) {
		if (dr->dr_bacts & DRB_HIDE)
			de->de_flags |= DE_WHITEOUT;
		if (dr->dr_bacts & DRB_UNHIDE)
			de->de_flags &= ~DE_WHITEOUT;
	}
	/* Ownership and permission overrides. */
	if (dr->dr_iacts & DRA_UID)
		de->de_uid = dr->dr_uid;
	if (dr->dr_iacts & DRA_GID)
		de->de_gid = dr->dr_gid;
	if (dr->dr_iacts & DRA_MODE)
		de->de_mode = dr->dr_mode;
	if (dr->dr_iacts & DRA_INCSET) {
		/*
		 * XXX: we should tell the user if the depth is exceeded here
		 * XXX: but it is not obvious how to.  A return value will
		 * XXX: not work as this is called when devices are created
		 * XXX: long time after the rules were instantiated.
		 * XXX: a printf() would probably give too much noise, or
		 * XXX: DoS the machine.  I guess a a rate-limited message
		 * XXX: might work.
		 */
		if (depth > 0) {
			ds = devfs_ruleset_bynum(dk->dk_rule.dr_incset);
			KASSERT(ds != NULL, ("DRA_INCSET but bad dr_incset"));
			devfs_ruleset_applyde(ds, de, depth - 1);
		}
	}
}
627
/*
 * Apply all the rules in ds to de, with at most "depth" further
 * levels of DRA_INCSET recursion allowed.
 */
static void
devfs_ruleset_applyde(struct devfs_ruleset *ds, struct devfs_dirent *de, unsigned depth)
{
	struct devfs_krule *dk;

	TAILQ_FOREACH(dk, &ds->ds_rules, dk_list)
		devfs_rule_run(dk, de, depth);
}
639
/*
 * Apply all the rules in ds to all the entries in dm.
 */
static void
devfs_ruleset_applydm(struct devfs_ruleset *ds, struct devfs_mount *dm)
{
	struct devfs_krule *dk;

	/*
	 * XXX: Does it matter whether we do
	 *
	 *	foreach(dk in ds)
	 *		foreach(de in dm)
	 *			apply(dk to de)
	 *
	 * as opposed to
	 *
	 *	foreach(de in dm)
	 *		foreach(dk in ds)
	 *			apply(dk to de)
	 *
	 * The end result is obviously the same, but does the order
	 * matter?
	 */
	TAILQ_FOREACH(dk, &ds->ds_rules, dk_list)
		devfs_rule_applydm(dk, dm);
}
667
668 /*
669 * Find a ruleset by number.
670 */
671 static struct devfs_ruleset *
672 devfs_ruleset_bynum(devfs_rsnum rsnum)
673 {
674 struct devfs_ruleset *ds;
675
676 TAILQ_FOREACH(ds, &devfs_rulesets, ds_list) {
677 if (ds->ds_number == rsnum)
678 return (ds);
679 }
680 return (NULL);
681 }
682
683 /*
684 * Create a new ruleset.
685 */
686 static struct devfs_ruleset *
687 devfs_ruleset_create(devfs_rsnum rsnum)
688 {
689 struct devfs_ruleset *s1;
690 struct devfs_ruleset *ds;
691
692 KASSERT(rsnum != 0, ("creating ruleset zero"));
693
694 KASSERT(devfs_ruleset_bynum(rsnum) == NULL,
695 ("creating already existent ruleset %d", rsnum));
696
697 ds = malloc(sizeof(*ds), M_DEVFSRULE, M_WAITOK | M_ZERO);
698 ds->ds_number = rsnum;
699 TAILQ_INIT(&ds->ds_rules);
700
701 TAILQ_FOREACH(s1, &devfs_rulesets, ds_list) {
702 if (s1->ds_number > rsnum) {
703 TAILQ_INSERT_BEFORE(s1, ds, ds_list);
704 break;
705 }
706 }
707 if (s1 == NULL)
708 TAILQ_INSERT_TAIL(&devfs_rulesets, ds, ds_list);
709 return (ds);
710 }
711
712 /*
713 * Remove a ruleset from the system if it's empty and not used
714 * anywhere. This should be called after every time a rule is deleted
715 * from this ruleset or the reference count is decremented.
716 */
717 static void
718 devfs_ruleset_reap(struct devfs_ruleset *ds)
719 {
720
721 KASSERT(ds->ds_number != 0, ("reaping ruleset zero "));
722
723 if (!TAILQ_EMPTY(&ds->ds_rules) || ds->ds_refcount != 0)
724 return;
725
726 TAILQ_REMOVE(&devfs_rulesets, ds, ds_list);
727 free(ds, M_DEVFSRULE);
728 }
729
730 /*
731 * Make rsnum the active ruleset for dm.
732 */
733 static int
734 devfs_ruleset_use(devfs_rsnum rsnum, struct devfs_mount *dm)
735 {
736 struct devfs_ruleset *cds, *ds;
737
738 if (dm->dm_ruleset != 0) {
739 cds = devfs_ruleset_bynum(dm->dm_ruleset);
740 --cds->ds_refcount;
741 devfs_ruleset_reap(cds);
742 }
743
744 ds = devfs_ruleset_bynum(rsnum);
745 if (ds == NULL)
746 ds = devfs_ruleset_create(rsnum);
747 /* These should probably be made atomic somehow. */
748 ++ds->ds_refcount;
749 dm->dm_ruleset = rsnum;
750
751 return (0);
752 }
753
754 void
755 devfs_rules_cleanup(struct devfs_mount *dm)
756 {
757 struct devfs_ruleset *ds;
758
759 sx_assert(&dm->dm_lock, SX_XLOCKED);
760 if (dm->dm_ruleset != 0) {
761 ds = devfs_ruleset_bynum(dm->dm_ruleset);
762 --ds->ds_refcount;
763 devfs_ruleset_reap(ds);
764 }
765 }