1 /*-
2 * Copyright (c) 2002 Dima Dorfman.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 * $FreeBSD: releng/6.1/sys/fs/devfs/devfs_rule.c 154220 2006-01-11 10:15:57Z rwatson $
27 */
28
29 /*
30 * DEVFS ruleset implementation.
31 *
32 * A note on terminology: To "run" a rule on a dirent is to take the
33 * prescribed action; to "apply" a rule is to check whether it matches
 * a dirent and run it if it does.
35 *
36 * A note on locking: Only foreign entry points (non-static functions)
37 * should deal with locking. Everything else assumes we already hold
38 * the required kind of lock.
39 *
40 * A note on namespace: devfs_rules_* are the non-static functions for
41 * the entire "ruleset" subsystem, devfs_rule_* are the static
42 * functions that operate on rules, and devfs_ruleset_* are the static
43 * functions that operate on rulesets. The line between the last two
44 * isn't always clear, but the guideline is still useful.
45 *
46 * A note on "special" identifiers: Ruleset 0 is the NULL, or empty,
47 * ruleset; it cannot be deleted or changed in any way. This may be
 * assumed inside the code; e.g., a ruleset of 0 may be interpreted to
49 * mean "no ruleset". The interpretation of rule 0 is
50 * command-dependent, but in no case is there a real rule with number
51 * 0.
52 *
53 * A note on errno codes: To make it easier for the userland to tell
54 * what went wrong, we sometimes use errno codes that are not entirely
55 * appropriate for the error but that would be less ambiguous than the
56 * appropriate "generic" code. For example, when we can't find a
57 * ruleset, we return ESRCH instead of ENOENT (except in
58 * DEVFSIO_{R,S}GETNEXT, where a nonexistent ruleset means "end of
59 * list", and the userland expects ENOENT to be this indicator); this
60 * way, when an operation fails, it's clear that what couldn't be
61 * found is a ruleset and not a rule (well, it's clear to those who
62 * know the convention).
63 */
64
65 #include "opt_devfs.h"
66
67 #include <sys/param.h>
68 #include <sys/systm.h>
69 #include <sys/conf.h>
70 #include <sys/kernel.h>
71 #include <sys/malloc.h>
72 #include <sys/dirent.h>
73 #include <sys/ioccom.h>
74 #include <sys/lock.h>
75 #include <sys/sx.h>
76
77 #include <fs/devfs/devfs.h>
78 #include <fs/devfs/devfs_int.h>
79
80 /*
81 * Kernel version of devfs_rule.
82 */
83 struct devfs_krule {
84 TAILQ_ENTRY(devfs_krule) dk_list;
85 struct devfs_ruleset *dk_ruleset;
86 struct devfs_rule dk_rule;
87 };
88
89 TAILQ_HEAD(rulehead, devfs_krule);
90 static MALLOC_DEFINE(M_DEVFSRULE, "DEVFS_RULE", "DEVFS rule storage");
91
92 /*
93 * Structure to describe a ruleset.
94 */
95 struct devfs_ruleset {
96 TAILQ_ENTRY(devfs_ruleset) ds_list;
97 struct rulehead ds_rules;
98 devfs_rsnum ds_number;
99 int ds_refcount;
100 };
101
102 static devfs_rid devfs_rid_input(devfs_rid rid, struct devfs_mount *dm);
103
104 static void devfs_rule_applyde_recursive(struct devfs_krule *dk,
105 struct devfs_dirent *de);
106 static void devfs_rule_applydm(struct devfs_krule *dk, struct devfs_mount *dm);
107 static int devfs_rule_autonumber(struct devfs_ruleset *ds, devfs_rnum *rnp);
108 static struct devfs_krule *devfs_rule_byid(devfs_rid rid);
109 static int devfs_rule_delete(struct devfs_krule *dkp);
110 static struct cdev *devfs_rule_getdev(struct devfs_dirent *de);
111 static int devfs_rule_input(struct devfs_rule *dr, struct devfs_mount *dm);
112 static int devfs_rule_insert(struct devfs_rule *dr);
113 static int devfs_rule_match(struct devfs_krule *dk, struct devfs_dirent *de);
114 static int devfs_rule_matchpath(struct devfs_krule *dk,
115 struct devfs_dirent *de);
116 static void devfs_rule_run(struct devfs_krule *dk, struct devfs_dirent *de, unsigned depth);
117
118 static void devfs_ruleset_applyde(struct devfs_ruleset *ds,
119 struct devfs_dirent *de, unsigned depth);
120 static void devfs_ruleset_applydm(struct devfs_ruleset *ds,
121 struct devfs_mount *dm);
122 static struct devfs_ruleset *devfs_ruleset_bynum(devfs_rsnum rsnum);
123 static struct devfs_ruleset *devfs_ruleset_create(devfs_rsnum rsnum);
124 static void devfs_ruleset_reap(struct devfs_ruleset *dsp);
125 static int devfs_ruleset_use(devfs_rsnum rsnum, struct devfs_mount *dm);
126
127 static struct sx sx_rules;
128 SX_SYSINIT(sx_rules, &sx_rules, "DEVFS ruleset lock");
129
130 static TAILQ_HEAD(, devfs_ruleset) devfs_rulesets =
131 TAILQ_HEAD_INITIALIZER(devfs_rulesets);
132
133 /*
134 * Called to apply the proper rules for 'de' before it can be
135 * exposed to the userland. This should be called with an exclusive
136 * lock on dm in case we need to run anything.
137 */
138 void
139 devfs_rules_apply(struct devfs_mount *dm, struct devfs_dirent *de)
140 {
141 struct devfs_ruleset *ds;
142
143 if (dm->dm_ruleset == 0)
144 return;
145 sx_slock(&sx_rules);
146 ds = devfs_ruleset_bynum(dm->dm_ruleset);
147 KASSERT(ds != NULL, ("mount-point has NULL ruleset"));
148 devfs_ruleset_applyde(ds, de, devfs_rule_depth);
149 sx_sunlock(&sx_rules);
150 }
151
152 /*
153 * Rule subsystem ioctl hook.
154 */
155 int
156 devfs_rules_ioctl(struct devfs_mount *dm, u_long cmd, caddr_t data, struct thread *td)
157 {
158 struct devfs_ruleset *ds;
159 struct devfs_krule *dk;
160 struct devfs_rule *dr;
161 devfs_rsnum rsnum;
162 devfs_rnum rnum;
163 devfs_rid rid;
164 int error;
165
166 sx_assert(&dm->dm_lock, SX_XLOCKED);
167
168 /*
169 * XXX: This returns an error regardless of whether we
170 * actually support the cmd or not.
171 */
172 error = suser(td);
173 if (error != 0)
174 return (error);
175
176 sx_xlock(&sx_rules);
177
178 switch (cmd) {
179 case DEVFSIO_RADD:
180 dr = (struct devfs_rule *)data;
181 error = devfs_rule_input(dr, dm);
182 if (error != 0)
183 break;
184 dk = devfs_rule_byid(dr->dr_id);
185 if (dk != NULL) {
186 error = EEXIST;
187 break;
188 }
189 if (rid2rsn(dr->dr_id) == 0) {
190 error = EIO;
191 break;
192 }
193 error = devfs_rule_insert(dr);
194 break;
195 case DEVFSIO_RAPPLY:
196 dr = (struct devfs_rule *)data;
197 error = devfs_rule_input(dr, dm);
198 if (error != 0)
199 break;
200
201 /*
202 * This is one of many possible hackish
203 * implementations. The primary contender is an
204 * implementation where the rule we read in is
205 * temporarily inserted into some ruleset, perhaps
206 * with a hypothetical DRO_NOAUTO flag so that it
207 * doesn't get used where it isn't intended, and
208 * applied in the normal way. This can be done in the
209 * userland (DEVFSIO_ADD, DEVFSIO_APPLYID,
210 * DEVFSIO_DEL) or in the kernel; either way it breaks
211 * some corner case assumptions in other parts of the
212 * code (not that this implementation doesn't do
213 * that).
214 */
215 if (dr->dr_iacts & DRA_INCSET &&
216 devfs_ruleset_bynum(dr->dr_incset) == NULL) {
217 error = ESRCH;
218 break;
219 }
220 dk = malloc(sizeof(*dk), M_TEMP, M_WAITOK | M_ZERO);
221 memcpy(&dk->dk_rule, dr, sizeof(*dr));
222 devfs_rule_applydm(dk, dm);
223 free(dk, M_TEMP);
224 break;
225 case DEVFSIO_RAPPLYID:
226 rid = *(devfs_rid *)data;
227 rid = devfs_rid_input(rid, dm);
228 dk = devfs_rule_byid(rid);
229 if (dk == NULL) {
230 error = ENOENT;
231 break;
232 }
233 devfs_rule_applydm(dk, dm);
234 break;
235 case DEVFSIO_RDEL:
236 rid = *(devfs_rid *)data;
237 rid = devfs_rid_input(rid, dm);
238 dk = devfs_rule_byid(rid);
239 if (dk == NULL) {
240 error = ENOENT;
241 break;
242 }
243 ds = dk->dk_ruleset;
244 error = devfs_rule_delete(dk);
245 break;
246 case DEVFSIO_RGETNEXT:
247 dr = (struct devfs_rule *)data;
248 error = devfs_rule_input(dr, dm);
249 if (error != 0)
250 break;
251 /*
252 * We can't use devfs_rule_byid() here since that
253 * requires the rule specified to exist, but we want
254 * getnext(N) to work whether there is a rule N or not
255 * (specifically, getnext(0) must work, but we should
256 * never have a rule 0 since the add command
257 * interprets 0 to mean "auto-number").
258 */
259 ds = devfs_ruleset_bynum(rid2rsn(dr->dr_id));
260 if (ds == NULL) {
261 error = ENOENT;
262 break;
263 }
264 rnum = rid2rn(dr->dr_id);
265 TAILQ_FOREACH(dk, &ds->ds_rules, dk_list) {
266 if (rid2rn(dk->dk_rule.dr_id) > rnum)
267 break;
268 }
269 if (dk == NULL) {
270 error = ENOENT;
271 break;
272 }
273 memcpy(dr, &dk->dk_rule, sizeof(*dr));
274 break;
275 case DEVFSIO_SUSE:
276 rsnum = *(devfs_rsnum *)data;
277 error = devfs_ruleset_use(rsnum, dm);
278 break;
279 case DEVFSIO_SAPPLY:
280 rsnum = *(devfs_rsnum *)data;
281 rsnum = rid2rsn(devfs_rid_input(mkrid(rsnum, 0), dm));
282 ds = devfs_ruleset_bynum(rsnum);
283 if (ds == NULL) {
284 error = ESRCH;
285 break;
286 }
287 devfs_ruleset_applydm(ds, dm);
288 break;
289 case DEVFSIO_SGETNEXT:
290 rsnum = *(devfs_rsnum *)data;
291 TAILQ_FOREACH(ds, &devfs_rulesets, ds_list) {
292 if (ds->ds_number > rsnum)
293 break;
294 }
295 if (ds == NULL) {
296 error = ENOENT;
297 break;
298 }
299 *(devfs_rsnum *)data = ds->ds_number;
300 break;
301 default:
302 error = ENOIOCTL;
303 break;
304 }
305
306 sx_xunlock(&sx_rules);
307 return (error);
308 }
309
310 /*
311 * Adjust the rule identifier to use the ruleset of dm if one isn't
312 * explicitly specified.
313 *
314 * Note that after this operation, rid2rsn(rid) might still be 0, and
315 * that's okay; ruleset 0 is a valid ruleset, but when it's read in
316 * from the userland, it means "current ruleset for this mount-point".
317 */
318 static devfs_rid
319 devfs_rid_input(devfs_rid rid, struct devfs_mount *dm)
320 {
321
322 if (rid2rsn(rid) == 0)
323 return (mkrid(dm->dm_ruleset, rid2rn(rid)));
324 else
325 return (rid);
326 }
327
328 /*
329 * Apply dk to de and everything under de.
330 *
331 * XXX: This method needs a function call for every nested
332 * subdirectory in a devfs mount. If we plan to have many of these,
333 * we might eventually run out of kernel stack space.
334 * XXX: a linear search could be done through the cdev list instead.
335 */
336 static void
337 devfs_rule_applyde_recursive(struct devfs_krule *dk, struct devfs_dirent *de)
338 {
339 struct devfs_dirent *de2;
340
341 TAILQ_FOREACH(de2, &de->de_dlist, de_list)
342 devfs_rule_applyde_recursive(dk, de2);
343 devfs_rule_run(dk, de, devfs_rule_depth);
344 }
345
346 /*
347 * Apply dk to all entires in dm.
348 */
349 static void
350 devfs_rule_applydm(struct devfs_krule *dk, struct devfs_mount *dm)
351 {
352
353 devfs_rule_applyde_recursive(dk, dm->dm_rootdir);
354 }
355
356 /*
357 * Automatically select a number for a new rule in ds, and write the
358 * result into rnump.
359 */
360 static int
361 devfs_rule_autonumber(struct devfs_ruleset *ds, devfs_rnum *rnump)
362 {
363 struct devfs_krule *dk;
364
365 /* Find the last rule. */
366 dk = TAILQ_LAST(&ds->ds_rules, rulehead);
367 if (dk == NULL)
368 *rnump = 100;
369 else {
370 *rnump = rid2rn(dk->dk_rule.dr_id) + 100;
371 /* Detect overflow. */
372 if (*rnump < rid2rn(dk->dk_rule.dr_id))
373 return (ERANGE);
374 }
375 KASSERT(devfs_rule_byid(mkrid(ds->ds_number, *rnump)) == NULL,
376 ("autonumbering resulted in an already existing rule"));
377 return (0);
378 }
379
380 /*
381 * Find a krule by id.
382 */
383 static struct devfs_krule *
384 devfs_rule_byid(devfs_rid rid)
385 {
386 struct devfs_ruleset *ds;
387 struct devfs_krule *dk;
388 devfs_rnum rn;
389
390 rn = rid2rn(rid);
391 ds = devfs_ruleset_bynum(rid2rsn(rid));
392 if (ds == NULL)
393 return (NULL);
394 TAILQ_FOREACH(dk, &ds->ds_rules, dk_list) {
395 if (rid2rn(dk->dk_rule.dr_id) == rn)
396 return (dk);
397 else if (rid2rn(dk->dk_rule.dr_id) > rn)
398 break;
399 }
400 return (NULL);
401 }
402
403 /*
404 * Remove dkp from any lists it may be on and remove memory associated
405 * with it.
406 */
407 static int
408 devfs_rule_delete(struct devfs_krule *dk)
409 {
410 struct devfs_ruleset *ds;
411
412 if (dk->dk_rule.dr_iacts & DRA_INCSET) {
413 ds = devfs_ruleset_bynum(dk->dk_rule.dr_incset);
414 KASSERT(ds != NULL, ("DRA_INCSET but bad dr_incset"));
415 --ds->ds_refcount;
416 devfs_ruleset_reap(ds);
417 }
418 ds = dk->dk_ruleset;
419 TAILQ_REMOVE(&ds->ds_rules, dk, dk_list);
420 devfs_ruleset_reap(ds);
421 free(dk, M_DEVFSRULE);
422 return (0);
423 }
424
425 /*
426 * Get a struct cdev *corresponding to de so we can try to match rules based
427 * on it. If this routine returns NULL, there is no struct cdev *associated
428 * with the dirent (symlinks and directories don't have dev_ts), and
429 * the caller should assume that any critera dependent on a dev_t
430 * don't match.
431 */
432 static struct cdev *
433 devfs_rule_getdev(struct devfs_dirent *de)
434 {
435
436 if (de->de_cdp == NULL)
437 return (NULL);
438 if (de->de_cdp->cdp_flags & CDP_ACTIVE)
439 return (&de->de_cdp->cdp_c);
440 else
441 return (NULL);
442 }
443
444 /*
445 * Do what we need to do to a rule that we just loaded from the
446 * userland. In particular, we need to check the magic, and adjust
447 * the ruleset appropriate if desired.
448 */
449 static int
450 devfs_rule_input(struct devfs_rule *dr, struct devfs_mount *dm)
451 {
452
453 if (dr->dr_magic != DEVFS_MAGIC)
454 return (ERPCMISMATCH);
455 dr->dr_id = devfs_rid_input(dr->dr_id, dm);
456 return (0);
457 }
458
459 /*
460 * Import dr into the appropriate place in the kernel (i.e., make a
461 * krule). The value of dr is copied, so the pointer may be destroyed
462 * after this call completes.
463 */
464 static int
465 devfs_rule_insert(struct devfs_rule *dr)
466 {
467 struct devfs_ruleset *ds, *dsi;
468 struct devfs_krule *k1;
469 struct devfs_krule *dk;
470 devfs_rsnum rsnum;
471 devfs_rnum dkrn;
472 int error;
473
474 /*
475 * This stuff seems out of place here, but we want to do it as
476 * soon as possible so that if it fails, we don't have to roll
477 * back any changes we already made (e.g., ruleset creation).
478 */
479 if (dr->dr_iacts & DRA_INCSET) {
480 dsi = devfs_ruleset_bynum(dr->dr_incset);
481 if (dsi == NULL)
482 return (ESRCH);
483 } else
484 dsi = NULL;
485
486 rsnum = rid2rsn(dr->dr_id);
487 KASSERT(rsnum != 0, ("Inserting into ruleset zero"));
488
489 ds = devfs_ruleset_bynum(rsnum);
490 if (ds == NULL)
491 ds = devfs_ruleset_create(rsnum);
492 dkrn = rid2rn(dr->dr_id);
493 if (dkrn == 0) {
494 error = devfs_rule_autonumber(ds, &dkrn);
495 if (error != 0) {
496 devfs_ruleset_reap(ds);
497 return (error);
498 }
499 }
500
501 dk = malloc(sizeof(*dk), M_DEVFSRULE, M_WAITOK | M_ZERO);
502 dk->dk_ruleset = ds;
503 if (dsi != NULL)
504 ++dsi->ds_refcount;
505 /* XXX: Inspect dr? */
506 memcpy(&dk->dk_rule, dr, sizeof(*dr));
507 dk->dk_rule.dr_id = mkrid(rid2rsn(dk->dk_rule.dr_id), dkrn);
508
509 TAILQ_FOREACH(k1, &ds->ds_rules, dk_list) {
510 if (rid2rn(k1->dk_rule.dr_id) > dkrn) {
511 TAILQ_INSERT_BEFORE(k1, dk, dk_list);
512 break;
513 }
514 }
515 if (k1 == NULL)
516 TAILQ_INSERT_TAIL(&ds->ds_rules, dk, dk_list);
517 return (0);
518 }
519
520 /*
521 * Determine whether dk matches de. Returns 1 if dk should be run on
522 * de; 0, otherwise.
523 */
524 static int
525 devfs_rule_match(struct devfs_krule *dk, struct devfs_dirent *de)
526 {
527 struct devfs_rule *dr = &dk->dk_rule;
528 struct cdev *dev;
529
530 dev = devfs_rule_getdev(de);
531 /*
532 * At this point, if dev is NULL, we should assume that any
533 * criteria that depend on it don't match. We should *not*
534 * just ignore them (i.e., act like they weren't specified),
535 * since that makes a rule that only has criteria dependent on
536 * the struct cdev *match all symlinks and directories.
537 *
538 * Note also that the following tests are somewhat reversed:
539 * They're actually testing to see whether the condition does
540 * *not* match, since the default is to assume the rule should
541 * be run (such as if there are no conditions).
542 *
543 * XXX: lacks threadref on dev
544 */
545 if (dr->dr_icond & DRC_DSWFLAGS)
546 if (dev == NULL ||
547 (dev->si_devsw->d_flags & dr->dr_dswflags) == 0)
548 return (0);
549 if (dr->dr_icond & DRC_PATHPTRN)
550 if (!devfs_rule_matchpath(dk, de))
551 return (0);
552
553 return (1);
554 }
555
556 /*
557 * Determine whether dk matches de on account of dr_pathptrn.
558 */
559 static int
560 devfs_rule_matchpath(struct devfs_krule *dk, struct devfs_dirent *de)
561 {
562 struct devfs_rule *dr = &dk->dk_rule;
563 char *pname;
564 struct cdev *dev;
565
566 dev = devfs_rule_getdev(de);
567 if (dev != NULL)
568 pname = dev->si_name;
569 else if (de->de_dirent->d_type == DT_LNK ||
570 de->de_dirent->d_type == DT_DIR)
571 pname = de->de_dirent->d_name;
572 else
573 return (0);
574 KASSERT(pname != NULL, ("devfs_rule_matchpath: NULL pname"));
575
576 return (fnmatch(dr->dr_pathptrn, pname, 0) == 0);
577 }
578
579 /*
580 * Run dk on de.
581 */
582 static void
583 devfs_rule_run(struct devfs_krule *dk, struct devfs_dirent *de, unsigned depth)
584 {
585 struct devfs_rule *dr = &dk->dk_rule;
586 struct devfs_ruleset *ds;
587
588 if (!devfs_rule_match(dk, de))
589 return;
590 if (dr->dr_iacts & DRA_BACTS) {
591 if (dr->dr_bacts & DRB_HIDE)
592 de->de_flags |= DE_WHITEOUT;
593 if (dr->dr_bacts & DRB_UNHIDE)
594 de->de_flags &= ~DE_WHITEOUT;
595 }
596 if (dr->dr_iacts & DRA_UID)
597 de->de_uid = dr->dr_uid;
598 if (dr->dr_iacts & DRA_GID)
599 de->de_gid = dr->dr_gid;
600 if (dr->dr_iacts & DRA_MODE)
601 de->de_mode = dr->dr_mode;
602 if (dr->dr_iacts & DRA_INCSET) {
603 /*
604 * XXX: we should tell the user if the depth is exceeded here
605 * XXX: but it is not obvious how to. A return value will
606 * XXX: not work as this is called when devices are created
607 * XXX: long time after the rules were instantiated.
608 * XXX: a printf() would probably give too much noise, or
609 * XXX: DoS the machine. I guess a a rate-limited message
610 * XXX: might work.
611 */
612 if (depth > 0) {
613 ds = devfs_ruleset_bynum(dk->dk_rule.dr_incset);
614 KASSERT(ds != NULL, ("DRA_INCSET but bad dr_incset"));
615 devfs_ruleset_applyde(ds, de, depth - 1);
616 }
617 }
618 }
619
620 /*
621 * Apply all the rules in ds to de.
622 */
623 static void
624 devfs_ruleset_applyde(struct devfs_ruleset *ds, struct devfs_dirent *de, unsigned depth)
625 {
626 struct devfs_krule *dk;
627
628 TAILQ_FOREACH(dk, &ds->ds_rules, dk_list)
629 devfs_rule_run(dk, de, depth);
630 }
631
632 /*
633 * Apply all the rules in ds to all the entires in dm.
634 */
635 static void
636 devfs_ruleset_applydm(struct devfs_ruleset *ds, struct devfs_mount *dm)
637 {
638 struct devfs_krule *dk;
639
640 /*
641 * XXX: Does it matter whether we do
642 *
643 * foreach(dk in ds)
644 * foreach(de in dm)
645 * apply(dk to de)
646 *
647 * as opposed to
648 *
649 * foreach(de in dm)
650 * foreach(dk in ds)
651 * apply(dk to de)
652 *
653 * The end result is obviously the same, but does the order
654 * matter?
655 */
656 TAILQ_FOREACH(dk, &ds->ds_rules, dk_list)
657 devfs_rule_applydm(dk, dm);
658 }
659
660 /*
661 * Find a ruleset by number.
662 */
663 static struct devfs_ruleset *
664 devfs_ruleset_bynum(devfs_rsnum rsnum)
665 {
666 struct devfs_ruleset *ds;
667
668 TAILQ_FOREACH(ds, &devfs_rulesets, ds_list) {
669 if (ds->ds_number == rsnum)
670 return (ds);
671 }
672 return (NULL);
673 }
674
675 /*
676 * Create a new ruleset.
677 */
678 static struct devfs_ruleset *
679 devfs_ruleset_create(devfs_rsnum rsnum)
680 {
681 struct devfs_ruleset *s1;
682 struct devfs_ruleset *ds;
683
684 KASSERT(rsnum != 0, ("creating ruleset zero"));
685
686 KASSERT(devfs_ruleset_bynum(rsnum) == NULL,
687 ("creating already existent ruleset %d", rsnum));
688
689 ds = malloc(sizeof(*ds), M_DEVFSRULE, M_WAITOK | M_ZERO);
690 ds->ds_number = rsnum;
691 TAILQ_INIT(&ds->ds_rules);
692
693 TAILQ_FOREACH(s1, &devfs_rulesets, ds_list) {
694 if (s1->ds_number > rsnum) {
695 TAILQ_INSERT_BEFORE(s1, ds, ds_list);
696 break;
697 }
698 }
699 if (s1 == NULL)
700 TAILQ_INSERT_TAIL(&devfs_rulesets, ds, ds_list);
701 return (ds);
702 }
703
704 /*
705 * Remove a ruleset from the system if it's empty and not used
706 * anywhere. This should be called after every time a rule is deleted
707 * from this ruleset or the reference count is decremented.
708 */
709 static void
710 devfs_ruleset_reap(struct devfs_ruleset *ds)
711 {
712
713 KASSERT(ds->ds_number != 0, ("reaping ruleset zero "));
714
715 if (!TAILQ_EMPTY(&ds->ds_rules) || ds->ds_refcount != 0)
716 return;
717
718 TAILQ_REMOVE(&devfs_rulesets, ds, ds_list);
719 free(ds, M_DEVFSRULE);
720 }
721
722 /*
723 * Make rsnum the active ruleset for dm.
724 */
725 static int
726 devfs_ruleset_use(devfs_rsnum rsnum, struct devfs_mount *dm)
727 {
728 struct devfs_ruleset *cds, *ds;
729
730 ds = devfs_ruleset_bynum(rsnum);
731 if (ds == NULL)
732 ds = devfs_ruleset_create(rsnum);
733 if (dm->dm_ruleset != 0) {
734 cds = devfs_ruleset_bynum(dm->dm_ruleset);
735 --cds->ds_refcount;
736 devfs_ruleset_reap(cds);
737 }
738
739 /* These should probably be made atomic somehow. */
740 ++ds->ds_refcount;
741 dm->dm_ruleset = rsnum;
742
743 return (0);
744 }
745
746 void
747 devfs_rules_cleanup(struct devfs_mount *dm)
748 {
749 struct devfs_ruleset *ds;
750
751 sx_assert(&dm->dm_lock, SX_XLOCKED);
752 if (dm->dm_ruleset != 0) {
753 ds = devfs_ruleset_bynum(dm->dm_ruleset);
754 --ds->ds_refcount;
755 devfs_ruleset_reap(ds);
756 }
757 }
/* Cache object: b6edda0dbab095275ed9e4dc96227554 */