/*-
 * Copyright (c) 1999-2002, 2006 Robert N. M. Watson
 * Copyright (c) 2001 Ilmar S. Habibulin
 * Copyright (c) 2001-2005 Networks Associates Technology, Inc.
 * Copyright (c) 2005-2006 SPARTA, Inc.
 * All rights reserved.
 *
 * This software was developed by Robert Watson and Ilmar Habibulin for the
 * TrustedBSD Project.
 *
 * This software was developed for the FreeBSD Project in part by Network
 * Associates Laboratories, the Security Research Division of Network
 * Associates, Inc. under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"),
 * as part of the DARPA CHATS research program.
 *
 * This software was enhanced by SPARTA ISSO under SPAWAR contract
 * N66001-04-C-6019 ("SEFOS").
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*-
 * Framework for extensible kernel access control. This file contains core
 * kernel infrastructure for the TrustedBSD MAC Framework, including policy
 * registration, versioning, locking, error composition operator, and system
 * calls.
 *
 * The MAC Framework implements three programming interfaces:
 *
 * - The kernel MAC interface, defined in mac_framework.h, and invoked
 *   throughout the kernel to request security decisions, notify of security
 *   related events, etc.
 *
 * - The MAC policy module interface, defined in mac_policy.h, which is
 *   implemented by MAC policy modules and invoked by the MAC Framework to
 *   forward kernel security requests and notifications to policy modules.
 *
 * - The user MAC API, defined in mac.h, which allows user programs to query
 *   and set label state on objects.
 *
 * The majority of the MAC Framework implementation may be found in
 * src/sys/security/mac. Sample policy modules may be found in
 * src/sys/security/mac_*.
 */
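
/*
 * Illustrative sketch (not part of the framework itself): kernel consumers
 * of the first interface call a check entry point and honor the composed
 * error before completing an operation.  The exact entry point names and
 * signatures are declared in mac_framework.h; the fragment below assumes a
 * vnode open check of roughly this shape.
 *
 *	error = mac_check_vnode_open(cred, vp, accmode);
 *	if (error != 0)
 *		return (error);
 */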

#include "opt_mac.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/mac.h>
#include <sys/module.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <security/mac/mac_framework.h>
#include <security/mac/mac_internal.h>
#include <security/mac/mac_policy.h>

/*
 * Root sysctl node for all MAC and MAC policy controls.
 */
SYSCTL_NODE(_security, OID_AUTO, mac, CTLFLAG_RW, 0,
    "TrustedBSD MAC policy controls");

/*
 * Declare that the kernel provides MAC support, version 3 (FreeBSD 7.x).
 * This permits modules to refuse to be loaded if the necessary support isn't
 * present, even if it's pre-boot.
 */
MODULE_VERSION(kernel_mac_support, MAC_VERSION);

static unsigned int mac_version = MAC_VERSION;
SYSCTL_UINT(_security_mac, OID_AUTO, version, CTLFLAG_RD, &mac_version, 0,
    "");
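
/*
 * Policy modules bind to this version with a module dependency; the
 * MAC_POLICY_SET() macro in mac_policy.h emits it on the module's behalf,
 * along the lines of the sketch below ("mypolicy" is a placeholder name,
 * not a real policy).
 *
 *	MODULE_DEPEND(mypolicy, kernel_mac_support, MAC_VERSION,
 *	    MAC_VERSION, MAC_VERSION);
 */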
100
101 /*
102 * Labels consist of a indexed set of "slots", which are allocated policies
103 * as required. The MAC Framework maintains a bitmask of slots allocated so
104 * far to prevent reuse. Slots cannot be reused, as the MAC Framework
105 * guarantees that newly allocated slots in labels will be NULL unless
106 * otherwise initialized, and because we do not have a mechanism to garbage
107 * collect slots on policy unload. As labeled policies tend to be statically
108 * loaded during boot, and not frequently unloaded and reloaded, this is not
109 * generally an issue.
110 */
111 #if MAC_MAX_SLOTS > 32
112 #error "MAC_MAX_SLOTS too large"
113 #endif
114
115 static unsigned int mac_max_slots = MAC_MAX_SLOTS;
116 static unsigned int mac_slot_offsets_free = (1 << MAC_MAX_SLOTS) - 1;
117 SYSCTL_UINT(_security_mac, OID_AUTO, max_slots, CTLFLAG_RD, &mac_max_slots,
118 0, "");
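
/*
 * Worked example of the slot bitmask: with MAC_MAX_SLOTS of 4, the free
 * mask starts at 0xf.  mac_policy_register() below takes the lowest set
 * bit via ffs() and clears it, so successive labeled registrations receive
 * slots 0, 1, 2, and 3; a fifth request finds ffs() returning 0 and fails
 * with ENOMEM.  Slots are never returned to the mask, even on unload.
 */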

/*
 * Has the kernel started generating labeled objects yet?  All read/write
 * access to this variable is serialized during the boot process.  Following
 * the end of serialization, we don't update this flag; no locking.
 */
static int mac_late = 0;

/*
 * Flag to indicate whether or not we should allocate label storage for new
 * mbufs.  Since most dynamic policies we currently work with don't rely on
 * mbuf labeling, try to avoid paying the cost of mtag allocation unless
 * specifically notified of interest.  One result of this is that if a
 * dynamically loaded policy requests mbuf labels, it must be able to deal
 * with a NULL label being returned on any mbufs that were already in flight
 * when the policy was loaded.  Since the policy already has to deal with
 * uninitialized labels, this probably won't be a problem.  Note: currently
 * no locking.  Will this be a problem?
 *
 * In the future, we may want to allow objects to request labeling on a per-
 * object type basis, rather than globally for all objects.
 */
#ifndef MAC_ALWAYS_LABEL_MBUF
int mac_labelmbufs = 0;
#endif
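
/*
 * Consumers on the mbuf allocation path are expected to test this flag
 * before paying for label storage, in the spirit of the hypothetical
 * sketch below (the real check lives in the mbuf label initialization
 * code, not in this file):
 *
 *	if (mac_labelmbufs) {
 *		tag = m_tag_get(PACKET_TAG_MACLABEL, sizeof(struct label),
 *		    how);
 *		if (tag == NULL)
 *			return (ENOMEM);
 *		m_tag_prepend(m, tag);
 *	}
 */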

MALLOC_DEFINE(M_MACTEMP, "mactemp", "MAC temporary label storage");

/*
 * mac_static_policy_list holds a list of policy modules that are not loaded
 * while the system is "live", and cannot be unloaded.  These policies can be
 * invoked without holding the busy count.
 *
 * mac_policy_list stores the list of dynamic policies.  A busy count is
 * maintained for the list, stored in mac_policy_count.  The busy count is
 * protected by mac_policy_mtx; the list may be modified only while the busy
 * count is 0, requiring that the lock be held to prevent new references to
 * the list from being acquired.  For almost all operations, incrementing the
 * busy count is sufficient to guarantee consistency, as the list cannot be
 * modified while the busy count is elevated.  For a few special operations
 * involving a change to the list of active policies, the mtx itself must be
 * held.  A condition variable, mac_policy_cv, is used to signal potential
 * exclusive consumers that they should try to acquire the lock if a first
 * attempt at exclusive access fails.
 *
 * This design intentionally avoids fairness, and may starve attempts to
 * acquire an exclusive lock on a busy system.  This is required because we
 * do not ever want acquiring a read reference to perform an unbounded length
 * sleep.  Read references are acquired in ithreads, network isrs, etc, and
 * any unbounded blocking could lead quickly to deadlock.
 *
 * Another reason for never blocking on read references is that the MAC
 * Framework may recurse: if a policy calls a VOP, for example, this might
 * lead to vnode life cycle operations (such as init/destroy).
 *
 * If the kernel option MAC_STATIC has been compiled in, all locking becomes
 * a no-op, and the global list of policies is not allowed to change after
 * early boot.
 *
 * XXXRW: Currently, we signal mac_policy_cv every time the framework becomes
 * unbusy and there is a thread waiting to enter it exclusively.  Since it
 * may take some time before the thread runs, we may issue a lot of signals.
 * We should instead keep track of the fact that we've signalled, taking into
 * account that the framework may be busy again by the time the thread runs,
 * requiring us to re-signal.
 */
#ifndef MAC_STATIC
static struct mtx mac_policy_mtx;
static struct cv mac_policy_cv;
static int mac_policy_count;
static int mac_policy_wait;
#endif
struct mac_policy_list_head mac_policy_list;
struct mac_policy_list_head mac_static_policy_list;

/*
 * We manually invoke WITNESS_WARN() to allow Witness to generate warnings
 * even if we don't end up ever triggering the wait at run-time.  The
 * consumer of the exclusive interface must not hold any locks (other than
 * potentially Giant) since we may sleep for long (potentially indefinite)
 * periods of time waiting for the framework to become quiescent so that a
 * policy list change may be made.
 */
void
mac_policy_grab_exclusive(void)
{

#ifndef MAC_STATIC
	if (!mac_late)
		return;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "mac_policy_grab_exclusive() at %s:%d", __FILE__, __LINE__);
	mtx_lock(&mac_policy_mtx);
	while (mac_policy_count != 0) {
		mac_policy_wait++;
		cv_wait(&mac_policy_cv, &mac_policy_mtx);
		mac_policy_wait--;
	}
#endif
}

void
mac_policy_assert_exclusive(void)
{

#ifndef MAC_STATIC
	if (!mac_late)
		return;

	mtx_assert(&mac_policy_mtx, MA_OWNED);
	KASSERT(mac_policy_count == 0,
	    ("mac_policy_assert_exclusive(): not exclusive"));
#endif
}

void
mac_policy_release_exclusive(void)
{
#ifndef MAC_STATIC
	int dowakeup;

	if (!mac_late)
		return;

	KASSERT(mac_policy_count == 0,
	    ("mac_policy_release_exclusive(): not exclusive"));
	dowakeup = (mac_policy_wait != 0);
	mtx_unlock(&mac_policy_mtx);
	if (dowakeup)
		cv_signal(&mac_policy_cv);
#endif
}

void
mac_policy_list_busy(void)
{

#ifndef MAC_STATIC
	if (!mac_late)
		return;

	mtx_lock(&mac_policy_mtx);
	mac_policy_count++;
	mtx_unlock(&mac_policy_mtx);
#endif
}

int
mac_policy_list_conditional_busy(void)
{
#ifndef MAC_STATIC
	int ret;

	if (!mac_late)
		return (1);

	mtx_lock(&mac_policy_mtx);
	if (!LIST_EMPTY(&mac_policy_list)) {
		mac_policy_count++;
		ret = 1;
	} else
		ret = 0;
	mtx_unlock(&mac_policy_mtx);
	return (ret);
#else
	return (1);
#endif
}

void
mac_policy_list_unbusy(void)
{
#ifndef MAC_STATIC
	int dowakeup;

	if (!mac_late)
		return;

	mtx_lock(&mac_policy_mtx);
	mac_policy_count--;
	KASSERT(mac_policy_count >= 0, ("MAC_POLICY_LIST_LOCK"));
	dowakeup = (mac_policy_count == 0 && mac_policy_wait != 0);
	mtx_unlock(&mac_policy_mtx);

	if (dowakeup)
		cv_signal(&mac_policy_cv);
#endif
}
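
/*
 * Read-side consumers pair these primitives around traversal of the
 * dynamic policy list, while the static list may be walked without any
 * synchronization.  The composition macros in mac_internal.h follow
 * roughly the pattern sketched below (simplified; "mpo_check_something"
 * and "arg" are placeholders, not real entry points):
 *
 *	error = 0;
 *	LIST_FOREACH(mpc, &mac_static_policy_list, mpc_list) {
 *		if (mpc->mpc_ops->mpo_check_something != NULL)
 *			error = mac_error_select(
 *			    mpc->mpc_ops->mpo_check_something(arg), error);
 *	}
 *	if (mac_policy_list_conditional_busy() != 0) {
 *		LIST_FOREACH(mpc, &mac_policy_list, mpc_list) {
 *			if (mpc->mpc_ops->mpo_check_something != NULL)
 *				error = mac_error_select(
 *				    mpc->mpc_ops->mpo_check_something(arg),
 *				    error);
 *		}
 *		mac_policy_list_unbusy();
 *	}
 */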

/*
 * Initialize the MAC subsystem, including appropriate SMP locks.
 */
static void
mac_init(void)
{

	LIST_INIT(&mac_static_policy_list);
	LIST_INIT(&mac_policy_list);
	mac_labelzone_init();

#ifndef MAC_STATIC
	mtx_init(&mac_policy_mtx, "mac_policy_mtx", NULL, MTX_DEF);
	cv_init(&mac_policy_cv, "mac_policy_cv");
#endif
}

/*
 * For the purposes of modules that want to know if they were loaded
 * "early", set the mac_late flag once we've processed modules either linked
 * into the kernel, or loaded before kernel startup.
 */
static void
mac_late_init(void)
{

	mac_late = 1;
}

/*
 * After the policy list has changed, walk the list to update any global
 * flags.  Currently, we support only one flag, and it's conditionally
 * defined; as a result, the entire function is conditional.  Eventually, the
 * #else case might also iterate across the policies.
 */
static void
mac_policy_updateflags(void)
{
#ifndef MAC_ALWAYS_LABEL_MBUF
	struct mac_policy_conf *tmpc;
	int labelmbufs;

	mac_policy_assert_exclusive();

	labelmbufs = 0;
	LIST_FOREACH(tmpc, &mac_static_policy_list, mpc_list) {
		if (tmpc->mpc_loadtime_flags & MPC_LOADTIME_FLAG_LABELMBUFS)
			labelmbufs++;
	}
	LIST_FOREACH(tmpc, &mac_policy_list, mpc_list) {
		if (tmpc->mpc_loadtime_flags & MPC_LOADTIME_FLAG_LABELMBUFS)
			labelmbufs++;
	}
	mac_labelmbufs = (labelmbufs != 0);
#endif
}

static int
mac_policy_register(struct mac_policy_conf *mpc)
{
	struct mac_policy_conf *tmpc;
	int error, slot, static_entry;

	error = 0;

	/*
	 * We don't technically need exclusive access while !mac_late, but
	 * hold it for assertion consistency.
	 */
	mac_policy_grab_exclusive();

	/*
	 * If the module can potentially be unloaded, or we're loading late,
	 * we have to stick it in the non-static list and pay an extra
	 * performance overhead.  Otherwise, we can pay a light locking cost
	 * and stick it in the static list.
	 */
	static_entry = (!mac_late &&
	    !(mpc->mpc_loadtime_flags & MPC_LOADTIME_FLAG_UNLOADOK));

	if (static_entry) {
		LIST_FOREACH(tmpc, &mac_static_policy_list, mpc_list) {
			if (strcmp(tmpc->mpc_name, mpc->mpc_name) == 0) {
				error = EEXIST;
				goto out;
			}
		}
	} else {
		LIST_FOREACH(tmpc, &mac_policy_list, mpc_list) {
			if (strcmp(tmpc->mpc_name, mpc->mpc_name) == 0) {
				error = EEXIST;
				goto out;
			}
		}
	}
	if (mpc->mpc_field_off != NULL) {
		slot = ffs(mac_slot_offsets_free);
		if (slot == 0) {
			error = ENOMEM;
			goto out;
		}
		slot--;
		mac_slot_offsets_free &= ~(1 << slot);
		*mpc->mpc_field_off = slot;
	}
	mpc->mpc_runtime_flags |= MPC_RUNTIME_FLAG_REGISTERED;

	/*
	 * If we're loading a MAC module after the framework has initialized,
	 * it has to go into the dynamic list.  If we're loading it before
	 * we've finished initializing, it can go into the static list with
	 * weaker locking requirements.
	 */
	if (static_entry)
		LIST_INSERT_HEAD(&mac_static_policy_list, mpc, mpc_list);
	else
		LIST_INSERT_HEAD(&mac_policy_list, mpc, mpc_list);

	/*
	 * Per-policy initialization.  Currently, this takes place under the
	 * exclusive lock, so policies must not sleep in their init method.
	 * In the future, we may want to separate "init" from "start", with
	 * "init" occurring without the lock held.  Likewise, on tear-down,
	 * breaking out "stop" from "destroy".
	 */
	if (mpc->mpc_ops->mpo_init != NULL)
		(*(mpc->mpc_ops->mpo_init))(mpc);
	mac_policy_updateflags();

	printf("Security policy loaded: %s (%s)\n", mpc->mpc_fullname,
	    mpc->mpc_name);

out:
	mac_policy_release_exclusive();
	return (error);
}
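
/*
 * A policy module does not call mac_policy_register() directly; it declares
 * itself with the MAC_POLICY_SET() macro from mac_policy.h, which builds
 * the mac_policy_conf consumed here.  A minimal sketch, with "mypolicy"
 * names standing in for a real policy:
 *
 *	static struct mac_policy_ops mypolicy_ops = {
 *		.mpo_init = mypolicy_init,
 *	};
 *	static int mypolicy_slot;
 *
 *	MAC_POLICY_SET(&mypolicy_ops, mypolicy, "Example Policy",
 *	    MPC_LOADTIME_FLAG_UNLOADOK, &mypolicy_slot);
 */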

static int
mac_policy_unregister(struct mac_policy_conf *mpc)
{

	/*
	 * If we fail the load, we may get a request to unload.  Check to see
	 * if we did the run-time registration, and if not, silently succeed.
	 */
	mac_policy_grab_exclusive();
	if ((mpc->mpc_runtime_flags & MPC_RUNTIME_FLAG_REGISTERED) == 0) {
		mac_policy_release_exclusive();
		return (0);
	}
#if 0
	/*
	 * Don't allow unloading modules with private data.
	 */
	if (mpc->mpc_field_off != NULL) {
		MAC_POLICY_LIST_UNLOCK();
		return (EBUSY);
	}
#endif
	/*
	 * Only allow the unload to proceed if the module is unloadable by
	 * its own definition.
	 */
	if ((mpc->mpc_loadtime_flags & MPC_LOADTIME_FLAG_UNLOADOK) == 0) {
		mac_policy_release_exclusive();
		return (EBUSY);
	}
	if (mpc->mpc_ops->mpo_destroy != NULL)
		(*(mpc->mpc_ops->mpo_destroy))(mpc);

	LIST_REMOVE(mpc, mpc_list);
	mpc->mpc_runtime_flags &= ~MPC_RUNTIME_FLAG_REGISTERED;
	mac_policy_updateflags();

	mac_policy_release_exclusive();

	printf("Security policy unload: %s (%s)\n", mpc->mpc_fullname,
	    mpc->mpc_name);

	return (0);
}
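
/*
 * A policy that allows unloading typically pairs an mpo_destroy handler
 * with the MPC_LOADTIME_FLAG_UNLOADOK flag; the handler is invoked here
 * with the exclusive hold, so, like mpo_init, it must not sleep.  Sketch
 * only, using the hypothetical names from the earlier example:
 *
 *	static void
 *	mypolicy_destroy(struct mac_policy_conf *conf)
 *	{
 *		... release any state allocated by mypolicy_init() ...
 *	}
 */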

/*
 * Allow MAC policy modules to register during boot, etc.
 */
int
mac_policy_modevent(module_t mod, int type, void *data)
{
	struct mac_policy_conf *mpc;
	int error;

	error = 0;
	mpc = (struct mac_policy_conf *) data;

#ifdef MAC_STATIC
	if (mac_late) {
		printf("mac_policy_modevent: MAC_STATIC and late\n");
		return (EBUSY);
	}
#endif

	switch (type) {
	case MOD_LOAD:
		if (mpc->mpc_loadtime_flags & MPC_LOADTIME_FLAG_NOTLATE &&
		    mac_late) {
			printf("mac_policy_modevent: can't load %s policy "
			    "after booting\n", mpc->mpc_name);
			error = EBUSY;
			break;
		}
		error = mac_policy_register(mpc);
		break;
	case MOD_UNLOAD:
		/* Don't unregister the module if it was never registered. */
		if ((mpc->mpc_runtime_flags & MPC_RUNTIME_FLAG_REGISTERED)
		    != 0)
			error = mac_policy_unregister(mpc);
		else
			error = 0;
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}
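
/*
 * MAC_POLICY_SET() arranges for this handler to serve as the policy's
 * module event function, roughly as sketched below (a simplified rendering
 * of the macro in mac_policy.h, not a literal copy; "mypolicy" remains a
 * placeholder):
 *
 *	static moduledata_t mypolicy_mod = {
 *		"mypolicy",
 *		mac_policy_modevent,
 *		&mypolicy_mac_policy_conf
 *	};
 *	DECLARE_MODULE(mypolicy, mypolicy_mod, SI_SUB_MAC_POLICY,
 *	    SI_ORDER_MIDDLE);
 */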

/*
 * Define an error value precedence and, given two arguments, select the
 * value with the higher precedence.
 */
int
mac_error_select(int error1, int error2)
{

	/* Certain decision-making errors take top priority. */
	if (error1 == EDEADLK || error2 == EDEADLK)
		return (EDEADLK);

	/* Invalid arguments should be reported where possible. */
	if (error1 == EINVAL || error2 == EINVAL)
		return (EINVAL);

	/* Precedence goes to "visibility", with both process and file. */
	if (error1 == ESRCH || error2 == ESRCH)
		return (ESRCH);

	if (error1 == ENOENT || error2 == ENOENT)
		return (ENOENT);

	/* Precedence goes to DAC/MAC protections. */
	if (error1 == EACCES || error2 == EACCES)
		return (EACCES);

	/* Precedence goes to privilege. */
	if (error1 == EPERM || error2 == EPERM)
		return (EPERM);

	/* Precedence goes to error over success; otherwise, arbitrary. */
	if (error1 != 0)
		return (error1);
	return (error2);
}
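
/*
 * Example of the resulting precedence: if one policy returns EACCES and
 * another returns EPERM, mac_error_select(EACCES, EPERM) yields EACCES;
 * if one returns an error and the other returns 0, the error wins; only
 * when every policy returns 0 does the composed result succeed.
 */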

int
mac_check_structmac_consistent(struct mac *mac)
{

	if (mac->m_buflen < 0 ||
	    mac->m_buflen > MAC_MAX_LABEL_BUF_LEN)
		return (EINVAL);

	return (0);
}
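
/*
 * The MAC label system calls validate a user-supplied struct mac with this
 * helper before copying in the label string.  A condensed sketch of that
 * pattern (variable names are illustrative, not copied from the system
 * call code):
 *
 *	error = copyin(mac_p, &mac, sizeof(mac));
 *	if (error)
 *		return (error);
 *	error = mac_check_structmac_consistent(&mac);
 *	if (error)
 *		return (error);
 *	buffer = malloc(mac.m_buflen, M_MACTEMP, M_WAITOK | M_ZERO);
 *	error = copyinstr(mac.m_string, buffer, mac.m_buflen, NULL);
 */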

SYSINIT(mac, SI_SUB_MAC, SI_ORDER_FIRST, mac_init, NULL);
SYSINIT(mac_late, SI_SUB_MAC_LATE, SI_ORDER_FIRST, mac_late_init, NULL);