1 /*-
2 * Copyright (c) 1982, 1986, 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * Mike Karels at Berkeley Software Design, Inc.
7 *
8 * Quite extensively rewritten by Poul-Henning Kamp of the FreeBSD
 * project, to make these variables more user-friendly.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 4. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 *
35 * @(#)kern_sysctl.c 8.4 (Berkeley) 4/14/94
36 */
37
38 #include <sys/cdefs.h>
39 __FBSDID("$FreeBSD$");
40
41 #include "opt_capsicum.h"
42 #include "opt_compat.h"
43 #include "opt_ktrace.h"
44
45 #include <sys/param.h>
46 #include <sys/fail.h>
47 #include <sys/systm.h>
48 #include <sys/capsicum.h>
49 #include <sys/kernel.h>
50 #include <sys/sysctl.h>
51 #include <sys/malloc.h>
52 #include <sys/priv.h>
53 #include <sys/proc.h>
54 #include <sys/jail.h>
55 #include <sys/lock.h>
56 #include <sys/mutex.h>
57 #include <sys/rmlock.h>
58 #include <sys/sbuf.h>
59 #include <sys/sx.h>
60 #include <sys/sysproto.h>
61 #include <sys/uio.h>
62 #ifdef KTRACE
63 #include <sys/ktrace.h>
64 #endif
65
66 #include <net/vnet.h>
67
68 #include <security/mac/mac_framework.h>
69
70 #include <vm/vm.h>
71 #include <vm/vm_extern.h>
72
73 static MALLOC_DEFINE(M_SYSCTL, "sysctl", "sysctl internal magic");
74 static MALLOC_DEFINE(M_SYSCTLOID, "sysctloid", "sysctl dynamic oids");
75 static MALLOC_DEFINE(M_SYSCTLTMP, "sysctltmp", "sysctl temp output buffer");
76
77 /*
78 * The sysctllock protects the MIB tree. It also protects sysctl
79 * contexts used with dynamic sysctls. The sysctl_register_oid() and
80 * sysctl_unregister_oid() routines require the sysctllock to already
81 * be held, so the sysctl_wlock() and sysctl_wunlock() routines are
82 * provided for the few places in the kernel which need to use that
83 * API rather than using the dynamic API. Use of the dynamic API is
84 * strongly encouraged for most code.
85 *
86 * The sysctlmemlock is used to limit the amount of user memory wired for
87 * sysctl requests. This is implemented by serializing any userland
88 * sysctl requests larger than a single page via an exclusive lock.
89 */
90 static struct rmlock sysctllock;
91 static struct sx __exclusive_cache_line sysctlmemlock;
92
93 #define SYSCTL_WLOCK() rm_wlock(&sysctllock)
94 #define SYSCTL_WUNLOCK() rm_wunlock(&sysctllock)
95 #define SYSCTL_RLOCK(tracker) rm_rlock(&sysctllock, (tracker))
96 #define SYSCTL_RUNLOCK(tracker) rm_runlock(&sysctllock, (tracker))
97 #define SYSCTL_WLOCKED() rm_wowned(&sysctllock)
98 #define SYSCTL_ASSERT_LOCKED() rm_assert(&sysctllock, RA_LOCKED)
99 #define SYSCTL_ASSERT_WLOCKED() rm_assert(&sysctllock, RA_WLOCKED)
100 #define SYSCTL_ASSERT_RLOCKED() rm_assert(&sysctllock, RA_RLOCKED)
101 #define SYSCTL_INIT() rm_init_flags(&sysctllock, "sysctl lock", \
102 RM_SLEEPABLE)
103 #define SYSCTL_SLEEP(ch, wmesg, timo) \
104 rm_sleep(ch, &sysctllock, 0, wmesg, timo)
105
106 static int sysctl_root(SYSCTL_HANDLER_ARGS);
107
108 /* Root list */
109 struct sysctl_oid_list sysctl__children = SLIST_HEAD_INITIALIZER(&sysctl__children);
110
111 static int sysctl_remove_oid_locked(struct sysctl_oid *oidp, int del,
112 int recurse);
113 static int sysctl_old_kernel(struct sysctl_req *, const void *, size_t);
114 static int sysctl_new_kernel(struct sysctl_req *, void *, size_t);
115
116 static struct sysctl_oid *
117 sysctl_find_oidname(const char *name, struct sysctl_oid_list *list)
118 {
119 struct sysctl_oid *oidp;
120
121 SYSCTL_ASSERT_LOCKED();
122 SLIST_FOREACH(oidp, list, oid_link) {
123 if (strcmp(oidp->oid_name, name) == 0) {
124 return (oidp);
125 }
126 }
127 return (NULL);
128 }
129
130 /*
131 * Initialization of the MIB tree.
132 *
133 * Order by number in each list.
134 */
135 void
136 sysctl_wlock(void)
137 {
138
139 SYSCTL_WLOCK();
140 }
141
142 void
143 sysctl_wunlock(void)
144 {
145
146 SYSCTL_WUNLOCK();
147 }
148
149 static int
150 sysctl_root_handler_locked(struct sysctl_oid *oid, void *arg1, intmax_t arg2,
151 struct sysctl_req *req, struct rm_priotracker *tracker)
152 {
153 int error;
154
155 if (oid->oid_kind & CTLFLAG_DYN)
156 atomic_add_int(&oid->oid_running, 1);
157
158 if (tracker != NULL)
159 SYSCTL_RUNLOCK(tracker);
160 else
161 SYSCTL_WUNLOCK();
162
163 if (!(oid->oid_kind & CTLFLAG_MPSAFE))
164 mtx_lock(&Giant);
165 error = oid->oid_handler(oid, arg1, arg2, req);
166 if (!(oid->oid_kind & CTLFLAG_MPSAFE))
167 mtx_unlock(&Giant);
168
169 KFAIL_POINT_ERROR(_debug_fail_point, sysctl_running, error);
170
171 if (tracker != NULL)
172 SYSCTL_RLOCK(tracker);
173 else
174 SYSCTL_WLOCK();
175
176 if (oid->oid_kind & CTLFLAG_DYN) {
177 if (atomic_fetchadd_int(&oid->oid_running, -1) == 1 &&
178 (oid->oid_kind & CTLFLAG_DYING) != 0)
179 wakeup(&oid->oid_running);
180 }
181
182 return (error);
183 }
184
185 static void
186 sysctl_load_tunable_by_oid_locked(struct sysctl_oid *oidp)
187 {
188 struct sysctl_req req;
189 struct sysctl_oid *curr;
190 char *penv = NULL;
191 char path[96];
192 ssize_t rem = sizeof(path);
193 ssize_t len;
194 uint8_t data[512] __aligned(sizeof(uint64_t));
195 int size;
196 int error;
197
198 path[--rem] = 0;
199
200 for (curr = oidp; curr != NULL; curr = SYSCTL_PARENT(curr)) {
201 len = strlen(curr->oid_name);
202 rem -= len;
203 if (curr != oidp)
204 rem -= 1;
205 if (rem < 0) {
206 printf("OID path exceeds %d bytes\n", (int)sizeof(path));
207 return;
208 }
209 memcpy(path + rem, curr->oid_name, len);
210 if (curr != oidp)
211 path[rem + len] = '.';
212 }
213
214 memset(&req, 0, sizeof(req));
215
216 req.td = curthread;
217 req.oldfunc = sysctl_old_kernel;
218 req.newfunc = sysctl_new_kernel;
219 req.lock = REQ_UNWIRED;
220
221 switch (oidp->oid_kind & CTLTYPE) {
222 case CTLTYPE_INT:
223 if (getenv_array(path + rem, data, sizeof(data), &size,
224 sizeof(int), GETENV_SIGNED) == 0)
225 return;
226 req.newlen = size;
227 req.newptr = data;
228 break;
229 case CTLTYPE_UINT:
230 if (getenv_array(path + rem, data, sizeof(data), &size,
231 sizeof(int), GETENV_UNSIGNED) == 0)
232 return;
233 req.newlen = size;
234 req.newptr = data;
235 break;
236 case CTLTYPE_LONG:
237 if (getenv_array(path + rem, data, sizeof(data), &size,
238 sizeof(long), GETENV_SIGNED) == 0)
239 return;
240 req.newlen = size;
241 req.newptr = data;
242 break;
243 case CTLTYPE_ULONG:
244 if (getenv_array(path + rem, data, sizeof(data), &size,
245 sizeof(long), GETENV_UNSIGNED) == 0)
246 return;
247 req.newlen = size;
248 req.newptr = data;
249 break;
250 case CTLTYPE_S8:
251 if (getenv_array(path + rem, data, sizeof(data), &size,
252 sizeof(int8_t), GETENV_SIGNED) == 0)
253 return;
254 req.newlen = size;
255 req.newptr = data;
256 break;
257 case CTLTYPE_S16:
258 if (getenv_array(path + rem, data, sizeof(data), &size,
259 sizeof(int16_t), GETENV_SIGNED) == 0)
260 return;
261 req.newlen = size;
262 req.newptr = data;
263 break;
264 case CTLTYPE_S32:
265 if (getenv_array(path + rem, data, sizeof(data), &size,
266 sizeof(int32_t), GETENV_SIGNED) == 0)
267 return;
268 req.newlen = size;
269 req.newptr = data;
270 break;
271 case CTLTYPE_S64:
272 if (getenv_array(path + rem, data, sizeof(data), &size,
273 sizeof(int64_t), GETENV_SIGNED) == 0)
274 return;
275 req.newlen = size;
276 req.newptr = data;
277 break;
278 case CTLTYPE_U8:
279 if (getenv_array(path + rem, data, sizeof(data), &size,
280 sizeof(uint8_t), GETENV_UNSIGNED) == 0)
281 return;
282 req.newlen = size;
283 req.newptr = data;
284 break;
285 case CTLTYPE_U16:
286 if (getenv_array(path + rem, data, sizeof(data), &size,
287 sizeof(uint16_t), GETENV_UNSIGNED) == 0)
288 return;
289 req.newlen = size;
290 req.newptr = data;
291 break;
292 case CTLTYPE_U32:
293 if (getenv_array(path + rem, data, sizeof(data), &size,
294 sizeof(uint32_t), GETENV_UNSIGNED) == 0)
295 return;
296 req.newlen = size;
297 req.newptr = data;
298 break;
299 case CTLTYPE_U64:
300 if (getenv_array(path + rem, data, sizeof(data), &size,
301 sizeof(uint64_t), GETENV_UNSIGNED) == 0)
302 return;
303 req.newlen = size;
304 req.newptr = data;
305 break;
306 case CTLTYPE_STRING:
307 penv = kern_getenv(path + rem);
308 if (penv == NULL)
309 return;
310 req.newlen = strlen(penv);
311 req.newptr = penv;
312 break;
313 default:
314 return;
315 }
316 error = sysctl_root_handler_locked(oidp, oidp->oid_arg1,
317 oidp->oid_arg2, &req, NULL);
318 if (error != 0)
319 printf("Setting sysctl %s failed: %d\n", path + rem, error);
320 if (penv != NULL)
321 freeenv(penv);
322 }
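
/*
 * Illustrative sketch (not part of this file): a leaf created with
 * CTLFLAG_RDTUN is initialized by the function above from a kernel
 * environment variable with the same dotted name, e.g. one set in
 * /boot/loader.conf.  The "debug.mydrv_ring_size" name and variable
 * below are hypothetical.
 *
 *	static int mydrv_ring_size = 256;
 *	SYSCTL_INT(_debug, OID_AUTO, mydrv_ring_size, CTLFLAG_RDTUN,
 *	    &mydrv_ring_size, 0, "RX ring size (boot-time tunable)");
 *
 * With 'debug.mydrv_ring_size="512"' in loader.conf,
 * sysctl_register_oid() calls sysctl_load_tunable_by_oid_locked(),
 * which pushes the value through the oid's handler exactly as a
 * userland write would.
 */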
323
324 void
325 sysctl_register_oid(struct sysctl_oid *oidp)
326 {
327 struct sysctl_oid_list *parent = oidp->oid_parent;
328 struct sysctl_oid *p;
329 struct sysctl_oid *q;
330 int oid_number;
331 int timeout = 2;
332
333 /*
334 * First check if another oid with the same name already
335 * exists in the parent's list.
336 */
337 SYSCTL_ASSERT_WLOCKED();
338 p = sysctl_find_oidname(oidp->oid_name, parent);
339 if (p != NULL) {
340 if ((p->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
341 p->oid_refcnt++;
342 return;
343 } else {
344 printf("can't re-use a leaf (%s)!\n", p->oid_name);
345 return;
346 }
347 }
348 /* get current OID number */
349 oid_number = oidp->oid_number;
350
351 #if (OID_AUTO >= 0)
352 #error "OID_AUTO is expected to be a negative value"
353 #endif
354 /*
355 * Any negative OID number qualifies as OID_AUTO. Valid OID
356 * numbers should always be positive.
357 *
358 * NOTE: DO NOT change the starting value here, change it in
359 * <sys/sysctl.h>, and make sure it is at least 256 to
360 * accommodate e.g. net.inet.raw as a static sysctl node.
361 */
362 if (oid_number < 0) {
363 static int newoid;
364
365 /*
366 * By decrementing the next OID number we spend less
367 * time inserting the OIDs into a sorted list.
368 */
369 if (--newoid < CTL_AUTO_START)
370 newoid = 0x7fffffff;
371
372 oid_number = newoid;
373 }
374
375 /*
376 * Insert the OID into the parent's list sorted by OID number.
377 */
378 retry:
379 q = NULL;
380 SLIST_FOREACH(p, parent, oid_link) {
381 /* check if the current OID number is in use */
382 if (oid_number == p->oid_number) {
383 /* get the next valid OID number */
384 if (oid_number < CTL_AUTO_START ||
385 oid_number == 0x7fffffff) {
386 /* wraparound - restart */
387 oid_number = CTL_AUTO_START;
388 /* don't loop forever */
389 if (!timeout--)
390 panic("sysctl: Out of OID numbers\n");
391 goto retry;
392 } else {
393 oid_number++;
394 }
395 } else if (oid_number < p->oid_number)
396 break;
397 q = p;
398 }
399 /* check for non-auto OID number collision */
400 if (oidp->oid_number >= 0 && oidp->oid_number < CTL_AUTO_START &&
401 oid_number >= CTL_AUTO_START) {
402 printf("sysctl: OID number(%d) is already in use for '%s'\n",
403 oidp->oid_number, oidp->oid_name);
404 }
405 /* update the OID number, if any */
406 oidp->oid_number = oid_number;
407 if (q != NULL)
408 SLIST_INSERT_AFTER(q, oidp, oid_link);
409 else
410 SLIST_INSERT_HEAD(parent, oidp, oid_link);
411
412 if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE &&
413 #ifdef VIMAGE
414 (oidp->oid_kind & CTLFLAG_VNET) == 0 &&
415 #endif
416 (oidp->oid_kind & CTLFLAG_TUN) != 0 &&
417 (oidp->oid_kind & CTLFLAG_NOFETCH) == 0) {
418 /* only fetch value once */
419 oidp->oid_kind |= CTLFLAG_NOFETCH;
420 /* try to fetch value from kernel environment */
421 sysctl_load_tunable_by_oid_locked(oidp);
422 }
423 }
424
425 void
426 sysctl_register_disabled_oid(struct sysctl_oid *oidp)
427 {
428
429 /*
430 * Mark the leaf as dormant if it's not to be immediately enabled.
431 * We do not disable nodes as they can be shared between modules
432 * and it is always safe to access a node.
433 */
434 KASSERT((oidp->oid_kind & CTLFLAG_DORMANT) == 0,
435 ("internal flag is set in oid_kind"));
436 if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
437 oidp->oid_kind |= CTLFLAG_DORMANT;
438 sysctl_register_oid(oidp);
439 }
440
441 void
442 sysctl_enable_oid(struct sysctl_oid *oidp)
443 {
444
445 SYSCTL_ASSERT_WLOCKED();
446 if ((oidp->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
447 KASSERT((oidp->oid_kind & CTLFLAG_DORMANT) == 0,
448 ("sysctl node is marked as dormant"));
449 return;
450 }
451 KASSERT((oidp->oid_kind & CTLFLAG_DORMANT) != 0,
452 ("enabling already enabled sysctl oid"));
453 oidp->oid_kind &= ~CTLFLAG_DORMANT;
454 }
455
456 void
457 sysctl_unregister_oid(struct sysctl_oid *oidp)
458 {
459 struct sysctl_oid *p;
460 int error;
461
462 SYSCTL_ASSERT_WLOCKED();
463 if (oidp->oid_number == OID_AUTO) {
464 error = EINVAL;
465 } else {
466 error = ENOENT;
467 SLIST_FOREACH(p, oidp->oid_parent, oid_link) {
468 if (p == oidp) {
469 SLIST_REMOVE(oidp->oid_parent, oidp,
470 sysctl_oid, oid_link);
471 error = 0;
472 break;
473 }
474 }
475 }
476
477 /*
478 * This can happen when a module fails to register and is
479 * being unloaded afterwards. It should not be a panic()
480 * for normal use.
481 */
482 if (error) {
483 printf("%s: failed(%d) to unregister sysctl(%s)\n",
484 __func__, error, oidp->oid_name);
485 }
486 }
487
488 /* Initialize a new context to keep track of dynamically added sysctls. */
489 int
490 sysctl_ctx_init(struct sysctl_ctx_list *c)
491 {
492
493 if (c == NULL) {
494 return (EINVAL);
495 }
496
497 /*
498 * No locking here, the caller is responsible for not adding
499 * new nodes to a context until after this function has
500 * returned.
501 */
502 TAILQ_INIT(c);
503 return (0);
504 }
505
506 /* Free the context, and destroy all dynamic oids registered in this context */
507 int
508 sysctl_ctx_free(struct sysctl_ctx_list *clist)
509 {
510 struct sysctl_ctx_entry *e, *e1;
511 int error;
512
513 error = 0;
514 /*
515 * First perform a "dry run" to check if it's ok to remove oids.
516 * XXX FIXME
517 * XXX This algorithm is a hack. But I don't know any
518 * XXX better solution for now...
519 */
520 SYSCTL_WLOCK();
521 TAILQ_FOREACH(e, clist, link) {
522 error = sysctl_remove_oid_locked(e->entry, 0, 0);
523 if (error)
524 break;
525 }
526 /*
527 * Restore deregistered entries, either from the end,
528 * or from the place where error occurred.
529 * e contains the entry that was not unregistered
530 */
531 if (error)
532 e1 = TAILQ_PREV(e, sysctl_ctx_list, link);
533 else
534 e1 = TAILQ_LAST(clist, sysctl_ctx_list);
535 while (e1 != NULL) {
536 sysctl_register_oid(e1->entry);
537 e1 = TAILQ_PREV(e1, sysctl_ctx_list, link);
538 }
539 if (error) {
540 SYSCTL_WUNLOCK();
541 return(EBUSY);
542 }
543 /* Now really delete the entries */
544 e = TAILQ_FIRST(clist);
545 while (e != NULL) {
546 e1 = TAILQ_NEXT(e, link);
547 error = sysctl_remove_oid_locked(e->entry, 1, 0);
548 if (error)
549 panic("sysctl_remove_oid: corrupt tree, entry: %s",
550 e->entry->oid_name);
551 free(e, M_SYSCTLOID);
552 e = e1;
553 }
554 SYSCTL_WUNLOCK();
555 return (error);
556 }
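
/*
 * Typical context lifecycle (illustrative sketch; "foo_verbose" and the
 * attach/detach placement are hypothetical driver code, not part of
 * this file):
 *
 *	static struct sysctl_ctx_list foo_ctx;
 *	static int foo_verbose;
 *
 *	At attach time:
 *		sysctl_ctx_init(&foo_ctx);
 *		SYSCTL_ADD_INT(&foo_ctx, SYSCTL_STATIC_CHILDREN(_debug),
 *		    OID_AUTO, "foo_verbose", CTLFLAG_RW, &foo_verbose, 0,
 *		    "foo(4) verbosity level");
 *
 *	At detach time, one call removes every oid added via the context:
 *		sysctl_ctx_free(&foo_ctx);
 */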
557
558 /* Add an entry to the context */
559 struct sysctl_ctx_entry *
560 sysctl_ctx_entry_add(struct sysctl_ctx_list *clist, struct sysctl_oid *oidp)
561 {
562 struct sysctl_ctx_entry *e;
563
564 SYSCTL_ASSERT_WLOCKED();
565 if (clist == NULL || oidp == NULL)
566 return(NULL);
567 e = malloc(sizeof(struct sysctl_ctx_entry), M_SYSCTLOID, M_WAITOK);
568 e->entry = oidp;
569 TAILQ_INSERT_HEAD(clist, e, link);
570 return (e);
571 }
572
573 /* Find an entry in the context */
574 struct sysctl_ctx_entry *
575 sysctl_ctx_entry_find(struct sysctl_ctx_list *clist, struct sysctl_oid *oidp)
576 {
577 struct sysctl_ctx_entry *e;
578
579 SYSCTL_ASSERT_WLOCKED();
580 if (clist == NULL || oidp == NULL)
581 return(NULL);
582 TAILQ_FOREACH(e, clist, link) {
583 if(e->entry == oidp)
584 return(e);
585 }
586 return (e);
587 }
588
589 /*
590 * Delete an entry from the context.
591 * NOTE: this function doesn't free oidp! You have to remove it
592 * with sysctl_remove_oid().
593 */
594 int
595 sysctl_ctx_entry_del(struct sysctl_ctx_list *clist, struct sysctl_oid *oidp)
596 {
597 struct sysctl_ctx_entry *e;
598
599 if (clist == NULL || oidp == NULL)
600 return (EINVAL);
601 SYSCTL_WLOCK();
602 e = sysctl_ctx_entry_find(clist, oidp);
603 if (e != NULL) {
604 TAILQ_REMOVE(clist, e, link);
605 SYSCTL_WUNLOCK();
606 free(e, M_SYSCTLOID);
607 return (0);
608 } else {
609 SYSCTL_WUNLOCK();
610 return (ENOENT);
611 }
612 }
613
614 /*
615 * Remove dynamically created sysctl trees.
616 * oidp - top of the tree to be removed
617 * del - if 0 - just deregister, otherwise free up entries as well
618 * recurse - if != 0 traverse the subtree to be deleted
619 */
620 int
621 sysctl_remove_oid(struct sysctl_oid *oidp, int del, int recurse)
622 {
623 int error;
624
625 SYSCTL_WLOCK();
626 error = sysctl_remove_oid_locked(oidp, del, recurse);
627 SYSCTL_WUNLOCK();
628 return (error);
629 }
630
631 int
632 sysctl_remove_name(struct sysctl_oid *parent, const char *name,
633 int del, int recurse)
634 {
635 struct sysctl_oid *p, *tmp;
636 int error;
637
638 error = ENOENT;
639 SYSCTL_WLOCK();
640 SLIST_FOREACH_SAFE(p, SYSCTL_CHILDREN(parent), oid_link, tmp) {
641 if (strcmp(p->oid_name, name) == 0) {
642 error = sysctl_remove_oid_locked(p, del, recurse);
643 break;
644 }
645 }
646 SYSCTL_WUNLOCK();
647
648 return (error);
649 }
650
651
652 static int
653 sysctl_remove_oid_locked(struct sysctl_oid *oidp, int del, int recurse)
654 {
655 struct sysctl_oid *p, *tmp;
656 int error;
657
658 SYSCTL_ASSERT_WLOCKED();
659 if (oidp == NULL)
660 return(EINVAL);
661 if ((oidp->oid_kind & CTLFLAG_DYN) == 0) {
662 printf("Warning: can't remove non-dynamic nodes (%s)!\n",
663 oidp->oid_name);
664 return (EINVAL);
665 }
666 /*
667 * WARNING: normal method to do this should be through
668 * sysctl_ctx_free(). Use recursing as the last resort
669 * method to purge your sysctl tree of leftovers...
670 * However, if some other code still references these nodes,
671 * it will panic.
672 */
673 if ((oidp->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
674 if (oidp->oid_refcnt == 1) {
675 SLIST_FOREACH_SAFE(p,
676 SYSCTL_CHILDREN(oidp), oid_link, tmp) {
677 if (!recurse) {
678 printf("Warning: failed attempt to "
679 "remove oid %s with child %s\n",
680 oidp->oid_name, p->oid_name);
681 return (ENOTEMPTY);
682 }
683 error = sysctl_remove_oid_locked(p, del,
684 recurse);
685 if (error)
686 return (error);
687 }
688 }
689 }
690 if (oidp->oid_refcnt > 1 ) {
691 oidp->oid_refcnt--;
692 } else {
693 if (oidp->oid_refcnt == 0) {
694 printf("Warning: bad oid_refcnt=%u (%s)!\n",
695 oidp->oid_refcnt, oidp->oid_name);
696 return (EINVAL);
697 }
698 sysctl_unregister_oid(oidp);
699 if (del) {
700 /*
701 * Wait for all threads running the handler to drain.
702 * This preserves the previous behavior when the
703 * sysctl lock was held across a handler invocation,
704 * and is necessary for module unload correctness.
705 */
706 while (oidp->oid_running > 0) {
707 oidp->oid_kind |= CTLFLAG_DYING;
708 SYSCTL_SLEEP(&oidp->oid_running, "oidrm", 0);
709 }
710 if (oidp->oid_descr)
711 free(__DECONST(char *, oidp->oid_descr),
712 M_SYSCTLOID);
713 free(__DECONST(char *, oidp->oid_name), M_SYSCTLOID);
714 free(oidp, M_SYSCTLOID);
715 }
716 }
717 return (0);
718 }
719 /*
720 * Create new sysctls at run time.
721 * clist may point to a valid context initialized with sysctl_ctx_init().
722 */
723 struct sysctl_oid *
724 sysctl_add_oid(struct sysctl_ctx_list *clist, struct sysctl_oid_list *parent,
725 int number, const char *name, int kind, void *arg1, intmax_t arg2,
726 int (*handler)(SYSCTL_HANDLER_ARGS), const char *fmt, const char *descr)
727 {
728 struct sysctl_oid *oidp;
729
730 /* You have to hook up somewhere.. */
731 if (parent == NULL)
732 return(NULL);
733 /* Check if the node already exists, otherwise create it */
734 SYSCTL_WLOCK();
735 oidp = sysctl_find_oidname(name, parent);
736 if (oidp != NULL) {
737 if ((oidp->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
738 oidp->oid_refcnt++;
739 /* Update the context */
740 if (clist != NULL)
741 sysctl_ctx_entry_add(clist, oidp);
742 SYSCTL_WUNLOCK();
743 return (oidp);
744 } else {
745 SYSCTL_WUNLOCK();
746 printf("can't re-use a leaf (%s)!\n", name);
747 return (NULL);
748 }
749 }
750 oidp = malloc(sizeof(struct sysctl_oid), M_SYSCTLOID, M_WAITOK|M_ZERO);
751 oidp->oid_parent = parent;
752 SLIST_INIT(&oidp->oid_children);
753 oidp->oid_number = number;
754 oidp->oid_refcnt = 1;
755 oidp->oid_name = strdup(name, M_SYSCTLOID);
756 oidp->oid_handler = handler;
757 oidp->oid_kind = CTLFLAG_DYN | kind;
758 oidp->oid_arg1 = arg1;
759 oidp->oid_arg2 = arg2;
760 oidp->oid_fmt = fmt;
761 if (descr != NULL)
762 oidp->oid_descr = strdup(descr, M_SYSCTLOID);
763 /* Update the context, if used */
764 if (clist != NULL)
765 sysctl_ctx_entry_add(clist, oidp);
766 /* Register this oid */
767 sysctl_register_oid(oidp);
768 SYSCTL_WUNLOCK();
769 return (oidp);
770 }
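
/*
 * The SYSCTL_ADD_*() macros from <sys/sysctl.h> are the usual way to
 * reach sysctl_add_oid().  A hedged sketch of a dynamically added
 * procedure oid with a validating handler ("foo_limit", "foo_ctx" and
 * friends are hypothetical):
 *
 *	static int foo_limit = 16;
 *
 *	static int
 *	sysctl_foo_limit(SYSCTL_HANDLER_ARGS)
 *	{
 *		int error, val;
 *
 *		val = foo_limit;
 *		error = sysctl_handle_int(oidp, &val, 0, req);
 *		if (error != 0 || req->newptr == NULL)
 *			return (error);
 *		if (val < 1)
 *			return (EINVAL);
 *		foo_limit = val;
 *		return (0);
 *	}
 *
 *	SYSCTL_ADD_PROC(&foo_ctx, SYSCTL_STATIC_CHILDREN(_debug), OID_AUTO,
 *	    "foo_limit", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
 *	    NULL, 0, sysctl_foo_limit, "I", "validated foo limit");
 */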
771
772 /*
773 * Rename an existing oid.
774 */
775 void
776 sysctl_rename_oid(struct sysctl_oid *oidp, const char *name)
777 {
778 char *newname;
779 char *oldname;
780
781 newname = strdup(name, M_SYSCTLOID);
782 SYSCTL_WLOCK();
783 oldname = __DECONST(char *, oidp->oid_name);
784 oidp->oid_name = newname;
785 SYSCTL_WUNLOCK();
786 free(oldname, M_SYSCTLOID);
787 }
788
789 /*
790 * Reparent an existing oid.
791 */
792 int
793 sysctl_move_oid(struct sysctl_oid *oid, struct sysctl_oid_list *parent)
794 {
795 struct sysctl_oid *oidp;
796
797 SYSCTL_WLOCK();
798 if (oid->oid_parent == parent) {
799 SYSCTL_WUNLOCK();
800 return (0);
801 }
802 oidp = sysctl_find_oidname(oid->oid_name, parent);
803 if (oidp != NULL) {
804 SYSCTL_WUNLOCK();
805 return (EEXIST);
806 }
807 sysctl_unregister_oid(oid);
808 oid->oid_parent = parent;
809 oid->oid_number = OID_AUTO;
810 sysctl_register_oid(oid);
811 SYSCTL_WUNLOCK();
812 return (0);
813 }
814
815 /*
816 * Register the kernel's oids on startup.
817 */
818 SET_DECLARE(sysctl_set, struct sysctl_oid);
819
820 static void
821 sysctl_register_all(void *arg)
822 {
823 struct sysctl_oid **oidp;
824
825 sx_init(&sysctlmemlock, "sysctl mem");
826 SYSCTL_INIT();
827 SYSCTL_WLOCK();
828 SET_FOREACH(oidp, sysctl_set)
829 sysctl_register_oid(*oidp);
830 SYSCTL_WUNLOCK();
831 }
832 SYSINIT(sysctl, SI_SUB_KMEM, SI_ORDER_FIRST, sysctl_register_all, NULL);
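
/*
 * Statically declared oids reach sysctl_register_all() through the
 * "sysctl_set" linker set: the SYSCTL_NODE()/SYSCTL_INT()/... macros
 * emit a struct sysctl_oid into that set at compile time.  A sketch
 * (hypothetical names):
 *
 *	SYSCTL_NODE(_kern, OID_AUTO, foo, CTLFLAG_RW, 0,
 *	    "foo subsystem");
 *	SYSCTL_INT(_kern_foo, OID_AUTO, enable, CTLFLAG_RW,
 *	    &foo_enable, 0, "enable foo");
 *
 * Kernel modules use the same macros; their linker sets are processed
 * when the module is loaded rather than by this SYSINIT.
 */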
833
834 /*
835 * "Staff-functions"
836 *
837 * These functions implement a presently undocumented interface
838 * used by the sysctl program to walk the tree, and get the type
839 * so it can print the value.
840 * This interface is under work and consideration, and should probably
841 * be killed with a big axe by the first person who can find the time.
 * (be aware though, that the proper interface isn't as obvious as it
 * may seem; there are various conflicting requirements.)
844 *
845 * {0,0} printf the entire MIB-tree.
846 * {0,1,...} return the name of the "..." OID.
847 * {0,2,...} return the next OID.
848 * {0,3} return the OID of the name in "new"
849 * {0,4,...} return the kind & format info for the "..." OID.
 * {0,5,...} return the description of the "..." OID.
851 */
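
/*
 * A userland sketch of how the {0,3} (name2oid) node is used, roughly
 * what libc does for sysctlbyname(3): the name goes in as "new" data
 * and the numeric OID comes back as "old" data, after which the real
 * request can be issued.  Illustrative only; error handling omitted.
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <string.h>
 *
 *	int qoid[2], oid[CTL_MAXNAME];
 *	size_t len = sizeof(oid);
 *	const char *name = "kern.ostype";
 *
 *	qoid[0] = 0;	(sysctl internal magic)
 *	qoid[1] = 3;	(name2oid)
 *	sysctl(qoid, 2, oid, &len, name, strlen(name));
 *	(oid[] now holds len / sizeof(int) numeric MIB components)
 */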
852
853 #ifdef SYSCTL_DEBUG
854 static void
855 sysctl_sysctl_debug_dump_node(struct sysctl_oid_list *l, int i)
856 {
857 int k;
858 struct sysctl_oid *oidp;
859
860 SYSCTL_ASSERT_LOCKED();
861 SLIST_FOREACH(oidp, l, oid_link) {
862
863 for (k=0; k<i; k++)
864 printf(" ");
865
866 printf("%d %s ", oidp->oid_number, oidp->oid_name);
867
868 printf("%c%c",
869 oidp->oid_kind & CTLFLAG_RD ? 'R':' ',
870 oidp->oid_kind & CTLFLAG_WR ? 'W':' ');
871
872 if (oidp->oid_handler)
873 printf(" *Handler");
874
875 switch (oidp->oid_kind & CTLTYPE) {
876 case CTLTYPE_NODE:
877 printf(" Node\n");
878 if (!oidp->oid_handler) {
879 sysctl_sysctl_debug_dump_node(
880 SYSCTL_CHILDREN(oidp), i + 2);
881 }
882 break;
883 case CTLTYPE_INT: printf(" Int\n"); break;
884 case CTLTYPE_UINT: printf(" u_int\n"); break;
885 case CTLTYPE_LONG: printf(" Long\n"); break;
886 case CTLTYPE_ULONG: printf(" u_long\n"); break;
887 case CTLTYPE_STRING: printf(" String\n"); break;
888 case CTLTYPE_S8: printf(" int8_t\n"); break;
889 case CTLTYPE_S16: printf(" int16_t\n"); break;
890 case CTLTYPE_S32: printf(" int32_t\n"); break;
891 case CTLTYPE_S64: printf(" int64_t\n"); break;
892 case CTLTYPE_U8: printf(" uint8_t\n"); break;
893 case CTLTYPE_U16: printf(" uint16_t\n"); break;
894 case CTLTYPE_U32: printf(" uint32_t\n"); break;
895 case CTLTYPE_U64: printf(" uint64_t\n"); break;
896 case CTLTYPE_OPAQUE: printf(" Opaque/struct\n"); break;
897 default: printf("\n");
898 }
899
900 }
901 }
902
903 static int
904 sysctl_sysctl_debug(SYSCTL_HANDLER_ARGS)
905 {
906 struct rm_priotracker tracker;
907 int error;
908
909 error = priv_check(req->td, PRIV_SYSCTL_DEBUG);
910 if (error)
911 return (error);
912 SYSCTL_RLOCK(&tracker);
913 sysctl_sysctl_debug_dump_node(&sysctl__children, 0);
914 SYSCTL_RUNLOCK(&tracker);
915 return (ENOENT);
916 }
917
918 SYSCTL_PROC(_sysctl, 0, debug, CTLTYPE_STRING|CTLFLAG_RD|CTLFLAG_MPSAFE,
919 0, 0, sysctl_sysctl_debug, "-", "");
920 #endif
921
922 static int
923 sysctl_sysctl_name(SYSCTL_HANDLER_ARGS)
924 {
925 int *name = (int *) arg1;
926 u_int namelen = arg2;
927 int error = 0;
928 struct sysctl_oid *oid;
929 struct sysctl_oid_list *lsp = &sysctl__children, *lsp2;
930 struct rm_priotracker tracker;
931 char buf[10];
932
933 SYSCTL_RLOCK(&tracker);
934 while (namelen) {
935 if (!lsp) {
936 snprintf(buf,sizeof(buf),"%d",*name);
937 if (req->oldidx)
938 error = SYSCTL_OUT(req, ".", 1);
939 if (!error)
940 error = SYSCTL_OUT(req, buf, strlen(buf));
941 if (error)
942 goto out;
943 namelen--;
944 name++;
945 continue;
946 }
947 lsp2 = NULL;
948 SLIST_FOREACH(oid, lsp, oid_link) {
949 if (oid->oid_number != *name)
950 continue;
951
952 if (req->oldidx)
953 error = SYSCTL_OUT(req, ".", 1);
954 if (!error)
955 error = SYSCTL_OUT(req, oid->oid_name,
956 strlen(oid->oid_name));
957 if (error)
958 goto out;
959
960 namelen--;
961 name++;
962
963 if ((oid->oid_kind & CTLTYPE) != CTLTYPE_NODE)
964 break;
965
966 if (oid->oid_handler)
967 break;
968
969 lsp2 = SYSCTL_CHILDREN(oid);
970 break;
971 }
972 lsp = lsp2;
973 }
974 error = SYSCTL_OUT(req, "", 1);
975 out:
976 SYSCTL_RUNLOCK(&tracker);
977 return (error);
978 }
979
980 /*
981 * XXXRW/JA: Shouldn't return name data for nodes that we don't permit in
982 * capability mode.
983 */
984 static SYSCTL_NODE(_sysctl, 1, name, CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_CAPRD,
985 sysctl_sysctl_name, "");
986
987 static int
988 sysctl_sysctl_next_ls(struct sysctl_oid_list *lsp, int *name, u_int namelen,
989 int *next, int *len, int level, struct sysctl_oid **oidpp)
990 {
991 struct sysctl_oid *oidp;
992
993 SYSCTL_ASSERT_LOCKED();
994 *len = level;
995 SLIST_FOREACH(oidp, lsp, oid_link) {
996 *next = oidp->oid_number;
997 *oidpp = oidp;
998
999 if ((oidp->oid_kind & (CTLFLAG_SKIP | CTLFLAG_DORMANT)) != 0)
1000 continue;
1001
1002 if (!namelen) {
1003 if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
1004 return (0);
1005 if (oidp->oid_handler)
1006 /* We really should call the handler here...*/
1007 return (0);
1008 lsp = SYSCTL_CHILDREN(oidp);
1009 if (!sysctl_sysctl_next_ls(lsp, 0, 0, next+1,
1010 len, level+1, oidpp))
1011 return (0);
1012 goto emptynode;
1013 }
1014
1015 if (oidp->oid_number < *name)
1016 continue;
1017
1018 if (oidp->oid_number > *name) {
1019 if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
1020 return (0);
1021 if (oidp->oid_handler)
1022 return (0);
1023 lsp = SYSCTL_CHILDREN(oidp);
1024 if (!sysctl_sysctl_next_ls(lsp, name+1, namelen-1,
1025 next+1, len, level+1, oidpp))
1026 return (0);
1027 goto next;
1028 }
1029 if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
1030 continue;
1031
1032 if (oidp->oid_handler)
1033 continue;
1034
1035 lsp = SYSCTL_CHILDREN(oidp);
1036 if (!sysctl_sysctl_next_ls(lsp, name+1, namelen-1, next+1,
1037 len, level+1, oidpp))
1038 return (0);
1039 next:
1040 namelen = 1;
1041 emptynode:
1042 *len = level;
1043 }
1044 return (1);
1045 }
1046
1047 static int
1048 sysctl_sysctl_next(SYSCTL_HANDLER_ARGS)
1049 {
1050 int *name = (int *) arg1;
1051 u_int namelen = arg2;
1052 int i, j, error;
1053 struct sysctl_oid *oid;
1054 struct sysctl_oid_list *lsp = &sysctl__children;
1055 struct rm_priotracker tracker;
1056 int newoid[CTL_MAXNAME];
1057
1058 SYSCTL_RLOCK(&tracker);
1059 i = sysctl_sysctl_next_ls(lsp, name, namelen, newoid, &j, 1, &oid);
1060 SYSCTL_RUNLOCK(&tracker);
1061 if (i)
1062 return (ENOENT);
1063 error = SYSCTL_OUT(req, newoid, j * sizeof (int));
1064 return (error);
1065 }
1066
1067 /*
1068 * XXXRW/JA: Shouldn't return next data for nodes that we don't permit in
1069 * capability mode.
1070 */
1071 static SYSCTL_NODE(_sysctl, 2, next, CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_CAPRD,
1072 sysctl_sysctl_next, "");
1073
1074 static int
1075 name2oid(char *name, int *oid, int *len, struct sysctl_oid **oidpp)
1076 {
1077 struct sysctl_oid *oidp;
1078 struct sysctl_oid_list *lsp = &sysctl__children;
1079 char *p;
1080
1081 SYSCTL_ASSERT_LOCKED();
1082
1083 for (*len = 0; *len < CTL_MAXNAME;) {
1084 p = strsep(&name, ".");
1085
1086 oidp = SLIST_FIRST(lsp);
1087 for (;; oidp = SLIST_NEXT(oidp, oid_link)) {
1088 if (oidp == NULL)
1089 return (ENOENT);
1090 if (strcmp(p, oidp->oid_name) == 0)
1091 break;
1092 }
1093 *oid++ = oidp->oid_number;
1094 (*len)++;
1095
1096 if (name == NULL || *name == '\0') {
1097 if (oidpp)
1098 *oidpp = oidp;
1099 return (0);
1100 }
1101
1102 if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
1103 break;
1104
1105 if (oidp->oid_handler)
1106 break;
1107
1108 lsp = SYSCTL_CHILDREN(oidp);
1109 }
1110 return (ENOENT);
1111 }
1112
1113 static int
1114 sysctl_sysctl_name2oid(SYSCTL_HANDLER_ARGS)
1115 {
1116 char *p;
1117 int error, oid[CTL_MAXNAME], len = 0;
1118 struct sysctl_oid *op = NULL;
1119 struct rm_priotracker tracker;
1120 char buf[32];
1121
1122 if (!req->newlen)
1123 return (ENOENT);
1124 if (req->newlen >= MAXPATHLEN) /* XXX arbitrary, undocumented */
1125 return (ENAMETOOLONG);
1126
1127 p = buf;
1128 if (req->newlen >= sizeof(buf))
1129 p = malloc(req->newlen+1, M_SYSCTL, M_WAITOK);
1130
1131 error = SYSCTL_IN(req, p, req->newlen);
1132 if (error) {
1133 if (p != buf)
1134 free(p, M_SYSCTL);
1135 return (error);
1136 }
1137
1138 p [req->newlen] = '\0';
1139
1140 SYSCTL_RLOCK(&tracker);
1141 error = name2oid(p, oid, &len, &op);
1142 SYSCTL_RUNLOCK(&tracker);
1143
1144 if (p != buf)
1145 free(p, M_SYSCTL);
1146
1147 if (error)
1148 return (error);
1149
1150 error = SYSCTL_OUT(req, oid, len * sizeof *oid);
1151 return (error);
1152 }
1153
1154 /*
1155 * XXXRW/JA: Shouldn't return name2oid data for nodes that we don't permit in
1156 * capability mode.
1157 */
1158 SYSCTL_PROC(_sysctl, 3, name2oid,
1159 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MPSAFE
1160 | CTLFLAG_CAPRW, 0, 0, sysctl_sysctl_name2oid, "I", "");
1161
1162 static int
1163 sysctl_sysctl_oidfmt(SYSCTL_HANDLER_ARGS)
1164 {
1165 struct sysctl_oid *oid;
1166 struct rm_priotracker tracker;
1167 int error;
1168
1169 SYSCTL_RLOCK(&tracker);
1170 error = sysctl_find_oid(arg1, arg2, &oid, NULL, req);
1171 if (error)
1172 goto out;
1173
1174 if (oid->oid_fmt == NULL) {
1175 error = ENOENT;
1176 goto out;
1177 }
1178 error = SYSCTL_OUT(req, &oid->oid_kind, sizeof(oid->oid_kind));
1179 if (error)
1180 goto out;
1181 error = SYSCTL_OUT(req, oid->oid_fmt, strlen(oid->oid_fmt) + 1);
1182 out:
1183 SYSCTL_RUNLOCK(&tracker);
1184 return (error);
1185 }
1186
1187
1188 static SYSCTL_NODE(_sysctl, 4, oidfmt, CTLFLAG_RD|CTLFLAG_MPSAFE|CTLFLAG_CAPRD,
1189 sysctl_sysctl_oidfmt, "");
1190
1191 static int
1192 sysctl_sysctl_oiddescr(SYSCTL_HANDLER_ARGS)
1193 {
1194 struct sysctl_oid *oid;
1195 struct rm_priotracker tracker;
1196 int error;
1197
1198 SYSCTL_RLOCK(&tracker);
1199 error = sysctl_find_oid(arg1, arg2, &oid, NULL, req);
1200 if (error)
1201 goto out;
1202
1203 if (oid->oid_descr == NULL) {
1204 error = ENOENT;
1205 goto out;
1206 }
1207 error = SYSCTL_OUT(req, oid->oid_descr, strlen(oid->oid_descr) + 1);
1208 out:
1209 SYSCTL_RUNLOCK(&tracker);
1210 return (error);
1211 }
1212
1213 static SYSCTL_NODE(_sysctl, 5, oiddescr, CTLFLAG_RD|CTLFLAG_MPSAFE|CTLFLAG_CAPRD,
1214 sysctl_sysctl_oiddescr, "");
1215
1216 /*
1217 * Default "handler" functions.
1218 */
1219
1220 /*
1221 * Handle a bool.
1222 * Two cases:
1223 * a variable: point arg1 at it.
1224 * a constant: pass it in arg2.
1225 */
1226
1227 int
1228 sysctl_handle_bool(SYSCTL_HANDLER_ARGS)
1229 {
1230 uint8_t temp;
1231 int error;
1232
1233 /*
1234 * Attempt to get a coherent snapshot by making a copy of the data.
1235 */
1236 if (arg1)
1237 temp = *(bool *)arg1 ? 1 : 0;
1238 else
1239 temp = arg2 ? 1 : 0;
1240
1241 error = SYSCTL_OUT(req, &temp, sizeof(temp));
1242 if (error || !req->newptr)
1243 return (error);
1244
1245 if (!arg1)
1246 error = EPERM;
1247 else {
1248 error = SYSCTL_IN(req, &temp, sizeof(temp));
1249 if (!error)
1250 *(bool *)arg1 = temp ? 1 : 0;
1251 }
1252 return (error);
1253 }
1254
1255 /*
1256 * Handle an int8_t, signed or unsigned.
1257 * Two cases:
1258 * a variable: point arg1 at it.
1259 * a constant: pass it in arg2.
1260 */
1261
1262 int
1263 sysctl_handle_8(SYSCTL_HANDLER_ARGS)
1264 {
1265 int8_t tmpout;
1266 int error = 0;
1267
1268 /*
1269 * Attempt to get a coherent snapshot by making a copy of the data.
1270 */
1271 if (arg1)
1272 tmpout = *(int8_t *)arg1;
1273 else
1274 tmpout = arg2;
1275 error = SYSCTL_OUT(req, &tmpout, sizeof(tmpout));
1276
1277 if (error || !req->newptr)
1278 return (error);
1279
1280 if (!arg1)
1281 error = EPERM;
1282 else
1283 error = SYSCTL_IN(req, arg1, sizeof(tmpout));
1284 return (error);
1285 }
1286
1287 /*
1288 * Handle an int16_t, signed or unsigned.
1289 * Two cases:
1290 * a variable: point arg1 at it.
1291 * a constant: pass it in arg2.
1292 */
1293
1294 int
1295 sysctl_handle_16(SYSCTL_HANDLER_ARGS)
1296 {
1297 int16_t tmpout;
1298 int error = 0;
1299
1300 /*
1301 * Attempt to get a coherent snapshot by making a copy of the data.
1302 */
1303 if (arg1)
1304 tmpout = *(int16_t *)arg1;
1305 else
1306 tmpout = arg2;
1307 error = SYSCTL_OUT(req, &tmpout, sizeof(tmpout));
1308
1309 if (error || !req->newptr)
1310 return (error);
1311
1312 if (!arg1)
1313 error = EPERM;
1314 else
1315 error = SYSCTL_IN(req, arg1, sizeof(tmpout));
1316 return (error);
1317 }
1318
1319 /*
1320 * Handle an int32_t, signed or unsigned.
1321 * Two cases:
1322 * a variable: point arg1 at it.
1323 * a constant: pass it in arg2.
1324 */
1325
1326 int
1327 sysctl_handle_32(SYSCTL_HANDLER_ARGS)
1328 {
1329 int32_t tmpout;
1330 int error = 0;
1331
1332 /*
1333 * Attempt to get a coherent snapshot by making a copy of the data.
1334 */
1335 if (arg1)
1336 tmpout = *(int32_t *)arg1;
1337 else
1338 tmpout = arg2;
1339 error = SYSCTL_OUT(req, &tmpout, sizeof(tmpout));
1340
1341 if (error || !req->newptr)
1342 return (error);
1343
1344 if (!arg1)
1345 error = EPERM;
1346 else
1347 error = SYSCTL_IN(req, arg1, sizeof(tmpout));
1348 return (error);
1349 }
1350
1351 /*
1352 * Handle an int, signed or unsigned.
1353 * Two cases:
1354 * a variable: point arg1 at it.
1355 * a constant: pass it in arg2.
1356 */
1357
1358 int
1359 sysctl_handle_int(SYSCTL_HANDLER_ARGS)
1360 {
1361 int tmpout, error = 0;
1362
1363 /*
1364 * Attempt to get a coherent snapshot by making a copy of the data.
1365 */
1366 if (arg1)
1367 tmpout = *(int *)arg1;
1368 else
1369 tmpout = arg2;
1370 error = SYSCTL_OUT(req, &tmpout, sizeof(int));
1371
1372 if (error || !req->newptr)
1373 return (error);
1374
1375 if (!arg1)
1376 error = EPERM;
1377 else
1378 error = SYSCTL_IN(req, arg1, sizeof(int));
1379 return (error);
1380 }
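
/*
 * The two cases described above look like this at the declaration site
 * (illustrative; "foo_count" and FOO_MAX are hypothetical):
 *
 *	A variable: arg1 points at it, arg2 is unused:
 *		SYSCTL_INT(_debug, OID_AUTO, foo_count, CTLFLAG_RW,
 *		    &foo_count, 0, "current foo count");
 *
 *	A constant: arg1 is NULL and the value travels in arg2:
 *		SYSCTL_PROC(_debug, OID_AUTO, foo_max,
 *		    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE,
 *		    NULL, FOO_MAX, sysctl_handle_int, "I",
 *		    "compile-time maximum");
 */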
1381
1382 /*
 * Based on sysctl_handle_int(); converts milliseconds into ticks.
1384 * Note: this is used by TCP.
1385 */
1386
1387 int
1388 sysctl_msec_to_ticks(SYSCTL_HANDLER_ARGS)
1389 {
1390 int error, s, tt;
1391
1392 tt = *(int *)arg1;
1393 s = (int)((int64_t)tt * 1000 / hz);
1394
1395 error = sysctl_handle_int(oidp, &s, 0, req);
1396 if (error || !req->newptr)
1397 return (error);
1398
1399 tt = (int)((int64_t)s * hz / 1000);
1400 if (tt < 1)
1401 return (EINVAL);
1402
1403 *(int *)arg1 = tt;
1404 return (0);
1405 }
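
/*
 * A hedged example of how this handler is meant to be wired up: the
 * kernel variable stores ticks while userland reads and writes
 * milliseconds ("foo_timeout_ticks" is a hypothetical variable, not a
 * real TCP knob):
 *
 *	static int foo_timeout_ticks;
 *	SYSCTL_PROC(_debug, OID_AUTO, foo_timeout_ms,
 *	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
 *	    &foo_timeout_ticks, 0, sysctl_msec_to_ticks, "I",
 *	    "foo timeout (milliseconds)");
 */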
1406
1407
1408 /*
1409 * Handle a long, signed or unsigned.
1410 * Two cases:
1411 * a variable: point arg1 at it.
1412 * a constant: pass it in arg2.
1413 */
1414
1415 int
1416 sysctl_handle_long(SYSCTL_HANDLER_ARGS)
1417 {
1418 int error = 0;
1419 long tmplong;
1420 #ifdef SCTL_MASK32
1421 int tmpint;
1422 #endif
1423
1424 /*
1425 * Attempt to get a coherent snapshot by making a copy of the data.
1426 */
1427 if (arg1)
1428 tmplong = *(long *)arg1;
1429 else
1430 tmplong = arg2;
1431 #ifdef SCTL_MASK32
1432 if (req->flags & SCTL_MASK32) {
1433 tmpint = tmplong;
1434 error = SYSCTL_OUT(req, &tmpint, sizeof(int));
1435 } else
1436 #endif
1437 error = SYSCTL_OUT(req, &tmplong, sizeof(long));
1438
1439 if (error || !req->newptr)
1440 return (error);
1441
1442 if (!arg1)
1443 error = EPERM;
1444 #ifdef SCTL_MASK32
1445 else if (req->flags & SCTL_MASK32) {
1446 error = SYSCTL_IN(req, &tmpint, sizeof(int));
1447 *(long *)arg1 = (long)tmpint;
1448 }
1449 #endif
1450 else
1451 error = SYSCTL_IN(req, arg1, sizeof(long));
1452 return (error);
1453 }
1454
1455 /*
1456 * Handle a 64 bit int, signed or unsigned.
1457 * Two cases:
1458 * a variable: point arg1 at it.
1459 * a constant: pass it in arg2.
1460 */
1461 int
1462 sysctl_handle_64(SYSCTL_HANDLER_ARGS)
1463 {
1464 int error = 0;
1465 uint64_t tmpout;
1466
1467 /*
1468 * Attempt to get a coherent snapshot by making a copy of the data.
1469 */
1470 if (arg1)
1471 tmpout = *(uint64_t *)arg1;
1472 else
1473 tmpout = arg2;
1474 error = SYSCTL_OUT(req, &tmpout, sizeof(uint64_t));
1475
1476 if (error || !req->newptr)
1477 return (error);
1478
1479 if (!arg1)
1480 error = EPERM;
1481 else
1482 error = SYSCTL_IN(req, arg1, sizeof(uint64_t));
1483 return (error);
1484 }
1485
1486 /*
1487 * Handle our generic '\0' terminated 'C' string.
1488 * Two cases:
1489 * a variable string: point arg1 at it, arg2 is max length.
1490 * a constant string: point arg1 at it, arg2 is zero.
1491 */
1492
1493 int
1494 sysctl_handle_string(SYSCTL_HANDLER_ARGS)
1495 {
1496 size_t outlen;
1497 int error = 0, ro_string = 0;
1498
1499 /*
1500 * A zero-length buffer indicates a fixed size read-only
1501 * string:
1502 */
1503 if (arg2 == 0) {
1504 arg2 = strlen((char *)arg1) + 1;
1505 ro_string = 1;
1506 }
1507
1508 if (req->oldptr != NULL) {
1509 char *tmparg;
1510
1511 if (ro_string) {
1512 tmparg = arg1;
1513 } else {
1514 /* try to make a coherent snapshot of the string */
1515 tmparg = malloc(arg2, M_SYSCTLTMP, M_WAITOK);
1516 memcpy(tmparg, arg1, arg2);
1517 }
1518
1519 outlen = strnlen(tmparg, arg2 - 1) + 1;
1520 error = SYSCTL_OUT(req, tmparg, outlen);
1521
1522 if (!ro_string)
1523 free(tmparg, M_SYSCTLTMP);
1524 } else {
1525 outlen = strnlen((char *)arg1, arg2 - 1) + 1;
1526 error = SYSCTL_OUT(req, NULL, outlen);
1527 }
1528 if (error || !req->newptr)
1529 return (error);
1530
1531 if ((req->newlen - req->newidx) >= arg2) {
1532 error = EINVAL;
1533 } else {
1534 arg2 = (req->newlen - req->newidx);
1535 error = SYSCTL_IN(req, arg1, arg2);
1536 ((char *)arg1)[arg2] = '\0';
1537 }
1538 return (error);
1539 }
1540
1541 /*
1542 * Handle any kind of opaque data.
1543 * arg1 points to it, arg2 is the size.
1544 */
1545
1546 int
1547 sysctl_handle_opaque(SYSCTL_HANDLER_ARGS)
1548 {
1549 int error, tries;
1550 u_int generation;
1551 struct sysctl_req req2;
1552
1553 /*
1554 * Attempt to get a coherent snapshot, by using the thread
1555 * pre-emption counter updated from within mi_switch() to
1556 * determine if we were pre-empted during a bcopy() or
1557 * copyout(). Make 3 attempts at doing this before giving up.
1558 * If we encounter an error, stop immediately.
1559 */
1560 tries = 0;
1561 req2 = *req;
1562 retry:
1563 generation = curthread->td_generation;
1564 error = SYSCTL_OUT(req, arg1, arg2);
1565 if (error)
1566 return (error);
1567 tries++;
1568 if (generation != curthread->td_generation && tries < 3) {
1569 *req = req2;
1570 goto retry;
1571 }
1572
1573 error = SYSCTL_IN(req, arg1, arg2);
1574
1575 return (error);
1576 }
1577
1578 /*
1579 * Convert seconds to a struct timeval. Intended for use with
1580 * intervals and thus does not permit negative seconds.
1581 */
1582 int
1583 sysctl_sec_to_timeval(SYSCTL_HANDLER_ARGS)
1584 {
1585 struct timeval *tv;
1586 int error, secs;
1587
1588 tv = arg1;
1589 secs = tv->tv_sec;
1590
1591 error = sysctl_handle_int(oidp, &secs, 0, req);
1592 if (error || req->newptr == NULL)
1593 return (error);
1594
1595 if (secs < 0)
1596 return (EINVAL);
1597 tv->tv_sec = secs;
1598
1599 return (0);
1600 }
1601
1602 /*
1603 * Transfer functions to/from kernel space.
1604 * XXX: rather untested at this point
1605 */
1606 static int
1607 sysctl_old_kernel(struct sysctl_req *req, const void *p, size_t l)
1608 {
1609 size_t i = 0;
1610
1611 if (req->oldptr) {
1612 i = l;
1613 if (req->oldlen <= req->oldidx)
1614 i = 0;
1615 else
1616 if (i > req->oldlen - req->oldidx)
1617 i = req->oldlen - req->oldidx;
1618 if (i > 0)
1619 bcopy(p, (char *)req->oldptr + req->oldidx, i);
1620 }
1621 req->oldidx += l;
1622 if (req->oldptr && i != l)
1623 return (ENOMEM);
1624 return (0);
1625 }
1626
1627 static int
1628 sysctl_new_kernel(struct sysctl_req *req, void *p, size_t l)
1629 {
1630 if (!req->newptr)
1631 return (0);
1632 if (req->newlen - req->newidx < l)
1633 return (EINVAL);
1634 bcopy((char *)req->newptr + req->newidx, p, l);
1635 req->newidx += l;
1636 return (0);
1637 }
1638
1639 int
1640 kernel_sysctl(struct thread *td, int *name, u_int namelen, void *old,
1641 size_t *oldlenp, void *new, size_t newlen, size_t *retval, int flags)
1642 {
1643 int error = 0;
1644 struct sysctl_req req;
1645
1646 bzero(&req, sizeof req);
1647
1648 req.td = td;
1649 req.flags = flags;
1650
1651 if (oldlenp) {
1652 req.oldlen = *oldlenp;
1653 }
1654 req.validlen = req.oldlen;
1655
1656 if (old) {
1657 req.oldptr= old;
1658 }
1659
1660 if (new != NULL) {
1661 req.newlen = newlen;
1662 req.newptr = new;
1663 }
1664
1665 req.oldfunc = sysctl_old_kernel;
1666 req.newfunc = sysctl_new_kernel;
1667 req.lock = REQ_UNWIRED;
1668
1669 error = sysctl_root(0, name, namelen, &req);
1670
1671 if (req.lock == REQ_WIRED && req.validlen > 0)
1672 vsunlock(req.oldptr, req.validlen);
1673
1674 if (error && error != ENOMEM)
1675 return (error);
1676
1677 if (retval) {
1678 if (req.oldptr && req.oldidx > req.validlen)
1679 *retval = req.validlen;
1680 else
1681 *retval = req.oldidx;
1682 }
1683 return (error);
1684 }
1685
1686 int
1687 kernel_sysctlbyname(struct thread *td, char *name, void *old, size_t *oldlenp,
1688 void *new, size_t newlen, size_t *retval, int flags)
1689 {
1690 int oid[CTL_MAXNAME];
1691 size_t oidlen, plen;
1692 int error;
1693
1694 oid[0] = 0; /* sysctl internal magic */
1695 oid[1] = 3; /* name2oid */
1696 oidlen = sizeof(oid);
1697
1698 error = kernel_sysctl(td, oid, 2, oid, &oidlen,
1699 (void *)name, strlen(name), &plen, flags);
1700 if (error)
1701 return (error);
1702
1703 error = kernel_sysctl(td, oid, plen / sizeof(int), old, oldlenp,
1704 new, newlen, retval, flags);
1705 return (error);
1706 }
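
/*
 * In-kernel callers can use the function above to read or write a
 * sysctl by name without building a MIB vector themselves.  An
 * illustrative sketch (reads the real "kern.maxfiles" leaf, which is a
 * plain int):
 *
 *	char name[] = "kern.maxfiles";
 *	int maxfiles, error;
 *	size_t len = sizeof(maxfiles);
 *
 *	error = kernel_sysctlbyname(curthread, name, &maxfiles, &len,
 *	    NULL, 0, NULL, 0);
 *	if (error == 0)
 *		printf("kern.maxfiles = %d\n", maxfiles);
 */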
1707
1708 /*
1709 * Transfer function to/from user space.
1710 */
1711 static int
1712 sysctl_old_user(struct sysctl_req *req, const void *p, size_t l)
1713 {
1714 size_t i, len, origidx;
1715 int error;
1716
1717 origidx = req->oldidx;
1718 req->oldidx += l;
1719 if (req->oldptr == NULL)
1720 return (0);
1721 /*
1722 * If we have not wired the user supplied buffer and we are currently
1723 * holding locks, drop a witness warning, as it's possible that
1724 * write operations to the user page can sleep.
1725 */
1726 if (req->lock != REQ_WIRED)
1727 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1728 "sysctl_old_user()");
1729 i = l;
1730 len = req->validlen;
1731 if (len <= origidx)
1732 i = 0;
1733 else {
1734 if (i > len - origidx)
1735 i = len - origidx;
1736 if (req->lock == REQ_WIRED) {
1737 error = copyout_nofault(p, (char *)req->oldptr +
1738 origidx, i);
1739 } else
1740 error = copyout(p, (char *)req->oldptr + origidx, i);
1741 if (error != 0)
1742 return (error);
1743 }
1744 if (i < l)
1745 return (ENOMEM);
1746 return (0);
1747 }
1748
1749 static int
1750 sysctl_new_user(struct sysctl_req *req, void *p, size_t l)
1751 {
1752 int error;
1753
1754 if (!req->newptr)
1755 return (0);
1756 if (req->newlen - req->newidx < l)
1757 return (EINVAL);
1758 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1759 "sysctl_new_user()");
1760 error = copyin((char *)req->newptr + req->newidx, p, l);
1761 req->newidx += l;
1762 return (error);
1763 }
1764
1765 /*
1766 * Wire the user space destination buffer. If set to a value greater than
1767 * zero, the len parameter limits the maximum amount of wired memory.
1768 */
1769 int
1770 sysctl_wire_old_buffer(struct sysctl_req *req, size_t len)
1771 {
1772 int ret;
1773 size_t wiredlen;
1774
1775 wiredlen = (len > 0 && len < req->oldlen) ? len : req->oldlen;
1776 ret = 0;
1777 if (req->lock != REQ_WIRED && req->oldptr &&
1778 req->oldfunc == sysctl_old_user) {
1779 if (wiredlen != 0) {
1780 ret = vslock(req->oldptr, wiredlen);
1781 if (ret != 0) {
1782 if (ret != ENOMEM)
1783 return (ret);
1784 wiredlen = 0;
1785 }
1786 }
1787 req->lock = REQ_WIRED;
1788 req->validlen = wiredlen;
1789 }
1790 return (0);
1791 }
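
/*
 * Handlers that must copy out while holding a non-sleepable lock
 * typically wire the destination first, so that SYSCTL_OUT() goes
 * through copyout_nofault() and cannot sleep on a page fault.  An
 * illustrative sketch ("foo_mtx" and "foo_table" are hypothetical):
 *
 *	static int
 *	sysctl_foo_table(SYSCTL_HANDLER_ARGS)
 *	{
 *		int error;
 *
 *		error = sysctl_wire_old_buffer(req, 0);
 *		if (error != 0)
 *			return (error);
 *		mtx_lock(&foo_mtx);
 *		error = SYSCTL_OUT(req, foo_table, sizeof(foo_table));
 *		mtx_unlock(&foo_mtx);
 *		return (error);
 *	}
 */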
1792
1793 int
1794 sysctl_find_oid(int *name, u_int namelen, struct sysctl_oid **noid,
1795 int *nindx, struct sysctl_req *req)
1796 {
1797 struct sysctl_oid_list *lsp;
1798 struct sysctl_oid *oid;
1799 int indx;
1800
1801 SYSCTL_ASSERT_LOCKED();
1802 lsp = &sysctl__children;
1803 indx = 0;
1804 while (indx < CTL_MAXNAME) {
1805 SLIST_FOREACH(oid, lsp, oid_link) {
1806 if (oid->oid_number == name[indx])
1807 break;
1808 }
1809 if (oid == NULL)
1810 return (ENOENT);
1811
1812 indx++;
1813 if ((oid->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
1814 if (oid->oid_handler != NULL || indx == namelen) {
1815 *noid = oid;
1816 if (nindx != NULL)
1817 *nindx = indx;
1818 KASSERT((oid->oid_kind & CTLFLAG_DYING) == 0,
1819 ("%s found DYING node %p", __func__, oid));
1820 return (0);
1821 }
1822 lsp = SYSCTL_CHILDREN(oid);
1823 } else if (indx == namelen) {
1824 if ((oid->oid_kind & CTLFLAG_DORMANT) != 0)
1825 return (ENOENT);
1826 *noid = oid;
1827 if (nindx != NULL)
1828 *nindx = indx;
1829 KASSERT((oid->oid_kind & CTLFLAG_DYING) == 0,
1830 ("%s found DYING node %p", __func__, oid));
1831 return (0);
1832 } else {
1833 return (ENOTDIR);
1834 }
1835 }
1836 return (ENOENT);
1837 }
1838
1839 /*
1840 * Traverse our tree, and find the right node, execute whatever it points
1841 * to, and return the resulting error code.
1842 */
1843
1844 static int
1845 sysctl_root(SYSCTL_HANDLER_ARGS)
1846 {
1847 struct sysctl_oid *oid;
1848 struct rm_priotracker tracker;
1849 int error, indx, lvl;
1850
1851 SYSCTL_RLOCK(&tracker);
1852
1853 error = sysctl_find_oid(arg1, arg2, &oid, &indx, req);
1854 if (error)
1855 goto out;
1856
1857 if ((oid->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
1858 /*
 * You can't call a sysctl when it's a node that has no
 * handler.  Inform the user that it's a node.
1861 * The indx may or may not be the same as namelen.
1862 */
1863 if (oid->oid_handler == NULL) {
1864 error = EISDIR;
1865 goto out;
1866 }
1867 }
1868
1869 /* Is this sysctl writable? */
1870 if (req->newptr && !(oid->oid_kind & CTLFLAG_WR)) {
1871 error = EPERM;
1872 goto out;
1873 }
1874
1875 KASSERT(req->td != NULL, ("sysctl_root(): req->td == NULL"));
1876
1877 #ifdef CAPABILITY_MODE
1878 /*
1879 * If the process is in capability mode, then don't permit reading or
1880 * writing unless specifically granted for the node.
1881 */
1882 if (IN_CAPABILITY_MODE(req->td)) {
1883 if ((req->oldptr && !(oid->oid_kind & CTLFLAG_CAPRD)) ||
1884 (req->newptr && !(oid->oid_kind & CTLFLAG_CAPWR))) {
1885 error = EPERM;
1886 goto out;
1887 }
1888 }
1889 #endif
1890
1891 /* Is this sysctl sensitive to securelevels? */
1892 if (req->newptr && (oid->oid_kind & CTLFLAG_SECURE)) {
1893 lvl = (oid->oid_kind & CTLMASK_SECURE) >> CTLSHIFT_SECURE;
1894 error = securelevel_gt(req->td->td_ucred, lvl);
1895 if (error)
1896 goto out;
1897 }
1898
1899 /* Is this sysctl writable by only privileged users? */
1900 if (req->newptr && !(oid->oid_kind & CTLFLAG_ANYBODY)) {
1901 int priv;
1902
1903 if (oid->oid_kind & CTLFLAG_PRISON)
1904 priv = PRIV_SYSCTL_WRITEJAIL;
1905 #ifdef VIMAGE
1906 else if ((oid->oid_kind & CTLFLAG_VNET) &&
1907 prison_owns_vnet(req->td->td_ucred))
1908 priv = PRIV_SYSCTL_WRITEJAIL;
1909 #endif
1910 else
1911 priv = PRIV_SYSCTL_WRITE;
1912 error = priv_check(req->td, priv);
1913 if (error)
1914 goto out;
1915 }
1916
1917 if (!oid->oid_handler) {
1918 error = EINVAL;
1919 goto out;
1920 }
1921
1922 if ((oid->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
1923 arg1 = (int *)arg1 + indx;
1924 arg2 -= indx;
1925 } else {
1926 arg1 = oid->oid_arg1;
1927 arg2 = oid->oid_arg2;
1928 }
1929 #ifdef MAC
1930 error = mac_system_check_sysctl(req->td->td_ucred, oid, arg1, arg2,
1931 req);
1932 if (error != 0)
1933 goto out;
1934 #endif
1935 #ifdef VIMAGE
1936 if ((oid->oid_kind & CTLFLAG_VNET) && arg1 != NULL)
1937 arg1 = (void *)(curvnet->vnet_data_base + (uintptr_t)arg1);
1938 #endif
1939 error = sysctl_root_handler_locked(oid, arg1, arg2, req, &tracker);
1940
1941 out:
1942 SYSCTL_RUNLOCK(&tracker);
1943 return (error);
1944 }
1945
1946 #ifndef _SYS_SYSPROTO_H_
1947 struct sysctl_args {
1948 int *name;
1949 u_int namelen;
1950 void *old;
1951 size_t *oldlenp;
1952 void *new;
1953 size_t newlen;
1954 };
1955 #endif
1956 int
1957 sys___sysctl(struct thread *td, struct sysctl_args *uap)
1958 {
1959 int error, i, name[CTL_MAXNAME];
1960 size_t j;
1961
1962 if (uap->namelen > CTL_MAXNAME || uap->namelen < 2)
1963 return (EINVAL);
1964
1965 error = copyin(uap->name, &name, uap->namelen * sizeof(int));
1966 if (error)
1967 return (error);
1968
1969 error = userland_sysctl(td, name, uap->namelen,
1970 uap->old, uap->oldlenp, 0,
1971 uap->new, uap->newlen, &j, 0);
1972 if (error && error != ENOMEM)
1973 return (error);
1974 if (uap->oldlenp) {
1975 i = copyout(&j, uap->oldlenp, sizeof(j));
1976 if (i)
1977 return (i);
1978 }
1979 return (error);
1980 }
1981
1982 /*
1983 * This is used from various compatibility syscalls too. That's why name
1984 * must be in kernel space.
1985 */
1986 int
1987 userland_sysctl(struct thread *td, int *name, u_int namelen, void *old,
1988 size_t *oldlenp, int inkernel, void *new, size_t newlen, size_t *retval,
1989 int flags)
1990 {
1991 int error = 0, memlocked;
1992 struct sysctl_req req;
1993
1994 bzero(&req, sizeof req);
1995
1996 req.td = td;
1997 req.flags = flags;
1998
1999 if (oldlenp) {
2000 if (inkernel) {
2001 req.oldlen = *oldlenp;
2002 } else {
2003 error = copyin(oldlenp, &req.oldlen, sizeof(*oldlenp));
2004 if (error)
2005 return (error);
2006 }
2007 }
2008 req.validlen = req.oldlen;
2009 req.oldptr = old;
2010
2011 if (new != NULL) {
2012 req.newlen = newlen;
2013 req.newptr = new;
2014 }
2015
2016 req.oldfunc = sysctl_old_user;
2017 req.newfunc = sysctl_new_user;
2018 req.lock = REQ_UNWIRED;
2019
2020 #ifdef KTRACE
2021 if (KTRPOINT(curthread, KTR_SYSCTL))
2022 ktrsysctl(name, namelen);
2023 #endif
2024
2025 if (req.oldptr && req.oldlen > PAGE_SIZE) {
2026 memlocked = 1;
2027 sx_xlock(&sysctlmemlock);
2028 } else
2029 memlocked = 0;
2030 CURVNET_SET(TD_TO_VNET(td));
2031
2032 for (;;) {
2033 req.oldidx = 0;
2034 req.newidx = 0;
2035 error = sysctl_root(0, name, namelen, &req);
2036 if (error != EAGAIN)
2037 break;
2038 kern_yield(PRI_USER);
2039 }
2040
2041 CURVNET_RESTORE();
2042
2043 if (req.lock == REQ_WIRED && req.validlen > 0)
2044 vsunlock(req.oldptr, req.validlen);
2045 if (memlocked)
2046 sx_xunlock(&sysctlmemlock);
2047
2048 if (error && error != ENOMEM)
2049 return (error);
2050
2051 if (retval) {
2052 if (req.oldptr && req.oldidx > req.validlen)
2053 *retval = req.validlen;
2054 else
2055 *retval = req.oldidx;
2056 }
2057 return (error);
2058 }
2059
2060 /*
2061 * Drain into a sysctl struct. The user buffer should be wired if a page
 * fault would cause an issue.
2063 */
2064 static int
2065 sbuf_sysctl_drain(void *arg, const char *data, int len)
2066 {
2067 struct sysctl_req *req = arg;
2068 int error;
2069
2070 error = SYSCTL_OUT(req, data, len);
2071 KASSERT(error >= 0, ("Got unexpected negative value %d", error));
2072 return (error == 0 ? len : -error);
2073 }
2074
2075 struct sbuf *
2076 sbuf_new_for_sysctl(struct sbuf *s, char *buf, int length,
2077 struct sysctl_req *req)
2078 {
2079
2080 /* Supply a default buffer size if none given. */
2081 if (buf == NULL && length == 0)
2082 length = 64;
2083 s = sbuf_new(s, buf, length, SBUF_FIXEDLEN | SBUF_INCLUDENUL);
2084 sbuf_set_drain(s, sbuf_sysctl_drain, req);
2085 return (s);
2086 }
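
/*
 * Typical consumer of sbuf_new_for_sysctl() (illustrative sketch;
 * "foo_state" and "foo_users" are hypothetical): the handler formats
 * into the sbuf and the drain function above streams the result to the
 * request, so no fixed-size temporary buffer is needed.
 *
 *	static int
 *	sysctl_foo_status(SYSCTL_HANDLER_ARGS)
 *	{
 *		struct sbuf sb;
 *		int error;
 *
 *		sbuf_new_for_sysctl(&sb, NULL, 128, req);
 *		sbuf_printf(&sb, "state=%d users=%d", foo_state, foo_users);
 *		error = sbuf_finish(&sb);
 *		sbuf_delete(&sb);
 *		return (error);
 *	}
 */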