1 /*-
2 * Copyright (c) 1982, 1986, 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * Mike Karels at Berkeley Software Design, Inc.
7 *
8 * Quite extensively rewritten by Poul-Henning Kamp of the FreeBSD
9 * project, to make these variables more userfriendly.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 4. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 *
35 * @(#)kern_sysctl.c 8.4 (Berkeley) 4/14/94
36 */
37
38 #include <sys/cdefs.h>
39 __FBSDID("$FreeBSD: releng/10.3/sys/kern/kern_sysctl.c 283171 2015-05-21 06:30:44Z hselasky $");
40
41 #include "opt_capsicum.h"
42 #include "opt_compat.h"
43 #include "opt_ktrace.h"
44
45 #include <sys/param.h>
46 #include <sys/fail.h>
47 #include <sys/systm.h>
48 #include <sys/capsicum.h>
49 #include <sys/kernel.h>
50 #include <sys/sysctl.h>
51 #include <sys/malloc.h>
52 #include <sys/priv.h>
53 #include <sys/proc.h>
54 #include <sys/jail.h>
55 #include <sys/lock.h>
56 #include <sys/mutex.h>
57 #include <sys/sbuf.h>
58 #include <sys/sx.h>
59 #include <sys/sysproto.h>
60 #include <sys/uio.h>
61 #ifdef KTRACE
62 #include <sys/ktrace.h>
63 #endif
64
65 #include <net/vnet.h>
66
67 #include <security/mac/mac_framework.h>
68
69 #include <vm/vm.h>
70 #include <vm/vm_extern.h>
71
72 static MALLOC_DEFINE(M_SYSCTL, "sysctl", "sysctl internal magic");
73 static MALLOC_DEFINE(M_SYSCTLOID, "sysctloid", "sysctl dynamic oids");
74 static MALLOC_DEFINE(M_SYSCTLTMP, "sysctltmp", "sysctl temp output buffer");
75
76 /*
77 * The sysctllock protects the MIB tree. It also protects sysctl
78 * contexts used with dynamic sysctls. The sysctl_register_oid() and
79 * sysctl_unregister_oid() routines require the sysctllock to already
80 * be held, so the sysctl_lock() and sysctl_unlock() routines are
81 * provided for the few places in the kernel which need to use that
82 * API rather than using the dynamic API. Use of the dynamic API is
83 * strongly encouraged for most code.
84 *
85 * The sysctlmemlock is used to limit the amount of user memory wired for
86 * sysctl requests. This is implemented by serializing any userland
87 * sysctl requests larger than a single page via an exclusive lock.
88 */
89 static struct sx sysctllock;
90 static struct sx sysctlmemlock;
91
92 #define SYSCTL_XLOCK() sx_xlock(&sysctllock)
93 #define SYSCTL_XUNLOCK() sx_xunlock(&sysctllock)
94 #define SYSCTL_ASSERT_XLOCKED() sx_assert(&sysctllock, SA_XLOCKED)
95 #define SYSCTL_INIT() sx_init(&sysctllock, "sysctl lock")
96 #define SYSCTL_SLEEP(ch, wmesg, timo) \
97 sx_sleep(ch, &sysctllock, 0, wmesg, timo)
98
99 static int sysctl_root(SYSCTL_HANDLER_ARGS);
100
101 struct sysctl_oid_list sysctl__children; /* root list */
102
103 static int sysctl_remove_oid_locked(struct sysctl_oid *oidp, int del,
104 int recurse);
105
106 static struct sysctl_oid *
107 sysctl_find_oidname(const char *name, struct sysctl_oid_list *list)
108 {
109 struct sysctl_oid *oidp;
110
111 SYSCTL_ASSERT_XLOCKED();
112 SLIST_FOREACH(oidp, list, oid_link) {
113 if (strcmp(oidp->oid_name, name) == 0) {
114 return (oidp);
115 }
116 }
117 return (NULL);
118 }
119
120 /*
121 * Initialization of the MIB tree.
122 *
123 * Order by number in each list.
124 */
/*
 * Acquire the sysctl tree lock exclusively.  Provided for the few
 * callers that must invoke sysctl_register_oid()/sysctl_unregister_oid()
 * directly instead of using the dynamic API (see the lock comment above).
 */
void
sysctl_lock(void)
{

	SYSCTL_XLOCK();
}
131
/* Release the sysctl tree lock taken with sysctl_lock(). */
void
sysctl_unlock(void)
{

	SYSCTL_XUNLOCK();
}
138
/*
 * Link an OID into its parent's child list, keeping the list sorted by
 * OID number.  Registering a second node with the name of an existing
 * CTLTYPE_NODE bumps that node's refcount; re-using a leaf name is
 * refused with a console message.  A negative oid_number requests
 * automatic numbering.  The caller must hold the sysctl lock
 * exclusively.
 */
void
sysctl_register_oid(struct sysctl_oid *oidp)
{
	struct sysctl_oid_list *parent = oidp->oid_parent;
	struct sysctl_oid *p;
	struct sysctl_oid *q;
	int oid_number;
	int timeout = 2;	/* bounds the wraparound rescan below */

	/*
	 * First check if another oid with the same name already
	 * exists in the parent's list.
	 */
	SYSCTL_ASSERT_XLOCKED();
	p = sysctl_find_oidname(oidp->oid_name, parent);
	if (p != NULL) {
		if ((p->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
			p->oid_refcnt++;
			return;
		} else {
			printf("can't re-use a leaf (%s)!\n", p->oid_name);
			return;
		}
	}
	/* get current OID number */
	oid_number = oidp->oid_number;

#if (OID_AUTO >= 0)
#error "OID_AUTO is expected to be a negative value"
#endif
	/*
	 * Any negative OID number qualifies as OID_AUTO. Valid OID
	 * numbers should always be positive.
	 *
	 * NOTE: DO NOT change the starting value here, change it in
	 * <sys/sysctl.h>, and make sure it is at least 256 to
	 * accomodate e.g. net.inet.raw as a static sysctl node.
	 */
	if (oid_number < 0) {
		/* Shared counter: covered by the sysctl lock held here. */
		static int newoid;

		/*
		 * By decrementing the next OID number we spend less
		 * time inserting the OIDs into a sorted list.
		 */
		if (--newoid < CTL_AUTO_START)
			newoid = 0x7fffffff;

		oid_number = newoid;
	}

	/*
	 * Insert the OID into the parent's list sorted by OID number.
	 */
retry:
	q = NULL;	/* q trails p: the insertion point predecessor */
	SLIST_FOREACH(p, parent, oid_link) {
		/* check if the current OID number is in use */
		if (oid_number == p->oid_number) {
			/* get the next valid OID number */
			if (oid_number < CTL_AUTO_START ||
			    oid_number == 0x7fffffff) {
				/* wraparound - restart */
				oid_number = CTL_AUTO_START;
				/* don't loop forever */
				if (!timeout--)
					panic("sysctl: Out of OID numbers\n");
				goto retry;
			} else {
				oid_number++;
			}
		} else if (oid_number < p->oid_number)
			break;
		q = p;
	}
	/*
	 * A statically numbered OID that was bumped into the auto range
	 * collided with an existing number; warn but keep the new number.
	 */
	if (oidp->oid_number >= 0 && oidp->oid_number < CTL_AUTO_START &&
	    oid_number >= CTL_AUTO_START) {
		printf("sysctl: OID number(%d) is already in use for '%s'\n",
		    oidp->oid_number, oidp->oid_name);
	}
	/* update the OID number, if any */
	oidp->oid_number = oid_number;
	if (q != NULL)
		SLIST_INSERT_AFTER(q, oidp, oid_link);
	else
		SLIST_INSERT_HEAD(parent, oidp, oid_link);
}
227
228 void
229 sysctl_unregister_oid(struct sysctl_oid *oidp)
230 {
231 struct sysctl_oid *p;
232 int error;
233
234 SYSCTL_ASSERT_XLOCKED();
235 error = ENOENT;
236 if (oidp->oid_number == OID_AUTO) {
237 error = EINVAL;
238 } else {
239 SLIST_FOREACH(p, oidp->oid_parent, oid_link) {
240 if (p == oidp) {
241 SLIST_REMOVE(oidp->oid_parent, oidp,
242 sysctl_oid, oid_link);
243 error = 0;
244 break;
245 }
246 }
247 }
248
249 /*
250 * This can happen when a module fails to register and is
251 * being unloaded afterwards. It should not be a panic()
252 * for normal use.
253 */
254 if (error)
255 printf("%s: failed to unregister sysctl\n", __func__);
256 }
257
258 /* Initialize a new context to keep track of dynamically added sysctls. */
259 int
260 sysctl_ctx_init(struct sysctl_ctx_list *c)
261 {
262
263 if (c == NULL) {
264 return (EINVAL);
265 }
266
267 /*
268 * No locking here, the caller is responsible for not adding
269 * new nodes to a context until after this function has
270 * returned.
271 */
272 TAILQ_INIT(c);
273 return (0);
274 }
275
276 /* Free the context, and destroy all dynamic oids registered in this context */
/*
 * Free the context, destroying every dynamic OID registered in it.
 * Works in two passes: a "dry run" deregisters each entry without
 * freeing to prove the whole set is removable, rolling everything back
 * and returning EBUSY on the first failure; only then is the real
 * delete performed.
 */
int
sysctl_ctx_free(struct sysctl_ctx_list *clist)
{
	struct sysctl_ctx_entry *e, *e1;
	int error;

	error = 0;
	/*
	 * First perform a "dry run" to check if it's ok to remove oids.
	 * XXX FIXME
	 * XXX This algorithm is a hack. But I don't know any
	 * XXX better solution for now...
	 */
	SYSCTL_XLOCK();
	TAILQ_FOREACH(e, clist, link) {
		error = sysctl_remove_oid_locked(e->entry, 0, 0);
		if (error)
			break;
	}
	/*
	 * Restore deregistered entries, either from the end,
	 * or from the place where error occured.
	 * e contains the entry that was not unregistered
	 */
	if (error)
		e1 = TAILQ_PREV(e, sysctl_ctx_list, link);
	else
		e1 = TAILQ_LAST(clist, sysctl_ctx_list);
	while (e1 != NULL) {
		sysctl_register_oid(e1->entry);
		e1 = TAILQ_PREV(e1, sysctl_ctx_list, link);
	}
	if (error) {
		SYSCTL_XUNLOCK();
		return(EBUSY);
	}
	/* Now really delete the entries (del=1): dry run proved it safe. */
	e = TAILQ_FIRST(clist);
	while (e != NULL) {
		e1 = TAILQ_NEXT(e, link);
		error = sysctl_remove_oid_locked(e->entry, 1, 0);
		if (error)
			panic("sysctl_remove_oid: corrupt tree, entry: %s",
			    e->entry->oid_name);
		free(e, M_SYSCTLOID);
		e = e1;
	}
	SYSCTL_XUNLOCK();
	return (error);
}
327
328 /* Add an entry to the context */
329 struct sysctl_ctx_entry *
330 sysctl_ctx_entry_add(struct sysctl_ctx_list *clist, struct sysctl_oid *oidp)
331 {
332 struct sysctl_ctx_entry *e;
333
334 SYSCTL_ASSERT_XLOCKED();
335 if (clist == NULL || oidp == NULL)
336 return(NULL);
337 e = malloc(sizeof(struct sysctl_ctx_entry), M_SYSCTLOID, M_WAITOK);
338 e->entry = oidp;
339 TAILQ_INSERT_HEAD(clist, e, link);
340 return (e);
341 }
342
343 /* Find an entry in the context */
344 struct sysctl_ctx_entry *
345 sysctl_ctx_entry_find(struct sysctl_ctx_list *clist, struct sysctl_oid *oidp)
346 {
347 struct sysctl_ctx_entry *e;
348
349 SYSCTL_ASSERT_XLOCKED();
350 if (clist == NULL || oidp == NULL)
351 return(NULL);
352 TAILQ_FOREACH(e, clist, link) {
353 if(e->entry == oidp)
354 return(e);
355 }
356 return (e);
357 }
358
359 /*
360 * Delete an entry from the context.
361 * NOTE: this function doesn't free oidp! You have to remove it
362 * with sysctl_remove_oid().
363 */
364 int
365 sysctl_ctx_entry_del(struct sysctl_ctx_list *clist, struct sysctl_oid *oidp)
366 {
367 struct sysctl_ctx_entry *e;
368
369 if (clist == NULL || oidp == NULL)
370 return (EINVAL);
371 SYSCTL_XLOCK();
372 e = sysctl_ctx_entry_find(clist, oidp);
373 if (e != NULL) {
374 TAILQ_REMOVE(clist, e, link);
375 SYSCTL_XUNLOCK();
376 free(e, M_SYSCTLOID);
377 return (0);
378 } else {
379 SYSCTL_XUNLOCK();
380 return (ENOENT);
381 }
382 }
383
384 /*
385 * Remove dynamically created sysctl trees.
386 * oidp - top of the tree to be removed
387 * del - if 0 - just deregister, otherwise free up entries as well
388 * recurse - if != 0 traverse the subtree to be deleted
389 */
/*
 * Locked wrapper around sysctl_remove_oid_locked(); see the comment
 * above for the meaning of del and recurse.  Returns 0 or an errno.
 */
int
sysctl_remove_oid(struct sysctl_oid *oidp, int del, int recurse)
{
	int error;

	SYSCTL_XLOCK();
	error = sysctl_remove_oid_locked(oidp, del, recurse);
	SYSCTL_XUNLOCK();
	return (error);
}
400
401 int
402 sysctl_remove_name(struct sysctl_oid *parent, const char *name,
403 int del, int recurse)
404 {
405 struct sysctl_oid *p, *tmp;
406 int error;
407
408 error = ENOENT;
409 SYSCTL_XLOCK();
410 SLIST_FOREACH_SAFE(p, SYSCTL_CHILDREN(parent), oid_link, tmp) {
411 if (strcmp(p->oid_name, name) == 0) {
412 error = sysctl_remove_oid_locked(p, del, recurse);
413 break;
414 }
415 }
416 SYSCTL_XUNLOCK();
417
418 return (error);
419 }
420
421
/*
 * Core of OID removal; the sysctl lock must be held exclusively.
 * Refuses non-dynamic nodes.  For a node with refcnt 1, children are
 * removed first (only if recurse is set; otherwise ENOTEMPTY).  A
 * refcnt greater than 1 is simply decremented.  When del is set the
 * OID's storage is freed after any in-flight handlers drain.
 */
static int
sysctl_remove_oid_locked(struct sysctl_oid *oidp, int del, int recurse)
{
	struct sysctl_oid *p, *tmp;
	int error;

	SYSCTL_ASSERT_XLOCKED();
	if (oidp == NULL)
		return(EINVAL);
	if ((oidp->oid_kind & CTLFLAG_DYN) == 0) {
		printf("can't remove non-dynamic nodes!\n");
		return (EINVAL);
	}
	/*
	 * WARNING: normal method to do this should be through
	 * sysctl_ctx_free(). Use recursing as the last resort
	 * method to purge your sysctl tree of leftovers...
	 * However, if some other code still references these nodes,
	 * it will panic.
	 */
	if ((oidp->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
		if (oidp->oid_refcnt == 1) {
			/* SAFE: children unlink themselves as we recurse. */
			SLIST_FOREACH_SAFE(p,
			    SYSCTL_CHILDREN(oidp), oid_link, tmp) {
				if (!recurse) {
					printf("Warning: failed attempt to "
					    "remove oid %s with child %s\n",
					    oidp->oid_name, p->oid_name);
					return (ENOTEMPTY);
				}
				error = sysctl_remove_oid_locked(p, del,
				    recurse);
				if (error)
					return (error);
			}
			if (del)
				free(SYSCTL_CHILDREN(oidp), M_SYSCTLOID);
		}
	}
	if (oidp->oid_refcnt > 1 ) {
		oidp->oid_refcnt--;
	} else {
		if (oidp->oid_refcnt == 0) {
			printf("Warning: bad oid_refcnt=%u (%s)!\n",
			    oidp->oid_refcnt, oidp->oid_name);
			return (EINVAL);
		}
		sysctl_unregister_oid(oidp);
		if (del) {
			/*
			 * Wait for all threads running the handler to drain.
			 * This preserves the previous behavior when the
			 * sysctl lock was held across a handler invocation,
			 * and is necessary for module unload correctness.
			 */
			while (oidp->oid_running > 0) {
				oidp->oid_kind |= CTLFLAG_DYING;
				SYSCTL_SLEEP(&oidp->oid_running, "oidrm", 0);
			}
			if (oidp->oid_descr)
				free(__DECONST(char *, oidp->oid_descr),
				    M_SYSCTLOID);
			free(__DECONST(char *, oidp->oid_name), M_SYSCTLOID);
			free(oidp, M_SYSCTLOID);
		}
	}
	return (0);
}
490 /*
491 * Create new sysctls at run time.
492 * clist may point to a valid context initialized with sysctl_ctx_init().
493 */
/*
 * Create new sysctls at run time.
 * clist may point to a valid context initialized with sysctl_ctx_init().
 *
 * Returns the new (or existing, for a re-added node) OID, or NULL when
 * parent is NULL or the name is already taken by a leaf.  The OID is
 * registered in the tree and, when clist is given, recorded in the
 * context before the lock is dropped.
 */
struct sysctl_oid *
sysctl_add_oid(struct sysctl_ctx_list *clist, struct sysctl_oid_list *parent,
    int number, const char *name, int kind, void *arg1, intptr_t arg2,
    int (*handler)(SYSCTL_HANDLER_ARGS), const char *fmt, const char *descr)
{
	struct sysctl_oid *oidp;

	/* You have to hook up somewhere.. */
	if (parent == NULL)
		return(NULL);
	/* Check if the node already exists, otherwise create it */
	SYSCTL_XLOCK();
	oidp = sysctl_find_oidname(name, parent);
	if (oidp != NULL) {
		if ((oidp->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
			/* Re-adding an existing node just takes a reference. */
			oidp->oid_refcnt++;
			/* Update the context */
			if (clist != NULL)
				sysctl_ctx_entry_add(clist, oidp);
			SYSCTL_XUNLOCK();
			return (oidp);
		} else {
			SYSCTL_XUNLOCK();
			printf("can't re-use a leaf (%s)!\n", name);
			return (NULL);
		}
	}
	oidp = malloc(sizeof(struct sysctl_oid), M_SYSCTLOID, M_WAITOK|M_ZERO);
	oidp->oid_parent = parent;
	SLIST_NEXT(oidp, oid_link) = NULL;
	oidp->oid_number = number;
	oidp->oid_refcnt = 1;
	oidp->oid_name = strdup(name, M_SYSCTLOID);
	oidp->oid_handler = handler;
	oidp->oid_kind = CTLFLAG_DYN | kind;
	if ((kind & CTLTYPE) == CTLTYPE_NODE) {
		/* Allocate space for children */
		SYSCTL_CHILDREN_SET(oidp, malloc(sizeof(struct sysctl_oid_list),
		    M_SYSCTLOID, M_WAITOK));
		SLIST_INIT(SYSCTL_CHILDREN(oidp));
		oidp->oid_arg2 = arg2;
	} else {
		oidp->oid_arg1 = arg1;
		oidp->oid_arg2 = arg2;
	}
	oidp->oid_fmt = fmt;
	if (descr)
		oidp->oid_descr = strdup(descr, M_SYSCTLOID);
	/* Update the context, if used */
	if (clist != NULL)
		sysctl_ctx_entry_add(clist, oidp);
	/* Register this oid */
	sysctl_register_oid(oidp);
	SYSCTL_XUNLOCK();
	return (oidp);
}
550
551 /*
552 * Rename an existing oid.
553 */
554 void
555 sysctl_rename_oid(struct sysctl_oid *oidp, const char *name)
556 {
557 char *newname;
558 char *oldname;
559
560 newname = strdup(name, M_SYSCTLOID);
561 SYSCTL_XLOCK();
562 oldname = __DECONST(char *, oidp->oid_name);
563 oidp->oid_name = newname;
564 SYSCTL_XUNLOCK();
565 free(oldname, M_SYSCTLOID);
566 }
567
568 /*
569 * Reparent an existing oid.
570 */
571 int
572 sysctl_move_oid(struct sysctl_oid *oid, struct sysctl_oid_list *parent)
573 {
574 struct sysctl_oid *oidp;
575
576 SYSCTL_XLOCK();
577 if (oid->oid_parent == parent) {
578 SYSCTL_XUNLOCK();
579 return (0);
580 }
581 oidp = sysctl_find_oidname(oid->oid_name, parent);
582 if (oidp != NULL) {
583 SYSCTL_XUNLOCK();
584 return (EEXIST);
585 }
586 sysctl_unregister_oid(oid);
587 oid->oid_parent = parent;
588 oid->oid_number = OID_AUTO;
589 sysctl_register_oid(oid);
590 SYSCTL_XUNLOCK();
591 return (0);
592 }
593
594 /*
595 * Register the kernel's oids on startup.
596 */
597 SET_DECLARE(sysctl_set, struct sysctl_oid);
598
/*
 * SYSINIT hook: initialize the sysctl locks, then register every OID
 * placed in the sysctl_set linker set by the static SYSCTL_* macros.
 */
static void
sysctl_register_all(void *arg)
{
	struct sysctl_oid **oidp;

	sx_init(&sysctlmemlock, "sysctl mem");
	SYSCTL_INIT();
	SYSCTL_XLOCK();
	SET_FOREACH(oidp, sysctl_set)
		sysctl_register_oid(*oidp);
	SYSCTL_XUNLOCK();
}
611 SYSINIT(sysctl, SI_SUB_KMEM, SI_ORDER_ANY, sysctl_register_all, 0);
612
613 /*
614 * "Staff-functions"
615 *
616 * These functions implement a presently undocumented interface
617 * used by the sysctl program to walk the tree, and get the type
618 * so it can print the value.
619 * This interface is under work and consideration, and should probably
620 * be killed with a big axe by the first person who can find the time.
621 * (be aware though, that the proper interface isn't as obvious as it
622 * may seem, there are various conflicting requirements.
623 *
624 * {0,0} printf the entire MIB-tree.
625 * {0,1,...} return the name of the "..." OID.
626 * {0,2,...} return the next OID.
627 * {0,3} return the OID of the name in "new"
628 * {0,4,...} return the kind & format info for the "..." OID.
629 * {0,5,...} return the description the "..." OID.
630 */
631
632 #ifdef SYSCTL_DEBUG
/*
 * Recursively print one level of the MIB tree to the console: number,
 * name, R/W flags, handler presence, and type; child lists of handler-
 * less nodes are dumped indented by two extra columns.  Requires the
 * sysctl lock.
 */
static void
sysctl_sysctl_debug_dump_node(struct sysctl_oid_list *l, int i)
{
	int k;
	struct sysctl_oid *oidp;

	SYSCTL_ASSERT_XLOCKED();
	SLIST_FOREACH(oidp, l, oid_link) {

		/* i is the indentation depth for this level */
		for (k=0; k<i; k++)
			printf(" ");

		printf("%d %s ", oidp->oid_number, oidp->oid_name);

		printf("%c%c",
		    oidp->oid_kind & CTLFLAG_RD ? 'R':' ',
		    oidp->oid_kind & CTLFLAG_WR ? 'W':' ');

		if (oidp->oid_handler)
			printf(" *Handler");

		switch (oidp->oid_kind & CTLTYPE) {
		case CTLTYPE_NODE:
			printf(" Node\n");
			/* Only handler-less nodes own a plain child list. */
			if (!oidp->oid_handler) {
				sysctl_sysctl_debug_dump_node(
				    oidp->oid_arg1, i+2);
			}
			break;
		case CTLTYPE_INT: printf(" Int\n"); break;
		case CTLTYPE_UINT: printf(" u_int\n"); break;
		case CTLTYPE_LONG: printf(" Long\n"); break;
		case CTLTYPE_ULONG: printf(" u_long\n"); break;
		case CTLTYPE_STRING: printf(" String\n"); break;
		case CTLTYPE_U64: printf(" uint64_t\n"); break;
		case CTLTYPE_S64: printf(" int64_t\n"); break;
		case CTLTYPE_OPAQUE: printf(" Opaque/struct\n"); break;
		default: printf("\n");
		}

	}
}
675
/*
 * Handler for sysctl.debug ({0,0}): dump the whole MIB tree to the
 * console, gated by PRIV_SYSCTL_DEBUG.  Always returns ENOENT after a
 * successful dump (nothing is copied out to the caller).
 */
static int
sysctl_sysctl_debug(SYSCTL_HANDLER_ARGS)
{
	int error;

	error = priv_check(req->td, PRIV_SYSCTL_DEBUG);
	if (error)
		return (error);
	SYSCTL_XLOCK();
	sysctl_sysctl_debug_dump_node(&sysctl__children, 0);
	SYSCTL_XUNLOCK();
	return (ENOENT);
}
689
690 SYSCTL_PROC(_sysctl, 0, debug, CTLTYPE_STRING|CTLFLAG_RD,
691 0, 0, sysctl_sysctl_debug, "-", "");
692 #endif
693
/*
 * Handler for sysctl.name ({0,1,...}): translate the numeric OID in
 * (arg1, arg2) into its dotted string name.  Components that fall off
 * the tree (lsp == NULL) are rendered as their decimal number, so a
 * partially valid OID still yields a readable name.
 */
static int
sysctl_sysctl_name(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *) arg1;
	u_int namelen = arg2;
	int error = 0;
	struct sysctl_oid *oid;
	struct sysctl_oid_list *lsp = &sysctl__children, *lsp2;
	char buf[10];

	SYSCTL_XLOCK();
	while (namelen) {
		if (!lsp) {
			/* Below a leaf: emit the raw number for this level. */
			snprintf(buf,sizeof(buf),"%d",*name);
			/* oldidx != 0 means output started: prepend a dot. */
			if (req->oldidx)
				error = SYSCTL_OUT(req, ".", 1);
			if (!error)
				error = SYSCTL_OUT(req, buf, strlen(buf));
			if (error)
				goto out;
			namelen--;
			name++;
			continue;
		}
		lsp2 = 0;
		SLIST_FOREACH(oid, lsp, oid_link) {
			if (oid->oid_number != *name)
				continue;

			if (req->oldidx)
				error = SYSCTL_OUT(req, ".", 1);
			if (!error)
				error = SYSCTL_OUT(req, oid->oid_name,
				    strlen(oid->oid_name));
			if (error)
				goto out;

			namelen--;
			name++;

			/* Descend only through handler-less NODE oids. */
			if ((oid->oid_kind & CTLTYPE) != CTLTYPE_NODE)
				break;

			if (oid->oid_handler)
				break;

			lsp2 = SYSCTL_CHILDREN(oid);
			break;
		}
		lsp = lsp2;
	}
	/* Terminating NUL for the assembled name. */
	error = SYSCTL_OUT(req, "", 1);
 out:
	SYSCTL_XUNLOCK();
	return (error);
}
750
751 /*
752 * XXXRW/JA: Shouldn't return name data for nodes that we don't permit in
753 * capability mode.
754 */
755 static SYSCTL_NODE(_sysctl, 1, name, CTLFLAG_RD | CTLFLAG_CAPRD,
756 sysctl_sysctl_name, "");
757
/*
 * Depth-first search for the OID that follows (name, namelen) in tree
 * order, writing the successor's numeric path into next[] and its
 * length into *len.  Returns 0 when a successor was found, 1 when this
 * subtree is exhausted.  CTLFLAG_SKIP entries are passed over.
 * Requires the sysctl lock.
 */
static int
sysctl_sysctl_next_ls(struct sysctl_oid_list *lsp, int *name, u_int namelen,
    int *next, int *len, int level, struct sysctl_oid **oidpp)
{
	struct sysctl_oid *oidp;

	SYSCTL_ASSERT_XLOCKED();
	*len = level;
	SLIST_FOREACH(oidp, lsp, oid_link) {
		/* Tentatively record this node as the candidate. */
		*next = oidp->oid_number;
		*oidpp = oidp;

		if (oidp->oid_kind & CTLFLAG_SKIP)
			continue;

		if (!namelen) {
			/* Past the given prefix: first usable entry wins. */
			if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
				return (0);
			if (oidp->oid_handler)
				/* We really should call the handler here...*/
				return (0);
			lsp = SYSCTL_CHILDREN(oidp);
			if (!sysctl_sysctl_next_ls(lsp, 0, 0, next+1,
			    len, level+1, oidpp))
				return (0);
			goto emptynode;
		}

		if (oidp->oid_number < *name)
			continue;

		if (oidp->oid_number > *name) {
			/* Jumped past the start point: take this entry. */
			if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
				return (0);
			if (oidp->oid_handler)
				return (0);
			lsp = SYSCTL_CHILDREN(oidp);
			if (!sysctl_sysctl_next_ls(lsp, name+1, namelen-1,
			    next+1, len, level+1, oidpp))
				return (0);
			goto next;
		}
		/* Exact match on this level: descend to find successor. */
		if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
			continue;

		if (oidp->oid_handler)
			continue;

		lsp = SYSCTL_CHILDREN(oidp);
		if (!sysctl_sysctl_next_ls(lsp, name+1, namelen-1, next+1,
		    len, level+1, oidpp))
			return (0);
	next:
		/* Subtree exhausted: search siblings prefix-free. */
		namelen = 1;
	emptynode:
		*len = level;
	}
	return (1);
}
817
818 static int
819 sysctl_sysctl_next(SYSCTL_HANDLER_ARGS)
820 {
821 int *name = (int *) arg1;
822 u_int namelen = arg2;
823 int i, j, error;
824 struct sysctl_oid *oid;
825 struct sysctl_oid_list *lsp = &sysctl__children;
826 int newoid[CTL_MAXNAME];
827
828 SYSCTL_XLOCK();
829 i = sysctl_sysctl_next_ls(lsp, name, namelen, newoid, &j, 1, &oid);
830 SYSCTL_XUNLOCK();
831 if (i)
832 return (ENOENT);
833 error = SYSCTL_OUT(req, newoid, j * sizeof (int));
834 return (error);
835 }
836
837 /*
838 * XXXRW/JA: Shouldn't return next data for nodes that we don't permit in
839 * capability mode.
840 */
841 static SYSCTL_NODE(_sysctl, 2, next, CTLFLAG_RD | CTLFLAG_CAPRD,
842 sysctl_sysctl_next, "");
843
844 static int
845 name2oid(char *name, int *oid, int *len, struct sysctl_oid **oidpp)
846 {
847 struct sysctl_oid *oidp;
848 struct sysctl_oid_list *lsp = &sysctl__children;
849 char *p;
850
851 SYSCTL_ASSERT_XLOCKED();
852
853 for (*len = 0; *len < CTL_MAXNAME;) {
854 p = strsep(&name, ".");
855
856 oidp = SLIST_FIRST(lsp);
857 for (;; oidp = SLIST_NEXT(oidp, oid_link)) {
858 if (oidp == NULL)
859 return (ENOENT);
860 if (strcmp(p, oidp->oid_name) == 0)
861 break;
862 }
863 *oid++ = oidp->oid_number;
864 (*len)++;
865
866 if (name == NULL || *name == '\0') {
867 if (oidpp)
868 *oidpp = oidp;
869 return (0);
870 }
871
872 if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
873 break;
874
875 if (oidp->oid_handler)
876 break;
877
878 lsp = SYSCTL_CHILDREN(oidp);
879 }
880 return (ENOENT);
881 }
882
883 static int
884 sysctl_sysctl_name2oid(SYSCTL_HANDLER_ARGS)
885 {
886 char *p;
887 int error, oid[CTL_MAXNAME], len = 0;
888 struct sysctl_oid *op = 0;
889
890 if (!req->newlen)
891 return (ENOENT);
892 if (req->newlen >= MAXPATHLEN) /* XXX arbitrary, undocumented */
893 return (ENAMETOOLONG);
894
895 p = malloc(req->newlen+1, M_SYSCTL, M_WAITOK);
896
897 error = SYSCTL_IN(req, p, req->newlen);
898 if (error) {
899 free(p, M_SYSCTL);
900 return (error);
901 }
902
903 p [req->newlen] = '\0';
904
905 SYSCTL_XLOCK();
906 error = name2oid(p, oid, &len, &op);
907 SYSCTL_XUNLOCK();
908
909 free(p, M_SYSCTL);
910
911 if (error)
912 return (error);
913
914 error = SYSCTL_OUT(req, oid, len * sizeof *oid);
915 return (error);
916 }
917
918 /*
919 * XXXRW/JA: Shouldn't return name2oid data for nodes that we don't permit in
920 * capability mode.
921 */
922 SYSCTL_PROC(_sysctl, 3, name2oid,
923 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MPSAFE
924 | CTLFLAG_CAPRW, 0, 0, sysctl_sysctl_name2oid, "I", "");
925
926 static int
927 sysctl_sysctl_oidfmt(SYSCTL_HANDLER_ARGS)
928 {
929 struct sysctl_oid *oid;
930 int error;
931
932 SYSCTL_XLOCK();
933 error = sysctl_find_oid(arg1, arg2, &oid, NULL, req);
934 if (error)
935 goto out;
936
937 if (oid->oid_fmt == NULL) {
938 error = ENOENT;
939 goto out;
940 }
941 error = SYSCTL_OUT(req, &oid->oid_kind, sizeof(oid->oid_kind));
942 if (error)
943 goto out;
944 error = SYSCTL_OUT(req, oid->oid_fmt, strlen(oid->oid_fmt) + 1);
945 out:
946 SYSCTL_XUNLOCK();
947 return (error);
948 }
949
950
951 static SYSCTL_NODE(_sysctl, 4, oidfmt, CTLFLAG_RD|CTLFLAG_MPSAFE|CTLFLAG_CAPRD,
952 sysctl_sysctl_oidfmt, "");
953
954 static int
955 sysctl_sysctl_oiddescr(SYSCTL_HANDLER_ARGS)
956 {
957 struct sysctl_oid *oid;
958 int error;
959
960 SYSCTL_XLOCK();
961 error = sysctl_find_oid(arg1, arg2, &oid, NULL, req);
962 if (error)
963 goto out;
964
965 if (oid->oid_descr == NULL) {
966 error = ENOENT;
967 goto out;
968 }
969 error = SYSCTL_OUT(req, oid->oid_descr, strlen(oid->oid_descr) + 1);
970 out:
971 SYSCTL_XUNLOCK();
972 return (error);
973 }
974
975 static SYSCTL_NODE(_sysctl, 5, oiddescr, CTLFLAG_RD|CTLFLAG_CAPRD,
976 sysctl_sysctl_oiddescr, "");
977
978 /*
979 * Default "handler" functions.
980 */
981
982 /*
983 * Handle an int, signed or unsigned.
984 * Two cases:
985 * a variable: point arg1 at it.
986 * a constant: pass it in arg2.
987 */
988
989 int
990 sysctl_handle_int(SYSCTL_HANDLER_ARGS)
991 {
992 int tmpout, error = 0;
993
994 /*
995 * Attempt to get a coherent snapshot by making a copy of the data.
996 */
997 if (arg1)
998 tmpout = *(int *)arg1;
999 else
1000 tmpout = arg2;
1001 error = SYSCTL_OUT(req, &tmpout, sizeof(int));
1002
1003 if (error || !req->newptr)
1004 return (error);
1005
1006 if (!arg1)
1007 error = EPERM;
1008 else
1009 error = SYSCTL_IN(req, arg1, sizeof(int));
1010 return (error);
1011 }
1012
1013 /*
1014 * Based on on sysctl_handle_int() convert milliseconds into ticks.
1015 * Note: this is used by TCP.
1016 */
1017
1018 int
1019 sysctl_msec_to_ticks(SYSCTL_HANDLER_ARGS)
1020 {
1021 int error, s, tt;
1022
1023 tt = *(int *)arg1;
1024 s = (int)((int64_t)tt * 1000 / hz);
1025
1026 error = sysctl_handle_int(oidp, &s, 0, req);
1027 if (error || !req->newptr)
1028 return (error);
1029
1030 tt = (int)((int64_t)s * hz / 1000);
1031 if (tt < 1)
1032 return (EINVAL);
1033
1034 *(int *)arg1 = tt;
1035 return (0);
1036 }
1037
1038
1039 /*
1040 * Handle a long, signed or unsigned.
1041 * Two cases:
1042 * a variable: point arg1 at it.
1043 * a constant: pass it in arg2.
1044 */
1045
/*
 * Handle a long, signed or unsigned.
 * Two cases:
 * a variable: point arg1 at it.
 * a constant: pass it in arg2.
 *
 * Under SCTL_MASK32 the value is narrowed to/widened from 32 bits for
 * 32-bit compatibility requests.  Writes to a constant yield EPERM.
 */
int
sysctl_handle_long(SYSCTL_HANDLER_ARGS)
{
	int error = 0;
	long tmplong;
#ifdef SCTL_MASK32
	int tmpint;
#endif

	/*
	 * Attempt to get a coherent snapshot by making a copy of the data.
	 */
	if (arg1)
		tmplong = *(long *)arg1;
	else
		tmplong = arg2;
#ifdef SCTL_MASK32
	if (req->flags & SCTL_MASK32) {
		tmpint = tmplong;
		error = SYSCTL_OUT(req, &tmpint, sizeof(int));
	} else
#endif
		error = SYSCTL_OUT(req, &tmplong, sizeof(long));

	if (error || !req->newptr)
		return (error);

	if (!arg1)
		error = EPERM;
#ifdef SCTL_MASK32
	else if (req->flags & SCTL_MASK32) {
		error = SYSCTL_IN(req, &tmpint, sizeof(int));
		*(long *)arg1 = (long)tmpint;
	}
#endif
	else
		error = SYSCTL_IN(req, arg1, sizeof(long));
	return (error);
}
1085
1086 /*
1087 * Handle a 64 bit int, signed or unsigned.
1088 * Two cases:
1089 * a variable: point arg1 at it.
1090 * a constant: pass it in arg2.
1091 */
1092 int
1093 sysctl_handle_64(SYSCTL_HANDLER_ARGS)
1094 {
1095 int error = 0;
1096 uint64_t tmpout;
1097
1098 /*
1099 * Attempt to get a coherent snapshot by making a copy of the data.
1100 */
1101 if (arg1)
1102 tmpout = *(uint64_t *)arg1;
1103 else
1104 tmpout = arg2;
1105 error = SYSCTL_OUT(req, &tmpout, sizeof(uint64_t));
1106
1107 if (error || !req->newptr)
1108 return (error);
1109
1110 if (!arg1)
1111 error = EPERM;
1112 else
1113 error = SYSCTL_IN(req, arg1, sizeof(uint64_t));
1114 return (error);
1115 }
1116
1117 /*
1118 * Handle our generic '\0' terminated 'C' string.
1119 * Two cases:
1120 * a variable string: point arg1 at it, arg2 is max length.
1121 * a constant string: point arg1 at it, arg2 is zero.
1122 */
1123
/*
 * Handle our generic '\0' terminated 'C' string.
 * Two cases:
 * a variable string: point arg1 at it, arg2 is max length.
 * a constant string: point arg1 at it, arg2 is zero.
 */
int
sysctl_handle_string(SYSCTL_HANDLER_ARGS)
{
	int error=0;
	char *tmparg;
	size_t outlen;

	/*
	 * Attempt to get a coherent snapshot by copying to a
	 * temporary kernel buffer.
	 */
retry:
	outlen = strlen((char *)arg1)+1;
	tmparg = malloc(outlen, M_SYSCTLTMP, M_WAITOK);

	/*
	 * If the string grew between the strlen() above and this copy
	 * (truncation detected by strlcpy), start over with a fresh
	 * length.
	 */
	if (strlcpy(tmparg, (char *)arg1, outlen) >= outlen) {
		free(tmparg, M_SYSCTLTMP);
		goto retry;
	}

	error = SYSCTL_OUT(req, tmparg, outlen);
	free(tmparg, M_SYSCTLTMP);

	if (error || !req->newptr)
		return (error);

	/* New value must fit in arg2 bytes, leaving room for the NUL. */
	if ((req->newlen - req->newidx) >= arg2) {
		error = EINVAL;
	} else {
		arg2 = (req->newlen - req->newidx);
		error = SYSCTL_IN(req, arg1, arg2);
		((char *)arg1)[arg2] = '\0';
	}

	return (error);
}
1160
1161 /*
1162 * Handle any kind of opaque data.
1163 * arg1 points to it, arg2 is the size.
1164 */
1165
/*
 * Handle any kind of opaque data.
 * arg1 points to it, arg2 is the size.
 */
int
sysctl_handle_opaque(SYSCTL_HANDLER_ARGS)
{
	int error, tries;
	u_int generation;
	struct sysctl_req req2;

	/*
	 * Attempt to get a coherent snapshot, by using the thread
	 * pre-emption counter updated from within mi_switch() to
	 * determine if we were pre-empted during a bcopy() or
	 * copyout(). Make 3 attempts at doing this before giving up.
	 * If we encounter an error, stop immediately.
	 */
	tries = 0;
	req2 = *req;	/* saved copy so a retry can rewind the request */
	retry:
	generation = curthread->td_generation;
	error = SYSCTL_OUT(req, arg1, arg2);
	if (error)
		return (error);
	tries++;
	/* Preempted mid-copy: restore the request state and retry. */
	if (generation != curthread->td_generation && tries < 3) {
		*req = req2;
		goto retry;
	}

	error = SYSCTL_IN(req, arg1, arg2);

	return (error);
}
1197
1198 /*
1199 * Transfer functions to/from kernel space.
1200 * XXX: rather untested at this point
1201 */
1202 static int
1203 sysctl_old_kernel(struct sysctl_req *req, const void *p, size_t l)
1204 {
1205 size_t i = 0;
1206
1207 if (req->oldptr) {
1208 i = l;
1209 if (req->oldlen <= req->oldidx)
1210 i = 0;
1211 else
1212 if (i > req->oldlen - req->oldidx)
1213 i = req->oldlen - req->oldidx;
1214 if (i > 0)
1215 bcopy(p, (char *)req->oldptr + req->oldidx, i);
1216 }
1217 req->oldidx += l;
1218 if (req->oldptr && i != l)
1219 return (ENOMEM);
1220 return (0);
1221 }
1222
1223 static int
1224 sysctl_new_kernel(struct sysctl_req *req, void *p, size_t l)
1225 {
1226 if (!req->newptr)
1227 return (0);
1228 if (req->newlen - req->newidx < l)
1229 return (EINVAL);
1230 bcopy((char *)req->newptr + req->newidx, p, l);
1231 req->newidx += l;
1232 return (0);
1233 }
1234
/*
 * Perform a sysctl request entirely from within the kernel: resolve the
 * numeric OID "name" (namelen components) and invoke its handler, using
 * the kernel-space transfer functions so no copyin/copyout occurs.
 *
 * old/oldlenp: optional buffer (and its size) receiving the old value.
 * new/newlen:  optional new value to set.
 * retval:      if non-NULL, receives the number of bytes produced
 *              (clamped to the valid buffer length on overflow).
 * Returns 0, ENOMEM if the old buffer was too small, or the handler's
 * error.
 */
int
kernel_sysctl(struct thread *td, int *name, u_int namelen, void *old,
    size_t *oldlenp, void *new, size_t newlen, size_t *retval, int flags)
{
	int error = 0;
	struct sysctl_req req;

	bzero(&req, sizeof req);

	req.td = td;
	req.flags = flags;

	if (oldlenp) {
		req.oldlen = *oldlenp;
	}
	req.validlen = req.oldlen;

	if (old) {
		req.oldptr= old;
	}

	if (new != NULL) {
		req.newlen = newlen;
		req.newptr = new;
	}

	/* Kernel-space bcopy-based transfer, no user wiring needed. */
	req.oldfunc = sysctl_old_kernel;
	req.newfunc = sysctl_new_kernel;
	req.lock = REQ_UNWIRED;

	SYSCTL_XLOCK();
	error = sysctl_root(0, name, namelen, &req);
	SYSCTL_XUNLOCK();

	/* A handler may have wired the buffer via sysctl_wire_old_buffer(). */
	if (req.lock == REQ_WIRED && req.validlen > 0)
		vsunlock(req.oldptr, req.validlen);

	/* ENOMEM still reports the (truncated) length below. */
	if (error && error != ENOMEM)
		return (error);

	if (retval) {
		if (req.oldptr && req.oldidx > req.validlen)
			*retval = req.validlen;
		else
			*retval = req.oldidx;
	}
	return (error);
}
1283
1284 int
1285 kernel_sysctlbyname(struct thread *td, char *name, void *old, size_t *oldlenp,
1286 void *new, size_t newlen, size_t *retval, int flags)
1287 {
1288 int oid[CTL_MAXNAME];
1289 size_t oidlen, plen;
1290 int error;
1291
1292 oid[0] = 0; /* sysctl internal magic */
1293 oid[1] = 3; /* name2oid */
1294 oidlen = sizeof(oid);
1295
1296 error = kernel_sysctl(td, oid, 2, oid, &oidlen,
1297 (void *)name, strlen(name), &plen, flags);
1298 if (error)
1299 return (error);
1300
1301 error = kernel_sysctl(td, oid, plen / sizeof(int), old, oldlenp,
1302 new, newlen, retval, flags);
1303 return (error);
1304 }
1305
1306 /*
1307 * Transfer function to/from user space.
1308 */
1309 static int
1310 sysctl_old_user(struct sysctl_req *req, const void *p, size_t l)
1311 {
1312 size_t i, len, origidx;
1313 int error;
1314
1315 origidx = req->oldidx;
1316 req->oldidx += l;
1317 if (req->oldptr == NULL)
1318 return (0);
1319 /*
1320 * If we have not wired the user supplied buffer and we are currently
1321 * holding locks, drop a witness warning, as it's possible that
1322 * write operations to the user page can sleep.
1323 */
1324 if (req->lock != REQ_WIRED)
1325 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1326 "sysctl_old_user()");
1327 i = l;
1328 len = req->validlen;
1329 if (len <= origidx)
1330 i = 0;
1331 else {
1332 if (i > len - origidx)
1333 i = len - origidx;
1334 if (req->lock == REQ_WIRED) {
1335 error = copyout_nofault(p, (char *)req->oldptr +
1336 origidx, i);
1337 } else
1338 error = copyout(p, (char *)req->oldptr + origidx, i);
1339 if (error != 0)
1340 return (error);
1341 }
1342 if (i < l)
1343 return (ENOMEM);
1344 return (0);
1345 }
1346
1347 static int
1348 sysctl_new_user(struct sysctl_req *req, void *p, size_t l)
1349 {
1350 int error;
1351
1352 if (!req->newptr)
1353 return (0);
1354 if (req->newlen - req->newidx < l)
1355 return (EINVAL);
1356 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1357 "sysctl_new_user()");
1358 error = copyin((char *)req->newptr + req->newidx, p, l);
1359 req->newidx += l;
1360 return (error);
1361 }
1362
1363 /*
1364 * Wire the user space destination buffer. If set to a value greater than
1365 * zero, the len parameter limits the maximum amount of wired memory.
1366 */
1367 int
1368 sysctl_wire_old_buffer(struct sysctl_req *req, size_t len)
1369 {
1370 int ret;
1371 size_t wiredlen;
1372
1373 wiredlen = (len > 0 && len < req->oldlen) ? len : req->oldlen;
1374 ret = 0;
1375 if (req->lock != REQ_WIRED && req->oldptr &&
1376 req->oldfunc == sysctl_old_user) {
1377 if (wiredlen != 0) {
1378 ret = vslock(req->oldptr, wiredlen);
1379 if (ret != 0) {
1380 if (ret != ENOMEM)
1381 return (ret);
1382 wiredlen = 0;
1383 }
1384 }
1385 req->lock = REQ_WIRED;
1386 req->validlen = wiredlen;
1387 }
1388 return (0);
1389 }
1390
/*
 * Resolve the numeric OID name[0..namelen-1] to its sysctl_oid.
 * On success *noid is set, and *nindx (if non-NULL) receives the number
 * of name components consumed.  Returns ENOENT if a component does not
 * exist, ENOTDIR if a non-node is used as an intermediate component.
 * Caller must hold the sysctl exclusive lock.
 */
int
sysctl_find_oid(int *name, u_int namelen, struct sysctl_oid **noid,
    int *nindx, struct sysctl_req *req)
{
	struct sysctl_oid_list *lsp;
	struct sysctl_oid *oid;
	int indx;

	SYSCTL_ASSERT_XLOCKED();
	lsp = &sysctl__children;
	indx = 0;
	while (indx < CTL_MAXNAME) {
		/* Linear scan of the current level for this component. */
		SLIST_FOREACH(oid, lsp, oid_link) {
			if (oid->oid_number == name[indx])
				break;
		}
		if (oid == NULL)
			return (ENOENT);

		indx++;
		if ((oid->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
			/*
			 * A node with a handler consumes the remaining
			 * components itself, so stop here; likewise if
			 * the name is exhausted.
			 */
			if (oid->oid_handler != NULL || indx == namelen) {
				*noid = oid;
				if (nindx != NULL)
					*nindx = indx;
				KASSERT((oid->oid_kind & CTLFLAG_DYING) == 0,
				    ("%s found DYING node %p", __func__, oid));
				return (0);
			}
			/* Descend into the node's children. */
			lsp = SYSCTL_CHILDREN(oid);
		} else if (indx == namelen) {
			/* Leaf OID matched the full name. */
			*noid = oid;
			if (nindx != NULL)
				*nindx = indx;
			KASSERT((oid->oid_kind & CTLFLAG_DYING) == 0,
			    ("%s found DYING node %p", __func__, oid));
			return (0);
		} else {
			/* Leaf reached but name components remain. */
			return (ENOTDIR);
		}
	}
	return (ENOENT);
}
1434
1435 /*
1436 * Traverse our tree, and find the right node, execute whatever it points
1437 * to, and return the resulting error code.
1438 */
1439
1440 static int
1441 sysctl_root(SYSCTL_HANDLER_ARGS)
1442 {
1443 struct sysctl_oid *oid;
1444 int error, indx, lvl;
1445
1446 SYSCTL_ASSERT_XLOCKED();
1447
1448 error = sysctl_find_oid(arg1, arg2, &oid, &indx, req);
1449 if (error)
1450 return (error);
1451
1452 if ((oid->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
1453 /*
1454 * You can't call a sysctl when it's a node, but has
1455 * no handler. Inform the user that it's a node.
1456 * The indx may or may not be the same as namelen.
1457 */
1458 if (oid->oid_handler == NULL)
1459 return (EISDIR);
1460 }
1461
1462 /* Is this sysctl writable? */
1463 if (req->newptr && !(oid->oid_kind & CTLFLAG_WR))
1464 return (EPERM);
1465
1466 KASSERT(req->td != NULL, ("sysctl_root(): req->td == NULL"));
1467
1468 #ifdef CAPABILITY_MODE
1469 /*
1470 * If the process is in capability mode, then don't permit reading or
1471 * writing unless specifically granted for the node.
1472 */
1473 if (IN_CAPABILITY_MODE(req->td)) {
1474 if (req->oldptr && !(oid->oid_kind & CTLFLAG_CAPRD))
1475 return (EPERM);
1476 if (req->newptr && !(oid->oid_kind & CTLFLAG_CAPWR))
1477 return (EPERM);
1478 }
1479 #endif
1480
1481 /* Is this sysctl sensitive to securelevels? */
1482 if (req->newptr && (oid->oid_kind & CTLFLAG_SECURE)) {
1483 lvl = (oid->oid_kind & CTLMASK_SECURE) >> CTLSHIFT_SECURE;
1484 error = securelevel_gt(req->td->td_ucred, lvl);
1485 if (error)
1486 return (error);
1487 }
1488
1489 /* Is this sysctl writable by only privileged users? */
1490 if (req->newptr && !(oid->oid_kind & CTLFLAG_ANYBODY)) {
1491 int priv;
1492
1493 if (oid->oid_kind & CTLFLAG_PRISON)
1494 priv = PRIV_SYSCTL_WRITEJAIL;
1495 #ifdef VIMAGE
1496 else if ((oid->oid_kind & CTLFLAG_VNET) &&
1497 prison_owns_vnet(req->td->td_ucred))
1498 priv = PRIV_SYSCTL_WRITEJAIL;
1499 #endif
1500 else
1501 priv = PRIV_SYSCTL_WRITE;
1502 error = priv_check(req->td, priv);
1503 if (error)
1504 return (error);
1505 }
1506
1507 if (!oid->oid_handler)
1508 return (EINVAL);
1509
1510 if ((oid->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
1511 arg1 = (int *)arg1 + indx;
1512 arg2 -= indx;
1513 } else {
1514 arg1 = oid->oid_arg1;
1515 arg2 = oid->oid_arg2;
1516 }
1517 #ifdef MAC
1518 error = mac_system_check_sysctl(req->td->td_ucred, oid, arg1, arg2,
1519 req);
1520 if (error != 0)
1521 return (error);
1522 #endif
1523 oid->oid_running++;
1524 SYSCTL_XUNLOCK();
1525 #ifdef VIMAGE
1526 if ((oid->oid_kind & CTLFLAG_VNET) && arg1 != NULL)
1527 arg1 = (void *)(curvnet->vnet_data_base + (uintptr_t)arg1);
1528 #endif
1529 if (!(oid->oid_kind & CTLFLAG_MPSAFE))
1530 mtx_lock(&Giant);
1531 error = oid->oid_handler(oid, arg1, arg2, req);
1532 if (!(oid->oid_kind & CTLFLAG_MPSAFE))
1533 mtx_unlock(&Giant);
1534
1535 KFAIL_POINT_ERROR(_debug_fail_point, sysctl_running, error);
1536
1537 SYSCTL_XLOCK();
1538 oid->oid_running--;
1539 if (oid->oid_running == 0 && (oid->oid_kind & CTLFLAG_DYING) != 0)
1540 wakeup(&oid->oid_running);
1541 return (error);
1542 }
1543
#ifndef _SYS_SYSPROTO_H_
struct sysctl_args {
	int	*name;		/* user pointer to the numeric OID */
	u_int	namelen;	/* number of OID components */
	void	*old;		/* buffer receiving the old value (may be NULL) */
	size_t	*oldlenp;	/* in: buffer size, out: bytes available */
	void	*new;		/* new value to set (may be NULL) */
	size_t	newlen;		/* length of the new value */
};
#endif
/*
 * __sysctl(2) system call: copy in the OID, dispatch through
 * userland_sysctl(), and report the resulting length to the user.
 */
int
sys___sysctl(struct thread *td, struct sysctl_args *uap)
{
	int error, i, name[CTL_MAXNAME];
	size_t j;

	if (uap->namelen > CTL_MAXNAME || uap->namelen < 2)
		return (EINVAL);

	error = copyin(uap->name, &name, uap->namelen * sizeof(int));
	if (error)
		return (error);

	error = userland_sysctl(td, name, uap->namelen,
	    uap->old, uap->oldlenp, 0,
	    uap->new, uap->newlen, &j, 0);
	/* ENOMEM (truncated old buffer) still reports the length below. */
	if (error && error != ENOMEM)
		return (error);
	if (uap->oldlenp) {
		i = copyout(&j, uap->oldlenp, sizeof(j));
		if (i)
			return (i);
	}
	return (error);
}
1579
1580 /*
1581 * This is used from various compatibility syscalls too. That's why name
1582 * must be in kernel space.
1583 */
1584 int
1585 userland_sysctl(struct thread *td, int *name, u_int namelen, void *old,
1586 size_t *oldlenp, int inkernel, void *new, size_t newlen, size_t *retval,
1587 int flags)
1588 {
1589 int error = 0, memlocked;
1590 struct sysctl_req req;
1591
1592 bzero(&req, sizeof req);
1593
1594 req.td = td;
1595 req.flags = flags;
1596
1597 if (oldlenp) {
1598 if (inkernel) {
1599 req.oldlen = *oldlenp;
1600 } else {
1601 error = copyin(oldlenp, &req.oldlen, sizeof(*oldlenp));
1602 if (error)
1603 return (error);
1604 }
1605 }
1606 req.validlen = req.oldlen;
1607
1608 if (old) {
1609 if (!useracc(old, req.oldlen, VM_PROT_WRITE))
1610 return (EFAULT);
1611 req.oldptr= old;
1612 }
1613
1614 if (new != NULL) {
1615 if (!useracc(new, newlen, VM_PROT_READ))
1616 return (EFAULT);
1617 req.newlen = newlen;
1618 req.newptr = new;
1619 }
1620
1621 req.oldfunc = sysctl_old_user;
1622 req.newfunc = sysctl_new_user;
1623 req.lock = REQ_UNWIRED;
1624
1625 #ifdef KTRACE
1626 if (KTRPOINT(curthread, KTR_SYSCTL))
1627 ktrsysctl(name, namelen);
1628 #endif
1629
1630 if (req.oldlen > PAGE_SIZE) {
1631 memlocked = 1;
1632 sx_xlock(&sysctlmemlock);
1633 } else
1634 memlocked = 0;
1635 CURVNET_SET(TD_TO_VNET(td));
1636
1637 for (;;) {
1638 req.oldidx = 0;
1639 req.newidx = 0;
1640 SYSCTL_XLOCK();
1641 error = sysctl_root(0, name, namelen, &req);
1642 SYSCTL_XUNLOCK();
1643 if (error != EAGAIN)
1644 break;
1645 kern_yield(PRI_USER);
1646 }
1647
1648 CURVNET_RESTORE();
1649
1650 if (req.lock == REQ_WIRED && req.validlen > 0)
1651 vsunlock(req.oldptr, req.validlen);
1652 if (memlocked)
1653 sx_xunlock(&sysctlmemlock);
1654
1655 if (error && error != ENOMEM)
1656 return (error);
1657
1658 if (retval) {
1659 if (req.oldptr && req.oldidx > req.validlen)
1660 *retval = req.validlen;
1661 else
1662 *retval = req.oldidx;
1663 }
1664 return (error);
1665 }
1666
1667 /*
1668 * Drain into a sysctl struct. The user buffer should be wired if a page
1669 * fault would cause issue.
1670 */
1671 static int
1672 sbuf_sysctl_drain(void *arg, const char *data, int len)
1673 {
1674 struct sysctl_req *req = arg;
1675 int error;
1676
1677 error = SYSCTL_OUT(req, data, len);
1678 KASSERT(error >= 0, ("Got unexpected negative value %d", error));
1679 return (error == 0 ? len : -error);
1680 }
1681
1682 struct sbuf *
1683 sbuf_new_for_sysctl(struct sbuf *s, char *buf, int length,
1684 struct sysctl_req *req)
1685 {
1686
1687 s = sbuf_new(s, buf, length, SBUF_FIXEDLEN);
1688 sbuf_set_drain(s, sbuf_sysctl_drain, req);
1689 return (s);
1690 }