1 /*-
2 * Copyright (c) 1982, 1986, 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * Mike Karels at Berkeley Software Design, Inc.
7 *
8 * Quite extensively rewritten by Poul-Henning Kamp of the FreeBSD
9 * project, to make these variables more userfriendly.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 4. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 *
35 * @(#)kern_sysctl.c 8.4 (Berkeley) 4/14/94
36 */
37
38 #include <sys/cdefs.h>
39 __FBSDID("$FreeBSD: releng/9.2/sys/kern/kern_sysctl.c 248034 2013-03-08 11:25:52Z marius $");
40
41 #include "opt_capsicum.h"
42 #include "opt_compat.h"
43 #include "opt_ktrace.h"
44
45 #include <sys/param.h>
46 #include <sys/fail.h>
47 #include <sys/systm.h>
48 #include <sys/capability.h>
49 #include <sys/kernel.h>
50 #include <sys/sysctl.h>
51 #include <sys/malloc.h>
52 #include <sys/priv.h>
53 #include <sys/proc.h>
54 #include <sys/jail.h>
55 #include <sys/lock.h>
56 #include <sys/mutex.h>
57 #include <sys/sbuf.h>
58 #include <sys/sx.h>
59 #include <sys/sysproto.h>
60 #include <sys/uio.h>
61 #ifdef KTRACE
62 #include <sys/ktrace.h>
63 #endif
64
65 #include <net/vnet.h>
66
67 #include <security/mac/mac_framework.h>
68
69 #include <vm/vm.h>
70 #include <vm/vm_extern.h>
71
72 static MALLOC_DEFINE(M_SYSCTL, "sysctl", "sysctl internal magic");
73 static MALLOC_DEFINE(M_SYSCTLOID, "sysctloid", "sysctl dynamic oids");
74 static MALLOC_DEFINE(M_SYSCTLTMP, "sysctltmp", "sysctl temp output buffer");
75
76 /*
77 * The sysctllock protects the MIB tree. It also protects sysctl
78 * contexts used with dynamic sysctls. The sysctl_register_oid() and
79 * sysctl_unregister_oid() routines require the sysctllock to already
80 * be held, so the sysctl_lock() and sysctl_unlock() routines are
81 * provided for the few places in the kernel which need to use that
82 * API rather than using the dynamic API. Use of the dynamic API is
83 * strongly encouraged for most code.
84 *
85 * The sysctlmemlock is used to limit the amount of user memory wired for
86 * sysctl requests. This is implemented by serializing any userland
87 * sysctl requests larger than a single page via an exclusive lock.
88 */
89 static struct sx sysctllock;
90 static struct sx sysctlmemlock;
91
92 #define SYSCTL_XLOCK() sx_xlock(&sysctllock)
93 #define SYSCTL_XUNLOCK() sx_xunlock(&sysctllock)
94 #define SYSCTL_ASSERT_XLOCKED() sx_assert(&sysctllock, SA_XLOCKED)
95 #define SYSCTL_INIT() sx_init(&sysctllock, "sysctl lock")
96 #define SYSCTL_SLEEP(ch, wmesg, timo) \
97 sx_sleep(ch, &sysctllock, 0, wmesg, timo)
98
99 static int sysctl_root(SYSCTL_HANDLER_ARGS);
100
101 struct sysctl_oid_list sysctl__children; /* root list */
102
103 static int sysctl_remove_oid_locked(struct sysctl_oid *oidp, int del,
104 int recurse);
105
106 static struct sysctl_oid *
107 sysctl_find_oidname(const char *name, struct sysctl_oid_list *list)
108 {
109 struct sysctl_oid *oidp;
110
111 SYSCTL_ASSERT_XLOCKED();
112 SLIST_FOREACH(oidp, list, oid_link) {
113 if (strcmp(oidp->oid_name, name) == 0) {
114 return (oidp);
115 }
116 }
117 return (NULL);
118 }
119
120 /*
121 * Initialization of the MIB tree.
122 *
123 * Order by number in each list.
124 */
/*
 * Acquire the sysctl tree lock.  Provided for the few callers that use
 * sysctl_register_oid()/sysctl_unregister_oid() directly instead of the
 * dynamic API.
 */
void
sysctl_lock(void)
{

	SYSCTL_XLOCK();
}
131
/* Release the sysctl tree lock taken by sysctl_lock(). */
void
sysctl_unlock(void)
{

	SYSCTL_XUNLOCK();
}
138
139 void
140 sysctl_register_oid(struct sysctl_oid *oidp)
141 {
142 struct sysctl_oid_list *parent = oidp->oid_parent;
143 struct sysctl_oid *p;
144 struct sysctl_oid *q;
145
146 /*
147 * First check if another oid with the same name already
148 * exists in the parent's list.
149 */
150 SYSCTL_ASSERT_XLOCKED();
151 p = sysctl_find_oidname(oidp->oid_name, parent);
152 if (p != NULL) {
153 if ((p->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
154 p->oid_refcnt++;
155 return;
156 } else {
157 printf("can't re-use a leaf (%s)!\n", p->oid_name);
158 return;
159 }
160 }
161 /*
162 * If this oid has a number OID_AUTO, give it a number which
163 * is greater than any current oid.
164 * NOTE: DO NOT change the starting value here, change it in
165 * <sys/sysctl.h>, and make sure it is at least 256 to
166 * accomodate e.g. net.inet.raw as a static sysctl node.
167 */
168 if (oidp->oid_number == OID_AUTO) {
169 static int newoid = CTL_AUTO_START;
170
171 oidp->oid_number = newoid++;
172 if (newoid == 0x7fffffff)
173 panic("out of oids");
174 }
175 #if 0
176 else if (oidp->oid_number >= CTL_AUTO_START) {
177 /* do not panic; this happens when unregistering sysctl sets */
178 printf("static sysctl oid too high: %d", oidp->oid_number);
179 }
180 #endif
181
182 /*
183 * Insert the oid into the parent's list in order.
184 */
185 q = NULL;
186 SLIST_FOREACH(p, parent, oid_link) {
187 if (oidp->oid_number < p->oid_number)
188 break;
189 q = p;
190 }
191 if (q)
192 SLIST_INSERT_AFTER(q, oidp, oid_link);
193 else
194 SLIST_INSERT_HEAD(parent, oidp, oid_link);
195 }
196
197 void
198 sysctl_unregister_oid(struct sysctl_oid *oidp)
199 {
200 struct sysctl_oid *p;
201 int error;
202
203 SYSCTL_ASSERT_XLOCKED();
204 error = ENOENT;
205 if (oidp->oid_number == OID_AUTO) {
206 error = EINVAL;
207 } else {
208 SLIST_FOREACH(p, oidp->oid_parent, oid_link) {
209 if (p == oidp) {
210 SLIST_REMOVE(oidp->oid_parent, oidp,
211 sysctl_oid, oid_link);
212 error = 0;
213 break;
214 }
215 }
216 }
217
218 /*
219 * This can happen when a module fails to register and is
220 * being unloaded afterwards. It should not be a panic()
221 * for normal use.
222 */
223 if (error)
224 printf("%s: failed to unregister sysctl\n", __func__);
225 }
226
227 /* Initialize a new context to keep track of dynamically added sysctls. */
228 int
229 sysctl_ctx_init(struct sysctl_ctx_list *c)
230 {
231
232 if (c == NULL) {
233 return (EINVAL);
234 }
235
236 /*
237 * No locking here, the caller is responsible for not adding
238 * new nodes to a context until after this function has
239 * returned.
240 */
241 TAILQ_INIT(c);
242 return (0);
243 }
244
/*
 * Free the context, and destroy all dynamic oids registered in this context.
 *
 * Returns 0 on success, or EBUSY if any oid in the context cannot be
 * removed — in which case every oid is re-registered and the context is
 * left intact.
 */
int
sysctl_ctx_free(struct sysctl_ctx_list *clist)
{
	struct sysctl_ctx_entry *e, *e1;
	int error;

	error = 0;
	/*
	 * First perform a "dry run": deregister (without freeing) every
	 * oid in the context to check that all of them can be removed.
	 * XXX FIXME
	 * XXX This algorithm is a hack. But I don't know any
	 * XXX better solution for now...
	 */
	SYSCTL_XLOCK();
	TAILQ_FOREACH(e, clist, link) {
		error = sysctl_remove_oid_locked(e->entry, 0, 0);
		if (error)
			break;
	}
	/*
	 * Restore deregistered entries, either from the end,
	 * or from the place where error occurred.
	 * e contains the entry that was not unregistered
	 */
	if (error)
		e1 = TAILQ_PREV(e, sysctl_ctx_list, link);
	else
		e1 = TAILQ_LAST(clist, sysctl_ctx_list);
	while (e1 != NULL) {
		sysctl_register_oid(e1->entry);
		e1 = TAILQ_PREV(e1, sysctl_ctx_list, link);
	}
	if (error) {
		SYSCTL_XUNLOCK();
		return(EBUSY);
	}
	/* Now really delete the entries (del=1 frees the oid storage too). */
	e = TAILQ_FIRST(clist);
	while (e != NULL) {
		e1 = TAILQ_NEXT(e, link);
		error = sysctl_remove_oid_locked(e->entry, 1, 0);
		if (error)
			panic("sysctl_remove_oid: corrupt tree, entry: %s",
			    e->entry->oid_name);
		free(e, M_SYSCTLOID);
		e = e1;
	}
	SYSCTL_XUNLOCK();
	return (error);
}
296
297 /* Add an entry to the context */
298 struct sysctl_ctx_entry *
299 sysctl_ctx_entry_add(struct sysctl_ctx_list *clist, struct sysctl_oid *oidp)
300 {
301 struct sysctl_ctx_entry *e;
302
303 SYSCTL_ASSERT_XLOCKED();
304 if (clist == NULL || oidp == NULL)
305 return(NULL);
306 e = malloc(sizeof(struct sysctl_ctx_entry), M_SYSCTLOID, M_WAITOK);
307 e->entry = oidp;
308 TAILQ_INSERT_HEAD(clist, e, link);
309 return (e);
310 }
311
312 /* Find an entry in the context */
313 struct sysctl_ctx_entry *
314 sysctl_ctx_entry_find(struct sysctl_ctx_list *clist, struct sysctl_oid *oidp)
315 {
316 struct sysctl_ctx_entry *e;
317
318 SYSCTL_ASSERT_XLOCKED();
319 if (clist == NULL || oidp == NULL)
320 return(NULL);
321 TAILQ_FOREACH(e, clist, link) {
322 if(e->entry == oidp)
323 return(e);
324 }
325 return (e);
326 }
327
328 /*
329 * Delete an entry from the context.
330 * NOTE: this function doesn't free oidp! You have to remove it
331 * with sysctl_remove_oid().
332 */
333 int
334 sysctl_ctx_entry_del(struct sysctl_ctx_list *clist, struct sysctl_oid *oidp)
335 {
336 struct sysctl_ctx_entry *e;
337
338 if (clist == NULL || oidp == NULL)
339 return (EINVAL);
340 SYSCTL_XLOCK();
341 e = sysctl_ctx_entry_find(clist, oidp);
342 if (e != NULL) {
343 TAILQ_REMOVE(clist, e, link);
344 SYSCTL_XUNLOCK();
345 free(e, M_SYSCTLOID);
346 return (0);
347 } else {
348 SYSCTL_XUNLOCK();
349 return (ENOENT);
350 }
351 }
352
/*
 * Remove dynamically created sysctl trees.
 * oidp - top of the tree to be removed
 * del - if 0 - just deregister, otherwise free up entries as well
 * recurse - if != 0 traverse the subtree to be deleted
 *
 * Returns 0, or an errno from sysctl_remove_oid_locked() (EINVAL for
 * non-dynamic oids, ENOTEMPTY for a populated node without recurse).
 */
int
sysctl_remove_oid(struct sysctl_oid *oidp, int del, int recurse)
{
	int error;

	SYSCTL_XLOCK();
	error = sysctl_remove_oid_locked(oidp, del, recurse);
	SYSCTL_XUNLOCK();
	return (error);
}
369
370 int
371 sysctl_remove_name(struct sysctl_oid *parent, const char *name,
372 int del, int recurse)
373 {
374 struct sysctl_oid *p, *tmp;
375 int error;
376
377 error = ENOENT;
378 SYSCTL_XLOCK();
379 SLIST_FOREACH_SAFE(p, SYSCTL_CHILDREN(parent), oid_link, tmp) {
380 if (strcmp(p->oid_name, name) == 0) {
381 error = sysctl_remove_oid_locked(p, del, recurse);
382 break;
383 }
384 }
385 SYSCTL_XUNLOCK();
386
387 return (error);
388 }
389
390
/*
 * Deregister (and with del != 0, free) oidp; with recurse != 0 descend
 * into child nodes first.  Returns 0, EINVAL for NULL/non-dynamic/bad
 * refcount oids, or ENOTEMPTY for a populated node without recurse.
 * The sysctl lock must be held.
 */
static int
sysctl_remove_oid_locked(struct sysctl_oid *oidp, int del, int recurse)
{
	struct sysctl_oid *p, *tmp;
	int error;

	SYSCTL_ASSERT_XLOCKED();
	if (oidp == NULL)
		return(EINVAL);
	if ((oidp->oid_kind & CTLFLAG_DYN) == 0) {
		printf("can't remove non-dynamic nodes!\n");
		return (EINVAL);
	}
	/*
	 * WARNING: normal method to do this should be through
	 * sysctl_ctx_free(). Use recursing as the last resort
	 * method to purge your sysctl tree of leftovers...
	 * However, if some other code still references these nodes,
	 * it will panic.
	 */
	if ((oidp->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
		/*
		 * Last reference to a node: its subtree must be empty,
		 * or recurse must be set so we can remove it first.
		 */
		if (oidp->oid_refcnt == 1) {
			SLIST_FOREACH_SAFE(p,
			    SYSCTL_CHILDREN(oidp), oid_link, tmp) {
				if (!recurse)
					return (ENOTEMPTY);
				error = sysctl_remove_oid_locked(p, del,
				    recurse);
				if (error)
					return (error);
			}
			if (del)
				free(SYSCTL_CHILDREN(oidp), M_SYSCTLOID);
		}
	}
	if (oidp->oid_refcnt > 1 ) {
		/* Shared node: just drop one reference. */
		oidp->oid_refcnt--;
	} else {
		if (oidp->oid_refcnt == 0) {
			printf("Warning: bad oid_refcnt=%u (%s)!\n",
			    oidp->oid_refcnt, oidp->oid_name);
			return (EINVAL);
		}
		sysctl_unregister_oid(oidp);
		if (del) {
			/*
			 * Wait for all threads running the handler to drain.
			 * This preserves the previous behavior when the
			 * sysctl lock was held across a handler invocation,
			 * and is necessary for module unload correctness.
			 */
			while (oidp->oid_running > 0) {
				oidp->oid_kind |= CTLFLAG_DYING;
				SYSCTL_SLEEP(&oidp->oid_running, "oidrm", 0);
			}
			if (oidp->oid_descr)
				free(__DECONST(char *, oidp->oid_descr),
				    M_SYSCTLOID);
			free(__DECONST(char *, oidp->oid_name), M_SYSCTLOID);
			free(oidp, M_SYSCTLOID);
		}
	}
	return (0);
}
455 /*
456 * Create new sysctls at run time.
457 * clist may point to a valid context initialized with sysctl_ctx_init().
458 */
/*
 * Create a dynamic sysctl oid under parent and register it.  An existing
 * node of the same name is shared (reference-counted) instead of
 * recreated; an existing leaf of the same name yields NULL.  clist, when
 * non-NULL, records the oid for later sysctl_ctx_free().
 */
struct sysctl_oid *
sysctl_add_oid(struct sysctl_ctx_list *clist, struct sysctl_oid_list *parent,
    int number, const char *name, int kind, void *arg1, intptr_t arg2,
    int (*handler)(SYSCTL_HANDLER_ARGS), const char *fmt, const char *descr)
{
	struct sysctl_oid *oidp;

	/* You have to hook up somewhere.. */
	if (parent == NULL)
		return(NULL);
	/* Check if the node already exists, otherwise create it */
	SYSCTL_XLOCK();
	oidp = sysctl_find_oidname(name, parent);
	if (oidp != NULL) {
		if ((oidp->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
			/* Existing node: share it via the refcount. */
			oidp->oid_refcnt++;
			/* Update the context */
			if (clist != NULL)
				sysctl_ctx_entry_add(clist, oidp);
			SYSCTL_XUNLOCK();
			return (oidp);
		} else {
			SYSCTL_XUNLOCK();
			printf("can't re-use a leaf (%s)!\n", name);
			return (NULL);
		}
	}
	oidp = malloc(sizeof(struct sysctl_oid), M_SYSCTLOID, M_WAITOK|M_ZERO);
	oidp->oid_parent = parent;
	SLIST_NEXT(oidp, oid_link) = NULL;
	oidp->oid_number = number;
	oidp->oid_refcnt = 1;
	oidp->oid_name = strdup(name, M_SYSCTLOID);
	oidp->oid_handler = handler;
	oidp->oid_kind = CTLFLAG_DYN | kind;
	if ((kind & CTLTYPE) == CTLTYPE_NODE) {
		/* Allocate space for children */
		SYSCTL_CHILDREN_SET(oidp, malloc(sizeof(struct sysctl_oid_list),
		    M_SYSCTLOID, M_WAITOK));
		SLIST_INIT(SYSCTL_CHILDREN(oidp));
		oidp->oid_arg2 = arg2;
	} else {
		oidp->oid_arg1 = arg1;
		oidp->oid_arg2 = arg2;
	}
	oidp->oid_fmt = fmt;
	if (descr)
		oidp->oid_descr = strdup(descr, M_SYSCTLOID);
	/* Update the context, if used */
	if (clist != NULL)
		sysctl_ctx_entry_add(clist, oidp);
	/* Register this oid */
	sysctl_register_oid(oidp);
	SYSCTL_XUNLOCK();
	return (oidp);
}
515
516 /*
517 * Rename an existing oid.
518 */
519 void
520 sysctl_rename_oid(struct sysctl_oid *oidp, const char *name)
521 {
522 char *newname;
523 char *oldname;
524
525 newname = strdup(name, M_SYSCTLOID);
526 SYSCTL_XLOCK();
527 oldname = __DECONST(char *, oidp->oid_name);
528 oidp->oid_name = newname;
529 SYSCTL_XUNLOCK();
530 free(oldname, M_SYSCTLOID);
531 }
532
533 /*
534 * Reparent an existing oid.
535 */
536 int
537 sysctl_move_oid(struct sysctl_oid *oid, struct sysctl_oid_list *parent)
538 {
539 struct sysctl_oid *oidp;
540
541 SYSCTL_XLOCK();
542 if (oid->oid_parent == parent) {
543 SYSCTL_XUNLOCK();
544 return (0);
545 }
546 oidp = sysctl_find_oidname(oid->oid_name, parent);
547 if (oidp != NULL) {
548 SYSCTL_XUNLOCK();
549 return (EEXIST);
550 }
551 sysctl_unregister_oid(oid);
552 oid->oid_parent = parent;
553 oid->oid_number = OID_AUTO;
554 sysctl_register_oid(oid);
555 SYSCTL_XUNLOCK();
556 return (0);
557 }
558
559 /*
560 * Register the kernel's oids on startup.
561 */
562 SET_DECLARE(sysctl_set, struct sysctl_oid);
563
/*
 * SYSINIT hook: initialize the sysctl locks, then register every oid
 * placed in the sysctl_set linker set by the static SYSCTL_* macros.
 */
static void
sysctl_register_all(void *arg)
{
	struct sysctl_oid **oidp;

	sx_init(&sysctlmemlock, "sysctl mem");
	SYSCTL_INIT();
	SYSCTL_XLOCK();
	SET_FOREACH(oidp, sysctl_set)
		sysctl_register_oid(*oidp);
	SYSCTL_XUNLOCK();
}
576 SYSINIT(sysctl, SI_SUB_KMEM, SI_ORDER_ANY, sysctl_register_all, 0);
577
578 /*
579 * "Staff-functions"
580 *
581 * These functions implement a presently undocumented interface
582 * used by the sysctl program to walk the tree, and get the type
583 * so it can print the value.
584 * This interface is under work and consideration, and should probably
585 * be killed with a big axe by the first person who can find the time.
 * (be aware though, that the proper interface isn't as obvious as it
 * may seem, there are various conflicting requirements.)
588 *
589 * {0,0} printf the entire MIB-tree.
590 * {0,1,...} return the name of the "..." OID.
591 * {0,2,...} return the next OID.
592 * {0,3} return the OID of the name in "new"
593 * {0,4,...} return the kind & format info for the "..." OID.
594 * {0,5,...} return the description the "..." OID.
595 */
596
597 #ifdef SYSCTL_DEBUG
/*
 * Recursively print one level of the MIB tree to the console: number,
 * name, R/W flags, handler marker and type, indented by depth i.
 * The sysctl lock must be held.
 */
static void
sysctl_sysctl_debug_dump_node(struct sysctl_oid_list *l, int i)
{
	int k;
	struct sysctl_oid *oidp;

	SYSCTL_ASSERT_XLOCKED();
	SLIST_FOREACH(oidp, l, oid_link) {

		/* Indent proportional to depth in the tree. */
		for (k=0; k<i; k++)
			printf(" ");

		printf("%d %s ", oidp->oid_number, oidp->oid_name);

		/* Access flags: R = readable, W = writable. */
		printf("%c%c",
		    oidp->oid_kind & CTLFLAG_RD ? 'R':' ',
		    oidp->oid_kind & CTLFLAG_WR ? 'W':' ');

		if (oidp->oid_handler)
			printf(" *Handler");

		switch (oidp->oid_kind & CTLTYPE) {
			case CTLTYPE_NODE:
				printf(" Node\n");
				/* Handler-less nodes hold a child list. */
				if (!oidp->oid_handler) {
					sysctl_sysctl_debug_dump_node(
					    oidp->oid_arg1, i+2);
				}
				break;
			case CTLTYPE_INT:    printf(" Int\n"); break;
			case CTLTYPE_UINT:   printf(" u_int\n"); break;
			case CTLTYPE_LONG:   printf(" Long\n"); break;
			case CTLTYPE_ULONG:  printf(" u_long\n"); break;
			case CTLTYPE_STRING: printf(" String\n"); break;
			case CTLTYPE_U64:    printf(" uint64_t\n"); break;
			case CTLTYPE_S64:    printf(" int64_t\n"); break;
			case CTLTYPE_OPAQUE: printf(" Opaque/struct\n"); break;
			default:	     printf("\n");
		}

	}
}
640
641 static int
642 sysctl_sysctl_debug(SYSCTL_HANDLER_ARGS)
643 {
644 int error;
645
646 error = priv_check(req->td, PRIV_SYSCTL_DEBUG);
647 if (error)
648 return (error);
649 SYSCTL_XLOCK();
650 sysctl_sysctl_debug_dump_node(&sysctl__children, 0);
651 SYSCTL_XUNLOCK();
652 return (ENOENT);
653 }
654
655 SYSCTL_PROC(_sysctl, 0, debug, CTLTYPE_STRING|CTLFLAG_RD,
656 0, 0, sysctl_sysctl_debug, "-", "");
657 #endif
658
/*
 * Handler for sysctl.name ({0,1,...}): translate the OID numbers in
 * arg1/arg2 into a dot-separated name written to the old buffer.
 * Components that fall outside the tree are emitted numerically.
 */
static int
sysctl_sysctl_name(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *) arg1;
	u_int namelen = arg2;
	int error = 0;
	struct sysctl_oid *oid;
	struct sysctl_oid_list *lsp = &sysctl__children, *lsp2;
	char buf[10];

	SYSCTL_XLOCK();
	while (namelen) {
		if (!lsp) {
			/* Off the tree: print the component as a number. */
			snprintf(buf,sizeof(buf),"%d",*name);
			if (req->oldidx)
				error = SYSCTL_OUT(req, ".", 1);
			if (!error)
				error = SYSCTL_OUT(req, buf, strlen(buf));
			if (error)
				goto out;
			namelen--;
			name++;
			continue;
		}
		lsp2 = 0;
		SLIST_FOREACH(oid, lsp, oid_link) {
			if (oid->oid_number != *name)
				continue;

			/* '.' separator before all but the first component. */
			if (req->oldidx)
				error = SYSCTL_OUT(req, ".", 1);
			if (!error)
				error = SYSCTL_OUT(req, oid->oid_name,
				    strlen(oid->oid_name));
			if (error)
				goto out;

			namelen--;
			name++;

			/* Only handler-less nodes have children to descend into. */
			if ((oid->oid_kind & CTLTYPE) != CTLTYPE_NODE)
				break;

			if (oid->oid_handler)
				break;

			lsp2 = SYSCTL_CHILDREN(oid);
			break;
		}
		lsp = lsp2;
	}
	/* NUL-terminate the assembled name. */
	error = SYSCTL_OUT(req, "", 1);
 out:
	SYSCTL_XUNLOCK();
	return (error);
}
715
716 /*
717 * XXXRW/JA: Shouldn't return name data for nodes that we don't permit in
718 * capability mode.
719 */
720 static SYSCTL_NODE(_sysctl, 1, name, CTLFLAG_RD | CTLFLAG_CAPRD,
721 sysctl_sysctl_name, "");
722
/*
 * Find the first oid strictly after name/namelen in tree order within
 * the subtree rooted at lsp, skipping CTLFLAG_SKIP entries.  On success
 * stores the successor's OID numbers in next[], its depth in *len and
 * the terminal oid in *oidpp, and returns 0; returns 1 when the subtree
 * holds no successor.  The sysctl lock must be held.
 */
static int
sysctl_sysctl_next_ls(struct sysctl_oid_list *lsp, int *name, u_int namelen,
    int *next, int *len, int level, struct sysctl_oid **oidpp)
{
	struct sysctl_oid *oidp;

	SYSCTL_ASSERT_XLOCKED();
	*len = level;
	SLIST_FOREACH(oidp, lsp, oid_link) {
		*next = oidp->oid_number;
		*oidpp = oidp;

		if (oidp->oid_kind & CTLFLAG_SKIP)
			continue;

		if (!namelen) {
			/* Name exhausted: the first entry here matches. */
			if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
				return (0);
			if (oidp->oid_handler)
				/* We really should call the handler here...*/
				return (0);
			lsp = SYSCTL_CHILDREN(oidp);
			if (!sysctl_sysctl_next_ls(lsp, 0, 0, next+1,
			    len, level+1, oidpp))
				return (0);
			/* Empty node: keep scanning this level. */
			goto emptynode;
		}

		if (oidp->oid_number < *name)
			continue;

		if (oidp->oid_number > *name) {
			/* Past the given OID: take the first entry found. */
			if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
				return (0);
			if (oidp->oid_handler)
				return (0);
			lsp = SYSCTL_CHILDREN(oidp);
			if (!sysctl_sysctl_next_ls(lsp, name+1, namelen-1,
			    next+1, len, level+1, oidpp))
				return (0);
			goto next;
		}
		/* Exact match: descend to look for the successor below. */
		if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
			continue;

		if (oidp->oid_handler)
			continue;

		lsp = SYSCTL_CHILDREN(oidp);
		if (!sysctl_sysctl_next_ls(lsp, name+1, namelen-1, next+1,
		    len, level+1, oidpp))
			return (0);
	next:
		/* Subtree exhausted: any later sibling counts as "next". */
		namelen = 1;
	emptynode:
		*len = level;
	}
	return (1);
}
782
783 static int
784 sysctl_sysctl_next(SYSCTL_HANDLER_ARGS)
785 {
786 int *name = (int *) arg1;
787 u_int namelen = arg2;
788 int i, j, error;
789 struct sysctl_oid *oid;
790 struct sysctl_oid_list *lsp = &sysctl__children;
791 int newoid[CTL_MAXNAME];
792
793 SYSCTL_XLOCK();
794 i = sysctl_sysctl_next_ls(lsp, name, namelen, newoid, &j, 1, &oid);
795 SYSCTL_XUNLOCK();
796 if (i)
797 return (ENOENT);
798 error = SYSCTL_OUT(req, newoid, j * sizeof (int));
799 return (error);
800 }
801
802 /*
803 * XXXRW/JA: Shouldn't return next data for nodes that we don't permit in
804 * capability mode.
805 */
806 static SYSCTL_NODE(_sysctl, 2, next, CTLFLAG_RD | CTLFLAG_CAPRD,
807 sysctl_sysctl_next, "");
808
/*
 * Translate a dotted sysctl name (mutated in place by strsep) into an
 * array of OID numbers.  Fills oid[] (at most CTL_MAXNAME entries),
 * sets *len, and optionally returns the final oid via *oidpp.  Returns
 * ENOENT when a component is unknown or a leaf is reached with name
 * components remaining.  The sysctl lock must be held.
 */
static int
name2oid(char *name, int *oid, int *len, struct sysctl_oid **oidpp)
{
	struct sysctl_oid *oidp;
	struct sysctl_oid_list *lsp = &sysctl__children;
	char *p;

	SYSCTL_ASSERT_XLOCKED();

	for (*len = 0; *len < CTL_MAXNAME;) {
		p = strsep(&name, ".");

		oidp = SLIST_FIRST(lsp);
		for (;; oidp = SLIST_NEXT(oidp, oid_link)) {
			if (oidp == NULL)
				return (ENOENT);
			if (strcmp(p, oidp->oid_name) == 0)
				break;
		}
		*oid++ = oidp->oid_number;
		(*len)++;

		if (name == NULL || *name == '\0') {
			/* Whole name consumed: success. */
			if (oidpp)
				*oidpp = oidp;
			return (0);
		}

		/* More components remain: only handler-less nodes descend. */
		if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
			break;

		if (oidp->oid_handler)
			break;

		lsp = SYSCTL_CHILDREN(oidp);
	}
	return (ENOENT);
}
847
848 static int
849 sysctl_sysctl_name2oid(SYSCTL_HANDLER_ARGS)
850 {
851 char *p;
852 int error, oid[CTL_MAXNAME], len = 0;
853 struct sysctl_oid *op = 0;
854
855 if (!req->newlen)
856 return (ENOENT);
857 if (req->newlen >= MAXPATHLEN) /* XXX arbitrary, undocumented */
858 return (ENAMETOOLONG);
859
860 p = malloc(req->newlen+1, M_SYSCTL, M_WAITOK);
861
862 error = SYSCTL_IN(req, p, req->newlen);
863 if (error) {
864 free(p, M_SYSCTL);
865 return (error);
866 }
867
868 p [req->newlen] = '\0';
869
870 SYSCTL_XLOCK();
871 error = name2oid(p, oid, &len, &op);
872 SYSCTL_XUNLOCK();
873
874 free(p, M_SYSCTL);
875
876 if (error)
877 return (error);
878
879 error = SYSCTL_OUT(req, oid, len * sizeof *oid);
880 return (error);
881 }
882
883 /*
884 * XXXRW/JA: Shouldn't return name2oid data for nodes that we don't permit in
885 * capability mode.
886 */
887 SYSCTL_PROC(_sysctl, 3, name2oid,
888 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MPSAFE
889 | CTLFLAG_CAPRW, 0, 0, sysctl_sysctl_name2oid, "I", "");
890
891 static int
892 sysctl_sysctl_oidfmt(SYSCTL_HANDLER_ARGS)
893 {
894 struct sysctl_oid *oid;
895 int error;
896
897 SYSCTL_XLOCK();
898 error = sysctl_find_oid(arg1, arg2, &oid, NULL, req);
899 if (error)
900 goto out;
901
902 if (oid->oid_fmt == NULL) {
903 error = ENOENT;
904 goto out;
905 }
906 error = SYSCTL_OUT(req, &oid->oid_kind, sizeof(oid->oid_kind));
907 if (error)
908 goto out;
909 error = SYSCTL_OUT(req, oid->oid_fmt, strlen(oid->oid_fmt) + 1);
910 out:
911 SYSCTL_XUNLOCK();
912 return (error);
913 }
914
915
916 static SYSCTL_NODE(_sysctl, 4, oidfmt, CTLFLAG_RD|CTLFLAG_MPSAFE|CTLFLAG_CAPRD,
917 sysctl_sysctl_oidfmt, "");
918
919 static int
920 sysctl_sysctl_oiddescr(SYSCTL_HANDLER_ARGS)
921 {
922 struct sysctl_oid *oid;
923 int error;
924
925 SYSCTL_XLOCK();
926 error = sysctl_find_oid(arg1, arg2, &oid, NULL, req);
927 if (error)
928 goto out;
929
930 if (oid->oid_descr == NULL) {
931 error = ENOENT;
932 goto out;
933 }
934 error = SYSCTL_OUT(req, oid->oid_descr, strlen(oid->oid_descr) + 1);
935 out:
936 SYSCTL_XUNLOCK();
937 return (error);
938 }
939
940 static SYSCTL_NODE(_sysctl, 5, oiddescr, CTLFLAG_RD|CTLFLAG_CAPRD,
941 sysctl_sysctl_oiddescr, "");
942
943 /*
944 * Default "handler" functions.
945 */
946
947 /*
948 * Handle an int, signed or unsigned.
949 * Two cases:
950 * a variable: point arg1 at it.
951 * a constant: pass it in arg2.
952 */
953
954 int
955 sysctl_handle_int(SYSCTL_HANDLER_ARGS)
956 {
957 int tmpout, error = 0;
958
959 /*
960 * Attempt to get a coherent snapshot by making a copy of the data.
961 */
962 if (arg1)
963 tmpout = *(int *)arg1;
964 else
965 tmpout = arg2;
966 error = SYSCTL_OUT(req, &tmpout, sizeof(int));
967
968 if (error || !req->newptr)
969 return (error);
970
971 if (!arg1)
972 error = EPERM;
973 else
974 error = SYSCTL_IN(req, arg1, sizeof(int));
975 return (error);
976 }
977
/*
 * Based on sysctl_handle_int(), convert milliseconds into ticks.
 * Note: this is used by TCP.
 */
982
983 int
984 sysctl_msec_to_ticks(SYSCTL_HANDLER_ARGS)
985 {
986 int error, s, tt;
987
988 tt = *(int *)arg1;
989 s = (int)((int64_t)tt * 1000 / hz);
990
991 error = sysctl_handle_int(oidp, &s, 0, req);
992 if (error || !req->newptr)
993 return (error);
994
995 tt = (int)((int64_t)s * hz / 1000);
996 if (tt < 1)
997 return (EINVAL);
998
999 *(int *)arg1 = tt;
1000 return (0);
1001 }
1002
1003
1004 /*
1005 * Handle a long, signed or unsigned.
1006 * Two cases:
1007 * a variable: point arg1 at it.
1008 * a constant: pass it in arg2.
1009 */
1010
/*
 * Handle a long, signed or unsigned: arg1 points at a variable, or the
 * constant value is passed in arg2.  Constants cannot be written (EPERM).
 * Requests flagged SCTL_MASK32 (32-bit compat) transfer only 32 bits.
 */
int
sysctl_handle_long(SYSCTL_HANDLER_ARGS)
{
	int error = 0;
	long tmplong;
#ifdef SCTL_MASK32
	int tmpint;
#endif

	/*
	 * Attempt to get a coherent snapshot by making a copy of the data.
	 */
	if (arg1)
		tmplong = *(long *)arg1;
	else
		tmplong = arg2;
#ifdef SCTL_MASK32
	if (req->flags & SCTL_MASK32) {
		tmpint = tmplong;
		error = SYSCTL_OUT(req, &tmpint, sizeof(int));
	} else
#endif
		error = SYSCTL_OUT(req, &tmplong, sizeof(long));

	if (error || !req->newptr)
		return (error);

	if (!arg1)
		error = EPERM;
#ifdef SCTL_MASK32
	else if (req->flags & SCTL_MASK32) {
		/* Read 32 bits and widen into the long variable. */
		error = SYSCTL_IN(req, &tmpint, sizeof(int));
		*(long *)arg1 = (long)tmpint;
	}
#endif
	else
		error = SYSCTL_IN(req, arg1, sizeof(long));
	return (error);
}
1050
1051 /*
1052 * Handle a 64 bit int, signed or unsigned.
1053 * Two cases:
1054 * a variable: point arg1 at it.
1055 * a constant: pass it in arg2.
1056 */
1057 int
1058 sysctl_handle_64(SYSCTL_HANDLER_ARGS)
1059 {
1060 int error = 0;
1061 uint64_t tmpout;
1062
1063 /*
1064 * Attempt to get a coherent snapshot by making a copy of the data.
1065 */
1066 if (arg1)
1067 tmpout = *(uint64_t *)arg1;
1068 else
1069 tmpout = arg2;
1070 error = SYSCTL_OUT(req, &tmpout, sizeof(uint64_t));
1071
1072 if (error || !req->newptr)
1073 return (error);
1074
1075 if (!arg1)
1076 error = EPERM;
1077 else
1078 error = SYSCTL_IN(req, arg1, sizeof(uint64_t));
1079 return (error);
1080 }
1081
1082 /*
1083 * Handle our generic '\0' terminated 'C' string.
1084 * Two cases:
1085 * a variable string: point arg1 at it, arg2 is max length.
1086 * a constant string: point arg1 at it, arg2 is zero.
1087 */
1088
/*
 * Handle a NUL-terminated C string: arg1 points at the buffer, arg2 is
 * its size for a writable string or 0 for a read-only constant.
 */
int
sysctl_handle_string(SYSCTL_HANDLER_ARGS)
{
	int error=0;
	char *tmparg;
	size_t outlen;

	/*
	 * Attempt to get a coherent snapshot by copying to a
	 * temporary kernel buffer.
	 */
retry:
	outlen = strlen((char *)arg1)+1;
	tmparg = malloc(outlen, M_SYSCTLTMP, M_WAITOK);

	/*
	 * The string may grow between the strlen() above and this copy
	 * (malloc can sleep); if it no longer fits, size again and retry.
	 */
	if (strlcpy(tmparg, (char *)arg1, outlen) >= outlen) {
		free(tmparg, M_SYSCTLTMP);
		goto retry;
	}

	error = SYSCTL_OUT(req, tmparg, outlen);
	free(tmparg, M_SYSCTLTMP);

	if (error || !req->newptr)
		return (error);

	/* The new value must fit in arg2 bytes including the terminator. */
	if ((req->newlen - req->newidx) >= arg2) {
		error = EINVAL;
	} else {
		arg2 = (req->newlen - req->newidx);
		error = SYSCTL_IN(req, arg1, arg2);
		((char *)arg1)[arg2] = '\0';
	}

	return (error);
}
1125
1126 /*
1127 * Handle any kind of opaque data.
1128 * arg1 points to it, arg2 is the size.
1129 */
1130
/*
 * Handle any kind of opaque data: arg1 points to it, arg2 is the size.
 */
int
sysctl_handle_opaque(SYSCTL_HANDLER_ARGS)
{
	int error, tries;
	u_int generation;
	struct sysctl_req req2;

	/*
	 * Attempt to get a coherent snapshot, by using the thread
	 * pre-emption counter updated from within mi_switch() to
	 * determine if we were pre-empted during a bcopy() or
	 * copyout(). Make 3 attempts at doing this before giving up.
	 * If we encounter an error, stop immediately.
	 */
	tries = 0;
	req2 = *req;	/* saved so the request indices can be rewound */
retry:
	generation = curthread->td_generation;
	error = SYSCTL_OUT(req, arg1, arg2);
	if (error)
		return (error);
	tries++;
	/* Preempted mid-copy: the output may be torn; rewind and retry. */
	if (generation != curthread->td_generation && tries < 3) {
		*req = req2;
		goto retry;
	}

	error = SYSCTL_IN(req, arg1, arg2);

	return (error);
}
1162
1163 /*
1164 * Transfer functions to/from kernel space.
1165 * XXX: rather untested at this point
1166 */
1167 static int
1168 sysctl_old_kernel(struct sysctl_req *req, const void *p, size_t l)
1169 {
1170 size_t i = 0;
1171
1172 if (req->oldptr) {
1173 i = l;
1174 if (req->oldlen <= req->oldidx)
1175 i = 0;
1176 else
1177 if (i > req->oldlen - req->oldidx)
1178 i = req->oldlen - req->oldidx;
1179 if (i > 0)
1180 bcopy(p, (char *)req->oldptr + req->oldidx, i);
1181 }
1182 req->oldidx += l;
1183 if (req->oldptr && i != l)
1184 return (ENOMEM);
1185 return (0);
1186 }
1187
1188 static int
1189 sysctl_new_kernel(struct sysctl_req *req, void *p, size_t l)
1190 {
1191 if (!req->newptr)
1192 return (0);
1193 if (req->newlen - req->newidx < l)
1194 return (EINVAL);
1195 bcopy((char *)req->newptr + req->newidx, p, l);
1196 req->newidx += l;
1197 return (0);
1198 }
1199
/*
 * Run a sysctl request from within the kernel using the kernel-space
 * transfer functions.  old/oldlenp receive the current value, new/newlen
 * supply a replacement; *retval is set to the number of bytes produced
 * (clamped to the buffer size when it was too small, in which case
 * ENOMEM is returned).
 */
int
kernel_sysctl(struct thread *td, int *name, u_int namelen, void *old,
    size_t *oldlenp, void *new, size_t newlen, size_t *retval, int flags)
{
	int error = 0;
	struct sysctl_req req;

	bzero(&req, sizeof req);

	req.td = td;
	req.flags = flags;

	if (oldlenp) {
		req.oldlen = *oldlenp;
	}
	req.validlen = req.oldlen;

	if (old) {
		req.oldptr= old;
	}

	if (new != NULL) {
		req.newlen = newlen;
		req.newptr = new;
	}

	/* Kernel addresses: use the bcopy-based transfer functions. */
	req.oldfunc = sysctl_old_kernel;
	req.newfunc = sysctl_new_kernel;
	req.lock = REQ_UNWIRED;

	SYSCTL_XLOCK();
	error = sysctl_root(0, name, namelen, &req);
	SYSCTL_XUNLOCK();

	/* A handler may have wired the output buffer; undo that. */
	if (req.lock == REQ_WIRED && req.validlen > 0)
		vsunlock(req.oldptr, req.validlen);

	if (error && error != ENOMEM)
		return (error);

	if (retval) {
		if (req.oldptr && req.oldidx > req.validlen)
			*retval = req.validlen;
		else
			*retval = req.oldidx;
	}
	return (error);
}
1248
/*
 * In-kernel sysctl by ASCII name: first translate the name to a numeric
 * OID via the internal {0,3} name2oid node, then issue the real request
 * through kernel_sysctl().  All other arguments are passed through
 * unchanged.
 */
int
kernel_sysctlbyname(struct thread *td, char *name, void *old, size_t *oldlenp,
    void *new, size_t newlen, size_t *retval, int flags)
{
	int oid[CTL_MAXNAME];
	size_t oidlen, plen;
	int error;

	oid[0] = 0;		/* sysctl internal magic */
	oid[1] = 3;		/* name2oid */
	oidlen = sizeof(oid);

	/* name2oid writes the resolved OID into oid[]; plen = bytes written. */
	error = kernel_sysctl(td, oid, 2, oid, &oidlen,
	    (void *)name, strlen(name), &plen, flags);
	if (error)
		return (error);

	error = kernel_sysctl(td, oid, plen / sizeof(int), old, oldlenp,
	    new, newlen, retval, flags);
	return (error);
}
1270
1271 /*
1272 * Transfer function to/from user space.
1273 */
/*
 * Old-value transfer function for requests originating in user space:
 * copy out as much of the l bytes at p as fits in the valid part of the
 * user buffer.  oldidx always advances by the full length so the caller
 * learns the total size required; a short copy yields ENOMEM.
 */
static int
sysctl_old_user(struct sysctl_req *req, const void *p, size_t l)
{
	size_t i, len, origidx;
	int error;

	origidx = req->oldidx;
	req->oldidx += l;
	/* Size-probe only: no destination buffer, just count bytes. */
	if (req->oldptr == NULL)
		return (0);
	/*
	 * If we have not wired the user supplied buffer and we are currently
	 * holding locks, drop a witness warning, as it's possible that
	 * write operations to the user page can sleep.
	 */
	if (req->lock != REQ_WIRED)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
		    "sysctl_old_user()");
	i = l;
	len = req->validlen;
	if (len <= origidx)
		i = 0;
	else {
		/* Clamp the copy to the space remaining in the buffer. */
		if (i > len - origidx)
			i = len - origidx;
		if (req->lock == REQ_WIRED) {
			/* Buffer is wired: the copyout cannot fault/sleep. */
			error = copyout_nofault(p, (char *)req->oldptr +
			    origidx, i);
		} else
			error = copyout(p, (char *)req->oldptr + origidx, i);
		if (error != 0)
			return (error);
	}
	/* Truncated output: tell the caller the buffer was too small. */
	if (i < l)
		return (ENOMEM);
	return (0);
}
1311
1312 static int
1313 sysctl_new_user(struct sysctl_req *req, void *p, size_t l)
1314 {
1315 int error;
1316
1317 if (!req->newptr)
1318 return (0);
1319 if (req->newlen - req->newidx < l)
1320 return (EINVAL);
1321 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1322 "sysctl_new_user()");
1323 error = copyin((char *)req->newptr + req->newidx, p, l);
1324 req->newidx += l;
1325 return (error);
1326 }
1327
1328 /*
1329 * Wire the user space destination buffer. If set to a value greater than
1330 * zero, the len parameter limits the maximum amount of wired memory.
1331 */
/*
 * Wire the user space destination buffer. If set to a value greater than
 * zero, the len parameter limits the maximum amount of wired memory.
 *
 * Only acts on user-space requests (oldfunc == sysctl_old_user) that are
 * not already wired.  If vslock() fails with ENOMEM the request still
 * proceeds, but with validlen forced to 0 (size-probe behavior); any
 * other vslock() error is returned to the caller.
 */
int
sysctl_wire_old_buffer(struct sysctl_req *req, size_t len)
{
	int ret;
	size_t wiredlen;

	wiredlen = (len > 0 && len < req->oldlen) ? len : req->oldlen;
	ret = 0;
	if (req->lock != REQ_WIRED && req->oldptr &&
	    req->oldfunc == sysctl_old_user) {
		if (wiredlen != 0) {
			ret = vslock(req->oldptr, wiredlen);
			if (ret != 0) {
				if (ret != ENOMEM)
					return (ret);
				/* Could not wire: degrade to probe-only. */
				wiredlen = 0;
			}
		}
		/* Mark wired even when wiredlen == 0 so we don't retry. */
		req->lock = REQ_WIRED;
		req->validlen = wiredlen;
	}
	return (0);
}
1355
/*
 * Walk the sysctl tree from the root, matching each component of the
 * numeric OID in name[] against the children of the current node.
 * On success *noid points at the terminal oid and, if nindx != NULL,
 * *nindx holds the number of name components consumed.
 *
 * Errors: ENOENT if a component does not match any child (or the name
 * exceeds CTL_MAXNAME levels), ENOTDIR if a leaf is hit before the
 * name is exhausted.  Caller must hold the sysctl exclusive lock.
 */
int
sysctl_find_oid(int *name, u_int namelen, struct sysctl_oid **noid,
    int *nindx, struct sysctl_req *req)
{
	struct sysctl_oid_list *lsp;
	struct sysctl_oid *oid;
	int indx;

	SYSCTL_ASSERT_XLOCKED();
	lsp = &sysctl__children;
	indx = 0;
	while (indx < CTL_MAXNAME) {
		/* Find the child matching the current name component. */
		SLIST_FOREACH(oid, lsp, oid_link) {
			if (oid->oid_number == name[indx])
				break;
		}
		if (oid == NULL)
			return (ENOENT);

		indx++;
		if ((oid->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
			/*
			 * A node with its own handler terminates the walk;
			 * the remaining name components are its arguments.
			 */
			if (oid->oid_handler != NULL || indx == namelen) {
				*noid = oid;
				if (nindx != NULL)
					*nindx = indx;
				KASSERT((oid->oid_kind & CTLFLAG_DYING) == 0,
				    ("%s found DYING node %p", __func__, oid));
				return (0);
			}
			/* Descend into this node's children. */
			lsp = SYSCTL_CHILDREN(oid);
		} else if (indx == namelen) {
			/* Leaf reached exactly at the end of the name. */
			*noid = oid;
			if (nindx != NULL)
				*nindx = indx;
			KASSERT((oid->oid_kind & CTLFLAG_DYING) == 0,
			    ("%s found DYING node %p", __func__, oid));
			return (0);
		} else {
			/* Leaf hit with name components left over. */
			return (ENOTDIR);
		}
	}
	return (ENOENT);
}
1399
1400 /*
1401 * Traverse our tree, and find the right node, execute whatever it points
1402 * to, and return the resulting error code.
1403 */
1404
/*
 * Look up the oid named by (arg1, arg2), run the access-control checks
 * that apply to the request, and invoke the oid's handler with the
 * sysctl exclusive lock dropped (reacquired afterwards).  Called with
 * the lock held; returns the handler's error code or a permission /
 * lookup error.
 */
static int
sysctl_root(SYSCTL_HANDLER_ARGS)
{
	struct sysctl_oid *oid;
	int error, indx, lvl;

	SYSCTL_ASSERT_XLOCKED();

	error = sysctl_find_oid(arg1, arg2, &oid, &indx, req);
	if (error)
		return (error);

	if ((oid->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
		/*
		 * You can't call a sysctl when it's a node, but has
		 * no handler.  Inform the user that it's a node.
		 * The indx may or may not be the same as namelen.
		 */
		if (oid->oid_handler == NULL)
			return (EISDIR);
	}

	/* Is this sysctl writable? */
	if (req->newptr && !(oid->oid_kind & CTLFLAG_WR))
		return (EPERM);

	KASSERT(req->td != NULL, ("sysctl_root(): req->td == NULL"));

#ifdef CAPABILITY_MODE
	/*
	 * If the process is in capability mode, then don't permit reading or
	 * writing unless specifically granted for the node.
	 */
	if (IN_CAPABILITY_MODE(req->td)) {
		if (req->oldptr && !(oid->oid_kind & CTLFLAG_CAPRD))
			return (EPERM);
		if (req->newptr && !(oid->oid_kind & CTLFLAG_CAPWR))
			return (EPERM);
	}
#endif

	/* Is this sysctl sensitive to securelevels? */
	if (req->newptr && (oid->oid_kind & CTLFLAG_SECURE)) {
		lvl = (oid->oid_kind & CTLMASK_SECURE) >> CTLSHIFT_SECURE;
		error = securelevel_gt(req->td->td_ucred, lvl);
		if (error)
			return (error);
	}

	/* Is this sysctl writable by only privileged users? */
	if (req->newptr && !(oid->oid_kind & CTLFLAG_ANYBODY)) {
		int priv;

		if (oid->oid_kind & CTLFLAG_PRISON)
			priv = PRIV_SYSCTL_WRITEJAIL;
#ifdef VIMAGE
		else if ((oid->oid_kind & CTLFLAG_VNET) &&
		     prison_owns_vnet(req->td->td_ucred))
			priv = PRIV_SYSCTL_WRITEJAIL;
#endif
		else
			priv = PRIV_SYSCTL_WRITE;
		error = priv_check(req->td, priv);
		if (error)
			return (error);
	}

	if (!oid->oid_handler)
		return (EINVAL);

	if ((oid->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
		/* Node handler: pass the unconsumed name components. */
		arg1 = (int *)arg1 + indx;
		arg2 -= indx;
	} else {
		/* Leaf handler: pass the oid's registered arguments. */
		arg1 = oid->oid_arg1;
		arg2 = oid->oid_arg2;
	}
#ifdef MAC
	error = mac_system_check_sysctl(req->td->td_ucred, oid, arg1, arg2,
	    req);
	if (error != 0)
		return (error);
#endif
	/*
	 * Bump oid_running so the oid cannot be torn down while its
	 * handler runs without the sysctl lock.
	 */
	oid->oid_running++;
	SYSCTL_XUNLOCK();

	/* Non-MPSAFE handlers still require Giant. */
	if (!(oid->oid_kind & CTLFLAG_MPSAFE))
		mtx_lock(&Giant);
	error = oid->oid_handler(oid, arg1, arg2, req);
	if (!(oid->oid_kind & CTLFLAG_MPSAFE))
		mtx_unlock(&Giant);

	KFAIL_POINT_ERROR(_debug_fail_point, sysctl_running, error);

	SYSCTL_XLOCK();
	oid->oid_running--;
	/* Wake anyone waiting to unregister this (now-idle) dying oid. */
	if (oid->oid_running == 0 && (oid->oid_kind & CTLFLAG_DYING) != 0)
		wakeup(&oid->oid_running);
	return (error);
}
1505
#ifndef _SYS_SYSPROTO_H_
/* Fallback argument layout when sysproto.h does not provide one. */
struct sysctl_args {
	int	*name;		/* user pointer to the OID array */
	u_int	namelen;	/* number of OID components */
	void	*old;		/* user buffer for the old value (or NULL) */
	size_t	*oldlenp;	/* user pointer to the buffer length */
	void	*new;		/* user pointer to the new value (or NULL) */
	size_t	newlen;		/* length of the new value */
};
#endif
/*
 * __sysctl(2) system call: copy the OID name in from user space, run
 * the request through userland_sysctl(), and report the resulting
 * length back through uap->oldlenp.  ENOMEM is deferred so the length
 * can still be copied out for buffer-sizing retries.
 */
int
sys___sysctl(struct thread *td, struct sysctl_args *uap)
{
	int error, i, name[CTL_MAXNAME];
	size_t j;

	if (uap->namelen > CTL_MAXNAME || uap->namelen < 2)
		return (EINVAL);

 	error = copyin(uap->name, &name, uap->namelen * sizeof(int));
 	if (error)
		return (error);

	error = userland_sysctl(td, name, uap->namelen,
		uap->old, uap->oldlenp, 0,
		uap->new, uap->newlen, &j, 0);
	if (error && error != ENOMEM)
		return (error);
	if (uap->oldlenp) {
		/* Report the produced (or required) length to the caller. */
		i = copyout(&j, uap->oldlenp, sizeof(j));
		if (i)
			return (i);
	}
	return (error);
}
1541
1542 /*
1543 * This is used from various compatibility syscalls too. That's why name
1544 * must be in kernel space.
1545 */
1546 int
1547 userland_sysctl(struct thread *td, int *name, u_int namelen, void *old,
1548 size_t *oldlenp, int inkernel, void *new, size_t newlen, size_t *retval,
1549 int flags)
1550 {
1551 int error = 0, memlocked;
1552 struct sysctl_req req;
1553
1554 bzero(&req, sizeof req);
1555
1556 req.td = td;
1557 req.flags = flags;
1558
1559 if (oldlenp) {
1560 if (inkernel) {
1561 req.oldlen = *oldlenp;
1562 } else {
1563 error = copyin(oldlenp, &req.oldlen, sizeof(*oldlenp));
1564 if (error)
1565 return (error);
1566 }
1567 }
1568 req.validlen = req.oldlen;
1569
1570 if (old) {
1571 if (!useracc(old, req.oldlen, VM_PROT_WRITE))
1572 return (EFAULT);
1573 req.oldptr= old;
1574 }
1575
1576 if (new != NULL) {
1577 if (!useracc(new, newlen, VM_PROT_READ))
1578 return (EFAULT);
1579 req.newlen = newlen;
1580 req.newptr = new;
1581 }
1582
1583 req.oldfunc = sysctl_old_user;
1584 req.newfunc = sysctl_new_user;
1585 req.lock = REQ_UNWIRED;
1586
1587 #ifdef KTRACE
1588 if (KTRPOINT(curthread, KTR_SYSCTL))
1589 ktrsysctl(name, namelen);
1590 #endif
1591
1592 if (req.oldlen > PAGE_SIZE) {
1593 memlocked = 1;
1594 sx_xlock(&sysctlmemlock);
1595 } else
1596 memlocked = 0;
1597 CURVNET_SET(TD_TO_VNET(td));
1598
1599 for (;;) {
1600 req.oldidx = 0;
1601 req.newidx = 0;
1602 SYSCTL_XLOCK();
1603 error = sysctl_root(0, name, namelen, &req);
1604 SYSCTL_XUNLOCK();
1605 if (error != EAGAIN)
1606 break;
1607 kern_yield(PRI_USER);
1608 }
1609
1610 CURVNET_RESTORE();
1611
1612 if (req.lock == REQ_WIRED && req.validlen > 0)
1613 vsunlock(req.oldptr, req.validlen);
1614 if (memlocked)
1615 sx_xunlock(&sysctlmemlock);
1616
1617 if (error && error != ENOMEM)
1618 return (error);
1619
1620 if (retval) {
1621 if (req.oldptr && req.oldidx > req.validlen)
1622 *retval = req.validlen;
1623 else
1624 *retval = req.oldidx;
1625 }
1626 return (error);
1627 }
1628
1629 /*
1630 * Drain into a sysctl struct. The user buffer should be wired if a page
1631 * fault would cause issue.
1632 */
1633 static int
1634 sbuf_sysctl_drain(void *arg, const char *data, int len)
1635 {
1636 struct sysctl_req *req = arg;
1637 int error;
1638
1639 error = SYSCTL_OUT(req, data, len);
1640 KASSERT(error >= 0, ("Got unexpected negative value %d", error));
1641 return (error == 0 ? len : -error);
1642 }
1643
1644 struct sbuf *
1645 sbuf_new_for_sysctl(struct sbuf *s, char *buf, int length,
1646 struct sysctl_req *req)
1647 {
1648
1649 s = sbuf_new(s, buf, length, SBUF_FIXEDLEN);
1650 sbuf_set_drain(s, sbuf_sysctl_drain, req);
1651 return (s);
1652 }
/* Cache object: a1f0ad6d9d941a985236aa23780a2b78 */