/* FreeBSD/Linux Kernel Cross Reference: sys/kern/kern_hhook.c */
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2010,2013 Lawrence Stewart <lstewart@freebsd.org>
5 * Copyright (c) 2010 The FreeBSD Foundation
6 * All rights reserved.
7 *
8 * This software was developed by Lawrence Stewart while studying at the Centre
9 * for Advanced Internet Architectures, Swinburne University of Technology,
10 * made possible in part by grants from the FreeBSD Foundation and Cisco
11 * University Research Program Fund at Community Foundation Silicon Valley.
12 *
13 * Portions of this software were developed at the Centre for Advanced
14 * Internet Architectures, Swinburne University of Technology, Melbourne,
15 * Australia by Lawrence Stewart under sponsorship from the FreeBSD Foundation.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions
19 * are met:
20 * 1. Redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer.
22 * 2. Redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 */
38
39 #include <sys/cdefs.h>
40 __FBSDID("$FreeBSD: releng/12.0/sys/kern/kern_hhook.c 326271 2017-11-27 15:20:12Z pfg $");
41
42 #include <sys/param.h>
43 #include <sys/kernel.h>
44 #include <sys/hhook.h>
45 #include <sys/khelp.h>
46 #include <sys/malloc.h>
47 #include <sys/module.h>
48 #include <sys/module_khelp.h>
49 #include <sys/osd.h>
50 #include <sys/queue.h>
51 #include <sys/refcount.h>
52 #include <sys/systm.h>
53
54 #include <net/vnet.h>
55
/*
 * A single registered helper hook: one function hooked onto one hook point.
 * Instances are linked off a hhook_head's hhh_hooks list and run in list
 * order by hhook_run_hooks().
 */
struct hhook {
	hhook_func_t hhk_func;		/* Function to call at the hook point. */
	struct helper *hhk_helper;	/* Owning khelp helper, or NULL. */
	void *hhk_udata;		/* Opaque data passed to hhk_func. */
	STAILQ_ENTRY(hhook) hhk_next;	/* Next hook on this hook point. */
};
62
static MALLOC_DEFINE(M_HHOOK, "hhook", "Helper hooks are linked off hhook_head lists");

LIST_HEAD(hhookheadhead, hhook_head);
/* Global list of all registered hook points (virtualised or not). */
struct hhookheadhead hhook_head_list;
/* Per-vnet list of hook points registered with HHOOK_HEADISINVNET. */
VNET_DEFINE(struct hhookheadhead, hhook_vhead_list);
#define V_hhook_vhead_list VNET(hhook_vhead_list)

/* Protects hhook_head_list, the per-vnet lists and n_hhookheads. */
static struct mtx hhook_head_list_lock;
MTX_SYSINIT(hhookheadlistlock, &hhook_head_list_lock, "hhook_head list lock",
    MTX_DEF);

/* Protected by hhook_head_list_lock. */
static uint32_t n_hhookheads;

/* Private function prototypes. */
static void hhook_head_destroy(struct hhook_head *hhh);
void khelp_new_hhook_registered(struct hhook_head *hhh, uint32_t flags);

/* Convenience wrappers for the global list lock. */
#define HHHLIST_LOCK() mtx_lock(&hhook_head_list_lock)
#define HHHLIST_UNLOCK() mtx_unlock(&hhook_head_list_lock)
#define HHHLIST_LOCK_ASSERT() mtx_assert(&hhook_head_list_lock, MA_OWNED)

/*
 * Per-hook-point rmlock: readers (hook invocation) are the common, hot
 * case; writers (hook add/remove, teardown) are rare.
 */
#define HHH_LOCK_INIT(hhh) rm_init(&(hhh)->hhh_lock, "hhook_head rm lock")
#define HHH_LOCK_DESTROY(hhh) rm_destroy(&(hhh)->hhh_lock)
#define HHH_WLOCK(hhh) rm_wlock(&(hhh)->hhh_lock)
#define HHH_WUNLOCK(hhh) rm_wunlock(&(hhh)->hhh_lock)
#define HHH_RLOCK(hhh, rmpt) rm_rlock(&(hhh)->hhh_lock, (rmpt))
#define HHH_RUNLOCK(hhh, rmpt) rm_runlock(&(hhh)->hhh_lock, (rmpt))
91
92 /*
93 * Run all helper hook functions for a given hook point.
94 */
95 void
96 hhook_run_hooks(struct hhook_head *hhh, void *ctx_data, struct osd *hosd)
97 {
98 struct hhook *hhk;
99 void *hdata;
100 struct rm_priotracker rmpt;
101
102 KASSERT(hhh->hhh_refcount > 0, ("hhook_head %p refcount is 0", hhh));
103
104 HHH_RLOCK(hhh, &rmpt);
105 STAILQ_FOREACH(hhk, &hhh->hhh_hooks, hhk_next) {
106 if (hhk->hhk_helper != NULL &&
107 hhk->hhk_helper->h_flags & HELPER_NEEDS_OSD) {
108 hdata = osd_get(OSD_KHELP, hosd, hhk->hhk_helper->h_id);
109 if (hdata == NULL)
110 continue;
111 } else
112 hdata = NULL;
113
114 /*
115 * XXXLAS: We currently ignore the int returned by the hook,
116 * but will likely want to handle it in future to allow hhook to
117 * be used like pfil and effect changes at the hhook calling
118 * site e.g. we could define a new hook type of HHOOK_TYPE_PFIL
119 * and standardise what particular return values mean and set
120 * the context data to pass exactly the same information as pfil
121 * hooks currently receive, thus replicating pfil with hhook.
122 */
123 hhk->hhk_func(hhh->hhh_type, hhh->hhh_id, hhk->hhk_udata,
124 ctx_data, hdata, hosd);
125 }
126 HHH_RUNLOCK(hhh, &rmpt);
127 }
128
129 /*
130 * Register a new helper hook function with a helper hook point.
131 */
132 int
133 hhook_add_hook(struct hhook_head *hhh, struct hookinfo *hki, uint32_t flags)
134 {
135 struct hhook *hhk, *tmp;
136 int error;
137
138 error = 0;
139
140 if (hhh == NULL)
141 return (ENOENT);
142
143 hhk = malloc(sizeof(struct hhook), M_HHOOK,
144 M_ZERO | ((flags & HHOOK_WAITOK) ? M_WAITOK : M_NOWAIT));
145
146 if (hhk == NULL)
147 return (ENOMEM);
148
149 hhk->hhk_helper = hki->hook_helper;
150 hhk->hhk_func = hki->hook_func;
151 hhk->hhk_udata = hki->hook_udata;
152
153 HHH_WLOCK(hhh);
154 STAILQ_FOREACH(tmp, &hhh->hhh_hooks, hhk_next) {
155 if (tmp->hhk_func == hki->hook_func &&
156 tmp->hhk_udata == hki->hook_udata) {
157 /* The helper hook function is already registered. */
158 error = EEXIST;
159 break;
160 }
161 }
162
163 if (!error) {
164 STAILQ_INSERT_TAIL(&hhh->hhh_hooks, hhk, hhk_next);
165 hhh->hhh_nhooks++;
166 } else
167 free(hhk, M_HHOOK);
168
169 HHH_WUNLOCK(hhh);
170
171 return (error);
172 }
173
174 /*
175 * Register a helper hook function with a helper hook point (including all
176 * virtual instances of the hook point if it is virtualised).
177 *
178 * The logic is unfortunately far more complex than for
179 * hhook_remove_hook_lookup() because hhook_add_hook() can call malloc() with
180 * M_WAITOK and thus we cannot call hhook_add_hook() with the
181 * hhook_head_list_lock held.
182 *
183 * The logic assembles an array of hhook_head structs that correspond to the
184 * helper hook point being hooked and bumps the refcount on each (all done with
185 * the hhook_head_list_lock held). The hhook_head_list_lock is then dropped, and
186 * hhook_add_hook() is called and the refcount dropped for each hhook_head
187 * struct in the array.
188 */
int
hhook_add_hook_lookup(struct hookinfo *hki, uint32_t flags)
{
	struct hhook_head **heads_to_hook, *hhh;
	int error, i, n_heads_to_hook;

tryagain:
	error = i = 0;
	/*
	 * Accessing n_hhookheads without hhook_head_list_lock held opens up a
	 * race with hhook_head_register() which we are unlikely to lose, but
	 * nonetheless have to cope with - hence the complex goto logic.
	 */
	n_heads_to_hook = n_hhookheads;
	heads_to_hook = malloc(n_heads_to_hook * sizeof(struct hhook_head *),
	    M_HHOOK, flags & HHOOK_WAITOK ? M_WAITOK : M_NOWAIT);
	if (heads_to_hook == NULL)
		return (ENOMEM);

	/* Snapshot all matching heads, taking a reference on each. */
	HHHLIST_LOCK();
	LIST_FOREACH(hhh, &hhook_head_list, hhh_next) {
		if (hhh->hhh_type == hki->hook_type &&
		    hhh->hhh_id == hki->hook_id) {
			if (i < n_heads_to_hook) {
				heads_to_hook[i] = hhh;
				refcount_acquire(&heads_to_hook[i]->hhh_refcount);
				i++;
			} else {
				/*
				 * We raced with hhook_head_register() which
				 * inserted a hhook_head that we need to hook
				 * but did not malloc space for. Abort this run
				 * and try again.
				 */
				for (i--; i >= 0; i--)
					refcount_release(&heads_to_hook[i]->hhh_refcount);
				free(heads_to_hook, M_HHOOK);
				HHHLIST_UNLOCK();
				goto tryagain;
			}
		}
	}
	HHHLIST_UNLOCK();

	/*
	 * Hook each snapshotted head with the list lock dropped (the add may
	 * sleep). Only the first failure is reported, but references are
	 * released on every head regardless so none is leaked.
	 */
	for (i--; i >= 0; i--) {
		if (!error)
			error = hhook_add_hook(heads_to_hook[i], hki, flags);
		refcount_release(&heads_to_hook[i]->hhh_refcount);
	}

	free(heads_to_hook, M_HHOOK);

	return (error);
}
243
244 /*
245 * Remove a helper hook function from a helper hook point.
246 */
247 int
248 hhook_remove_hook(struct hhook_head *hhh, struct hookinfo *hki)
249 {
250 struct hhook *tmp;
251
252 if (hhh == NULL)
253 return (ENOENT);
254
255 HHH_WLOCK(hhh);
256 STAILQ_FOREACH(tmp, &hhh->hhh_hooks, hhk_next) {
257 if (tmp->hhk_func == hki->hook_func &&
258 tmp->hhk_udata == hki->hook_udata) {
259 STAILQ_REMOVE(&hhh->hhh_hooks, tmp, hhook, hhk_next);
260 free(tmp, M_HHOOK);
261 hhh->hhh_nhooks--;
262 break;
263 }
264 }
265 HHH_WUNLOCK(hhh);
266
267 return (0);
268 }
269
270 /*
271 * Remove a helper hook function from a helper hook point (including all
272 * virtual instances of the hook point if it is virtualised).
273 */
274 int
275 hhook_remove_hook_lookup(struct hookinfo *hki)
276 {
277 struct hhook_head *hhh;
278
279 HHHLIST_LOCK();
280 LIST_FOREACH(hhh, &hhook_head_list, hhh_next) {
281 if (hhh->hhh_type == hki->hook_type &&
282 hhh->hhh_id == hki->hook_id)
283 hhook_remove_hook(hhh, hki);
284 }
285 HHHLIST_UNLOCK();
286
287 return (0);
288 }
289
290 /*
291 * Register a new helper hook point.
292 */
int
hhook_head_register(int32_t hhook_type, int32_t hhook_id, struct hhook_head **hhh,
    uint32_t flags)
{
	struct hhook_head *tmphhh;

	/* hhook_head_get() bumps the refcount on a match; drop it below. */
	tmphhh = hhook_head_get(hhook_type, hhook_id);

	if (tmphhh != NULL) {
		/* Hook point previously registered. */
		hhook_head_release(tmphhh);
		return (EEXIST);
	}

	tmphhh = malloc(sizeof(struct hhook_head), M_HHOOK,
	    M_ZERO | ((flags & HHOOK_WAITOK) ? M_WAITOK : M_NOWAIT));

	if (tmphhh == NULL)
		return (ENOMEM);

	tmphhh->hhh_type = hhook_type;
	tmphhh->hhh_id = hhook_id;
	tmphhh->hhh_nhooks = 0;
	STAILQ_INIT(&tmphhh->hhh_hooks);
	HHH_LOCK_INIT(tmphhh);
	/* Initial reference is handed to the caller, or dropped below. */
	refcount_init(&tmphhh->hhh_refcount, 1);

	HHHLIST_LOCK();
	if (flags & HHOOK_HEADISINVNET) {
		tmphhh->hhh_flags |= HHH_ISINVNET;
#ifdef VIMAGE
		KASSERT(curvnet != NULL, ("curvnet is NULL"));
		tmphhh->hhh_vid = (uintptr_t)curvnet;
		LIST_INSERT_HEAD(&V_hhook_vhead_list, tmphhh, hhh_vnext);
#endif
	}
	LIST_INSERT_HEAD(&hhook_head_list, tmphhh, hhh_next);
	n_hhookheads++;
	HHHLIST_UNLOCK();

	/*
	 * Let khelp attach any interested helpers to the new hook point;
	 * called with the list lock dropped as it may call hhook_add_hook().
	 */
	khelp_new_hhook_registered(tmphhh, flags);

	if (hhh != NULL)
		*hhh = tmphhh;
	else
		refcount_release(&tmphhh->hhh_refcount);

	return (0);
}
342
343 static void
344 hhook_head_destroy(struct hhook_head *hhh)
345 {
346 struct hhook *tmp, *tmp2;
347
348 HHHLIST_LOCK_ASSERT();
349 KASSERT(n_hhookheads > 0, ("n_hhookheads should be > 0"));
350
351 LIST_REMOVE(hhh, hhh_next);
352 #ifdef VIMAGE
353 if (hhook_head_is_virtualised(hhh) == HHOOK_HEADISINVNET)
354 LIST_REMOVE(hhh, hhh_vnext);
355 #endif
356 HHH_WLOCK(hhh);
357 STAILQ_FOREACH_SAFE(tmp, &hhh->hhh_hooks, hhk_next, tmp2)
358 free(tmp, M_HHOOK);
359 HHH_WUNLOCK(hhh);
360 HHH_LOCK_DESTROY(hhh);
361 free(hhh, M_HHOOK);
362 n_hhookheads--;
363 }
364
365 /*
366 * Remove a helper hook point.
367 */
368 int
369 hhook_head_deregister(struct hhook_head *hhh)
370 {
371 int error;
372
373 error = 0;
374
375 HHHLIST_LOCK();
376 if (hhh == NULL)
377 error = ENOENT;
378 else if (hhh->hhh_refcount > 1)
379 error = EBUSY;
380 else
381 hhook_head_destroy(hhh);
382 HHHLIST_UNLOCK();
383
384 return (error);
385 }
386
387 /*
388 * Remove a helper hook point via a hhook_head lookup.
389 */
390 int
391 hhook_head_deregister_lookup(int32_t hhook_type, int32_t hhook_id)
392 {
393 struct hhook_head *hhh;
394 int error;
395
396 hhh = hhook_head_get(hhook_type, hhook_id);
397 error = hhook_head_deregister(hhh);
398
399 if (error == EBUSY)
400 hhook_head_release(hhh);
401
402 return (error);
403 }
404
405 /*
406 * Lookup and return the hhook_head struct associated with the specified type
407 * and id, or NULL if not found. If found, the hhook_head's refcount is bumped.
408 */
struct hhook_head *
hhook_head_get(int32_t hhook_type, int32_t hhook_id)
{
	struct hhook_head *hhh;

	HHHLIST_LOCK();
	LIST_FOREACH(hhh, &hhook_head_list, hhh_next) {
		if (hhh->hhh_type == hhook_type && hhh->hhh_id == hhook_id) {
#ifdef VIMAGE
			/*
			 * A virtualised hook point only matches if it belongs
			 * to the current vnet; otherwise keep searching for
			 * this vnet's instance of the same type/id.
			 */
			if (hhook_head_is_virtualised(hhh) ==
			    HHOOK_HEADISINVNET) {
				KASSERT(curvnet != NULL, ("curvnet is NULL"));
				if (hhh->hhh_vid != (uintptr_t)curvnet)
					continue;
			}
#endif
			refcount_acquire(&hhh->hhh_refcount);
			break;
		}
	}
	HHHLIST_UNLOCK();

	/* NULL if the loop ran to completion without a match. */
	return (hhh);
}
433
/*
 * Drop a reference previously obtained via hhook_head_get() or
 * hhook_head_register().
 */
void
hhook_head_release(struct hhook_head *hhh)
{

	refcount_release(&hhh->hhh_refcount);
}
440
441 /*
442 * Check the hhook_head private flags and return the appropriate public
443 * representation of the flag to the caller. The function is implemented in a
444 * way that allows us to cope with other subsystems becoming virtualised in the
445 * future.
446 */
447 uint32_t
448 hhook_head_is_virtualised(struct hhook_head *hhh)
449 {
450 uint32_t ret;
451
452 ret = 0;
453
454 if (hhh != NULL) {
455 if (hhh->hhh_flags & HHH_ISINVNET)
456 ret = HHOOK_HEADISINVNET;
457 }
458
459 return (ret);
460 }
461
462 uint32_t
463 hhook_head_is_virtualised_lookup(int32_t hook_type, int32_t hook_id)
464 {
465 struct hhook_head *hhh;
466 uint32_t ret;
467
468 hhh = hhook_head_get(hook_type, hook_id);
469
470 if (hhh == NULL)
471 return (0);
472
473 ret = hhook_head_is_virtualised(hhh);
474 hhook_head_release(hhh);
475
476 return (ret);
477 }
478
479 /*
480 * Vnet created and being initialised.
481 */
/*
 * Vnet created and being initialised: set up the per-vnet list of
 * virtualised hook points.
 */
static void
hhook_vnet_init(const void *unused __unused)
{

	LIST_INIT(&V_hhook_vhead_list);
}
488
489 /*
490 * Vnet being torn down and destroyed.
491 */
492 static void
493 hhook_vnet_uninit(const void *unused __unused)
494 {
495 struct hhook_head *hhh, *tmphhh;
496
497 /*
498 * If subsystems which export helper hook points use the hhook KPI
499 * correctly, the loop below should have no work to do because the
500 * subsystem should have already called hhook_head_deregister().
501 */
502 HHHLIST_LOCK();
503 LIST_FOREACH_SAFE(hhh, &V_hhook_vhead_list, hhh_vnext, tmphhh) {
504 printf("%s: hhook_head type=%d, id=%d cleanup required\n",
505 __func__, hhh->hhh_type, hhh->hhh_id);
506 hhook_head_destroy(hhh);
507 }
508 HHHLIST_UNLOCK();
509 }
510
511
/*
 * When a vnet is created and being initialised, init the V_hhook_vhead_list
 * early (SI_SUB_INIT_IF) so it is ready before any hook points register.
 */
VNET_SYSINIT(hhook_vnet_init, SI_SUB_INIT_IF, SI_ORDER_FIRST,
    hhook_vnet_init, NULL);

/*
 * The hhook KPI provides a mechanism for subsystems which export helper hook
 * points to clean up on vnet tear down, but in case the KPI is misused,
 * provide a function to clean up and free memory for a vnet being destroyed.
 */
VNET_SYSUNINIT(hhook_vnet_uninit, SI_SUB_INIT_IF, SI_ORDER_FIRST,
    hhook_vnet_uninit, NULL);
/* Cache object: 147a227673d4ac78bb2675d0a1af40e1 */