/*-
 * Copyright (c) 2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
#include <linux/irq_work.h>
#include <linux/llist.h>

#include <sys/param.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>

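/*
 * For SLAB_TYPESAFE_BY_RCU caches a struct linux_kmem_rcu trailer is
 * embedded at the end of every item.  It carries the rcu_head used to
 * defer the actual uma_zfree() until after an RCU grace period, plus a
 * back pointer to the owning cache.
 */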
struct linux_kmem_rcu {
	struct rcu_head rcu_head;
	struct linux_kmem_cache *cache;
};

struct linux_kmem_cache {
	uma_zone_t cache_zone;
	linux_kmem_ctor_t *cache_ctor;
	unsigned cache_flags;
	unsigned cache_size;
	struct llist_head cache_items;
	struct task cache_task;
};

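/*
 * Convert between an item address and the address of its embedded RCU
 * trailer.  The trailer occupies the last sizeof(struct linux_kmem_rcu)
 * bytes of each item, so both directions are a fixed offset derived
 * from cache_size.
 */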
#define	LINUX_KMEM_TO_RCU(c, m)					\
	((struct linux_kmem_rcu *)((char *)(m) +		\
	(c)->cache_size - sizeof(struct linux_kmem_rcu)))

#define	LINUX_RCU_TO_KMEM(r)					\
	((void *)((char *)(r) + sizeof(struct linux_kmem_rcu) - \
	(r)->cache->cache_size))

static LLIST_HEAD(linux_kfree_async_list);

static void lkpi_kmem_cache_free_async_fn(void *, int);

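/*
 * Allocate one item from the cache.  The cache pointer is passed as the
 * UMA constructor argument so that linux_kmem_ctor() can locate its
 * cache.
 */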
void *
lkpi_kmem_cache_alloc(struct linux_kmem_cache *c, gfp_t flags)
{
	return (uma_zalloc_arg(c->cache_zone, c,
	    linux_check_m_flags(flags)));
}

void *
lkpi_kmem_cache_zalloc(struct linux_kmem_cache *c, gfp_t flags)
{
	return (uma_zalloc_arg(c->cache_zone, c,
	    linux_check_m_flags(flags | M_ZERO)));
}

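/*
 * UMA item constructor.  For RCU-typesafe caches, initialize the back
 * pointer in the RCU trailer before chaining to the Linux-style
 * constructor, if any.
 */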
static int
linux_kmem_ctor(void *mem, int size, void *arg, int flags)
{
	struct linux_kmem_cache *c = arg;

	if (unlikely(c->cache_flags & SLAB_TYPESAFE_BY_RCU)) {
		struct linux_kmem_rcu *rcu = LINUX_KMEM_TO_RCU(c, mem);

		/* duplicate cache pointer */
		rcu->cache = c;
	}

	/* check for constructor */
	if (likely(c->cache_ctor != NULL))
		c->cache_ctor(mem);

	return (0);
}

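/*
 * RCU callback: a grace period has elapsed, so the item can now safely
 * be returned to UMA.
 */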
static void
linux_kmem_cache_free_rcu_callback(struct rcu_head *head)
{
	struct linux_kmem_rcu *rcu =
	    container_of(head, struct linux_kmem_rcu, rcu_head);

	uma_zfree(rcu->cache->cache_zone, LINUX_RCU_TO_KMEM(rcu));
}

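/*
 * Create a Linux-compatible kmem cache backed by a UMA zone.  UMA
 * expects the alignment argument as a mask, hence the "align - 1"
 * below; SLAB_HWCACHE_ALIGN maps to UMA_ALIGN_CACHE.  Caches created
 * with SLAB_TYPESAFE_BY_RCU get extra room at the end of each item for
 * the RCU trailer and always install linux_kmem_ctor() so the
 * trailer's back pointer is set.  Items of other caches must at least
 * be able to hold a struct llist_node, which is overlaid on free items
 * queued for asynchronous freeing.
 *
 * Illustrative consumer, using a hypothetical driver object; in the
 * LinuxKPI headers the Linux kmem_cache_*() API resolves to the
 * lkpi_/linux_ functions in this file:
 *
 *	struct kmem_cache *zone;
 *	struct mydrv_obj *obj;
 *
 *	zone = kmem_cache_create("mydrv", sizeof(*obj), 0,
 *	    SLAB_HWCACHE_ALIGN, NULL);
 *	obj = kmem_cache_zalloc(zone, GFP_KERNEL);
 *	...
 *	kmem_cache_free(zone, obj);
 *	kmem_cache_destroy(zone);
 */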
struct linux_kmem_cache *
linux_kmem_cache_create(const char *name, size_t size, size_t align,
    unsigned flags, linux_kmem_ctor_t *ctor)
{
	struct linux_kmem_cache *c;

	c = malloc(sizeof(*c), M_KMALLOC, M_WAITOK);

	if (flags & SLAB_HWCACHE_ALIGN)
		align = UMA_ALIGN_CACHE;
	else if (align != 0)
		align--;

	if (flags & SLAB_TYPESAFE_BY_RCU) {
		/* make room for RCU structure */
		size = ALIGN(size, sizeof(void *));
		size += sizeof(struct linux_kmem_rcu);

		/* create cache_zone */
		c->cache_zone = uma_zcreate(name, size,
		    linux_kmem_ctor, NULL, NULL, NULL,
		    align, UMA_ZONE_ZINIT);
	} else {
		/* make room for async task list items */
		size = MAX(size, sizeof(struct llist_node));

		/* create cache_zone */
		c->cache_zone = uma_zcreate(name, size,
		    ctor ? linux_kmem_ctor : NULL, NULL,
		    NULL, NULL, align, 0);
	}

	c->cache_flags = flags;
	c->cache_ctor = ctor;
	c->cache_size = size;
	init_llist_head(&c->cache_items);
	TASK_INIT(&c->cache_task, 0, lkpi_kmem_cache_free_async_fn, c);
	return (c);
}

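/*
 * Defer freeing an RCU-typesafe item until after a grace period.
 */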
static inline void
lkpi_kmem_cache_free_rcu(struct linux_kmem_cache *c, void *m)
{
	struct linux_kmem_rcu *rcu = LINUX_KMEM_TO_RCU(c, m);

	call_rcu(&rcu->rcu_head, linux_kmem_cache_free_rcu_callback);
}

static inline void
lkpi_kmem_cache_free_sync(struct linux_kmem_cache *c, void *m)
{
	uma_zfree(c->cache_zone, m);
}

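/*
 * Taskqueue handler draining the per-cache list of items freed from a
 * critical section, where calling into UMA directly is not permitted.
 * A struct llist_node is overlaid on each free item to link it onto
 * the list until this task runs.
 */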
static void
lkpi_kmem_cache_free_async_fn(void *context, int pending)
{
	struct linux_kmem_cache *c = context;
	struct llist_node *freed, *next;

	llist_for_each_safe(freed, next, llist_del_all(&c->cache_items))
		lkpi_kmem_cache_free_sync(c, freed);
}

static inline void
lkpi_kmem_cache_free_async(struct linux_kmem_cache *c, void *m)
{
	if (m == NULL)
		return;

	llist_add(m, &c->cache_items);
	taskqueue_enqueue(linux_irq_work_tq, &c->cache_task);
}

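/*
 * Free an item via the appropriate path: RCU-deferred for
 * SLAB_TYPESAFE_BY_RCU caches, taskqueue-deferred when running inside
 * a critical section, and a direct uma_zfree() otherwise.
 */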
void
lkpi_kmem_cache_free(struct linux_kmem_cache *c, void *m)
{
	if (unlikely(c->cache_flags & SLAB_TYPESAFE_BY_RCU))
		lkpi_kmem_cache_free_rcu(c, m);
	else if (unlikely(curthread->td_critnest != 0))
		lkpi_kmem_cache_free_async(c, m);
	else
		lkpi_kmem_cache_free_sync(c, m);
}

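/*
 * Destroy a cache.  Wait for any outstanding RCU free callbacks first,
 * then make sure asynchronously freed items have been drained back
 * into UMA before the zone itself is destroyed.
 */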
void
linux_kmem_cache_destroy(struct linux_kmem_cache *c)
{
	if (c == NULL)
		return;

	if (unlikely(c->cache_flags & SLAB_TYPESAFE_BY_RCU)) {
		/* make sure all free callbacks have been called */
		rcu_barrier();
	}

	if (!llist_empty(&c->cache_items))
		taskqueue_enqueue(linux_irq_work_tq, &c->cache_task);
	taskqueue_drain(linux_irq_work_tq, &c->cache_task);
	uma_zdestroy(c->cache_zone);
	free(c, M_KMALLOC);
}

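/*
 * Deferred kfree() support.  linux_kfree_async() is used from contexts
 * where an immediate free is unsafe (e.g. inside a critical section):
 * the address is stashed on a lock-free list, with a struct llist_node
 * overlaid on the freed memory, and released from taskqueue context.
 */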
static void
linux_kfree_async_fn(void *context, int pending)
{
	struct llist_node *freed;

	while ((freed = llist_del_first(&linux_kfree_async_list)) != NULL)
		kfree(freed);
}

static struct task linux_kfree_async_task =
    TASK_INITIALIZER(0, linux_kfree_async_fn, &linux_kfree_async_task);

void
linux_kfree_async(void *addr)
{
	if (addr == NULL)
		return;
	llist_add(addr, &linux_kfree_async_list);
	taskqueue_enqueue(linux_irq_work_tq, &linux_kfree_async_task);
}