1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2018 The FreeBSD Foundation
5 * All rights reserved.
6 *
7 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
8 * under sponsorship from the FreeBSD Foundation.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD: releng/12.0/sys/i386/i386/copyout.c 337051 2018-08-01 18:45:51Z kib $");
34
35 #include <sys/param.h>
36 #include <sys/lock.h>
37 #include <sys/mutex.h>
38 #include <sys/pcpu.h>
39 #include <sys/proc.h>
40 #include <sys/sched.h>
41 #include <sys/sysctl.h>
42 #include <sys/systm.h>
43 #include <vm/vm.h>
44 #include <vm/vm_param.h>
45 #include <vm/vm_extern.h>
46 #include <vm/pmap.h>
47 #include <vm/vm_map.h>
48 #include <vm/vm_page.h>
49
/*
 * Kernel %cr3 value: the physical root of the kernel page tables,
 * passed to the trampoline primitives so they can restore the kernel
 * address space.  Under PAE the root is the PDPT, otherwise the PTD.
 */
#if defined(PAE) || defined(PAE_TABLES)
#define	KCR3	((u_int)IdlePDPT)
#else
#define	KCR3	((u_int)IdlePTD)
#endif

/*
 * Fast user-access primitives implemented in trampoline code.  The
 * kernel must call them through the *_tramp pointers, which
 * copyout_init_tramp() fixes up by the trampoline displacement
 * (setidt_disp) to point at the relocated code.
 */
int copyin_fast(const void *udaddr, void *kaddr, size_t len, u_int);
static int (*copyin_fast_tramp)(const void *, void *, size_t, u_int);
int copyout_fast(const void *kaddr, void *udaddr, size_t len, u_int);
static int (*copyout_fast_tramp)(const void *, void *, size_t, u_int);
int fubyte_fast(volatile const void *base, u_int kcr3);
static int (*fubyte_fast_tramp)(volatile const void *, u_int);
int fuword16_fast(volatile const void *base, u_int kcr3);
static int (*fuword16_fast_tramp)(volatile const void *, u_int);
int fueword_fast(volatile const void *base, long *val, u_int kcr3);
static int (*fueword_fast_tramp)(volatile const void *, long *, u_int);
int subyte_fast(volatile void *base, int val, u_int kcr3);
static int (*subyte_fast_tramp)(volatile void *, int, u_int);
int suword16_fast(volatile void *base, int val, u_int kcr3);
static int (*suword16_fast_tramp)(volatile void *, int, u_int);
int suword_fast(volatile void *base, long val, u_int kcr3);
static int (*suword_fast_tramp)(volatile void *, long, u_int);

/*
 * Tunable: when non-zero, try the trampoline fast path before falling
 * back to the page-mapping slow path (cp_slow0()).
 */
static int fast_copyout = 1;
SYSCTL_INT(_machdep, OID_AUTO, fast_copyout, CTLFLAG_RWTUN,
    &fast_copyout, 0,
    "");
77
/*
 * Initialize the trampoline call pointers.  Each *_fast routine is
 * linked at its pre-relocation address; adding setidt_disp yields the
 * address where the trampoline code is actually mapped at run time.
 */
void
copyout_init_tramp(void)
{

	copyin_fast_tramp = (int (*)(const void *, void *, size_t, u_int))(
	    (uintptr_t)copyin_fast + setidt_disp);
	copyout_fast_tramp = (int (*)(const void *, void *, size_t, u_int))(
	    (uintptr_t)copyout_fast + setidt_disp);
	fubyte_fast_tramp = (int (*)(volatile const void *, u_int))(
	    (uintptr_t)fubyte_fast + setidt_disp);
	fuword16_fast_tramp = (int (*)(volatile const void *, u_int))(
	    (uintptr_t)fuword16_fast + setidt_disp);
	fueword_fast_tramp = (int (*)(volatile const void *, long *, u_int))(
	    (uintptr_t)fueword_fast + setidt_disp);
	subyte_fast_tramp = (int (*)(volatile void *, int, u_int))(
	    (uintptr_t)subyte_fast + setidt_disp);
	suword16_fast_tramp = (int (*)(volatile void *, int, u_int))(
	    (uintptr_t)suword16_fast + setidt_disp);
	suword_fast_tramp = (int (*)(volatile void *, long, u_int))(
	    (uintptr_t)suword_fast + setidt_disp);
}
99
/*
 * Slow path for user space access: hold the user pages backing
 * [uva, uva + len) and map them into a per-CPU kernel window, then run
 * the callback f(kaddr, arg) on that mapping.  m[] has two entries, so
 * the range may span at most two pages (callers pass chunks of at most
 * PAGE_SIZE bytes).
 *
 * write selects VM_PROT_WRITE vs. VM_PROT_READ for holding the pages.
 * Returns 0 on success or EFAULT if the user pages could not be held.
 */
int
cp_slow0(vm_offset_t uva, size_t len, bool write,
    void (*f)(vm_offset_t, void *), void *arg)
{
	struct pcpu *pc;
	vm_page_t m[2];
	pt_entry_t *pte;
	vm_offset_t kaddr;
	int error, i, plen;
	bool sleepable;

	/* Number of pages touched by the (page-unaligned) range. */
	plen = howmany(uva - trunc_page(uva) + len, PAGE_SIZE);
	MPASS(plen <= nitems(m));
	error = 0;
	i = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map, uva, len,
	    (write ? VM_PROT_WRITE : VM_PROT_READ) | VM_PROT_QUICK_NOFAULT,
	    m, nitems(m));
	if (i != plen)
		return (EFAULT);
	/* Pin: the mapping window and its lock are per-CPU. */
	sched_pin();
	pc = get_pcpu();
	if (!THREAD_CAN_SLEEP() || curthread->td_vslock_sz > 0 ||
	    (curthread->td_pflags & TDP_NOFAULTING) != 0) {
		/* Non-sleepable context: use the mutex-protected window. */
		sleepable = false;
		mtx_lock(&pc->pc_copyout_mlock);
		kaddr = pc->pc_copyout_maddr;
	} else {
		/* Sleepable context: use the sx-protected window. */
		sleepable = true;
		sx_xlock(&pc->pc_copyout_slock);
		kaddr = pc->pc_copyout_saddr;
	}
	/* Enter PTEs for the held pages and flush stale TLB entries. */
	for (i = 0, pte = vtopte(kaddr); i < plen; i++, pte++) {
		*pte = PG_V | PG_RW | PG_A | PG_M | VM_PAGE_TO_PHYS(m[i]) |
		    pmap_cache_bits(kernel_pmap, pmap_page_get_memattr(m[i]),
		    FALSE);
		invlpg(kaddr + ptoa(i));
	}
	/* Apply uva's offset within its page to the window address. */
	kaddr += uva - trunc_page(uva);
	f(kaddr, arg);
	sched_unpin();
	if (sleepable)
		sx_xunlock(&pc->pc_copyout_slock);
	else
		mtx_unlock(&pc->pc_copyout_mlock);
	vm_page_unhold_pages(m, plen);
	return (error);
}
147
/*
 * Per-chunk state for copyinstr_slow0(), carried across cp_slow0() calls
 * by copyinstr().
 */
struct copyinstr_arg0 {
	vm_offset_t kc;		/* current kernel destination address */
	size_t len;		/* bytes to scan in this chunk */
	size_t alen;		/* bytes actually copied from this chunk */
	bool end;		/* NUL terminator was found */
};
154
155 static void
156 copyinstr_slow0(vm_offset_t kva, void *arg)
157 {
158 struct copyinstr_arg0 *ca;
159 char c;
160
161 ca = arg;
162 MPASS(ca->alen == 0 && ca->len > 0 && !ca->end);
163 while (ca->alen < ca->len && !ca->end) {
164 c = *(char *)(kva + ca->alen);
165 *(char *)ca->kc = c;
166 ca->alen++;
167 ca->kc++;
168 if (c == '\0')
169 ca->end = true;
170 }
171 }
172
/*
 * Copy a NUL-terminated string from user space (udaddr) to kernel space
 * (kaddr), at most maxlen bytes including the terminator.  When
 * lencopied is non-NULL the number of bytes copied is stored there.
 * Returns 0 on success, EFAULT on fault, or ENAMETOOLONG if no
 * terminator was found within maxlen bytes.
 */
int
copyinstr(const void *udaddr, void *kaddr, size_t maxlen, size_t *lencopied)
{
	struct copyinstr_arg0 ca;
	vm_offset_t uc;
	size_t plen;
	int error;

	error = 0;
	ca.end = false;
	/*
	 * Process the string in chunks that never cross a user page
	 * boundary, so each cp_slow0() call stays within one page.
	 */
	for (plen = 0, uc = (vm_offset_t)udaddr, ca.kc = (vm_offset_t)kaddr;
	    plen < maxlen && !ca.end; uc += ca.alen, plen += ca.alen) {
		/* Bytes left on the current user page. */
		ca.len = round_page(uc) - uc;
		if (ca.len == 0)
			ca.len = PAGE_SIZE;
		if (plen + ca.len > maxlen)
			ca.len = maxlen - plen;
		ca.alen = 0;
		if (cp_slow0(uc, ca.len, false, copyinstr_slow0, &ca) != 0) {
			error = EFAULT;
			break;
		}
	}
	if (!ca.end && plen == maxlen && error == 0)
		error = ENAMETOOLONG;
	if (lencopied != NULL)
		*lencopied = plen;
	return (error);
}
202
/*
 * Shared per-chunk argument for copyin_slow0() and copyout_slow0():
 * the kernel-side buffer address and the current chunk length.
 */
struct copyin_arg0 {
	vm_offset_t kc;		/* kernel buffer address */
	size_t len;		/* chunk length in bytes */
};
207
208 static void
209 copyin_slow0(vm_offset_t kva, void *arg)
210 {
211 struct copyin_arg0 *ca;
212
213 ca = arg;
214 bcopy((void *)kva, (void *)ca->kc, ca->len);
215 }
216
/*
 * Copy len bytes from user address udaddr to kernel address kaddr.
 * Returns 0 on success or EFAULT.
 */
int
copyin(const void *udaddr, void *kaddr, size_t len)
{
	struct copyin_arg0 ca;
	vm_offset_t uc;
	size_t plen;

	/* Reject ranges that wrap or extend beyond user space. */
	if ((uintptr_t)udaddr + len < (uintptr_t)udaddr ||
	    (uintptr_t)udaddr + len > VM_MAXUSER_ADDRESS)
		return (EFAULT);
	/*
	 * Small copies go through the trampoline fast path; fall
	 * through to the slow path if it faults.
	 */
	if (len == 0 || (fast_copyout && len <= TRAMP_COPYOUT_SZ &&
	    copyin_fast_tramp(udaddr, kaddr, len, KCR3) == 0))
		return (0);
	/* Slow path: copy one user page (or less) at a time. */
	for (plen = 0, uc = (vm_offset_t)udaddr, ca.kc = (vm_offset_t)kaddr;
	    plen < len; uc += ca.len, ca.kc += ca.len, plen += ca.len) {
		ca.len = round_page(uc) - uc;
		if (ca.len == 0)
			ca.len = PAGE_SIZE;
		if (plen + ca.len > len)
			ca.len = len - plen;
		if (cp_slow0(uc, ca.len, false, copyin_slow0, &ca) != 0)
			return (EFAULT);
	}
	return (0);
}
242
243 static void
244 copyout_slow0(vm_offset_t kva, void *arg)
245 {
246 struct copyin_arg0 *ca;
247
248 ca = arg;
249 bcopy((void *)ca->kc, (void *)kva, ca->len);
250 }
251
/*
 * Copy len bytes from kernel address kaddr to user address udaddr.
 * Returns 0 on success or EFAULT.
 */
int
copyout(const void *kaddr, void *udaddr, size_t len)
{
	struct copyin_arg0 ca;
	vm_offset_t uc;
	size_t plen;

	/* Reject ranges that wrap or extend beyond user space. */
	if ((uintptr_t)udaddr + len < (uintptr_t)udaddr ||
	    (uintptr_t)udaddr + len > VM_MAXUSER_ADDRESS)
		return (EFAULT);
	/*
	 * Small copies go through the trampoline fast path; fall
	 * through to the slow path if it faults.
	 */
	if (len == 0 || (fast_copyout && len <= TRAMP_COPYOUT_SZ &&
	    copyout_fast_tramp(kaddr, udaddr, len, KCR3) == 0))
		return (0);
	/* Slow path: copy one user page (or less) at a time. */
	for (plen = 0, uc = (vm_offset_t)udaddr, ca.kc = (vm_offset_t)kaddr;
	    plen < len; uc += ca.len, ca.kc += ca.len, plen += ca.len) {
		ca.len = round_page(uc) - uc;
		if (ca.len == 0)
			ca.len = PAGE_SIZE;
		if (plen + ca.len > len)
			ca.len = len - plen;
		if (cp_slow0(uc, ca.len, true, copyout_slow0, &ca) != 0)
			return (EFAULT);
	}
	return (0);
}
277
278 /*
279 * Fetch (load) a 32-bit word, a 16-bit word, or an 8-bit byte from user
280 * memory.
281 */
282
283 static void
284 fubyte_slow0(vm_offset_t kva, void *arg)
285 {
286
287 *(int *)arg = *(u_char *)kva;
288 }
289
/*
 * Fetch one byte from user address base.  Returns the byte value
 * (zero-extended) or -1 on fault.
 */
int
fubyte(volatile const void *base)
{
	int res;

	/* Reject wrapping or out-of-user-range addresses. */
	if ((uintptr_t)base + sizeof(uint8_t) < (uintptr_t)base ||
	    (uintptr_t)base + sizeof(uint8_t) > VM_MAXUSER_ADDRESS)
		return (-1);
	/* Trampoline fast path; -1 means it faulted, try the slow path. */
	if (fast_copyout) {
		res = fubyte_fast_tramp(base, KCR3);
		if (res != -1)
			return (res);
	}
	if (cp_slow0((vm_offset_t)base, sizeof(char), false, fubyte_slow0,
	    &res) != 0)
		return (-1);
	return (res);
}
308
309 static void
310 fuword16_slow0(vm_offset_t kva, void *arg)
311 {
312
313 *(int *)arg = *(uint16_t *)kva;
314 }
315
/*
 * Fetch a 16-bit word from user address base.  Returns the value
 * (zero-extended) or -1 on fault.
 */
int
fuword16(volatile const void *base)
{
	int res;

	/* Reject wrapping or out-of-user-range addresses. */
	if ((uintptr_t)base + sizeof(uint16_t) < (uintptr_t)base ||
	    (uintptr_t)base + sizeof(uint16_t) > VM_MAXUSER_ADDRESS)
		return (-1);
	/* Trampoline fast path; -1 means it faulted, try the slow path. */
	if (fast_copyout) {
		res = fuword16_fast_tramp(base, KCR3);
		if (res != -1)
			return (res);
	}
	if (cp_slow0((vm_offset_t)base, sizeof(uint16_t), false,
	    fuword16_slow0, &res) != 0)
		return (-1);
	return (res);
}
334
335 static void
336 fueword_slow0(vm_offset_t kva, void *arg)
337 {
338
339 *(uint32_t *)arg = *(uint32_t *)kva;
340 }
341
/*
 * Fetch a word (32 bits on i386) from user address base into *val.
 * Returns 0 on success or -1 on fault; the fetched value is returned
 * separately in *val, so -1 is not overloaded as an error indicator.
 */
int
fueword(volatile const void *base, long *val)
{
	uint32_t res;

	/* Reject wrapping or out-of-user-range addresses. */
	if ((uintptr_t)base + sizeof(*val) < (uintptr_t)base ||
	    (uintptr_t)base + sizeof(*val) > VM_MAXUSER_ADDRESS)
		return (-1);
	/* Trampoline fast path; fall through to slow path on fault. */
	if (fast_copyout) {
		if (fueword_fast_tramp(base, val, KCR3) == 0)
			return (0);
	}
	if (cp_slow0((vm_offset_t)base, sizeof(long), false, fueword_slow0,
	    &res) != 0)
		return (-1);
	*val = res;
	return (0);
}
360
/*
 * Fetch a 32-bit word from user space.  On i386 long is 32 bits, so
 * this is the same operation as fueword().
 */
int
fueword32(volatile const void *base, int32_t *val)
{

	return (fueword(base, (long *)val));
}
367
368 /*
369 * Store a 32-bit word, a 16-bit word, or an 8-bit byte to user memory.
370 */
371
372 static void
373 subyte_slow0(vm_offset_t kva, void *arg)
374 {
375
376 *(u_char *)kva = *(int *)arg;
377 }
378
/*
 * Store one byte to user address base.  Returns 0 on success or -1 on
 * fault.
 */
int
subyte(volatile void *base, int byte)
{

	/* Reject wrapping or out-of-user-range addresses. */
	if ((uintptr_t)base + sizeof(uint8_t) < (uintptr_t)base ||
	    (uintptr_t)base + sizeof(uint8_t) > VM_MAXUSER_ADDRESS)
		return (-1);
	/* Trampoline fast path; fall through to slow path on fault. */
	if (fast_copyout && subyte_fast_tramp(base, byte, KCR3) == 0)
		return (0);
	return (cp_slow0((vm_offset_t)base, sizeof(u_char), true, subyte_slow0,
	    &byte) != 0 ? -1 : 0);
}
391
392 static void
393 suword16_slow0(vm_offset_t kva, void *arg)
394 {
395
396 *(int *)kva = *(uint16_t *)arg;
397 }
398
/*
 * Store a 16-bit word to user address base.  Returns 0 on success or
 * -1 on fault.
 */
int
suword16(volatile void *base, int word)
{

	/* Reject wrapping or out-of-user-range addresses. */
	if ((uintptr_t)base + sizeof(uint16_t) < (uintptr_t)base ||
	    (uintptr_t)base + sizeof(uint16_t) > VM_MAXUSER_ADDRESS)
		return (-1);
	/* Trampoline fast path; fall through to slow path on fault. */
	if (fast_copyout && suword16_fast_tramp(base, word, KCR3) == 0)
		return (0);
	return (cp_slow0((vm_offset_t)base, sizeof(int16_t), true,
	    suword16_slow0, &word) != 0 ? -1 : 0);
}
411
/*
 * Slow-path store of a machine word through the temporary kernel
 * mapping.  On i386 int, long and uint32_t are all 32 bits wide, so
 * reading the long argument through uint32_t and storing through int
 * is a full-width store.
 */
static void
suword_slow0(vm_offset_t kva, void *arg)
{

	*(int *)kva = *(uint32_t *)arg;
}
418
/*
 * Store a word (32 bits on i386) to user address base.  Returns 0 on
 * success or -1 on fault.
 */
int
suword(volatile void *base, long word)
{

	/* Reject wrapping or out-of-user-range addresses. */
	if ((uintptr_t)base + sizeof(word) < (uintptr_t)base ||
	    (uintptr_t)base + sizeof(word) > VM_MAXUSER_ADDRESS)
		return (-1);
	/* Trampoline fast path; fall through to slow path on fault. */
	if (fast_copyout && suword_fast_tramp(base, word, KCR3) == 0)
		return (0);
	return (cp_slow0((vm_offset_t)base, sizeof(long), true,
	    suword_slow0, &word) != 0 ? -1 : 0);
}
431
/*
 * Store a 32-bit word to user space.  On i386 long is 32 bits, so this
 * is the same operation as suword().
 */
int
suword32(volatile void *base, int32_t word)
{

	return (suword(base, word));
}
438
/*
 * Argument for casueword_slow0(): the expected and replacement values
 * for the compare-and-swap.  On comparison failure, oldval is updated
 * with the value actually found.
 */
struct casueword_arg0 {
	uint32_t oldval;	/* expected value; updated on CAS failure */
	uint32_t newval;	/* value to store on match */
};
443
/*
 * Slow-path compare-and-swap on the mapped user word.  When the
 * comparison fails, atomic_fcmpset_int() stores the value actually
 * found into ca->oldval, which the callers return via *oldvalp.
 */
static void
casueword_slow0(vm_offset_t kva, void *arg)
{
	struct casueword_arg0 *ca;

	ca = arg;
	atomic_fcmpset_int((u_int *)kva, &ca->oldval, ca->newval);
}
452
453 int
454 casueword32(volatile uint32_t *base, uint32_t oldval, uint32_t *oldvalp,
455 uint32_t newval)
456 {
457 struct casueword_arg0 ca;
458 int res;
459
460 ca.oldval = oldval;
461 ca.newval = newval;
462 res = cp_slow0((vm_offset_t)base, sizeof(int32_t), true,
463 casueword_slow0, &ca);
464 if (res == 0) {
465 *oldvalp = ca.oldval;
466 return (0);
467 }
468 return (-1);
469 }
470
471 int
472 casueword(volatile u_long *base, u_long oldval, u_long *oldvalp, u_long newval)
473 {
474 struct casueword_arg0 ca;
475 int res;
476
477 ca.oldval = oldval;
478 ca.newval = newval;
479 res = cp_slow0((vm_offset_t)base, sizeof(int32_t), true,
480 casueword_slow0, &ca);
481 if (res == 0) {
482 *oldvalp = ca.oldval;
483 return (0);
484 }
485 return (-1);
486 }
Cache object: acd614d8661574605fb8543cee28857b
|