1 /*-
2 * Copyright (c) 2015 Nuxi, https://nuxi.nl/
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 */
25
26 #include <sys/cdefs.h>
27 __FBSDID("$FreeBSD: releng/11.1/sys/compat/cloudabi/cloudabi_mem.c 316574 2017-04-06 15:10:36Z ed $");
28
29 #include <sys/param.h>
30 #include <sys/mman.h>
31 #include <sys/proc.h>
32 #include <sys/syscallsubr.h>
33
34 #include <contrib/cloudabi/cloudabi_types_common.h>
35
36 #include <compat/cloudabi/cloudabi_proto.h>
37
38 /* Converts CloudABI's memory protection flags to FreeBSD's. */
39 static int
40 convert_mprot(cloudabi_mprot_t in, int *out)
41 {
42
43 /* Unknown protection flags. */
44 if ((in & ~(CLOUDABI_PROT_EXEC | CLOUDABI_PROT_WRITE |
45 CLOUDABI_PROT_READ)) != 0)
46 return (ENOTSUP);
47 /* W^X: Write and exec cannot be enabled at the same time. */
48 if ((in & (CLOUDABI_PROT_EXEC | CLOUDABI_PROT_WRITE)) ==
49 (CLOUDABI_PROT_EXEC | CLOUDABI_PROT_WRITE))
50 return (ENOTSUP);
51
52 *out = 0;
53 if (in & CLOUDABI_PROT_EXEC)
54 *out |= PROT_EXEC;
55 if (in & CLOUDABI_PROT_WRITE)
56 *out |= PROT_WRITE;
57 if (in & CLOUDABI_PROT_READ)
58 *out |= PROT_READ;
59 return (0);
60 }
61
62 int
63 cloudabi_sys_mem_advise(struct thread *td,
64 struct cloudabi_sys_mem_advise_args *uap)
65 {
66 int behav;
67
68 switch (uap->advice) {
69 case CLOUDABI_ADVICE_DONTNEED:
70 behav = MADV_DONTNEED;
71 break;
72 case CLOUDABI_ADVICE_NORMAL:
73 behav = MADV_NORMAL;
74 break;
75 case CLOUDABI_ADVICE_RANDOM:
76 behav = MADV_RANDOM;
77 break;
78 case CLOUDABI_ADVICE_SEQUENTIAL:
79 behav = MADV_SEQUENTIAL;
80 break;
81 case CLOUDABI_ADVICE_WILLNEED:
82 behav = MADV_WILLNEED;
83 break;
84 default:
85 return (EINVAL);
86 }
87
88 return (kern_madvise(td, (uintptr_t)uap->mapping, uap->mapping_len,
89 behav));
90 }
91
/*
 * mem_lock(): wire the pages backing [mapping, mapping + mapping_len)
 * into physical memory, equivalent to mlock(2).  __DECONST drops the
 * const qualifier from the userspace pointer for the uintptr_t cast.
 */
int
cloudabi_sys_mem_lock(struct thread *td, struct cloudabi_sys_mem_lock_args *uap)
{

	return (kern_mlock(td->td_proc, td->td_ucred,
	    __DECONST(uintptr_t, uap->mapping), uap->mapping_len));
}
99
100 int
101 cloudabi_sys_mem_map(struct thread *td, struct cloudabi_sys_mem_map_args *uap)
102 {
103 int error, flags, prot;
104
105 /* Translate flags. */
106 flags = 0;
107 if (uap->flags & CLOUDABI_MAP_ANON)
108 flags |= MAP_ANON;
109 if (uap->flags & CLOUDABI_MAP_FIXED)
110 flags |= MAP_FIXED;
111 if (uap->flags & CLOUDABI_MAP_PRIVATE)
112 flags |= MAP_PRIVATE;
113 if (uap->flags & CLOUDABI_MAP_SHARED)
114 flags |= MAP_SHARED;
115
116 /* Translate protection. */
117 error = convert_mprot(uap->prot, &prot);
118 if (error != 0)
119 return (error);
120
121 return (kern_mmap(td, (uintptr_t)uap->addr, uap->len, prot, flags,
122 uap->fd, uap->off));
123 }
124
125 int
126 cloudabi_sys_mem_protect(struct thread *td,
127 struct cloudabi_sys_mem_protect_args *uap)
128 {
129 int error, prot;
130
131 /* Translate protection. */
132 error = convert_mprot(uap->prot, &prot);
133 if (error != 0)
134 return (error);
135
136 return (kern_mprotect(td, (uintptr_t)uap->mapping, uap->mapping_len,
137 prot));
138 }
139
140 int
141 cloudabi_sys_mem_sync(struct thread *td, struct cloudabi_sys_mem_sync_args *uap)
142 {
143 int flags;
144
145 /* Convert flags. */
146 switch (uap->flags & (CLOUDABI_MS_ASYNC | CLOUDABI_MS_SYNC)) {
147 case CLOUDABI_MS_ASYNC:
148 flags = MS_ASYNC;
149 break;
150 case CLOUDABI_MS_SYNC:
151 flags = MS_SYNC;
152 break;
153 default:
154 return (EINVAL);
155 }
156 if ((uap->flags & CLOUDABI_MS_INVALIDATE) != 0)
157 flags |= MS_INVALIDATE;
158
159 return (kern_msync(td, (uintptr_t)uap->mapping, uap->mapping_len,
160 flags));
161 }
162
/*
 * mem_unlock(): unwire the pages backing [mapping, mapping + mapping_len),
 * equivalent to munlock(2).  __DECONST drops the const qualifier from
 * the userspace pointer for the uintptr_t cast.
 */
int
cloudabi_sys_mem_unlock(struct thread *td,
    struct cloudabi_sys_mem_unlock_args *uap)
{

	return (kern_munlock(td, __DECONST(uintptr_t, uap->mapping),
	    uap->mapping_len));
}
171
/*
 * mem_unmap(): remove a memory mapping, equivalent to munmap(2).
 */
int
cloudabi_sys_mem_unmap(struct thread *td,
    struct cloudabi_sys_mem_unmap_args *uap)
{

	return (kern_munmap(td, (uintptr_t)uap->mapping, uap->mapping_len));
}
/* Cache object: c1b07eb996e258140a7668a2e58fecaa */