/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and code derived from software contributed to
 * Berkeley by William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: Utah $Hdr: mem.c 1.13 89/10/08$
 *	from: @(#)mem.c	7.2 (Berkeley) 5/9/91
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Memory special file
 */
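
/*
 * Machine-dependent support for the /dev/mem (physical memory) and
 * /dev/kmem (kernel virtual memory) devices; the two minors are told
 * apart below via dev2unit() returning CDEV_MINOR_MEM or CDEV_MINOR_KMEM.
 */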

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/ioccom.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/msgbuf.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/uio.h>

#include <machine/md_var.h>
#include <machine/vmparam.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>

#include <machine/memdev.h>

static void ppc_mrinit(struct mem_range_softc *);
static int ppc_mrset(struct mem_range_softc *, struct mem_range_desc *, int *);

MALLOC_DEFINE(M_MEMDESC, "memdesc", "memory range descriptors");

static struct mem_range_ops ppc_mem_range_ops = {
	ppc_mrinit,
	ppc_mrset,
	NULL,
	NULL
};
struct mem_range_softc mem_range_softc = {
	&ppc_mem_range_ops,
	0, 0, NULL
};

/* ARGSUSED */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
	struct iovec *iov;
	int error = 0;
	vm_offset_t va, eva, off, v;
	vm_prot_t prot;
	struct vm_page m;
	vm_page_t marr;
	vm_size_t cnt;
	ssize_t orig_resid;

	cnt = 0;
	error = 0;
	orig_resid = uio->uio_resid;

	while (uio->uio_resid > 0 && !error) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("memrw");
			continue;
		}
		if (dev2unit(dev) == CDEV_MINOR_MEM) {
			v = uio->uio_offset;

kmem_direct_mapped:	off = v & PAGE_MASK;
			cnt = PAGE_SIZE - ((vm_offset_t)iov->iov_base &
			    PAGE_MASK);
			cnt = min(cnt, PAGE_SIZE - off);
			cnt = min(cnt, iov->iov_len);

			if (mem_valid(v, cnt)) {
				error = EFAULT;
				break;
			}

			if (hw_direct_map && !pmap_dev_direct_mapped(v, cnt)) {
				error = uiomove((void *)PHYS_TO_DMAP(v), cnt,
				    uio);
			} else {
				m.phys_addr = trunc_page(v);
				marr = &m;
				error = uiomove_fromphys(&marr, off, cnt, uio);
			}
		} else if (dev2unit(dev) == CDEV_MINOR_KMEM) {
			va = uio->uio_offset;

			if (hw_direct_map &&
			    ((va < VM_MIN_KERNEL_ADDRESS) || (va > virtual_end))) {
				v = DMAP_TO_PHYS(va);
				goto kmem_direct_mapped;
			}

			va = trunc_page(uio->uio_offset);
			eva = round_page(uio->uio_offset
			    + iov->iov_len);

			/*
			 * Make sure that all the pages are currently resident
			 * so that we don't create any zero-fill pages.
			 */

			for (; va < eva; va += PAGE_SIZE) {
				if (pmap_extract(kernel_pmap, va) == 0) {
					error = EFAULT;
					break;
				}
			}
			if (error != 0)
				break;

			prot = (uio->uio_rw == UIO_READ)
			    ? VM_PROT_READ : VM_PROT_WRITE;

			va = uio->uio_offset;
			if (((va >= VM_MIN_KERNEL_ADDRESS) && (va <= virtual_end)) &&
			    !kernacc((void *) va, iov->iov_len, prot)) {
				error = EFAULT;
				break;
			}

			error = uiomove((void *)va, iov->iov_len, uio);
		}
	}
	/*
	 * Don't return error if any byte was written.  Read and write
	 * can return error only if no i/o was performed.
	 */
	if (uio->uio_resid != orig_resid)
		error = 0;
	return (error);
}

/*
 * allow user processes to MMAP some memory sections
 * instead of going through read/write
 */
int
memmmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
    int prot, vm_memattr_t *memattr)
{
	int i;

	if (dev2unit(dev) == CDEV_MINOR_MEM)
		*paddr = offset;
	else
		return (EFAULT);

	for (i = 0; i < mem_range_softc.mr_ndesc; i++) {
		if (!(mem_range_softc.mr_desc[i].mr_flags & MDF_ACTIVE))
			continue;

		if (offset >= mem_range_softc.mr_desc[i].mr_base &&
		    offset < mem_range_softc.mr_desc[i].mr_base +
		    mem_range_softc.mr_desc[i].mr_len) {
			switch (mem_range_softc.mr_desc[i].mr_flags &
			    MDF_ATTRMASK) {
			case MDF_WRITEBACK:
				*memattr = VM_MEMATTR_WRITE_BACK;
				break;
			case MDF_WRITECOMBINE:
				*memattr = VM_MEMATTR_WRITE_COMBINING;
				break;
			case MDF_UNCACHEABLE:
				*memattr = VM_MEMATTR_UNCACHEABLE;
				break;
			case MDF_WRITETHROUGH:
				*memattr = VM_MEMATTR_WRITE_THROUGH;
				break;
			}

			break;
		}
	}

	return (0);
}
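
/*
 * Illustrative sketch (not part of this driver): userland normally reaches
 * memmmap() above by mmap(2)ing /dev/mem with the physical address as the
 * file offset, roughly:
 *
 *	int fd = open("/dev/mem", O_RDWR);
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *	    fd, (off_t)phys_addr);
 *
 * "len" and "phys_addr" are caller-chosen placeholders.  The offset is
 * passed to memmmap() and returned unchanged in *paddr; the mapping's
 * memory attribute is taken from any active mem_range descriptor that
 * covers the address.
 */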

static void
ppc_mrinit(struct mem_range_softc *sc)
{
	sc->mr_cap = 0;
	sc->mr_ndesc = 8; /* XXX: Should be dynamically expandable */
	sc->mr_desc = malloc(sc->mr_ndesc * sizeof(struct mem_range_desc),
	    M_MEMDESC, M_WAITOK | M_ZERO);
}

static int
ppc_mrset(struct mem_range_softc *sc, struct mem_range_desc *desc, int *arg)
{
	int i;

	switch (*arg) {
	case MEMRANGE_SET_UPDATE:
		for (i = 0; i < sc->mr_ndesc; i++) {
			if (!sc->mr_desc[i].mr_len) {
				sc->mr_desc[i] = *desc;
				sc->mr_desc[i].mr_flags |= MDF_ACTIVE;
				return (0);
			}
			if (sc->mr_desc[i].mr_base == desc->mr_base &&
			    sc->mr_desc[i].mr_len == desc->mr_len)
				return (EEXIST);
		}
		return (ENOSPC);
	case MEMRANGE_SET_REMOVE:
		for (i = 0; i < sc->mr_ndesc; i++)
			if (sc->mr_desc[i].mr_base == desc->mr_base &&
			    sc->mr_desc[i].mr_len == desc->mr_len) {
				bzero(&sc->mr_desc[i], sizeof(sc->mr_desc[i]));
				return (0);
			}
		return (ENOENT);
	default:
		return (EOPNOTSUPP);
	}

	return (0);
}

/*
 * Operations for changing memory attributes.
 *
 * This is basically just an ioctl shim for mem_range_attr_get
 * and mem_range_attr_set.
 */
int
memioctl_md(struct cdev *dev __unused, u_long cmd, caddr_t data, int flags,
    struct thread *td)
{
	int nd, error = 0;
	struct mem_range_op *mo = (struct mem_range_op *)data;
	struct mem_range_desc *md;

	/* is this for us? */
	if ((cmd != MEMRANGE_GET) &&
	    (cmd != MEMRANGE_SET))
		return (ENOTTY);

	/* any chance we can handle this? */
	if (mem_range_softc.mr_op == NULL)
		return (EOPNOTSUPP);

	/* do we have any descriptors? */
	if (mem_range_softc.mr_ndesc == 0)
		return (ENXIO);

	switch (cmd) {
	case MEMRANGE_GET:
		nd = imin(mo->mo_arg[0], mem_range_softc.mr_ndesc);
		if (nd > 0) {
			md = (struct mem_range_desc *)
			    malloc(nd * sizeof(struct mem_range_desc),
			    M_MEMDESC, M_WAITOK);
			error = mem_range_attr_get(md, &nd);
			if (!error)
				error = copyout(md, mo->mo_desc,
				    nd * sizeof(struct mem_range_desc));
			free(md, M_MEMDESC);
		} else
			nd = mem_range_softc.mr_ndesc;
		mo->mo_arg[0] = nd;
		break;

	case MEMRANGE_SET:
		md = (struct mem_range_desc *)malloc(sizeof(struct mem_range_desc),
		    M_MEMDESC, M_WAITOK);
		error = copyin(mo->mo_desc, md, sizeof(struct mem_range_desc));
		/* clamp description string */
		md->mr_owner[sizeof(md->mr_owner) - 1] = 0;
		if (error == 0)
			error = mem_range_attr_set(md, &mo->mo_arg[0]);
		free(md, M_MEMDESC);
		break;
	}
	return (error);
}
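
/*
 * Illustrative sketch (not part of this driver): the MEMRANGE_GET/SET
 * ioctls above are issued from userland on an open /dev/mem descriptor,
 * e.g. to list the current range descriptors:
 *
 *	struct mem_range_desc md[8];
 *	struct mem_range_op mo;
 *
 *	mo.mo_desc = md;
 *	mo.mo_arg[0] = nitems(md);
 *	error = ioctl(memfd, MEMRANGE_GET, &mo);
 *
 * "memfd" is a placeholder for the /dev/mem file descriptor.  On return
 * mo.mo_arg[0] holds the number of descriptors copied out, or the total
 * number available if the requested count was 0.
 */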