FreeBSD/Linux Kernel Cross Reference
sys/i386/i386/mem.c
1 /*-
2 * Copyright (c) 1988 University of Utah.
3 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
4 * All rights reserved.
5 *
6 * This code is derived from software contributed to Berkeley by
7 * the Systems Programming Group of the University of Utah Computer
8 * Science Department, and code derived from software contributed to
9 * Berkeley by William Jolitz.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the University of
22 * California, Berkeley and its contributors.
23 * 4. Neither the name of the University nor the names of its contributors
24 * may be used to endorse or promote products derived from this software
25 * without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
28 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
29 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
30 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
31 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 * SUCH DAMAGE.
38 *
39 * from: Utah $Hdr: mem.c 1.13 89/10/08$
40 * from: @(#)mem.c 7.2 (Berkeley) 5/9/91
41 * $FreeBSD: src/sys/i386/i386/mem.c,v 1.38.2.4 1999/09/05 08:11:12 peter Exp $
42 */
43
44 /*
45 * Memory special file
46 */
47
48 #include "opt_perfmon.h"
49
50 #include <sys/param.h>
51 #include <sys/conf.h>
52 #include <sys/buf.h>
53 #ifdef DEVFS
54 #include <sys/devfsext.h>
55 #endif /* DEVFS */
56 #include <sys/kernel.h>
57 #include <sys/systm.h>
58 #include <sys/uio.h>
59 #include <sys/malloc.h>
60 #include <sys/proc.h>
61
62 #include <machine/cpu.h>
63 #include <machine/random.h>
64 #include <machine/psl.h>
65 #ifdef PERFMON
66 #include <machine/perfmon.h>
67 #endif
68
69 #include <vm/vm.h>
70 #include <vm/vm_param.h>
71 #include <vm/lock.h>
72 #include <vm/vm_prot.h>
73 #include <vm/pmap.h>
74 #include <vm/vm_extern.h>
75
76
77
/*
 * Entry points for the memory special files:
 *   minor 0 /dev/mem, 1 /dev/kmem, 2 /dev/null, 3 /dev/random,
 *   4 /dev/urandom, 12 /dev/zero, 14 /dev/io, 32 perfmon (PERFMON only).
 */
static d_open_t mmopen;
static d_close_t mmclose;
static d_read_t mmrw;		/* shared by both the read and write slots */
static d_ioctl_t mmioctl;
static d_mmap_t memmmap;
static d_select_t mmselect;

#define CDEV_MAJOR 2
/* Character device switch table; mmrw serves d_read and d_write alike. */
static struct cdevsw mem_cdevsw = 
	{ mmopen,	mmclose,	mmrw,		mmrw,		/*2*/
	  mmioctl,	nullstop,	nullreset,	nodevtotty,/* memory */
	  mmselect,	memmmap,	NULL,	"mem",	NULL, -1 };

/* One zero-filled page, allocated lazily on first /dev/zero read. */
static caddr_t zbuf;
92
#ifdef DEVFS
/* DEVFS registration tokens, one per device node created below. */
static void *mem_devfs_token;
static void *kmem_devfs_token;
static void *null_devfs_token;
static void *random_devfs_token;
static void *urandom_devfs_token;
static void *zero_devfs_token;
static void *io_devfs_token;
#ifdef PERFMON
static void *perfmon_devfs_token;
#endif

static void memdevfs_init __P((void));

/*
 * Register a devfs node for every minor this driver services.
 * Permissions: mem/kmem/perfmon are 0640 root:kmem (kernel memory is
 * sensitive), null/zero are world read-write, random/urandom are
 * world-readable, and io is root-only.
 */
static void
memdevfs_init()
{
    mem_devfs_token = 
	devfs_add_devswf(&mem_cdevsw, 0, DV_CHR, 
			 UID_ROOT, GID_KMEM, 0640, "mem");
    kmem_devfs_token = 
	devfs_add_devswf(&mem_cdevsw, 1, DV_CHR,
			 UID_ROOT, GID_KMEM, 0640, "kmem");
    null_devfs_token = 
	devfs_add_devswf(&mem_cdevsw, 2, DV_CHR, 
			 UID_ROOT, GID_WHEEL, 0666, "null");
    random_devfs_token = 
	devfs_add_devswf(&mem_cdevsw, 3, DV_CHR, 
			 UID_ROOT, GID_WHEEL, 0644, "random");
    urandom_devfs_token = 
	devfs_add_devswf(&mem_cdevsw, 4, DV_CHR, 
			 UID_ROOT, GID_WHEEL, 0644, "urandom");
    zero_devfs_token = 
	devfs_add_devswf(&mem_cdevsw, 12, DV_CHR, 
			 UID_ROOT, GID_WHEEL, 0666, "zero");
    io_devfs_token = 
	devfs_add_devswf(&mem_cdevsw, 14, DV_CHR, 
			 UID_ROOT, GID_WHEEL, 0600, "io");
#ifdef PERFMON
    perfmon_devfs_token = 
	devfs_add_devswf(&mem_cdevsw, 32, DV_CHR, 
			 UID_ROOT, GID_KMEM, 0640, "perfmon");
#endif /* PERFMON */
}
#endif /* DEVFS */
138
139 extern char *ptvmmap; /* poor name! */
140
141 static int
142 mmclose(dev, flags, fmt, p)
143 dev_t dev;
144 int flags;
145 int fmt;
146 struct proc *p;
147 {
148 struct trapframe *fp;
149
150 switch (minor(dev)) {
151 #ifdef PERFMON
152 case 32:
153 return perfmon_close(dev, flags, fmt, p);
154 #endif
155 case 14:
156 fp = (struct trapframe *)curproc->p_md.md_regs;
157 fp->tf_eflags &= ~PSL_IOPL;
158 break;
159 default:
160 break;
161 }
162 return(0);
163 }
164
165 static int
166 mmopen(dev, flags, fmt, p)
167 dev_t dev;
168 int flags;
169 int fmt;
170 struct proc *p;
171 {
172 int error;
173 struct trapframe *fp;
174
175 switch (minor(dev)) {
176 case 32:
177 #ifdef PERFMON
178 return perfmon_open(dev, flags, fmt, p);
179 #else
180 return ENODEV;
181 #endif
182 case 14:
183 error = suser(p->p_ucred, &p->p_acflag);
184 if (error != 0)
185 return (error);
186 if (securelevel > 0)
187 return (EPERM);
188 fp = (struct trapframe *)curproc->p_md.md_regs;
189 fp->tf_eflags |= PSL_IOPL;
190 break;
191 default:
192 break;
193 }
194 return(0);
195 }
196
/*
 * Common read/write routine for every minor of the mem driver.
 * Dispatches on minor(dev):
 *   0  /dev/mem     - physical memory, copied a page at a time through
 *                     the ptvmmap window
 *   1  /dev/kmem    - kernel virtual memory, bounds- and residency-checked
 *   2  /dev/null    - EOF on read, data sink on write
 *   3  /dev/random  - entropy pool on read (may hit EOF), sink on write
 *   4  /dev/urandom - unlimited pseudo-random bytes, sink on write
 *  12  /dev/zero    - zero bytes on read, sink on write
 * Returns 0 or an errno; uio is advanced by however much transferred.
 */
static int
mmrw(dev, uio, flags)
	dev_t dev;
	struct uio *uio;
	int flags;
{
	register int o;
	register u_int c, v;
	u_int poolsize;
	register struct iovec *iov;
	int error = 0;
	caddr_t buf = NULL;	/* scratch page for the random minors; freed on exit */

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			/* Skip exhausted iovec entries. */
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("mmrw");
			continue;
		}
		switch (minor(dev)) {

/* minor device 0 is physical memory */
		case 0:
			/*
			 * Map the target physical page at ptvmmap, copy
			 * through the window, then remove the mapping.
			 * c is clamped so the copy never crosses a page
			 * boundary on either side.
			 */
			v = uio->uio_offset;
			pmap_enter(kernel_pmap, (vm_offset_t)ptvmmap, v,
				uio->uio_rw == UIO_READ ? VM_PROT_READ : VM_PROT_WRITE,
				TRUE);
			o = (int)uio->uio_offset & PAGE_MASK;
			c = (u_int)(PAGE_SIZE - ((int)iov->iov_base & PAGE_MASK));
			c = min(c, (u_int)(PAGE_SIZE - o));
			c = min(c, (u_int)iov->iov_len);
			error = uiomove((caddr_t)&ptvmmap[o], (int)c, uio);
			pmap_remove(kernel_pmap, (vm_offset_t)ptvmmap,
				    (vm_offset_t)&ptvmmap[PAGE_SIZE]);
			continue;

/* minor device 1 is kernel memory */
		case 1: {
			vm_offset_t addr, eaddr;
			c = iov->iov_len;

			/*
			 * Make sure that all of the pages are currently resident so
			 * that we don't create any zero-fill pages.
			 */
			addr = trunc_page(uio->uio_offset);
			eaddr = round_page(uio->uio_offset + c);

			/*
			 * Reject the recursive page-table windows (below
			 * PTDPTDI, at/above APTDPTDI) before probing.
			 */
			if (addr < (vm_offset_t)VADDR(PTDPTDI, 0))
				return EFAULT;
			if (eaddr >= (vm_offset_t)VADDR(APTDPTDI, 0))
				return EFAULT;
			for (; addr < eaddr; addr += PAGE_SIZE) 
				if (pmap_extract(kernel_pmap, addr) == 0)
					return EFAULT;
			
			if (!kernacc((caddr_t)(int)uio->uio_offset, c,
			    uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
				return(EFAULT);
			error = uiomove((caddr_t)(int)uio->uio_offset, (int)c, uio);
			continue;
		}

/* minor device 2 is EOF/RATHOLE */
		case 2:
			if (uio->uio_rw == UIO_READ)
				return (0);	/* immediate EOF */
			c = iov->iov_len;	/* swallow the write */
			break;

/* minor device 3 (/dev/random) is source of filth on read, rathole on write */
		case 3:
			if (uio->uio_rw == UIO_WRITE) {
				c = iov->iov_len;
				break;
			}
			if (buf == NULL)
				buf = (caddr_t)
				    malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
			c = min(iov->iov_len, PAGE_SIZE);
			poolsize = read_random(buf, c);
			if (poolsize == 0) {
				/* Entropy pool empty: report EOF now. */
				if (buf)
					free(buf, M_TEMP);
				return (0);
			}
			c = min(c, poolsize);
			error = uiomove(buf, (int)c, uio);
			continue;

/* minor device 4 (/dev/urandom) is source of muck on read, rathole on write */
		case 4:
			if (uio->uio_rw == UIO_WRITE) {
				c = iov->iov_len;
				break;
			}
			if (buf == NULL)
				buf = (caddr_t)
				    malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
			c = min(iov->iov_len, PAGE_SIZE);
			/* Unlimited variant never reports an empty pool. */
			poolsize = read_random_unlimited(buf, c);
			c = min(c, poolsize);
			error = uiomove(buf, (int)c, uio);
			continue;

/* minor device 12 (/dev/zero) is source of nulls on read, rathole on write */
		case 12:
			if (uio->uio_rw == UIO_WRITE) {
				c = iov->iov_len;
				break;
			}
			if (zbuf == NULL) {
				/* Lazily allocate the shared zero page. */
				zbuf = (caddr_t)
				    malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
				bzero(zbuf, PAGE_SIZE);
			}
			c = min(iov->iov_len, PAGE_SIZE);
			error = uiomove(zbuf, (int)c, uio);
			continue;

#ifdef notyet
/* 386 I/O address space (/dev/ioport[bwl]) is a read/write access to seperate
   i/o device address bus, different than memory bus. Semantics here are
   very different than ordinary read/write, as if iov_len is a multiple
   an implied string move from a single port will be done. Note that lseek
   must be used to set the port number reliably. */
		case 14:
			if (iov->iov_len == 1) {
				u_char tmp;
				tmp = inb(uio->uio_offset);
				error = uiomove (&tmp, iov->iov_len, uio);
			} else {
				if (!useracc((caddr_t)iov->iov_base,
					iov->iov_len, uio->uio_rw))
					return (EFAULT);
				insb(uio->uio_offset, iov->iov_base,
					iov->iov_len);
			}
			break;
		case 15:
			if (iov->iov_len == sizeof (short)) {
				u_short tmp;
				tmp = inw(uio->uio_offset);
				error = uiomove (&tmp, iov->iov_len, uio);
			} else {
				if (!useracc((caddr_t)iov->iov_base,
					iov->iov_len, uio->uio_rw))
					return (EFAULT);
				insw(uio->uio_offset, iov->iov_base,
					iov->iov_len/ sizeof (short));
			}
			break;
		case 16:
			if (iov->iov_len == sizeof (long)) {
				u_long tmp;
				tmp = inl(uio->uio_offset);
				error = uiomove (&tmp, iov->iov_len, uio);
			} else {
				if (!useracc((caddr_t)iov->iov_base,
					iov->iov_len, uio->uio_rw))
					return (EFAULT);
				insl(uio->uio_offset, iov->iov_base,
					iov->iov_len/ sizeof (long));
			}
			break;
#endif

		default:
			return (ENXIO);
		}
		/*
		 * Cases that "break" (rather than "continue") transferred
		 * nothing via uiomove, so advance the uio by hand.
		 */
		if (error)
			break;
		iov->iov_base += c;
		iov->iov_len -= c;
		uio->uio_offset += c;
		uio->uio_resid -= c;
	}
	if (buf)
		free(buf, M_TEMP);
	return (error);
}
381
382
383
384
385 /*******************************************************\
386 * allow user processes to MMAP some memory sections *
387 * instead of going through read/write *
388 \*******************************************************/
389 static int
390 memmmap(dev_t dev, vm_offset_t offset, int nprot)
391 {
392 switch (minor(dev))
393 {
394
395 /* minor device 0 is physical memory */
396 case 0:
397 return i386_btop(offset);
398
399 /* minor device 1 is kernel memory */
400 case 1:
401 return i386_btop(vtophys(offset));
402
403 default:
404 return -1;
405 }
406 }
407
/*
 * Allow userland to select which interrupts will be used in the muck
 * gathering business (entropy harvesting for /dev/random), and pass
 * perfmon ioctls through on minor 32.
 *
 * For MEM_SETIRQ/MEM_CLEARIRQ, cmdarg points to a u_int16_t IRQ number
 * (0..15); for MEM_RETURNIRQ it receives the bitmask of currently
 * diverted IRQs.  Requires root for the SET/CLEAR/RETURN paths.
 */
static int
mmioctl(dev, cmd, cmdarg, flags, p)
	dev_t dev;
	int cmd;
	caddr_t cmdarg;
	int flags;
	struct proc *p;
{
	static u_int16_t interrupt_allowed = 0;	/* bitmask of diverted IRQs */
	u_int16_t interrupt_mask;
	int error;

	switch(minor(dev)) {
	case 3:
	case 4:
		break;

#ifdef PERFMON
	case 32:
		return perfmon_ioctl(dev, cmd, cmdarg, flags, p);
#endif
	default:
		return ENODEV;
	}

	/*
	 * NOTE(review): this range check also applies to MEM_RETURNIRQ,
	 * whose cmdarg is output-only; callers must still pass in a
	 * value below 16.
	 */
	if (*(u_int16_t *)cmdarg >= 16)
		return (EINVAL);

	/* Only root can do this */
	error = suser(p->p_ucred, &p->p_acflag);
	if (error) {
		return (error);
	}
	interrupt_mask = 1 << *(u_int16_t *)cmdarg;

	switch (cmd) {

	case MEM_SETIRQ:
		/*
		 * Divert the IRQ's vector to add_interrupt_randomness,
		 * saving the original handler and unit so CLEARIRQ can
		 * restore them.  Done with interrupts disabled so the
		 * vector is never observed half-swapped.
		 */
		if (!(interrupt_allowed & interrupt_mask)) {
			disable_intr();
			interrupt_allowed |= interrupt_mask;
			sec_intr_handler[*(u_int16_t *)cmdarg] =
				intr_handler[*(u_int16_t *)cmdarg];
			intr_handler[*(u_int16_t *)cmdarg] =
				add_interrupt_randomness;
			sec_intr_unit[*(u_int16_t *)cmdarg] =
				intr_unit[*(u_int16_t *)cmdarg];
			intr_unit[*(u_int16_t *)cmdarg] =
				*(u_int16_t *)cmdarg;
			enable_intr();
		}
		else return (EPERM);
		break;

	case MEM_CLEARIRQ:
		/* Restore the handler/unit saved by MEM_SETIRQ. */
		if (interrupt_allowed & interrupt_mask) {
			disable_intr();
			interrupt_allowed &= ~(interrupt_mask);
			intr_handler[*(u_int16_t *)cmdarg] =
				sec_intr_handler[*(u_int16_t *)cmdarg];
			intr_unit[*(u_int16_t *)cmdarg] =
				sec_intr_unit[*(u_int16_t *)cmdarg];
			enable_intr();
		}
		else return (EPERM);
		break;

	case MEM_RETURNIRQ:
		/* Report which IRQs are currently diverted. */
		*(u_int16_t *)cmdarg = interrupt_allowed;
		break;

	default:
		return (ENOTTY);
	}
	return (0);
}
488
489 int
490 mmselect(dev, rw, p)
491 dev_t dev;
492 int rw;
493 struct proc *p;
494 {
495 switch (minor(dev)) {
496 case 3: /* /dev/random */
497 return random_select(dev, rw, p);
498 case 4: /* /dev/urandom */
499 default:
500 return seltrue(dev, rw, p);
501 }
502 }
503
504 /*
505 * Routine that identifies /dev/mem and /dev/kmem.
506 *
507 * A minimal stub routine can always return 0.
508 */
509 int
510 iskmemdev(dev)
511 dev_t dev;
512 {
513
514 return ((major(dev) == mem_cdevsw.d_maj)
515 && (minor(dev) == 0 || minor(dev) == 1));
516 }
517
518 int
519 iszerodev(dev)
520 dev_t dev;
521 {
522 return ((major(dev) == mem_cdevsw.d_maj)
523 && minor(dev) == 12);
524 }
525
526
527
528 static mem_devsw_installed = 0;
529
530 static void
531 mem_drvinit(void *unused)
532 {
533 dev_t dev;
534
535 if( ! mem_devsw_installed ) {
536 dev = makedev(CDEV_MAJOR, 0);
537 cdevsw_add(&dev,&mem_cdevsw, NULL);
538 mem_devsw_installed = 1;
539 #ifdef DEVFS
540 memdevfs_init();
541 #endif
542 }
543 }
544
545 SYSINIT(memdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,mem_drvinit,NULL)
546
Cache object: ae295ea60c8e85d060882f93136ffcee
|