FreeBSD/Linux Kernel Cross Reference
sys/uvm/uvm_device.c
/*	$NetBSD: uvm_device.c,v 1.80 2022/07/07 13:27:02 riastradh Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_device.c,v 1.1.2.9 1998/02/06 05:11:47 chs Exp
 */

/*
 * uvm_device.c: the device pager.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_device.c,v 1.80 2022/07/07 13:27:02 riastradh Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/proc.h>
#include <sys/kmem.h>

#include <uvm/uvm.h>
#include <uvm/uvm_device.h>
#include <uvm/uvm_pmap.h>

/*
 * private global data structure
 *
 * we keep a list of active device objects in the system.
 */

LIST_HEAD(udv_list_struct, uvm_device);
static struct udv_list_struct udv_list;
static kmutex_t udv_lock __cacheline_aligned;
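/*
 * udv_lock protects udv_list and the UVM_DEVICE_HOLD/UVM_DEVICE_WANTED
 * flags in u_flags; each object's reference count (uo_refs) is protected
 * by its own vmobjlock.
 */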

/*
 * functions
 */

static void	udv_init(void);
static void	udv_reference(struct uvm_object *);
static void	udv_detach(struct uvm_object *);
static int	udv_fault(struct uvm_faultinfo *, vaddr_t,
			  struct vm_page **, int, int, vm_prot_t,
			  int);

/*
 * master pager structure
 */

const struct uvm_pagerops uvm_deviceops = {
	.pgo_init = udv_init,
	.pgo_reference = udv_reference,
	.pgo_detach = udv_detach,
	.pgo_fault = udv_fault,
};
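
/*
 * Note (descriptive, not from this file): pgo_init is expected to be
 * invoked once at boot, from uvm_pager_init(), to set up the list and
 * lock above.  Because a pgo_fault hook is supplied, device objects
 * never go through a pgo_get routine; faults map the device directly
 * with pmap_enter() below.
 */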

/*
 * the ops!
 */

/*
 * udv_init
 *
 * init pager private data structures.
 */

static void
udv_init(void)
{
	LIST_INIT(&udv_list);
	mutex_init(&udv_lock, MUTEX_DEFAULT, IPL_NONE);
}

/*
 * udv_attach
 *
 * get a VM object that is associated with a device. allocate a new
 * one if needed.
 *
 * => caller must _not_ already be holding the lock on the uvm_object.
 * => in fact, nothing should be locked so that we can sleep here.
 */
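
/*
 * Illustrative sketch (hypothetical, not taken from this file): a caller
 * on the mmap path might use the returned object roughly like
 *
 *	uobj = udv_attach(dev, prot, off, len);
 *	if (uobj == NULL)
 *		return EINVAL;
 *	error = uvm_map(map, &va, len, uobj, off, 0, flags);
 *	if (error)
 *		(*uobj->pgops->pgo_detach)(uobj);
 *
 * i.e. the reference returned here is handed to the map entry, and must
 * be dropped through pgo_detach if the map attempt fails.
 */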

struct uvm_object *
udv_attach(dev_t device, vm_prot_t accessprot,
    voff_t off,		/* used only for access check */
    vsize_t size	/* used only for access check */)
{
	struct uvm_device *udv, *lcv;
	const struct cdevsw *cdev;
	dev_mmap_t *mapfn;

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist, "(device=%#jx)", device,0,0,0);

	KASSERT(size > 0);

	/*
	 * before we do anything, ensure this device supports mmap
	 */

	cdev = cdevsw_lookup(device);
	if (cdev == NULL) {
		return NULL;
	}
	mapfn = cdev->d_mmap;
	if (mapfn == NULL || mapfn == nommap) {
		return NULL;
	}

	/*
	 * Negative offsets on the object are not allowed, unless the
	 * device has affirmatively set D_NEGOFFSAFE.
	 */
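	/*
	 * The range check below is "off + size <= VOFF_MAX", rewritten as
	 * "off <= VOFF_MAX - size" so that the addition itself cannot
	 * overflow.  The separate size > VOFF_MAX test only matters where
	 * size_t is wider than 32 bits; elsewhere it would be trivially
	 * false and provoke -Wtype-limits, hence the #if.
	 */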
	if ((cdev->d_flag & D_NEGOFFSAFE) == 0 && off != UVM_UNKNOWN_OFFSET) {
		if (off < 0)
			return NULL;
#if SIZE_MAX > UINT32_MAX	/* XXX -Wtype-limits */
		if (size > __type_max(voff_t))
			return NULL;
#endif
		if (off > __type_max(voff_t) - size)
			return NULL;
	}

	/*
	 * Check that the specified range of the device allows the
	 * desired protection.
	 *
	 * XXX assumes VM_PROT_* == PROT_*
	 * XXX clobbers off and size, but nothing else here needs them.
	 */
	do {
		KASSERTMSG((off % PAGE_SIZE) == 0, "off=%jd", (intmax_t)off);
		KASSERTMSG(size >= PAGE_SIZE, "size=%"PRIuVSIZE, size);
		if (cdev_mmap(device, off, accessprot) == -1)
			return NULL;
		KASSERT(off <= __type_max(voff_t) - PAGE_SIZE ||
		    (cdev->d_flag & D_NEGOFFSAFE) != 0);
		if (__predict_false(off > __type_max(voff_t) - PAGE_SIZE)) {
			/*
			 * off += PAGE_SIZE, with two's-complement
			 * wraparound, or
			 *
			 * off += PAGE_SIZE - 2*(VOFF_MAX + 1).
			 */
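			/*
			 * The update is split into three steps so that no
			 * intermediate result can leave the range of voff_t:
			 *
			 *	(off - VOFF_MAX) + (PAGE_SIZE - 2) - VOFF_MAX
			 *	    == off + PAGE_SIZE - 2*(VOFF_MAX + 1)
			 *
			 * The CTASSERT below guarantees PAGE_SIZE - 2 is not
			 * negative.
			 */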
			CTASSERT(MIN_PAGE_SIZE >= 2);
			off -= __type_max(voff_t);
			off += PAGE_SIZE - 2;
			off -= __type_max(voff_t);
		} else {
			off += PAGE_SIZE;
		}
		size -= PAGE_SIZE;
	} while (size != 0);

	/*
	 * keep looping until we get it
	 */

	for (;;) {

		/*
		 * first, attempt to find it on the main list
		 */

		mutex_enter(&udv_lock);
		LIST_FOREACH(lcv, &udv_list, u_list) {
			if (device == lcv->u_device)
				break;
		}

		/*
		 * got it on main list. put a hold on it and unlock udv_lock.
		 */

		if (lcv) {

			/*
			 * if someone else has a hold on it, sleep and start
			 * over again.
			 */

			if (lcv->u_flags & UVM_DEVICE_HOLD) {
				lcv->u_flags |= UVM_DEVICE_WANTED;
				UVM_UNLOCK_AND_WAIT(lcv, &udv_lock, false,
				    "udv_attach",0);
				continue;
			}

			/* we are now holding it */
			lcv->u_flags |= UVM_DEVICE_HOLD;
			mutex_exit(&udv_lock);

			/*
			 * bump reference count, unhold, return.
			 */

			rw_enter(lcv->u_obj.vmobjlock, RW_WRITER);
			lcv->u_obj.uo_refs++;
			rw_exit(lcv->u_obj.vmobjlock);

			mutex_enter(&udv_lock);
			if (lcv->u_flags & UVM_DEVICE_WANTED)
				wakeup(lcv);
			lcv->u_flags &= ~(UVM_DEVICE_WANTED|UVM_DEVICE_HOLD);
			mutex_exit(&udv_lock);
			return &lcv->u_obj;
		}

		/*
		 * Did not find it on main list. Need to allocate a new one.
		 */

		mutex_exit(&udv_lock);

		/* Note: both calls may allocate memory and sleep. */
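		/*
		 * kmem_alloc() gets the memory; uvm_obj_init() then installs
		 * uvm_deviceops, allocates the object's vmobjlock (the "true"
		 * argument) and starts uo_refs at 1.
		 */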
		udv = kmem_alloc(sizeof(*udv), KM_SLEEP);
		uvm_obj_init(&udv->u_obj, &uvm_deviceops, true, 1);

		mutex_enter(&udv_lock);

		/*
		 * now we have to double check to make sure no one added it
		 * to the list while we were sleeping...
		 */

		LIST_FOREACH(lcv, &udv_list, u_list) {
			if (device == lcv->u_device)
				break;
		}

		/*
		 * did we lose a race to someone else?
		 * free our memory and retry.
		 */

		if (lcv) {
			mutex_exit(&udv_lock);
			uvm_obj_destroy(&udv->u_obj, true);
			kmem_free(udv, sizeof(*udv));
			continue;
		}

		/*
		 * we have it! init the data structures, add to list
		 * and return.
		 */

		udv->u_flags = 0;
		udv->u_device = device;
		LIST_INSERT_HEAD(&udv_list, udv, u_list);
		mutex_exit(&udv_lock);
		return &udv->u_obj;
	}
	/*NOTREACHED*/
}

/*
 * udv_reference
 *
 * add a reference to a VM object. Note that the reference count must
 * already be one (the passed in reference) so there is no chance of the
 * udv being released or locked out here.
 *
 * => caller must call with object unlocked.
 */

static void
udv_reference(struct uvm_object *uobj)
{
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	rw_enter(uobj->vmobjlock, RW_WRITER);
	uobj->uo_refs++;
	UVMHIST_LOG(maphist, "<- done (uobj=%#jx, ref = %jd)",
	    (uintptr_t)uobj, uobj->uo_refs,0,0);
	rw_exit(uobj->vmobjlock);
}

/*
 * udv_detach
 *
 * remove a reference to a VM object.
 *
 * => caller must call with object unlocked and map locked.
 */

static void
udv_detach(struct uvm_object *uobj)
{
	struct uvm_device *udv = (struct uvm_device *)uobj;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	/*
	 * loop until done
	 */
again:
	rw_enter(uobj->vmobjlock, RW_WRITER);
	if (uobj->uo_refs > 1) {
		uobj->uo_refs--;
		rw_exit(uobj->vmobjlock);
		UVMHIST_LOG(maphist," <- done, uobj=%#jx, ref=%jd",
		    (uintptr_t)uobj,uobj->uo_refs,0,0);
		return;
	}

	/*
	 * is it being held? if so, wait until others are done.
	 */

	mutex_enter(&udv_lock);
	if (udv->u_flags & UVM_DEVICE_HOLD) {
		udv->u_flags |= UVM_DEVICE_WANTED;
		rw_exit(uobj->vmobjlock);
		UVM_UNLOCK_AND_WAIT(udv, &udv_lock, false, "udv_detach",0);
		goto again;
	}

	/*
	 * got it! nuke it now.
	 */

	LIST_REMOVE(udv, u_list);
	if (udv->u_flags & UVM_DEVICE_WANTED)
		wakeup(udv);
	mutex_exit(&udv_lock);
	rw_exit(uobj->vmobjlock);

	uvm_obj_destroy(uobj, true);
	kmem_free(udv, sizeof(*udv));
	UVMHIST_LOG(maphist," <- done, freed uobj=%#jx", (uintptr_t)uobj,
	    0, 0, 0);
}

/*
 * udv_fault: non-standard fault routine for device "pages"
 *
 * => rather than having a "get" function, we have a fault routine:
 *	since we don't return vm_pages, we need full control over the
 *	pmap_enter map-in
 * => all the usual fault data structures are locked by the caller
 *	(i.e. maps(read), amap (if any), uobj)
 * => on return, we unlock all fault data structures
 * => flags: PGO_ALLPAGES: get all of the pages
 *	     PGO_LOCKED: fault data structures are locked
 *	XXX: currently PGO_LOCKED is always required ... consider removing
 *	it as a flag
 * => NOTE: vaddr is the VA of pps[0] in ufi->entry, _NOT_ pps[centeridx]
 */

static int
udv_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, struct vm_page **pps,
    int npages, int centeridx, vm_prot_t access_type,
    int flags)
{
	struct vm_map_entry *entry = ufi->entry;
	struct uvm_object *uobj = entry->object.uvm_obj;
	struct uvm_device *udv = (struct uvm_device *)uobj;
	vaddr_t curr_va;
	off_t curr_offset;
	paddr_t paddr, mdpgno;
	u_int mmapflags;
	int lcv, retval;
	dev_t device;
	vm_prot_t mapprot;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist," flags=%#jx", flags,0,0,0);

	/*
	 * we do not allow device mappings to be mapped copy-on-write
	 * so we kill any attempt to do so here.
	 */

	if (UVM_ET_ISCOPYONWRITE(entry)) {
		UVMHIST_LOG(maphist, "<- failed -- COW entry (etype=%#jx)",
		    entry->etype, 0,0,0);
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
		return EIO;
	}

	/*
	 * get device map function.
	 */

	device = udv->u_device;
	if (cdevsw_lookup(device) == NULL) {
		/* XXX This should not happen */
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
		return EIO;
	}

	/*
	 * now we must determine the offset in udv to use and the VA to
	 * use for pmap_enter. note that we always use orig_map's pmap
	 * for pmap_enter (even if we have a submap). since virtual
	 * addresses in a submap must match the main map, this is ok.
	 */

	/* udv offset = (offset from start of entry) + entry's offset */
	curr_offset = entry->offset + (vaddr - entry->start);
	/* pmap va = vaddr (virtual address of pps[0]) */
	curr_va = vaddr;
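
	/*
	 * Worked example (hypothetical numbers): if the entry starts at VA
	 * 0x20000000 with entry->offset 0x10000, and the faulting range
	 * begins at vaddr 0x20003000, then curr_offset = 0x10000 + 0x3000
	 * = 0x13000 and curr_va = 0x20003000; both advance one page per
	 * iteration of the loop below.
	 */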

	/*
	 * loop over the page range entering in as needed
	 */

	retval = 0;
	for (lcv = 0 ; lcv < npages ; lcv++, curr_offset += PAGE_SIZE,
	    curr_va += PAGE_SIZE) {
		if ((flags & PGO_ALLPAGES) == 0 && lcv != centeridx)
			continue;

		if (pps[lcv] == PGO_DONTCARE)
			continue;

		mdpgno = cdev_mmap(device, curr_offset, access_type);
		if (mdpgno == -1) {
			retval = EIO;
			break;
		}
		paddr = pmap_phys_address(mdpgno);
		mmapflags = pmap_mmap_flags(mdpgno);
		mapprot = ufi->entry->protection;
		UVMHIST_LOG(maphist,
		    " MAPPING: device: pm=%#jx, va=%#jx, pa=%#jx, at=%jd",
		    (uintptr_t)ufi->orig_map->pmap, curr_va, paddr, mapprot);
		if (pmap_enter(ufi->orig_map->pmap, curr_va, paddr, mapprot,
		    PMAP_CANFAIL | mapprot | mmapflags) != 0) {
			/*
			 * pmap_enter() didn't have the resource to
			 * enter this mapping. Unlock everything,
			 * wait for the pagedaemon to free up some
			 * pages, and then tell uvm_fault() to start
			 * the fault again.
			 *
			 * XXX Needs some rethinking for the PGO_ALLPAGES
			 * XXX case.
			 */
			pmap_update(ufi->orig_map->pmap);	/* sync what we have so far */
			uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
			    uobj);
			return ENOMEM;
		}
	}

	pmap_update(ufi->orig_map->pmap);
	uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
	return retval;
}