/*
 * Mach Operating System
 * Copyright (c) 1991 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * HISTORY
 * $Log: xmm_buffer.c,v $
 * Revision 2.2  92/03/10  16:28:53  jsb
 * 	Merged in norma branch changes as of NORMA_MK7.
 * 	[92/03/09  12:51:00  jsb]
 *
 * Revision 2.1.2.5  92/02/21  11:25:30  jsb
 * 	Let xmm_kobj_link handle multiple init detection.
 * 	[92/02/18  08:04:06  jsb]
 *
 * 	Explicitly provide name parameter to xmm_decl macro.
 * 	Changed debugging printf. Changed termination logic.
 * 	[92/02/16  15:18:10  jsb]
 *
 * 	First real implementation.
 * 	[92/02/09  14:17:44  jsb]
 *
 * Revision 2.1.2.3  92/01/21  21:53:42  jsb
 * 	De-linted. Supports new (dlb) memory object routines.
 * 	Supports arbitrary reply ports to lock_request, etc.
 * 	Converted mach_port_t (and port_t) to ipc_port_t.
 * 	[92/01/20  17:18:38  jsb]
 *
 * Revision 2.1.2.2  92/01/03  16:38:41  jsb
 * 	First checkin.
 * 	[91/12/31  17:26:56  jsb]
 *
 * Revision 2.1.2.1  92/01/03  08:57:47  jsb
 * 	First NORMA branch checkin.
 *
 */
/*
 *	File:	xmm_buffer.c
 *	Author:	Joseph S. Barrera III
 *	Date:	1991
 *
 *	Xmm layer which buffers a small amount of data_written data.
 */

#ifdef	KERNEL
#include <kern/queue.h>
#include <norma/xmm_obj.h>
#include <mach/vm_param.h>
#include <vm/vm_fault.h>
#else	/* KERNEL */
#include <xmm_obj.h>
#endif	/* KERNEL */

#define dprintf xmm_buffer_dprintf

typedef struct buffer *buffer_t;
#define BUFFER_NULL ((buffer_t) 0)

struct buffer {
        queue_chain_t   lruq;           /* link in global lru queue */
        queue_chain_t   mobjq;          /* link in owning mobj's buffer list */
        vm_map_copy_t   copy;           /* one buffered page of data */
        xmm_obj_t       mobj;           /* object the data belongs to */
        vm_offset_t     offset;         /* page offset within that object */
        buffer_t        next_free;      /* link in free list */
};

struct mobj {
        struct xmm_obj  obj;
        queue_head_t    buffers;        /* buffers held for this object */
        boolean_t       ready;          /* object_ready seen from below */
};
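
/*
 * Each allocated buffer lives on two queues at once: mobjq links it
 * into the owning mobj's `buffers' list (used for lookup, termination,
 * and unbuffering), while lruq links it into the global xmm_buffer_lru
 * queue (used to pick an eviction victim in xmm_buffer_alloc).
 * A buffer on the free list is on neither queue.
 */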

#undef  KOBJ
#define KOBJ    ((struct mobj *) kobj)

#define m_buffer_copy                   m_interpose_copy
#define m_buffer_data_unlock            m_interpose_data_unlock
#define m_buffer_lock_completed         m_interpose_lock_completed
#define m_buffer_supply_completed       m_interpose_supply_completed
#define m_buffer_data_return            m_interpose_data_return
#define m_buffer_change_completed       m_interpose_change_completed

#define k_buffer_data_unavailable       k_interpose_data_unavailable
#define k_buffer_get_attributes         k_interpose_get_attributes
#define k_buffer_lock_request           k_interpose_lock_request
#define k_buffer_data_error             k_interpose_data_error
#define k_buffer_destroy                k_interpose_destroy
#define k_buffer_data_supply            k_interpose_data_supply

xmm_decl(buffer, "buffer", sizeof(struct mobj));

#define XMM_BUFFER_COUNT        16

struct buffer   xmm_buffers[XMM_BUFFER_COUNT];
buffer_t        xmm_buffer_free_list;
queue_head_t    xmm_buffer_lru;
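
/*
 * All objects share a small, fixed pool of XMM_BUFFER_COUNT buffers.
 * xmm_buffer_alloc first tries the free list; failing that, it evicts
 * the least recently used buffer, synchronously pushing its page to
 * the memory manager with M_DATA_WRITE. Allocation fails only when
 * every buffer is busy being written out.
 */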

buffer_t
xmm_buffer_alloc()
{
        register buffer_t buffer;
        kern_return_t kr;
        xmm_obj_t mobj;

        /*
         * First check the free list.
         */
        buffer = xmm_buffer_free_list;
        if (buffer != BUFFER_NULL) {
                xmm_buffer_free_list = buffer->next_free;
                return buffer;
        }

        /*
         * There's nothing on the free list, so take the oldest element
         * from the lru queue, if any.
         */
        if (queue_empty(&xmm_buffer_lru)) {
                /*
                 * This can happen if all the buffers are being written out.
                 */
                return BUFFER_NULL;
        }

        /*
         * Remove the buffer from its obj queue as well, and write out
         * its data.
         *
         * XXX
         * Is it right for the caller to have to wait for someone else's
         * data write to be processed?
         */
        queue_remove_first(&xmm_buffer_lru, buffer, buffer_t, lruq);
        assert(buffer != BUFFER_NULL);
        mobj = buffer->mobj;
        queue_remove(&MOBJ->buffers, buffer, buffer_t, mobjq);
        kr = M_DATA_WRITE(mobj, mobj, buffer->offset,
                          (vm_offset_t) buffer->copy, PAGE_SIZE);
        if (kr != KERN_SUCCESS) {
                /*
                 * XXX
                 * What do we do here? (eternal buffering problem)
                 */
                printf("xmm_buffer_alloc: kr=%d/0x%x\n", kr, kr);
        }

        /*
         * The buffer is now free. Return it, poisoning the stale
         * mobj and offset fields to catch anyone who still uses them.
         */
#if     666
        buffer->mobj = (xmm_obj_t) 0xdd66dd66;
        buffer->offset = 0xcccccccc;
#endif
        return buffer;
}

void
xmm_buffer_free(buffer)
        register buffer_t buffer;
{
        buffer->next_free = xmm_buffer_free_list;
        xmm_buffer_free_list = buffer;
}

xmm_buffer_init()
{
        int i;
        buffer_t buffer;

        queue_init(&xmm_buffer_lru);
        for (i = 0; i < XMM_BUFFER_COUNT; i++) {
                buffer = &xmm_buffers[i];
                queue_init(&buffer->mobjq);
                queue_init(&buffer->lruq);
                xmm_buffer_free(buffer);
        }
}

kern_return_t
xmm_buffer_create(old_mobj, new_mobj)
        xmm_obj_t old_mobj;
        xmm_obj_t *new_mobj;
{
        xmm_obj_t mobj;
        kern_return_t kr;

        kr = xmm_obj_allocate(&buffer_class, old_mobj, &mobj);
        if (kr != KERN_SUCCESS) {
                return kr;
        }
        queue_init(&MOBJ->buffers);
        MOBJ->ready = FALSE;
        *new_mobj = mobj;
        return KERN_SUCCESS;
}
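
/*
 * A minimal sketch (not from this file) of how a client might stack
 * the buffering layer on top of an existing mobj; `pager_mobj' and
 * `buffered_mobj' are illustrative names, not part of this interface:
 *
 *      xmm_obj_t buffered_mobj;
 *      kern_return_t kr;
 *
 *      kr = xmm_buffer_create(pager_mobj, &buffered_mobj);
 *      if (kr != KERN_SUCCESS)
 *              return kr;
 *
 * A layer stacked above this one (so that its mobj->m_mobj is the
 * buffer mobj) would then issue buffered writes through the
 * M_BUFFERED_DATA_WRITE wrapper defined near the end of this file.
 */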

kern_return_t
m_buffer_init(mobj, k_kobj, pagesize, internal, size)
        xmm_obj_t mobj;
        xmm_obj_t k_kobj;
        vm_size_t pagesize;
        boolean_t internal;
        vm_size_t size;
{
#ifdef  lint
        M_INIT(mobj, k_kobj, pagesize, internal, size);
#endif  /* lint */
        assert(pagesize == PAGE_SIZE);
        xmm_kobj_link(mobj, k_kobj);
        M_INIT(mobj, mobj, pagesize, internal, size);
        return KERN_SUCCESS;
}

kern_return_t
m_buffer_terminate(mobj, kobj)
        xmm_obj_t mobj;
        xmm_obj_t kobj;
{
        buffer_t buffer;
        kern_return_t kr;
        vm_offset_t offset;
        vm_map_copy_t copy;

#ifdef  lint
        M_TERMINATE(mobj, kobj);
#endif  /* lint */
        dprintf("xmm_buffer_terminate\n");
        while (! queue_empty(&MOBJ->buffers)) {
                queue_remove_first(&MOBJ->buffers, buffer, buffer_t, mobjq);
                assert(buffer != BUFFER_NULL);
                queue_remove(&xmm_buffer_lru, buffer, buffer_t, lruq);
                offset = buffer->offset;
                copy = buffer->copy;
                dprintf("dealloc 0x%x copy 0x%x\n", offset, copy);
                xmm_buffer_free(buffer);
                kr = M_DATA_WRITE(mobj, kobj, offset, (vm_offset_t) copy,
                                  PAGE_SIZE);
                if (kr != KERN_SUCCESS) {
                        /*
                         * XXX
                         * What do we do here? (eternal buffering problem)
                         */
                        printf("xmm_buffer_terminate: kr=%d/0x%x\n", kr, kr);
                }
        }
        dprintf("terminate done\n");
        return M_TERMINATE(mobj, kobj);
}

void
m_buffer_deallocate(mobj)
        xmm_obj_t mobj;
{
        /* No layer-private state to release. */
}

buffer_t
xmm_buffer_lookup(mobj, offset)
        xmm_obj_t mobj;
        vm_offset_t offset;
{
        buffer_t buffer;

        /*
         * Search through buffers associated with this mobj.
         * There are typically very few buffers associated
         * with any given object, so it's not worth having
         * a hash table or anything tricky.
         */
        queue_iterate(&MOBJ->buffers, buffer, buffer_t, mobjq) {
                if (buffer->offset == offset) {
                        return buffer;
                }
        }
        return BUFFER_NULL;
}

vm_page_t
xmm_buffer_find_page(object, offset)
        vm_object_t object;
        vm_offset_t offset;
{
        vm_page_t m;

        /*
         * Try to find the page of data; if it is missing or unusable,
         * fault it in.
         */
        vm_object_lock(object);
        vm_object_paging_begin(object);
        m = vm_page_lookup(object, offset);
        if (m == VM_PAGE_NULL || m->busy || m->fictitious ||
            m->absent || m->error) {
                vm_prot_t result_prot;
                vm_page_t top_page;
                kern_return_t kr;

                for (;;) {
                        result_prot = VM_PROT_READ;
                        kr = vm_fault_page(object, offset,
                                           VM_PROT_READ, FALSE, FALSE,
                                           &result_prot, &m, &top_page,
                                           FALSE, (void (*)()) 0);
                        if (kr == VM_FAULT_MEMORY_SHORTAGE) {
                                VM_PAGE_WAIT((void (*)()) 0);
                                vm_object_lock(object);
                                vm_object_paging_begin(object);
                                continue;
                        }
                        if (kr != VM_FAULT_SUCCESS) {
                                /* XXX what about data_error? */
                                vm_object_lock(object);
                                vm_object_paging_begin(object);
                                continue;
                        }
                        if (top_page != VM_PAGE_NULL) {
                                vm_object_lock(object);
                                VM_PAGE_FREE(top_page);
                                vm_object_paging_end(object);
                                vm_object_unlock(object);
                        }
                        break;
                }
        }
        assert(m);
        assert(! m->busy);
        vm_object_paging_end(object);
        vm_object_unlock(object);
        return m;
}
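
/*
 * xmm_buffer_copy makes a physical copy of a one-page copy object.
 * The deep copy is needed because the buffered copy must remain
 * intact for its eventual write-back to the memory manager, while
 * the copy handed to K_DATA_SUPPLY in m_buffer_data_request is
 * consumed; as the comment there notes, avoiding this copy would
 * be a worthwhile optimization.
 */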

vm_map_copy_t
xmm_buffer_copy(old_copy)
        vm_map_copy_t old_copy;
{
        vm_map_copy_t new_copy;
        vm_page_t old_m, new_m;
        extern zone_t vm_map_copy_zone;

        dprintf("xmm_buffer_copy 0x%x type %d\n", old_copy, old_copy->type);

        /*
         * Allocate a new copy object.
         */
        new_copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
        if (new_copy == VM_MAP_COPY_NULL) {
                panic("xmm_buffer_copy: zalloc");
        }
        new_copy->type = VM_MAP_COPY_PAGE_LIST;
        new_copy->cpy_npages = 1;
        new_copy->offset = 0;
        new_copy->size = PAGE_SIZE;
        new_copy->cpy_cont = ((kern_return_t (*)()) 0);
        new_copy->cpy_cont_args = (char *) VM_MAP_COPYIN_ARGS_NULL;

        /*
         * Allocate a new page and insert it into new copy object.
         */
        new_m = vm_page_grab();
        if (new_m == VM_PAGE_NULL) {
                panic("xmm_buffer_copy: vm_page_grab");
        }
        new_copy->cpy_page_list[0] = new_m;

        /*
         * Find old page.
         */
        assert(old_copy->size == PAGE_SIZE);
        assert(old_copy->offset == 0);
        if (old_copy->type == VM_MAP_COPY_PAGE_LIST) {
                old_m = old_copy->cpy_page_list[0];
        } else {
                assert(old_copy->type == VM_MAP_COPY_OBJECT);
                old_m = xmm_buffer_find_page(old_copy->cpy_object, 0);
        }

        /*
         * Copy old page into new, and return new copy object.
         */
        pmap_copy_page(old_m->phys_addr, new_m->phys_addr);
        return new_copy;
}

kern_return_t
m_buffer_data_request(mobj, kobj, offset, length, desired_access)
        xmm_obj_t mobj;
        xmm_obj_t kobj;
        vm_offset_t offset;
        vm_size_t length;
        vm_prot_t desired_access;
{
        buffer_t buffer;
        vm_map_copy_t copy;

#ifdef  lint
        M_DATA_REQUEST(mobj, kobj, offset, length, desired_access);
#endif  /* lint */
        /*
         * If this page was not buffered, then pass the data request through.
         */
        buffer = xmm_buffer_lookup(mobj, offset);
        if (buffer == BUFFER_NULL) {
                return M_DATA_REQUEST(mobj, kobj, offset, length,
                                      desired_access);
        }

        /*
         * The page was buffered. Move it to the tail of the lru queue,
         * making it the most recently used buffer.
         */
        queue_remove(&xmm_buffer_lru, buffer, buffer_t, lruq);
        queue_enter(&xmm_buffer_lru, buffer, buffer_t, lruq);

        /*
         * This copy is unfortunate and could be avoided.
         */
        copy = xmm_buffer_copy(buffer->copy);

        /*
         * Return data.
         */
        return K_DATA_SUPPLY(kobj, offset, (vm_offset_t) copy, length,
                             VM_PROT_NONE, FALSE, XMM_REPLY_NULL);
}
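
/*
 * Note that a buffered page satisfies a data request entirely from
 * the buffer: the request is answered with K_DATA_SUPPLY and is
 * never passed down to the underlying memory manager.
 */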

/*
 * Write data through to memory manager;
 * discard any corresponding buffered data.
 */
kern_return_t
m_buffer_data_write(mobj, kobj, offset, data, length)
        xmm_obj_t mobj;
        xmm_obj_t kobj;
        vm_offset_t offset;
        vm_offset_t data;
        vm_size_t length;
{
        buffer_t buffer;

#ifdef  lint
        M_DATA_WRITE(mobj, kobj, offset, data, length);
#endif  /* lint */
        /*
         * Find and deallocate old data, if any.
         */
        buffer = xmm_buffer_lookup(mobj, offset);
        if (buffer != BUFFER_NULL) {
                vm_map_copy_t copy = buffer->copy;
                queue_remove(&MOBJ->buffers, buffer, buffer_t, mobjq);
                queue_remove(&xmm_buffer_lru, buffer, buffer_t, lruq);
                xmm_buffer_free(buffer);
                dprintf("discard copy=0x%x type=%d\n", copy, copy->type);
                vm_map_copy_discard(copy);
        }

        /*
         * Write new data.
         */
        return M_DATA_WRITE(mobj, kobj, offset, data, PAGE_SIZE);
}

/*
 * Buffer data to be written;
 * replace any preexisting corresponding buffered data.
 */
kern_return_t
m_buffer_data_write_buffered(mobj, kobj, offset, data, length)
        xmm_obj_t mobj;
        xmm_obj_t kobj;
        vm_offset_t offset;
        vm_offset_t data;
        vm_size_t length;
{
        buffer_t buffer;
        vm_map_copy_t copy = (vm_map_copy_t) data;

#ifdef  lint
        M_DATA_WRITE(mobj, kobj, offset, data, length);
#endif  /* lint */
        /*
         * Be assertive.
         */
        assert(mobj == kobj);
        assert(length == PAGE_SIZE);
        assert(copy->type == VM_MAP_COPY_OBJECT ||
               copy->type == VM_MAP_COPY_PAGE_LIST);
        assert(copy->offset == 0);
        assert(copy->size == PAGE_SIZE);

        /*
         * Check to see whether we have old data for this page.
         */
        buffer = xmm_buffer_lookup(mobj, offset);
        if (buffer != BUFFER_NULL) {
                /*
                 * Replace data in buffer.
                 */
                vm_map_copy_t old_copy = buffer->copy;
                buffer->copy = copy;
                dprintf("write_buffered: replace\n");

                /*
                 * Move buffer to the tail of the lru queue,
                 * making it the most recently used buffer.
                 */
                queue_remove(&xmm_buffer_lru, buffer, buffer_t, lruq);
                queue_enter(&xmm_buffer_lru, buffer, buffer_t, lruq);

                /*
                 * Discard old data, and return.
                 */
                dprintf("replace copy=0x%x[type=%d] with copy=0x%x[%d]\n",
                        old_copy, old_copy->type,
                        copy, copy->type);
                vm_map_copy_discard(old_copy);
                return KERN_SUCCESS;
        }

        /*
         * We don't have old data for this page, so allocate a new buffer
         * and enter it in the queues.
         */
        dprintf("write_buffered: new\n");
        buffer = xmm_buffer_alloc();
        assert(buffer != BUFFER_NULL);
        buffer->mobj = mobj;
        buffer->copy = copy;
        buffer->offset = offset;
        queue_enter(&MOBJ->buffers, buffer, buffer_t, mobjq);
        queue_enter(&xmm_buffer_lru, buffer, buffer_t, lruq);
        return KERN_SUCCESS;
}

kern_return_t
m_buffer_unbuffer_data(mobj, offset, length, should_clean, should_flush)
        xmm_obj_t mobj;
        vm_offset_t offset;
        vm_size_t length;
        boolean_t should_clean;
        boolean_t should_flush;
{
        buffer_t buffer;
        vm_map_copy_t copy;

        /*
         * If we have no buffered data for this page, return.
         */
        buffer = xmm_buffer_lookup(mobj, offset);
        if (buffer == BUFFER_NULL) {
                return KERN_SUCCESS;
        }

        /*
         * Dequeue and free buffer.
         */
        copy = buffer->copy;
        queue_remove(&MOBJ->buffers, buffer, buffer_t, mobjq);
        queue_remove(&xmm_buffer_lru, buffer, buffer_t, lruq);
        xmm_buffer_free(buffer);

        /*
         * Clean (write) or flush (deallocate) data.
         */
        assert(should_clean || should_flush);
        dprintf("unbuffer: clean=%d flush=%d\n", should_clean, should_flush);
        if (should_clean) {
                return M_DATA_WRITE(mobj, mobj, offset, (vm_offset_t) copy,
                                    PAGE_SIZE);
        } else {
                vm_map_copy_discard(copy);
                return KERN_SUCCESS;
        }
}
591
592 k_buffer_set_ready(kobj, object_ready, may_cache, copy_strategy,
593 use_old_pageout, memory_object_name, reply)
594 xmm_obj_t kobj;
595 boolean_t object_ready;
596 boolean_t may_cache;
597 memory_object_copy_strategy_t copy_strategy;
598 boolean_t use_old_pageout;
599 ipc_port_t memory_object_name;
600 xmm_reply_t reply;
601 {
602 #ifdef lint
603 K_SET_READY(kobj, object_ready, may_cache, copy_strategy,
604 use_old_pageout, memory_object_name, reply);
605 #endif lint
606 if (object_ready) {
607 KOBJ->ready = TRUE;
608 }
609 K_SET_READY(kobj, object_ready, may_cache, copy_strategy,
610 use_old_pageout, memory_object_name, reply);
611 return KERN_SUCCESS;
612 }
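
/*
 * XXX
 * MOBJ->ready is set FALSE in xmm_buffer_create and TRUE here, but
 * nothing else in this file consults it; presumably it is (or was)
 * intended for some initialization or termination check.
 */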

/*
 * These won't work if we are interposing. Do we care?
 */

kern_return_t
M_BUFFERED_DATA_WRITE(mobj, kobj, offset, data, length)
        xmm_obj_t mobj;
        xmm_obj_t kobj;
        vm_offset_t offset;
        vm_offset_t data;
        vm_size_t length;
{
        if (mobj->m_mobj->class == &buffer_class) {
                return m_buffer_data_write_buffered(mobj->m_mobj, kobj->m_kobj,
                                                    offset, data, length);
        } else {
                return M_DATA_WRITE(mobj, kobj, offset, data, length);
        }
}

kern_return_t
M_UNBUFFER_DATA(mobj, offset, length, should_clean, should_flush)
        xmm_obj_t mobj;
        vm_offset_t offset;
        vm_size_t length;
        boolean_t should_clean;
        boolean_t should_flush;
{
        if (mobj->m_mobj->class == &buffer_class) {
                return m_buffer_unbuffer_data(mobj->m_mobj, offset, length,
                                              should_clean, should_flush);
        } else {
                return KERN_SUCCESS;
        }
}

#include <sys/varargs.h>

int xmm_buffer_debug = 0;

/* VARARGS */
xmm_buffer_dprintf(fmt, va_alist)
        char *fmt;
        va_dcl
{
        va_list listp;

        if (xmm_buffer_debug) {
                va_start(listp);
                /*
                 * XXX
                 * Old-style varargs: this passes the address of the
                 * argument list to printf, which works only if the
                 * kernel printf cooperates in walking it.
                 */
                printf(fmt, &listp);
                va_end(listp);
        }
}