sys/kern/uipc_jumbo.c
/*-
 * Copyright (c) 1997, Duke University
 * All rights reserved.
 *
 * Author:
 *         Andrew Gallatin <gallatin@cs.duke.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of Duke University may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY DUKE UNIVERSITY ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL DUKE UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: releng/5.0/sys/kern/uipc_jumbo.c 100448 2002-07-21 19:06:46Z alc $
 */
/*
 * This is a set of routines for allocating large-sized mbuf payload
 * areas, and is primarily intended for use in receive-side mbuf
 * allocation.
 */

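/*
 * In outline: jumbo_vm_init() reserves a JUMBO_MAX_PAGES-page window
 * of kernel virtual address space and creates a private VM object to
 * back it.  Each page allocated from that object gets the pindex of
 * its KVA slot, so a mapped buffer's address is always
 * jumbo_basekva + ptoa(pg->pindex).  Free and in-use slots are kept on
 * two singly-linked lists protected by jumbo_mutex.
 */
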
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/uio.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_param.h>
#include <vm/vm_pageout.h>
#include <sys/vmmeter.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <sys/proc.h>
#include <sys/jumbo.h>

/*
 * XXX this may be too high or too low.  At the common 4 KB page size,
 * 3072 pages reserve a 12 MB window of kernel virtual address space.
 */
#define JUMBO_MAX_PAGES 3072

struct jumbo_kmap {
	vm_offset_t kva;
	SLIST_ENTRY(jumbo_kmap) entries;	/* Singly-linked list. */
};

static SLIST_HEAD(jumbo_kmap_head, jumbo_kmap) jumbo_kmap_free,
    jumbo_kmap_inuse;

static struct mtx jumbo_mutex;
MTX_SYSINIT(jumbo_lock, &jumbo_mutex, "jumbo mutex", MTX_DEF);

static struct vm_object *jumbo_vm_object;
static unsigned long jumbo_vmuiomove_pgs_freed = 0;
#if 0
static int jumbo_vm_wakeup_wanted = 0;
#endif
vm_offset_t jumbo_basekva;

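/*
 * One-time setup: create the backing VM object, reserve the KVA
 * window, and put a jumbo_kmap entry for every page-sized slot on the
 * free list.  Returns 1 on success (or if already initialized) and 0
 * if the VM object or the KVA reservation cannot be obtained; panics
 * only if not even one slot entry can be malloc'd.
 */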
int
jumbo_vm_init(void)
{
	int i;
	struct jumbo_kmap *entry;

	mtx_lock(&jumbo_mutex);

	if (jumbo_vm_object != NULL) {
		mtx_unlock(&jumbo_mutex);
		return (1);
	}

	/* Allocate our VM object. */
	jumbo_vm_object = vm_object_allocate_wait(OBJT_DEFAULT, JUMBO_MAX_PAGES,
	    M_NOWAIT);

	if (jumbo_vm_object == NULL) {
		mtx_unlock(&jumbo_mutex);
		return (0);
	}

	SLIST_INIT(&jumbo_kmap_free);
	SLIST_INIT(&jumbo_kmap_inuse);

	/* Grab some kernel virtual address space. */
	jumbo_basekva = kmem_alloc_pageable(kernel_map,
	    PAGE_SIZE * JUMBO_MAX_PAGES);
	if (jumbo_basekva == 0) {
		vm_object_deallocate(jumbo_vm_object);
		jumbo_vm_object = NULL;
		mtx_unlock(&jumbo_mutex);
		return (0);
	}
	for (i = 0; i < JUMBO_MAX_PAGES; i++) {
		entry = malloc(sizeof(struct jumbo_kmap), M_TEMP, M_NOWAIT);
		if (!entry && !i) {
			mtx_unlock(&jumbo_mutex);
			panic("jumbo_vm_init: unable to allocate kvas");
		} else if (!entry) {
			printf("warning: jumbo_vm_init allocated only %d kvas\n",
			    i);
			mtx_unlock(&jumbo_mutex);
			return (1);
		}
		entry->kva = jumbo_basekva + (vm_offset_t)i * PAGE_SIZE;
		SLIST_INSERT_HEAD(&jumbo_kmap_free, entry, entries);
	}
	mtx_unlock(&jumbo_mutex);
	return (1);
}

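#if 0
/*
 * Sketch only (not compiled in): a driver wanting jumbo receive
 * buffers would do something like this once at attach time, falling
 * back to ordinary mbuf clusters if initialization fails.  The
 * function name and fallback policy here are illustrative, not part
 * of this interface.
 */
static int
jumbo_example_attach(void)
{

	if (jumbo_vm_init() == 0) {
		printf("jumbo buffers unavailable, using clusters\n");
		return (ENOMEM);
	}
	return (0);
}
#endif

/*
 * Free routine for mbuf external storage backed by a jumbo page.  Its
 * (void *, void *) signature matches the mbuf ext_free interface, so
 * it can be passed directly to MEXTADD(); the args pointer is unused.
 * The page is only freed if nothing still holds it.
 */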
void
jumbo_freem(void *addr, void *args)
{
	vm_page_t frame;

	frame = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)addr));

	/*
	 * Need Giant for looking at the hold count below.  Convert this
	 * to the vm mutex once the VM code has been moved out from under
	 * Giant.
	 */
	GIANT_REQUIRED;

	if (frame->hold_count == 0)
		jumbo_pg_free((vm_offset_t)addr);
	else
		printf("jumbo_freem: page %p has unexpected hold count %d\n",
		    frame, frame->hold_count);
}

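/*
 * Detach a page from jumbo_vm_object without freeing it and recycle
 * its KVA slot.  The caller takes ownership of the page; panics if the
 * page does not belong to jumbo_vm_object.
 */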
void
jumbo_pg_steal(vm_page_t pg)
{
	vm_offset_t addr;
	struct jumbo_kmap *entry;

	addr = ptoa(pg->pindex) + jumbo_basekva;

	if (pg->object != jumbo_vm_object)
		panic("jumbo_pg_steal: stealing a non jumbo_vm_object page");
	vm_page_remove(pg);

	mtx_lock(&jumbo_mutex);

	/* Unmap the page and move its KVA slot back to the free list. */
	pmap_qremove(addr, 1);
	entry = SLIST_FIRST(&jumbo_kmap_inuse);
	entry->kva = addr;
	SLIST_REMOVE_HEAD(&jumbo_kmap_inuse, entries);
	SLIST_INSERT_HEAD(&jumbo_kmap_free, entry, entries);

	mtx_unlock(&jumbo_mutex);

#if 0
	if (jumbo_vm_wakeup_wanted)
		wakeup(jumbo_vm_object);
#endif
}

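/*
 * Take a slot off the free list, allocate a page at the matching
 * pindex in jumbo_vm_object, and map it at the slot's KVA.  Returns
 * NULL if no slot or no page is available.
 */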
vm_page_t
jumbo_pg_alloc(void)
{
	vm_page_t pg;
	vm_pindex_t pindex;
	struct jumbo_kmap *entry;

	pg = NULL;
	mtx_lock(&jumbo_mutex);

	entry = SLIST_FIRST(&jumbo_kmap_free);
	if (entry != NULL) {
		/* The slot's offset in the KVA window gives the pindex. */
		pindex = atop(entry->kva - jumbo_basekva);
		pg = vm_page_alloc(jumbo_vm_object, pindex, VM_ALLOC_INTERRUPT);
		if (pg != NULL) {
			SLIST_REMOVE_HEAD(&jumbo_kmap_free, entries);
			SLIST_INSERT_HEAD(&jumbo_kmap_inuse, entry, entries);
			pmap_qenter(entry->kva, &pg, 1);
		}
	}
	mtx_unlock(&jumbo_mutex);
	return (pg);
}

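#if 0
/*
 * Sketch only (not compiled in): how a receive path might pair
 * jumbo_pg_alloc() with the mbuf external-storage interface.  The
 * helper name, the EXT_NET_DRV type, and the error handling are
 * illustrative assumptions layered on the 5.x MEXTADD() API, not part
 * of this file.  (Enabling this would also require
 * #include <sys/mbuf.h>.)
 */
static struct mbuf *
jumbo_example_rx_mbuf(void)
{
	struct mbuf *m;
	vm_page_t pg;
	caddr_t va;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	pg = jumbo_pg_alloc();
	if (pg == NULL) {
		m_freem(m);
		return (NULL);
	}
	/* The mapped address follows from the pindex <-> KVA invariant. */
	va = (caddr_t)(jumbo_basekva + ptoa(pg->pindex));
	MEXTADD(m, va, PAGE_SIZE, jumbo_freem, NULL, 0, EXT_NET_DRV);
	return (m);
}
#endif

/*
 * Release the KVA slot for a mapped jumbo buffer and free the backing
 * page.  If the page has already been moved out of jumbo_vm_object
 * (e.g. renamed into another object by a zero-copy uiomove path), only
 * the slot is reclaimed and a counter is bumped.
 */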
void
jumbo_pg_free(vm_offset_t addr)
{
	struct jumbo_kmap *entry;
	vm_offset_t paddr;
	vm_page_t pg;

	paddr = pmap_kextract(addr);
	pg = PHYS_TO_VM_PAGE(paddr);

	if (pg->object != jumbo_vm_object) {
		/* The page was renamed away from us; just count it. */
		jumbo_vmuiomove_pgs_freed++;
#if 0
		if (vm_page_lookup(jumbo_vm_object, atop(addr - jumbo_basekva)))
			panic("vm_page_rename didn't");
		printf("freeing uiomoved pg:\t pindex = %d, paddr = 0x%lx\n",
		    atop(addr - jumbo_basekva), paddr);
#endif
	} else {
		vm_page_lock_queues();
		vm_page_busy(pg);	/* vm_page_free() wants pages busy. */
		vm_page_free(pg);
		vm_page_unlock_queues();
	}

	mtx_lock(&jumbo_mutex);

	pmap_qremove(addr, 1);
	entry = SLIST_FIRST(&jumbo_kmap_inuse);
	entry->kva = addr;
	SLIST_REMOVE_HEAD(&jumbo_kmap_inuse, entries);
	SLIST_INSERT_HEAD(&jumbo_kmap_free, entry, entries);

	mtx_unlock(&jumbo_mutex);

#if 0
	if (jumbo_vm_wakeup_wanted)
		wakeup(jumbo_vm_object);
#endif
}