/*
 * FreeBSD/Linux Kernel Cross Reference
 * sys/device/blkio.c
 */
1 /*
2 * Mach Operating System
3 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26 /*
27 * HISTORY
28 * $Log: blkio.c,v $
29 * Revision 2.12 93/03/09 10:54:01 danner
30 * block_io_mmap() must return a vm_offset_t.
31 * [93/03/07 af]
32 *
33 * Revision 2.11 92/08/03 17:32:43 jfriedl
34 * removed silly prototypes
35 * [92/08/02 jfriedl]
36 *
37 * Revision 2.10 92/05/21 17:08:27 jfriedl
38 * Cleanup to quiet gcc warnings.
39 * Function args to block_io are void.
40 * [92/05/16 jfriedl]
41 *
42 * Revision 2.9 91/08/28 11:11:05 jsb
43 * From rpd: documented what (*max_count)() does in block_io.
44 * [91/08/22 15:30:18 jsb]
45 *
46 * Unlimited size is for write only.
47 * [91/08/12 17:44:31 dlb]
48 *
49 * Support synchronous waits for writes with vm continuations.
50 * Remove limit on size of operations.
51 * [91/08/12 17:22:59 dlb]
52 *
53 * Revision 2.8 91/07/30 15:45:38 rvb
54 * From rpd: temp(!!) jack up MAXPHYS to 256K to make newfs
55 * work. rpd is working on having blkio loop, doing NBYTES
56 * per interation, rather than having a fixed max size.
57 * [91/07/30 rvb]
58 *
59 * Revision 2.7 91/05/14 15:38:36 mrt
60 * Correcting copyright
61 *
62 * Revision 2.6 91/05/13 06:02:15 af
63 * Do not trim io_count to DEV_BSIZE, we know how to cope
64 * and this is used for tapes also [which might be block
65 * devices or not, depends].
66 * [91/05/12 15:48:48 af]
67 *
68 * Revision 2.5 91/02/05 17:07:46 mrt
69 * Changed to new Mach copyright
70 * [91/01/31 17:26:03 mrt]
71 *
72 * Revision 2.4 90/08/27 21:54:15 dbg
73 * Add disk sort routine.
74 * [90/06/20 dbg]
75 *
76 * Revision 2.3 89/09/08 11:22:56 dbg
77 * Converted to run in kernel task.
78 * [89/07/27 dbg]
79 *
80 */
81 /*
82 * Author: David B. Golub, Carnegie Mellon University
83 * Date: 7/89
84 *
85 * Block IO driven from generic kernel IO interface.
86 */
87 #include <mach/kern_return.h>
88
89 #include <device/param.h>
90 #include <device/device_types.h>
91 #include <device/io_req.h>
92 #include <device/ds_routines.h>
93
94
95
96 io_return_t block_io(strat, max_count, ior)
97 void (*strat)();
98 void (*max_count)();
99 register io_req_t ior;
100 {
101 register kern_return_t rc;
102 boolean_t wait = FALSE;
103
104 /*
105 * Make sure the size is not too large by letting max_count
106 * change io_count. If we are doing a write, then io_alloc_size
107 * preserves the original io_count.
108 */
109 (*max_count)(ior);
110
111 /*
112 * If reading, allocate memory. If writing, wire
113 * down the incoming memory.
114 */
115 if (ior->io_op & IO_READ)
116 rc = device_read_alloc(ior, (vm_size_t)ior->io_count);
117 else
118 rc = device_write_get(ior, &wait);
119
120 if (rc != KERN_SUCCESS)
121 return (rc);
122
123 /*
124 * Queue the operation for the device.
125 */
126 (*strat)(ior);
127
128 /*
129 * The io is now queued. Wait for it if needed.
130 */
131 if (wait) {
132 iowait(ior);
133 return(D_SUCCESS);
134 }
135
136 return (D_IO_QUEUED);
137 }
138
139 /*
140 * 'standard' max_count routine. VM continuations mean that this
141 * code can cope with arbitrarily-sized write operations (they won't be
142 * atomic, but any caller that cares will do the op synchronously).
143 */
144 #define MAX_PHYS (256 * 1024)
145
146 void minphys(ior)
147 register io_req_t ior;
148 {
149 if ((ior->io_op & (IO_WRITE | IO_READ | IO_OPEN)) == IO_WRITE)
150 return;
151
152 if (ior->io_count > MAX_PHYS)
153 ior->io_count = MAX_PHYS;
154 }
155
156 /*
157 * Dummy routine placed in device switch entries to indicate that
158 * block device may be mapped.
159 */
160 vm_offset_t block_io_mmap()
161 {
162 return (0);
163 }
164
165 /*
166 * Disk sort routine.
167 *
168 * We order the disk request chain so that the disk head will sweep
169 * back and forth across the disk. The chain is divided into two
170 * pieces, with requests ordered in opposite directions. Assume that
171 * the first part of the chain holds increasing cylinder numbers.
172 * If a new request has a higher cylinder number than the head of
173 * the chain, the disk head has not yet reached it; the new request
174 * can go in the first part of the chain. If the new request has
175 * a lower cylinder number, the disk head has already passed it and
176 * must catch it on the way back; so the new request goes in the
177 * second (descending) part of the chain.
178 * When all of the requests in the ascending portion are filled,
179 * the descending chain becomes the first chain, and requests above
180 * the first now go in the second part of the chain (ascending).
181 */
182
#define io_cylinder io_residual
				/* Disk drivers put cylinder here */
#define h_head io_next
#define h_tail io_prev
				/* IORs are chained here */

/*
 * Insert 'ior' into the request chain rooted at 'head' in elevator
 * order (see the block comment above): the chain is an ascending run
 * of cylinder numbers followed by a descending run, or vice versa,
 * and the new request is placed so the head sweep picks it up.
 *
 * head: list header whose io_next/io_prev fields (aliased h_head /
 *       h_tail) anchor the singly-linked request chain.
 * ior:  request to insert, ordered by its io_cylinder field.
 */
void disksort(head, ior)
	io_req_t	head;	/* (sort of) */
	io_req_t	ior;
{
	register int	cylinder = ior->io_cylinder;
	register io_req_t	next, prev;

	/* Empty chain: the new request becomes both head and tail. */
	next = head->h_head;
	if (next == 0) {
		head->h_head = ior;
		head->h_tail = ior;
		ior->io_next = 0;
		return;
	}

	/*
	 * Skip any leading run of equal cylinder numbers so that
	 * prev/next straddle the first place the cylinder changes,
	 * which tells us which direction the first run goes.
	 */
	do {
		prev = next;
		next = prev->io_next;
	} while (next != 0 && prev->io_cylinder == next->io_cylinder);

	/* Whole chain is one cylinder: just append at the tail. */
	if (next == 0) {
		prev->io_next = ior;
		head->h_tail = ior;
		ior->io_next = 0;
		return;
	}

	if (prev->io_cylinder < next->io_cylinder) {
		/*
		 * Ascending list first.
		 */
		if (prev->io_cylinder <= cylinder) {
			/*
			 * Insert in ascending list: advance while the
			 * run keeps ascending and stays at or below
			 * the new cylinder.
			 */
			while (next != 0 &&
				next->io_cylinder <= cylinder &&
				prev->io_cylinder <= next->io_cylinder)
			{
				prev = next;
				next = prev->io_next;
			}
		}
		else {
			/*
			 * Insert in descending list: first skip past
			 * the remainder of the ascending run ...
			 */
			do {
				prev = next;
				next = prev->io_next;
			} while (next != 0 &&
				prev->io_cylinder <= next->io_cylinder);

			/* ... then walk down until cylinder fits. */
			while (next != 0 &&
				next->io_cylinder >= cylinder)
			{
				prev = next;
				next = prev->io_next;
			}
		}
	}
	else {
		/*
		 * Descending first.  Mirror image of the case above.
		 */
		if (prev->io_cylinder >= cylinder) {
			/*
			 * Insert in descending list.
			 */
			while (next != 0 &&
				next->io_cylinder >= cylinder &&
				prev->io_cylinder >= next->io_cylinder)
			{
				prev = next;
				next = prev->io_next;
			}
		}
		else {
			/*
			 * Insert in ascending list: skip the rest of
			 * the descending run, then walk up until the
			 * new cylinder fits.
			 */
			do {
				prev = next;
				next = prev->io_next;
			} while (next != 0 &&
				prev->io_cylinder >= next->io_cylinder);
			while (next != 0 &&
				next->io_cylinder <= cylinder)
			{
				prev = next;
				next = prev->io_next;
			}
		}
	}
	/*
	 * Insert between prev and next.
	 */
	prev->io_next = ior;
	ior->io_next = next;
	if (next == 0) {
		/* At tail of list. */
		head->h_tail = ior;
	}
}
293
/* Cache object: 1024d5a89b815f02f58a08c84ef21d59 */