/*-
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/9.0/sys/kern/subr_devstat.c 223062 2011-06-13 22:08:24Z ken $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/devicestat.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/conf.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/atomic.h>

static int devstat_num_devs;
static long devstat_generation = 1;
static int devstat_version = DEVSTAT_VERSION;
static int devstat_current_devnumber;
static struct mtx devstat_mutex;
MTX_SYSINIT(devstat_mutex, &devstat_mutex, "devstat", MTX_DEF);

static struct devstatlist device_statq = STAILQ_HEAD_INITIALIZER(device_statq);
static struct devstat *devstat_alloc(void);
static void devstat_free(struct devstat *);
static void devstat_add_entry(struct devstat *ds, const void *dev_name,
			      int unit_number, uint32_t block_size,
			      devstat_support_flags flags,
			      devstat_type_flags device_type,
			      devstat_priority priority);

/*
 * Allocate a devstat and initialize it
 */
struct devstat *
devstat_new_entry(const void *dev_name,
		  int unit_number, uint32_t block_size,
		  devstat_support_flags flags,
		  devstat_type_flags device_type,
		  devstat_priority priority)
{
	struct devstat *ds;

	mtx_assert(&devstat_mutex, MA_NOTOWNED);

	ds = devstat_alloc();
	mtx_lock(&devstat_mutex);
	if (unit_number == -1) {
		ds->id = dev_name;
		binuptime(&ds->creation_time);
		devstat_generation++;
	} else {
		devstat_add_entry(ds, dev_name, unit_number, block_size,
				  flags, device_type, priority);
	}
	mtx_unlock(&devstat_mutex);
	return (ds);
}
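
/*
 * Typical registration (hypothetical sketch; "xd" and its softc are made
 * up for illustration): a disk driver calls this at attach time with the
 * constants from <sys/devicestat.h>:
 *
 *	sc->stats = devstat_new_entry("xd", unit, DEV_BSIZE,
 *	    DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT,
 *	    DEVSTAT_PRIORITY_DISK);
 */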

/*
 * Take a malloced and zeroed devstat structure given to us, fill it in
 * and add it to the queue of devices.
 */
static void
devstat_add_entry(struct devstat *ds, const void *dev_name,
		  int unit_number, uint32_t block_size,
		  devstat_support_flags flags,
		  devstat_type_flags device_type,
		  devstat_priority priority)
{
	struct devstatlist *devstat_head;
	struct devstat *ds_tmp;

	mtx_assert(&devstat_mutex, MA_OWNED);
	devstat_num_devs++;

	devstat_head = &device_statq;

	/*
	 * Priority sort.  Each driver passes in its priority when it adds
	 * its devstat entry.  Drivers are sorted first by priority, and
	 * then by probe order.
	 *
	 * For the first device, we just insert it, since the priority
	 * doesn't really matter yet.  Subsequent devices are inserted into
	 * the list using the order outlined above.
	 */
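	/*
	 * Worked example (hypothetical priorities): inserting an entry
	 * with priority 3 into a list whose entries have priorities
	 * 4, 4, 2, 1 places it after the second 4 and before the 2;
	 * inserting another 4 lands after the existing 4s, preserving
	 * probe order among equal priorities.
	 */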
	if (devstat_num_devs == 1)
		STAILQ_INSERT_TAIL(devstat_head, ds, dev_links);
	else {
		STAILQ_FOREACH(ds_tmp, devstat_head, dev_links) {
			struct devstat *ds_next;

			ds_next = STAILQ_NEXT(ds_tmp, dev_links);

			/*
			 * If we find a break between higher and lower
			 * priority items, and if this item fits in the
			 * break, insert it.  This also applies if the
			 * "lower priority item" is the end of the list.
			 */
			if ((priority <= ds_tmp->priority)
			 && ((ds_next == NULL)
			   || (priority > ds_next->priority))) {
				STAILQ_INSERT_AFTER(devstat_head, ds_tmp, ds,
						    dev_links);
				break;
			} else if (priority > ds_tmp->priority) {
				/*
				 * If this is the case, we should be able
				 * to insert ourselves at the head of the
				 * list.  If we can't, something is wrong.
				 */
				if (ds_tmp == STAILQ_FIRST(devstat_head)) {
					STAILQ_INSERT_HEAD(devstat_head,
							   ds, dev_links);
					break;
				} else {
					STAILQ_INSERT_TAIL(devstat_head,
							   ds, dev_links);
					printf("devstat_add_entry: HELP! "
					       "sorting problem detected "
					       "for name %p unit %d\n",
					       dev_name, unit_number);
					break;
				}
			}
		}
	}

	ds->device_number = devstat_current_devnumber++;
	ds->unit_number = unit_number;
	strlcpy(ds->device_name, dev_name, DEVSTAT_NAME_LEN);
	ds->block_size = block_size;
	ds->flags = flags;
	ds->device_type = device_type;
	ds->priority = priority;
	binuptime(&ds->creation_time);
	devstat_generation++;
}

/*
 * Remove a devstat structure from the list of devices.
 */
void
devstat_remove_entry(struct devstat *ds)
{
	struct devstatlist *devstat_head;

	mtx_assert(&devstat_mutex, MA_NOTOWNED);
	if (ds == NULL)
		return;

	mtx_lock(&devstat_mutex);

	devstat_head = &device_statq;

	/* Remove this entry from the devstat queue */
	atomic_add_acq_int(&ds->sequence1, 1);
	if (ds->id == NULL) {
		devstat_num_devs--;
		STAILQ_REMOVE(devstat_head, ds, devstat, dev_links);
	}
	devstat_free(ds);
	devstat_generation++;
	mtx_unlock(&devstat_mutex);
}

/*
 * Record a transaction start.
 *
 * See comments for devstat_end_transaction().  Ordering is very important
 * here.
 */
void
devstat_start_transaction(struct devstat *ds, struct bintime *now)
{

	mtx_assert(&devstat_mutex, MA_NOTOWNED);

	/* sanity check */
	if (ds == NULL)
		return;

	atomic_add_acq_int(&ds->sequence1, 1);
	/*
	 * We only want to set the start time when we are going from idle
	 * to busy.  The start time is really the start of the latest busy
	 * period.
	 */
	if (ds->start_count == ds->end_count) {
		if (now != NULL)
			ds->busy_from = *now;
		else
			binuptime(&ds->busy_from);
	}
	ds->start_count++;
	atomic_add_rel_int(&ds->sequence0, 1);
}

/*
 * Record a transaction start for a bio, stamping the dispatch time into
 * the bio itself so the completion path can compute the duration.
 */
void
devstat_start_transaction_bio(struct devstat *ds, struct bio *bp)
{

	mtx_assert(&devstat_mutex, MA_NOTOWNED);

	/* sanity check */
	if (ds == NULL)
		return;

	binuptime(&bp->bio_t0);
	devstat_start_transaction(ds, &bp->bio_t0);
}

/*
 * Record the ending of a transaction, and increment the various counters.
 *
 * Ordering in this function, and in devstat_start_transaction() is VERY
 * important.  The idea here is to run without locks, so we are very
 * careful to only modify some fields on the way "down" (i.e. at
 * transaction start) and some fields on the way "up" (i.e. at transaction
 * completion).  One exception is busy_from, which we only modify in
 * devstat_start_transaction() when there are no outstanding transactions,
 * and thus it can't be modified in devstat_end_transaction()
 * simultaneously.
 *
 * The sequence0 and sequence1 fields are provided to enable an application
 * spying on the structures with mmap(2) to tell when a structure is in a
 * consistent state or not.  (A reader-side sketch of this check follows
 * the function below.)
 *
 * For this to work 100% reliably, it is important that the two fields
 * are at opposite ends of the structure and that they are incremented
 * in the opposite order of how a memcpy(3) in userland would copy them.
 * We assume that the copying happens front to back, but there is actually
 * no way short of writing your own memcpy(3) replacement to guarantee
 * this will be the case.
 *
 * In addition to this, being a kind of lock, they must be updated with
 * atomic instructions using appropriate memory barriers.
 */
void
devstat_end_transaction(struct devstat *ds, uint32_t bytes,
			devstat_tag_type tag_type, devstat_trans_flags flags,
			struct bintime *now, struct bintime *then)
{
	struct bintime dt, lnow;

	/* sanity check */
	if (ds == NULL)
		return;

	if (now == NULL) {
		now = &lnow;
		binuptime(now);
	}

	atomic_add_acq_int(&ds->sequence1, 1);
	/* Update byte and operations counts */
	ds->bytes[flags] += bytes;
	ds->operations[flags]++;

	/*
	 * Keep a count of the various tag types sent.
	 */
	if ((ds->flags & DEVSTAT_NO_ORDERED_TAGS) == 0 &&
	    tag_type != DEVSTAT_TAG_NONE)
		ds->tag_types[tag_type]++;

	if (then != NULL) {
		/* Update duration of operations */
		dt = *now;
		bintime_sub(&dt, then);
		bintime_add(&ds->duration[flags], &dt);
	}

	/* Accumulate busy time */
	dt = *now;
	bintime_sub(&dt, &ds->busy_from);
	bintime_add(&ds->busy_time, &dt);
	ds->busy_from = *now;

	ds->end_count++;
	atomic_add_rel_int(&ds->sequence0, 1);
}
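
/*
 * Reader-side sketch (illustrative; "mapped_ds" is a hypothetical pointer
 * into a read-only devstat mapping).  The kernel bumps sequence1 before an
 * update and sequence0 after it, and memcpy(3) is assumed to copy front to
 * back, so any update that overlaps the copy leaves the snapshot with
 * sequence0 != sequence1; a consistent snapshot has the two fields equal.
 *
 *	#include <string.h>
 *
 *	struct devstat snap;
 *
 *	do {
 *		memcpy(&snap, mapped_ds, sizeof(snap));
 *	} while (snap.sequence0 != snap.sequence1);
 *	// snap is now a self-consistent copy of the kernel's structure
 */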

void
devstat_end_transaction_bio(struct devstat *ds, struct bio *bp)
{
	devstat_trans_flags flg;

	/* sanity check */
	if (ds == NULL)
		return;

	if (bp->bio_cmd == BIO_DELETE)
		flg = DEVSTAT_FREE;
	else if (bp->bio_cmd == BIO_READ)
		flg = DEVSTAT_READ;
	else if (bp->bio_cmd == BIO_WRITE)
		flg = DEVSTAT_WRITE;
	else
		flg = DEVSTAT_NO_DATA;

	devstat_end_transaction(ds, bp->bio_bcount - bp->bio_resid,
				DEVSTAT_TAG_SIMPLE, flg, NULL, &bp->bio_t0);
}
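
/*
 * Typical driver usage (hypothetical sketch; "xd" and its softc are made
 * up).  A block driver brackets each bio with the two helpers above:
 * devstat_start_transaction_bio() stamps the dispatch time into bio_t0,
 * and devstat_end_transaction_bio() folds the elapsed time and transfer
 * counts into the devstat entry on completion.
 *
 *	static void
 *	xd_strategy(struct bio *bp)
 *	{
 *		struct xd_softc *sc = bp->bio_disk->d_drv1;
 *
 *		devstat_start_transaction_bio(sc->stats, bp);
 *		// ... hand the request to the hardware ...
 *	}
 *
 *	static void
 *	xd_done(struct bio *bp)
 *	{
 *		struct xd_softc *sc = bp->bio_disk->d_drv1;
 *
 *		devstat_end_transaction_bio(sc->stats, bp);
 *		biodone(bp);
 *	}
 */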

/*
 * This is the sysctl handler for the devstat package.  The data pushed out
 * on the kern.devstat.all sysctl variable consists of the current devstat
 * generation number, and then an array of devstat structures, one for each
 * device in the system.
 *
 * This is more cryptic than obvious, but basically we neither can nor
 * want to hold the devstat_mutex for any amount of time, so we grab it
 * only when we need to and keep an eye on devstat_generation all the time.
 */
static int
sysctl_devstat(SYSCTL_HANDLER_ARGS)
{
	int error;
	long mygen;
	struct devstat *nds;

	mtx_assert(&devstat_mutex, MA_NOTOWNED);

	/*
	 * XXX devstat_generation should really be "volatile" but that
	 * XXX freaks out the sysctl macro below.  The places where we
	 * XXX change it and inspect it are bracketed in the mutex which
	 * XXX guarantees us proper write barriers.  I don't believe the
	 * XXX compiler is allowed to optimize mygen away across calls
	 * XXX to other functions, so the following is believed to be safe.
	 */
	mygen = devstat_generation;

	error = SYSCTL_OUT(req, &mygen, sizeof(mygen));

	if (devstat_num_devs == 0)
		return (0);

	if (error != 0)
		return (error);

	mtx_lock(&devstat_mutex);
	nds = STAILQ_FIRST(&device_statq);
	if (mygen != devstat_generation)
		error = EBUSY;
	mtx_unlock(&devstat_mutex);

	if (error != 0)
		return (error);

	while (nds != NULL) {
		error = SYSCTL_OUT(req, nds, sizeof(struct devstat));
		if (error != 0)
			return (error);
		mtx_lock(&devstat_mutex);
		if (mygen != devstat_generation)
			error = EBUSY;
		else
			nds = STAILQ_NEXT(nds, dev_links);
		mtx_unlock(&devstat_mutex);
		if (error != 0)
			return (error);
	}
	return (error);
}
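
/*
 * Userland consumer sketch (illustrative; error handling trimmed, variable
 * names hypothetical).  The buffer starts with the generation number (a
 * long), followed by the devstat array.  The caller sizes the buffer from
 * kern.devstat.numdevs and retries when the handler returns EBUSY because
 * the device list changed mid-read.  In practice the devstat(3) library
 * wraps this protocol.
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <sys/devicestat.h>
 *	#include <err.h>
 *	#include <errno.h>
 *	#include <stdlib.h>
 *
 *	int numdevs;
 *	size_t len = sizeof(numdevs);
 *	if (sysctlbyname("kern.devstat.numdevs", &numdevs, &len,
 *	    NULL, 0) == -1)
 *		err(1, "kern.devstat.numdevs");
 *
 *	char *buf = malloc(sizeof(long) + numdevs * sizeof(struct devstat));
 *	for (;;) {
 *		len = sizeof(long) + numdevs * sizeof(struct devstat);
 *		if (sysctlbyname("kern.devstat.all", buf, &len, NULL, 0) == 0)
 *			break;
 *		if (errno != EBUSY)
 *			err(1, "kern.devstat.all");
 *	}
 *	long generation = *(long *)buf;
 *	struct devstat *devs = (struct devstat *)(buf + sizeof(long));
 */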

/*
 * Sysctl entries for devstat.  The first one is a node that all the rest
 * hang off of.
 */
SYSCTL_NODE(_kern, OID_AUTO, devstat, CTLFLAG_RD, NULL, "Device Statistics");

SYSCTL_PROC(_kern_devstat, OID_AUTO, all, CTLFLAG_RD|CTLTYPE_OPAQUE,
    NULL, 0, sysctl_devstat, "S,devstat", "All devices in the devstat list");
/*
 * Export the number of devices in the system so that userland utilities
 * can determine how much memory to allocate to hold all the devices.
 */
SYSCTL_INT(_kern_devstat, OID_AUTO, numdevs, CTLFLAG_RD,
    &devstat_num_devs, 0, "Number of devices in the devstat list");
SYSCTL_LONG(_kern_devstat, OID_AUTO, generation, CTLFLAG_RD,
    &devstat_generation, 0, "Devstat list generation");
SYSCTL_INT(_kern_devstat, OID_AUTO, version, CTLFLAG_RD,
    &devstat_version, 0, "Devstat list version number");

/*
 * Allocator for struct devstat structures.  We sub-allocate these from pages
 * which we get from malloc.  These pages are exported for mmap(2)'ing through
 * a miniature device driver.
 */

#define statsperpage (PAGE_SIZE / sizeof(struct devstat))

static d_mmap_t devstat_mmap;

static struct cdevsw devstat_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_NEEDGIANT,
	.d_mmap =	devstat_mmap,
	.d_name =	"devstat",
};

struct statspage {
	TAILQ_ENTRY(statspage)	list;
	struct devstat		*stat;
	u_int			nfree;
};

static TAILQ_HEAD(, statspage)	pagelist = TAILQ_HEAD_INITIALIZER(pagelist);
static MALLOC_DEFINE(M_DEVSTAT, "devstat", "Device statistics");

/*
 * Resolve the Nth page of devstat entries for a read-only user mapping.
 * The page offset walks the pagelist in order, one entry per PAGE_SIZE.
 */
static int
devstat_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
    int nprot, vm_memattr_t *memattr)
{
	struct statspage *spp;

	if (nprot != VM_PROT_READ)
		return (-1);
	TAILQ_FOREACH(spp, &pagelist, list) {
		if (offset == 0) {
			*paddr = vtophys(spp->stat);
			return (0);
		}
		offset -= PAGE_SIZE;
	}
	return (-1);
}
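
/*
 * mmap(2) consumer sketch (illustrative): each page of the mapping holds
 * statsperpage devstat entries, in the order the pages appear on the
 * pagelist above.  Assuming DEVSTAT_DEVICE_NAME expands to "devstat", a
 * userland reader could map the first page read-only like this:
 *
 *	#include <sys/mman.h>
 *	#include <sys/devicestat.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/dev/" DEVSTAT_DEVICE_NAME, O_RDONLY);
 *	struct devstat *page = mmap(NULL, getpagesize(), PROT_READ,
 *	    MAP_SHARED, fd, 0);
 *	// page[0] .. page[statsperpage - 1] are the first page's entries
 */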

/*
 * Hand out the next free devstat entry, allocating and publishing a new
 * page of entries if every existing page is full.  The devstat device
 * node is created on first use.
 */
static struct devstat *
devstat_alloc(void)
{
	struct devstat *dsp;
	struct statspage *spp, *spp2;
	u_int u;
	static int once;

	mtx_assert(&devstat_mutex, MA_NOTOWNED);
	if (!once) {
		make_dev_credf(MAKEDEV_ETERNAL | MAKEDEV_CHECKNAME,
		    &devstat_cdevsw, 0, NULL, UID_ROOT, GID_WHEEL, 0400,
		    DEVSTAT_DEVICE_NAME);
		once = 1;
	}
	spp2 = NULL;
	mtx_lock(&devstat_mutex);
	for (;;) {
		TAILQ_FOREACH(spp, &pagelist, list) {
			if (spp->nfree > 0)
				break;
		}
		if (spp != NULL)
			break;
		mtx_unlock(&devstat_mutex);
		spp2 = malloc(sizeof *spp, M_DEVSTAT, M_ZERO | M_WAITOK);
		spp2->stat = malloc(PAGE_SIZE, M_DEVSTAT, M_ZERO | M_WAITOK);
		spp2->nfree = statsperpage;

		/*
		 * If free statspages were added while the lock was released,
		 * just reuse them.
		 */
		mtx_lock(&devstat_mutex);
		TAILQ_FOREACH(spp, &pagelist, list)
			if (spp->nfree > 0)
				break;
		if (spp == NULL) {
			spp = spp2;

			/*
			 * It would make more sense to add the new page at
			 * the head, but the order on the list determines
			 * the sequence of the mapping, so we can't do that.
			 */
			TAILQ_INSERT_TAIL(&pagelist, spp, list);
		} else
			break;
	}
	dsp = spp->stat;
	for (u = 0; u < statsperpage; u++) {
		if (dsp->allocated == 0)
			break;
		dsp++;
	}
	spp->nfree--;
	dsp->allocated = 1;
	mtx_unlock(&devstat_mutex);
	if (spp2 != NULL && spp2 != spp) {
		free(spp2->stat, M_DEVSTAT);
		free(spp2, M_DEVSTAT);
	}
	return (dsp);
}

/*
 * Return a devstat entry to the free pool of its containing statspage.
 * Zeroing the entry also clears its "allocated" flag.
 */
static void
devstat_free(struct devstat *dsp)
{
	struct statspage *spp;

	mtx_assert(&devstat_mutex, MA_OWNED);
	bzero(dsp, sizeof *dsp);
	TAILQ_FOREACH(spp, &pagelist, list) {
		if (dsp >= spp->stat && dsp < (spp->stat + statsperpage)) {
			spp->nfree++;
			return;
		}
	}
}

SYSCTL_INT(_debug_sizeof, OID_AUTO, devstat, CTLFLAG_RD,
    NULL, sizeof(struct devstat), "sizeof(struct devstat)");