/*-
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.2/sys/kern/subr_devstat.c 273736 2014-10-27 14:38:00Z hselasky $");

#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/devicestat.h>
#include <sys/sdt.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/conf.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/atomic.h>

SDT_PROVIDER_DEFINE(io);

SDT_PROBE_DEFINE2(io, , , start, "struct bio *", "struct devstat *");
SDT_PROBE_DEFINE2(io, , , done, "struct bio *", "struct devstat *");
SDT_PROBE_DEFINE2(io, , , wait__start, "struct bio *",
    "struct devstat *");
SDT_PROBE_DEFINE2(io, , , wait__done, "struct bio *",
    "struct devstat *");

#define	DTRACE_DEVSTAT_START()		SDT_PROBE2(io, , , start, NULL, ds)
#define	DTRACE_DEVSTAT_BIO_START()	SDT_PROBE2(io, , , start, bp, ds)
#define	DTRACE_DEVSTAT_DONE()		SDT_PROBE2(io, , , done, NULL, ds)
#define	DTRACE_DEVSTAT_BIO_DONE()	SDT_PROBE2(io, , , done, bp, ds)
#define	DTRACE_DEVSTAT_WAIT_START()	SDT_PROBE2(io, , , wait__start, NULL, ds)
#define	DTRACE_DEVSTAT_WAIT_DONE()	SDT_PROBE2(io, , , wait__done, NULL, ds)
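
/*
 * Example (illustrative sketch, not part of this file): the SDT probes
 * above surface in userland as the DTrace "io" provider, so I/O activity
 * can be observed with a one-liner along these lines:
 *
 *	dtrace -n 'io:::start { @starts = count(); }'
 *
 * How the probe arguments are translated is defined by the dtrace(1)
 * translators, not by this file.
 */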

static int devstat_num_devs;
static long devstat_generation = 1;
static int devstat_version = DEVSTAT_VERSION;
static int devstat_current_devnumber;
static struct mtx devstat_mutex;
MTX_SYSINIT(devstat_mutex, &devstat_mutex, "devstat", MTX_DEF);

static struct devstatlist device_statq = STAILQ_HEAD_INITIALIZER(device_statq);
static struct devstat *devstat_alloc(void);
static void devstat_free(struct devstat *);
static void devstat_add_entry(struct devstat *ds, const void *dev_name,
		       int unit_number, uint32_t block_size,
		       devstat_support_flags flags,
		       devstat_type_flags device_type,
		       devstat_priority priority);

/*
 * Allocate a devstat and initialize it
 */
struct devstat *
devstat_new_entry(const void *dev_name,
		  int unit_number, uint32_t block_size,
		  devstat_support_flags flags,
		  devstat_type_flags device_type,
		  devstat_priority priority)
{
	struct devstat *ds;

	mtx_assert(&devstat_mutex, MA_NOTOWNED);

	ds = devstat_alloc();
	mtx_lock(&devstat_mutex);
	if (unit_number == -1) {
		ds->unit_number = unit_number;
		ds->id = dev_name;
		binuptime(&ds->creation_time);
		devstat_generation++;
	} else {
		devstat_add_entry(ds, dev_name, unit_number, block_size,
				  flags, device_type, priority);
	}
	mtx_unlock(&devstat_mutex);
	return (ds);
}
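
/*
 * Example (hedged sketch): a typical disk driver registers itself at
 * attach time roughly as follows.  The "foo" name, softc field and unit
 * variable are hypothetical; the flag, type and priority values are real
 * constants from <sys/devicestat.h>.
 *
 *	sc->stats = devstat_new_entry("foo", unit, DEV_BSIZE,
 *	    DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT,
 *	    DEVSTAT_PRIORITY_DISK);
 *
 * Passing a unit_number of -1 instead registers an anonymous entry that
 * is identified only by the dev_name pointer (stored in ds->id above).
 */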

/*
 * Take a malloced and zeroed devstat structure given to us, fill it in
 * and add it to the queue of devices.
 */
static void
devstat_add_entry(struct devstat *ds, const void *dev_name,
		  int unit_number, uint32_t block_size,
		  devstat_support_flags flags,
		  devstat_type_flags device_type,
		  devstat_priority priority)
{
	struct devstatlist *devstat_head;
	struct devstat *ds_tmp;

	mtx_assert(&devstat_mutex, MA_OWNED);
	devstat_num_devs++;

	devstat_head = &device_statq;

	/*
	 * Priority sort.  Each driver passes in its priority when it adds
	 * its devstat entry.  Drivers are sorted first by priority, and
	 * then by probe order.
	 *
	 * For the first device, we just insert it, since the priority
	 * doesn't really matter yet.  Subsequent devices are inserted into
	 * the list using the order outlined above.
	 */
	if (devstat_num_devs == 1)
		STAILQ_INSERT_TAIL(devstat_head, ds, dev_links);
	else {
		STAILQ_FOREACH(ds_tmp, devstat_head, dev_links) {
			struct devstat *ds_next;

			ds_next = STAILQ_NEXT(ds_tmp, dev_links);

			/*
			 * If we find a break between higher and lower
			 * priority items, and if this item fits in the
			 * break, insert it.  This also applies if the
			 * "lower priority item" is the end of the list.
			 */
			if ((priority <= ds_tmp->priority)
			 && ((ds_next == NULL)
			   || (priority > ds_next->priority))) {
				STAILQ_INSERT_AFTER(devstat_head, ds_tmp, ds,
						    dev_links);
				break;
			} else if (priority > ds_tmp->priority) {
				/*
				 * If this is the case, we should be able
				 * to insert ourselves at the head of the
				 * list.  If we can't, something is wrong.
				 */
				if (ds_tmp == STAILQ_FIRST(devstat_head)) {
					STAILQ_INSERT_HEAD(devstat_head,
							   ds, dev_links);
					break;
				} else {
					STAILQ_INSERT_TAIL(devstat_head,
							   ds, dev_links);
					printf("devstat_add_entry: HELP! "
					       "sorting problem detected "
					       "for name %p unit %d\n",
					       dev_name, unit_number);
					break;
				}
			}
		}
	}

	ds->device_number = devstat_current_devnumber++;
	ds->unit_number = unit_number;
	strlcpy(ds->device_name, dev_name, DEVSTAT_NAME_LEN);
	ds->block_size = block_size;
	ds->flags = flags;
	ds->device_type = device_type;
	ds->priority = priority;
	binuptime(&ds->creation_time);
	devstat_generation++;
}

/*
 * Remove a devstat structure from the list of devices.
 */
void
devstat_remove_entry(struct devstat *ds)
{
	struct devstatlist *devstat_head;

	mtx_assert(&devstat_mutex, MA_NOTOWNED);
	if (ds == NULL)
		return;

	mtx_lock(&devstat_mutex);

	devstat_head = &device_statq;

	/* Remove this entry from the devstat queue */
	atomic_add_acq_int(&ds->sequence1, 1);
	if (ds->unit_number != -1) {
		devstat_num_devs--;
		STAILQ_REMOVE(devstat_head, ds, devstat, dev_links);
	}
	devstat_free(ds);
	devstat_generation++;
	mtx_unlock(&devstat_mutex);
}

/*
 * Record a transaction start.
 *
 * See comments for devstat_end_transaction().  Ordering is very important
 * here.
 */
void
devstat_start_transaction(struct devstat *ds, struct bintime *now)
{

	mtx_assert(&devstat_mutex, MA_NOTOWNED);

	/* sanity check */
	if (ds == NULL)
		return;

	atomic_add_acq_int(&ds->sequence1, 1);
	/*
	 * We only want to set the start time when we are going from idle
	 * to busy.  The start time is really the start of the latest busy
	 * period.
	 */
	if (ds->start_count == ds->end_count) {
		if (now != NULL)
			ds->busy_from = *now;
		else
			binuptime(&ds->busy_from);
	}
	ds->start_count++;
	atomic_add_rel_int(&ds->sequence0, 1);
	DTRACE_DEVSTAT_START();
}

void
devstat_start_transaction_bio(struct devstat *ds, struct bio *bp)
{

	mtx_assert(&devstat_mutex, MA_NOTOWNED);

	/* sanity check */
	if (ds == NULL)
		return;

	binuptime(&bp->bio_t0);
	devstat_start_transaction(ds, &bp->bio_t0);
	DTRACE_DEVSTAT_BIO_START();
}

/*
 * Record the ending of a transaction, and increment the various counters.
 *
 * Ordering in this function, and in devstat_start_transaction(), is VERY
 * important.  The idea here is to run without locks, so we are very
 * careful to only modify some fields on the way "down" (i.e. at
 * transaction start) and some fields on the way "up" (i.e. at transaction
 * completion).  One exception is busy_from, which we only modify in
 * devstat_start_transaction() when there are no outstanding transactions,
 * and thus it can't be modified in devstat_end_transaction()
 * simultaneously.
 *
 * The sequence0 and sequence1 fields are provided to enable an application
 * spying on the structures with mmap(2) to tell when a structure is in a
 * consistent state or not.
 *
 * For this to work 100% reliably, it is important that the two fields
 * are at opposite ends of the structure and that they are incremented
 * in the opposite order of how a memcpy(3) in userland would copy them.
 * We assume that the copying happens front to back, but there is actually
 * no way short of writing your own memcpy(3) replacement to guarantee
 * this will be the case.
 *
 * In addition to this, since the two fields effectively act as a kind of
 * lock, they must be updated with atomic instructions using appropriate
 * memory barriers.
 */
void
devstat_end_transaction(struct devstat *ds, uint32_t bytes,
			devstat_tag_type tag_type, devstat_trans_flags flags,
			struct bintime *now, struct bintime *then)
{
	struct bintime dt, lnow;

	/* sanity check */
	if (ds == NULL)
		return;

	if (now == NULL) {
		now = &lnow;
		binuptime(now);
	}

	atomic_add_acq_int(&ds->sequence1, 1);
	/* Update byte and operations counts */
	ds->bytes[flags] += bytes;
	ds->operations[flags]++;

	/*
	 * Keep a count of the various tag types sent.
	 */
	if ((ds->flags & DEVSTAT_NO_ORDERED_TAGS) == 0 &&
	    tag_type != DEVSTAT_TAG_NONE)
		ds->tag_types[tag_type]++;

	if (then != NULL) {
		/* Update duration of operations */
		dt = *now;
		bintime_sub(&dt, then);
		bintime_add(&ds->duration[flags], &dt);
	}

	/* Accumulate busy time */
	dt = *now;
	bintime_sub(&dt, &ds->busy_from);
	bintime_add(&ds->busy_time, &dt);
	ds->busy_from = *now;

	ds->end_count++;
	atomic_add_rel_int(&ds->sequence0, 1);
	DTRACE_DEVSTAT_DONE();
}
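
/*
 * Example (hedged sketch, not code from this file): a userland reader
 * that has mmap(2)'ed a devstat structure can detect a torn copy using
 * the sequence0/sequence1 protocol described above.  The "snap" buffer
 * is hypothetical.  The check works because the writer bumps sequence1
 * (at the end of the structure) before an update and sequence0 (at the
 * front) after it, so a front-to-back copy that observes equal values
 * copied a quiescent structure.
 *
 *	struct devstat snap;
 *
 *	do {
 *		memcpy(&snap, ds, sizeof(snap));
 *	} while (snap.sequence0 != snap.sequence1);
 */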

void
devstat_end_transaction_bio(struct devstat *ds, struct bio *bp)
{

	devstat_end_transaction_bio_bt(ds, bp, NULL);
}

void
devstat_end_transaction_bio_bt(struct devstat *ds, struct bio *bp,
    struct bintime *now)
{
	devstat_trans_flags flg;

	/* sanity check */
	if (ds == NULL)
		return;

	if (bp->bio_cmd == BIO_DELETE)
		flg = DEVSTAT_FREE;
	else if (bp->bio_cmd == BIO_READ)
		flg = DEVSTAT_READ;
	else if (bp->bio_cmd == BIO_WRITE)
		flg = DEVSTAT_WRITE;
	else
		flg = DEVSTAT_NO_DATA;

	devstat_end_transaction(ds, bp->bio_bcount - bp->bio_resid,
				DEVSTAT_TAG_SIMPLE, flg, now, &bp->bio_t0);
	DTRACE_DEVSTAT_BIO_DONE();
}
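
/*
 * Example (hedged sketch): in a driver, the bio-based hooks are normally
 * used in matched pairs around each request.  The foo_strategy/foo_done
 * routine names and the softc field below are hypothetical placeholders.
 *
 *	static void
 *	foo_strategy(struct bio *bp)
 *	{
 *		...
 *		devstat_start_transaction_bio(sc->stats, bp);
 *		(hand bp to the hardware)
 *	}
 *
 *	static void
 *	foo_done(struct bio *bp)
 *	{
 *		devstat_end_transaction_bio(sc->stats, bp);
 *		biodone(bp);
 *	}
 */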

/*
 * This is the sysctl handler for the devstat package.  The data pushed out
 * on the kern.devstat.all sysctl variable consists of the current devstat
 * generation number, and then an array of devstat structures, one for each
 * device in the system.
 *
 * This is more cryptic than obvious, but basically we neither can nor
 * want to hold the devstat_mutex for any amount of time, so we grab it
 * only when we need to and keep an eye on devstat_generation all the time.
 */
static int
sysctl_devstat(SYSCTL_HANDLER_ARGS)
{
	int error;
	long mygen;
	struct devstat *nds;

	mtx_assert(&devstat_mutex, MA_NOTOWNED);

	/*
	 * XXX devstat_generation should really be "volatile" but that
	 * XXX freaks out the sysctl macro below.  The places where we
	 * XXX change it and inspect it are bracketed in the mutex which
	 * XXX guarantees us proper write barriers.  I don't believe the
	 * XXX compiler is allowed to optimize mygen away across calls
	 * XXX to other functions, so the following is believed to be safe.
	 */
	mygen = devstat_generation;

	error = SYSCTL_OUT(req, &mygen, sizeof(mygen));

	if (devstat_num_devs == 0)
		return(0);

	if (error != 0)
		return (error);

	mtx_lock(&devstat_mutex);
	nds = STAILQ_FIRST(&device_statq);
	if (mygen != devstat_generation)
		error = EBUSY;
	mtx_unlock(&devstat_mutex);

	if (error != 0)
		return (error);

	for (;nds != NULL;) {
		error = SYSCTL_OUT(req, nds, sizeof(struct devstat));
		if (error != 0)
			return (error);
		mtx_lock(&devstat_mutex);
		if (mygen != devstat_generation)
			error = EBUSY;
		else
			nds = STAILQ_NEXT(nds, dev_links);
		mtx_unlock(&devstat_mutex);
		if (error != 0)
			return (error);
	}
	return(error);
}
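
/*
 * Example (hedged sketch): a userland consumer reads the whole list in
 * one go and then applies the layout described above; real programs
 * should use the devstat(3) library instead.  Error handling and the
 * retry on a generation change are elided.
 *
 *	size_t len = 0;
 *	sysctlbyname("kern.devstat.all", NULL, &len, NULL, 0);
 *	char *buf = malloc(len);
 *	sysctlbyname("kern.devstat.all", buf, &len, NULL, 0);
 *	long generation = *(long *)buf;
 *	struct devstat *devs = (struct devstat *)(buf + sizeof(long));
 */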

/*
 * Sysctl entries for devstat.  The first one is a node that all the rest
 * hang off of.
 */
static SYSCTL_NODE(_kern, OID_AUTO, devstat, CTLFLAG_RD, NULL,
    "Device Statistics");

SYSCTL_PROC(_kern_devstat, OID_AUTO, all, CTLFLAG_RD|CTLTYPE_OPAQUE,
    NULL, 0, sysctl_devstat, "S,devstat", "All devices in the devstat list");
/*
 * Export the number of devices in the system so that userland utilities
 * can determine how much memory to allocate to hold all the devices.
 */
SYSCTL_INT(_kern_devstat, OID_AUTO, numdevs, CTLFLAG_RD,
    &devstat_num_devs, 0, "Number of devices in the devstat list");
SYSCTL_LONG(_kern_devstat, OID_AUTO, generation, CTLFLAG_RD,
    &devstat_generation, 0, "Devstat list generation");
SYSCTL_INT(_kern_devstat, OID_AUTO, version, CTLFLAG_RD,
    &devstat_version, 0, "Devstat list version number");

/*
 * Allocator for struct devstat structures.  We sub-allocate these from pages
 * which we get from malloc.  These pages are exported for mmap(2)'ing through
 * a miniature device driver.
 */

#define statsperpage (PAGE_SIZE / sizeof(struct devstat))
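
/*
 * Example (hedged sketch): because the pages are mapped in list order, a
 * userland monitor can map page i of statistics read-only through the
 * /dev/devstat device exported below.  Error handling is elided.
 *
 *	int fd = open("/dev/devstat", O_RDONLY);
 *	struct devstat *page = mmap(NULL, getpagesize(), PROT_READ,
 *	    MAP_SHARED, fd, i * getpagesize());
 */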

static d_mmap_t devstat_mmap;

static struct cdevsw devstat_cdevsw = {
	.d_version =	D_VERSION,
	.d_mmap =	devstat_mmap,
	.d_name =	"devstat",
};

struct statspage {
	TAILQ_ENTRY(statspage)	list;
	struct devstat		*stat;
	u_int			nfree;
};

static TAILQ_HEAD(, statspage)	pagelist = TAILQ_HEAD_INITIALIZER(pagelist);
static MALLOC_DEFINE(M_DEVSTAT, "devstat", "Device statistics");

static int
devstat_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
    int nprot, vm_memattr_t *memattr)
{
	struct statspage *spp;

	if (nprot != VM_PROT_READ)
		return (-1);
	mtx_lock(&devstat_mutex);
	TAILQ_FOREACH(spp, &pagelist, list) {
		if (offset == 0) {
			*paddr = vtophys(spp->stat);
			mtx_unlock(&devstat_mutex);
			return (0);
		}
		offset -= PAGE_SIZE;
	}
	mtx_unlock(&devstat_mutex);
	return (-1);
}

static struct devstat *
devstat_alloc(void)
{
	struct devstat *dsp;
	struct statspage *spp, *spp2;
	u_int u;
	static int once;

	mtx_assert(&devstat_mutex, MA_NOTOWNED);
	if (!once) {
		make_dev_credf(MAKEDEV_ETERNAL | MAKEDEV_CHECKNAME,
		    &devstat_cdevsw, 0, NULL, UID_ROOT, GID_WHEEL, 0444,
		    DEVSTAT_DEVICE_NAME);
		once = 1;
	}
	spp2 = NULL;
	mtx_lock(&devstat_mutex);
	for (;;) {
		TAILQ_FOREACH(spp, &pagelist, list) {
			if (spp->nfree > 0)
				break;
		}
		if (spp != NULL)
			break;
		mtx_unlock(&devstat_mutex);
		spp2 = malloc(sizeof *spp, M_DEVSTAT, M_ZERO | M_WAITOK);
		spp2->stat = malloc(PAGE_SIZE, M_DEVSTAT, M_ZERO | M_WAITOK);
		spp2->nfree = statsperpage;

		/*
		 * If free statspages were added while the lock was released
		 * just reuse them.
		 */
		mtx_lock(&devstat_mutex);
		TAILQ_FOREACH(spp, &pagelist, list)
			if (spp->nfree > 0)
				break;
		if (spp == NULL) {
			spp = spp2;

			/*
			 * It would make more sense to add the new page at the
			 * head, but the order on the list determines the
			 * sequence of the mapping, so we can't do that.
			 */
			TAILQ_INSERT_TAIL(&pagelist, spp, list);
		} else
			break;
	}
	dsp = spp->stat;
	for (u = 0; u < statsperpage; u++) {
		if (dsp->allocated == 0)
			break;
		dsp++;
	}
	spp->nfree--;
	dsp->allocated = 1;
	mtx_unlock(&devstat_mutex);
	if (spp2 != NULL && spp2 != spp) {
		free(spp2->stat, M_DEVSTAT);
		free(spp2, M_DEVSTAT);
	}
	return (dsp);
}

static void
devstat_free(struct devstat *dsp)
{
	struct statspage *spp;

	mtx_assert(&devstat_mutex, MA_OWNED);
	bzero(dsp, sizeof *dsp);
	TAILQ_FOREACH(spp, &pagelist, list) {
		if (dsp >= spp->stat && dsp < (spp->stat + statsperpage)) {
			spp->nfree++;
			return;
		}
	}
}

SYSCTL_INT(_debug_sizeof, OID_AUTO, devstat, CTLFLAG_RD,
    SYSCTL_NULL_INT_PTR, sizeof(struct devstat), "sizeof(struct devstat)");