/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/devicestat.h>
#include <sys/sdt.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/conf.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/atomic.h>

SDT_PROVIDER_DEFINE(io);

SDT_PROBE_DEFINE2(io, , , start, "struct bio *", "struct devstat *");
SDT_PROBE_DEFINE2(io, , , done, "struct bio *", "struct devstat *");
SDT_PROBE_DEFINE2(io, , , wait__start, "struct bio *",
    "struct devstat *");
SDT_PROBE_DEFINE2(io, , , wait__done, "struct bio *",
    "struct devstat *");

#define	DTRACE_DEVSTAT_START()		SDT_PROBE2(io, , , start, NULL, ds)
#define	DTRACE_DEVSTAT_BIO_START()	SDT_PROBE2(io, , , start, bp, ds)
#define	DTRACE_DEVSTAT_DONE()		SDT_PROBE2(io, , , done, NULL, ds)
#define	DTRACE_DEVSTAT_BIO_DONE()	SDT_PROBE2(io, , , done, bp, ds)
#define	DTRACE_DEVSTAT_WAIT_START()	SDT_PROBE2(io, , , wait__start, NULL, ds)
#define	DTRACE_DEVSTAT_WAIT_DONE()	SDT_PROBE2(io, , , wait__done, NULL, ds)
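
/*
 * These SDT probes back the DTrace "io" provider.  As a usage sketch
 * (assuming the stock io provider translators, which expose the
 * struct devstat argument as a devinfo_t), I/O starts can be counted
 * per device with a one-liner such as:
 *
 *	dtrace -n 'io:::start { @[args[1]->dev_name] = count(); }'
 */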

static int devstat_num_devs;
static long devstat_generation = 1;
static int devstat_version = DEVSTAT_VERSION;
static int devstat_current_devnumber;
static struct mtx devstat_mutex;
MTX_SYSINIT(devstat_mutex, &devstat_mutex, "devstat", MTX_DEF);

static struct devstatlist device_statq = STAILQ_HEAD_INITIALIZER(device_statq);
static struct devstat *devstat_alloc(void);
static void devstat_free(struct devstat *);
static void devstat_add_entry(struct devstat *ds, const void *dev_name,
	    int unit_number, uint32_t block_size,
	    devstat_support_flags flags,
	    devstat_type_flags device_type,
	    devstat_priority priority);

/*
 * Allocate a devstat and initialize it
 */
struct devstat *
devstat_new_entry(const void *dev_name,
		  int unit_number, uint32_t block_size,
		  devstat_support_flags flags,
		  devstat_type_flags device_type,
		  devstat_priority priority)
{
	struct devstat *ds;

	mtx_assert(&devstat_mutex, MA_NOTOWNED);

	ds = devstat_alloc();
	mtx_lock(&devstat_mutex);
	if (unit_number == -1) {
		ds->unit_number = unit_number;
		ds->id = dev_name;
		binuptime(&ds->creation_time);
		devstat_generation++;
	} else {
		devstat_add_entry(ds, dev_name, unit_number, block_size,
				  flags, device_type, priority);
	}
	mtx_unlock(&devstat_mutex);
	return (ds);
}
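
/*
 * A typical consumer pairs this with devstat_remove_entry(); a hedged
 * sketch of hypothetical driver code (the names "sc", "foo" and "unit"
 * are illustrative only):
 *
 *	sc->sc_devstat = devstat_new_entry("foo", unit, DEV_BSIZE,
 *	    DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT,
 *	    DEVSTAT_PRIORITY_DISK);
 *	...
 *	devstat_remove_entry(sc->sc_devstat);
 */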

/*
 * Take a malloced and zeroed devstat structure given to us, fill it in
 * and add it to the queue of devices.
 */
static void
devstat_add_entry(struct devstat *ds, const void *dev_name,
		  int unit_number, uint32_t block_size,
		  devstat_support_flags flags,
		  devstat_type_flags device_type,
		  devstat_priority priority)
{
	struct devstatlist *devstat_head;
	struct devstat *ds_tmp;

	mtx_assert(&devstat_mutex, MA_OWNED);
	devstat_num_devs++;

	devstat_head = &device_statq;

	/*
	 * Priority sort.  Each driver passes in its priority when it adds
	 * its devstat entry.  Drivers are sorted first by priority, and
	 * then by probe order.
	 *
	 * For the first device, we just insert it, since the priority
	 * doesn't really matter yet.  Subsequent devices are inserted into
	 * the list using the order outlined above.
	 */
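	/*
	 * For example, since DEVSTAT_PRIORITY_DISK sorts above
	 * DEVSTAT_PRIORITY_FD, disks da0 and da1 both end up ahead of
	 * fd0 no matter when fd0 probed, while da0 stays ahead of da1
	 * because equal priorities preserve probe order.
	 */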
	if (devstat_num_devs == 1)
		STAILQ_INSERT_TAIL(devstat_head, ds, dev_links);
	else {
		STAILQ_FOREACH(ds_tmp, devstat_head, dev_links) {
			struct devstat *ds_next;

			ds_next = STAILQ_NEXT(ds_tmp, dev_links);

			/*
			 * If we find a break between higher and lower
			 * priority items, and if this item fits in the
			 * break, insert it.  This also applies if the
			 * "lower priority item" is the end of the list.
			 */
			if ((priority <= ds_tmp->priority)
			 && ((ds_next == NULL)
			   || (priority > ds_next->priority))) {
				STAILQ_INSERT_AFTER(devstat_head, ds_tmp, ds,
						    dev_links);
				break;
			} else if (priority > ds_tmp->priority) {
				/*
				 * If this is the case, we should be able
				 * to insert ourselves at the head of the
				 * list.  If we can't, something is wrong.
				 */
				if (ds_tmp == STAILQ_FIRST(devstat_head)) {
					STAILQ_INSERT_HEAD(devstat_head,
							   ds, dev_links);
					break;
				} else {
					STAILQ_INSERT_TAIL(devstat_head,
							   ds, dev_links);
					printf("devstat_add_entry: HELP! "
					       "sorting problem detected "
					       "for name %p unit %d\n",
					       dev_name, unit_number);
					break;
				}
			}
		}
	}

	ds->device_number = devstat_current_devnumber++;
	ds->unit_number = unit_number;
	strlcpy(ds->device_name, dev_name, DEVSTAT_NAME_LEN);
	ds->block_size = block_size;
	ds->flags = flags;
	ds->device_type = device_type;
	ds->priority = priority;
	binuptime(&ds->creation_time);
	devstat_generation++;
}

/*
 * Remove a devstat structure from the list of devices.
 */
void
devstat_remove_entry(struct devstat *ds)
{
	struct devstatlist *devstat_head;

	mtx_assert(&devstat_mutex, MA_NOTOWNED);
	if (ds == NULL)
		return;

	mtx_lock(&devstat_mutex);

	devstat_head = &device_statq;

	/* Remove this entry from the devstat queue */
	atomic_add_acq_int(&ds->sequence1, 1);
	if (ds->unit_number != -1) {
		devstat_num_devs--;
		STAILQ_REMOVE(devstat_head, ds, devstat, dev_links);
	}
	devstat_free(ds);
	devstat_generation++;
	mtx_unlock(&devstat_mutex);
}

/*
 * Record a transaction start.
 *
 * See comments for devstat_end_transaction().  Ordering is very important
 * here.
 */
void
devstat_start_transaction(struct devstat *ds, const struct bintime *now)
{

	/* sanity check */
	if (ds == NULL)
		return;

	atomic_add_acq_int(&ds->sequence1, 1);
	/*
	 * We only want to set the start time when we are going from idle
	 * to busy.  The start time is really the start of the latest busy
	 * period.
	 */
	if (atomic_fetchadd_int(&ds->start_count, 1) == ds->end_count) {
		if (now != NULL)
			ds->busy_from = *now;
		else
			binuptime(&ds->busy_from);
	}
	atomic_add_rel_int(&ds->sequence0, 1);
	DTRACE_DEVSTAT_START();
}
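
/*
 * Note for consumers: percent-busy over an interval is derived from two
 * snapshots of busy_time, roughly
 *
 *	busy% = (busy_time_now - busy_time_then) / elapsed * 100
 *
 * which is why busy_from is only reset here on the idle->busy edge and
 * on each completion in devstat_end_transaction().
 */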

void
devstat_start_transaction_bio(struct devstat *ds, struct bio *bp)
{

	/* sanity check */
	if (ds == NULL)
		return;

	binuptime(&bp->bio_t0);
	devstat_start_transaction_bio_t0(ds, bp);
}

void
devstat_start_transaction_bio_t0(struct devstat *ds, struct bio *bp)
{

	/* sanity check */
	if (ds == NULL)
		return;

	devstat_start_transaction(ds, &bp->bio_t0);
	DTRACE_DEVSTAT_BIO_START();
}

/*
 * Record the ending of a transaction, and increment the various counters.
 *
 * Ordering in this function, and in devstat_start_transaction(), is VERY
 * important.  The idea here is to run without locks, so we are very
 * careful to only modify some fields on the way "down" (i.e. at
 * transaction start) and some fields on the way "up" (i.e. at transaction
 * completion).  One exception is busy_from, which we only modify in
 * devstat_start_transaction() when there are no outstanding transactions,
 * and thus it can't be modified in devstat_end_transaction()
 * simultaneously.
 *
 * The sequence0 and sequence1 fields are provided to enable an application
 * spying on the structures with mmap(2) to tell when a structure is in a
 * consistent state or not.
 *
 * For this to work 100% reliably, it is important that the two fields
 * are at opposite ends of the structure and that they are incremented
 * in the opposite order of how a memcpy(3) in userland would copy them.
 * We assume that the copying happens front to back, but there is actually
 * no way short of writing your own memcpy(3) replacement to guarantee
 * this will be the case.
 *
 * In addition to this, since they act as a kind of lock, they must be
 * updated with atomic instructions using appropriate memory barriers.
 */
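
/*
 * A sketch of the userland side of that protocol (hedged; this is what a
 * consumer such as libdevstat effectively does, with "mapped_ds" a
 * hypothetical pointer into the mmap(2)'ed page):
 *
 *	struct devstat d;
 *
 *	do {
 *		memcpy(&d, mapped_ds, sizeof(d));
 *	} while (d.sequence0 != d.sequence1);
 *
 * A mismatch between the two sequence numbers in the copy means the
 * structure was being updated mid-copy, so the snapshot is retried.
 */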
void
devstat_end_transaction(struct devstat *ds, uint32_t bytes,
			devstat_tag_type tag_type, devstat_trans_flags flags,
			const struct bintime *now, const struct bintime *then)
{
	struct bintime dt, lnow;

	/* sanity check */
	if (ds == NULL)
		return;

	if (now == NULL) {
		binuptime(&lnow);
		now = &lnow;
	}

	atomic_add_acq_int(&ds->sequence1, 1);
	/* Update byte and operations counts */
	ds->bytes[flags] += bytes;
	ds->operations[flags]++;

	/*
	 * Keep a count of the various tag types sent.
	 */
	if ((ds->flags & DEVSTAT_NO_ORDERED_TAGS) == 0 &&
	    tag_type != DEVSTAT_TAG_NONE)
		ds->tag_types[tag_type]++;

	if (then != NULL) {
		/* Update duration of operations */
		dt = *now;
		bintime_sub(&dt, then);
		bintime_add(&ds->duration[flags], &dt);
	}

	/* Accumulate busy time */
	dt = *now;
	bintime_sub(&dt, &ds->busy_from);
	bintime_add(&ds->busy_time, &dt);
	ds->busy_from = *now;

	ds->end_count++;
	atomic_add_rel_int(&ds->sequence0, 1);
	DTRACE_DEVSTAT_DONE();
}
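
/*
 * Consumers typically derive average per-operation latency from these
 * counters as duration[flags] / operations[flags] across an interval;
 * this is, in effect, what devstat_compute_statistics(3) computes in
 * libdevstat.
 */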

void
devstat_end_transaction_bio(struct devstat *ds, const struct bio *bp)
{

	devstat_end_transaction_bio_bt(ds, bp, NULL);
}

void
devstat_end_transaction_bio_bt(struct devstat *ds, const struct bio *bp,
			       const struct bintime *now)
{
	devstat_trans_flags flg;
	devstat_tag_type tag;

	/* sanity check */
	if (ds == NULL)
		return;

	if (bp->bio_flags & BIO_ORDERED)
		tag = DEVSTAT_TAG_ORDERED;
	else
		tag = DEVSTAT_TAG_SIMPLE;
	if (bp->bio_cmd == BIO_DELETE)
		flg = DEVSTAT_FREE;
	else if ((bp->bio_cmd == BIO_READ)
	      || ((bp->bio_cmd == BIO_ZONE)
	       && (bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES)))
		flg = DEVSTAT_READ;
	else if (bp->bio_cmd == BIO_WRITE)
		flg = DEVSTAT_WRITE;
	else
		flg = DEVSTAT_NO_DATA;

	devstat_end_transaction(ds, bp->bio_bcount - bp->bio_resid,
				tag, flg, now, &bp->bio_t0);
	DTRACE_DEVSTAT_BIO_DONE();
}

/*
 * This is the sysctl handler for the devstat package.  The data pushed out
 * on the kern.devstat.all sysctl variable consists of the current devstat
 * generation number, and then an array of devstat structures, one for each
 * device in the system.
 *
 * This is more cryptic than obvious, but basically we neither can nor
 * want to hold the devstat_mutex for any amount of time, so we grab it
 * only when we need to and keep an eye on devstat_generation all the time.
 */
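
/*
 * A hedged sketch of how userland would consume this directly (libdevstat
 * wraps the same steps; "buf" and "len" are illustrative names):
 *
 *	size_t len;
 *	if (sysctlbyname("kern.devstat.all", NULL, &len, NULL, 0) == 0) {
 *		void *buf = malloc(len);
 *		if (sysctlbyname("kern.devstat.all", buf, &len,
 *		    NULL, 0) == 0) {
 *			// buf: a long generation number followed by
 *			// an array of struct devstat entries.
 *		}
 *	}
 */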
static int
sysctl_devstat(SYSCTL_HANDLER_ARGS)
{
	int error;
	long mygen;
	struct devstat *nds;

	mtx_assert(&devstat_mutex, MA_NOTOWNED);

	/*
	 * XXX devstat_generation should really be "volatile" but that
	 * XXX freaks out the sysctl macro below.  The places where we
	 * XXX change it and inspect it are bracketed in the mutex which
	 * XXX guarantees us proper write barriers.  I don't believe the
	 * XXX compiler is allowed to optimize mygen away across calls
	 * XXX to other functions, so the following is believed to be safe.
	 */
	mygen = devstat_generation;

	error = SYSCTL_OUT(req, &mygen, sizeof(mygen));

	if (devstat_num_devs == 0)
		return (0);

	if (error != 0)
		return (error);

	mtx_lock(&devstat_mutex);
	nds = STAILQ_FIRST(&device_statq);
	if (mygen != devstat_generation)
		error = EBUSY;
	mtx_unlock(&devstat_mutex);

	if (error != 0)
		return (error);

	for (; nds != NULL;) {
		error = SYSCTL_OUT(req, nds, sizeof(struct devstat));
		if (error != 0)
			return (error);
		mtx_lock(&devstat_mutex);
		if (mygen != devstat_generation)
			error = EBUSY;
		else
			nds = STAILQ_NEXT(nds, dev_links);
		mtx_unlock(&devstat_mutex);
		if (error != 0)
			return (error);
	}
	return (error);
}

/*
 * Sysctl entries for devstat.  The first one is a node that all the rest
 * hang off of.
 */
static SYSCTL_NODE(_kern, OID_AUTO, devstat, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
    "Device Statistics");

SYSCTL_PROC(_kern_devstat, OID_AUTO, all,
    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_devstat, "S,devstat",
    "All devices in the devstat list");
/*
 * Export the number of devices in the system so that userland utilities
 * can determine how much memory to allocate to hold all the devices.
 */
SYSCTL_INT(_kern_devstat, OID_AUTO, numdevs, CTLFLAG_RD,
    &devstat_num_devs, 0, "Number of devices in the devstat list");
SYSCTL_LONG(_kern_devstat, OID_AUTO, generation, CTLFLAG_RD,
    &devstat_generation, 0, "Devstat list generation");
SYSCTL_INT(_kern_devstat, OID_AUTO, version, CTLFLAG_RD,
    &devstat_version, 0, "Devstat list version number");

/*
 * Allocator for struct devstat structures.  We sub-allocate these from pages
 * which we get from malloc.  These pages are exported for mmap(2)'ing through
 * a miniature device driver.
 */
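
/*
 * Userland reaches these pages read-only via the device created below as
 * DEVSTAT_DEVICE_NAME; a hedged sketch of mapping page "i" (an
 * illustrative index):
 *
 *	int fd = open("/dev/" DEVSTAT_DEVICE_NAME, O_RDONLY);
 *	void *p = mmap(NULL, getpagesize(), PROT_READ, MAP_SHARED,
 *	    fd, i * getpagesize());
 *
 * Each mapped page holds statsperpage struct devstat entries, in list
 * order.
 */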

#define	statsperpage (PAGE_SIZE / sizeof(struct devstat))

static d_mmap_t devstat_mmap;

static struct cdevsw devstat_cdevsw = {
	.d_version =	D_VERSION,
	.d_mmap =	devstat_mmap,
	.d_name =	"devstat",
};

struct statspage {
	TAILQ_ENTRY(statspage)	list;
	struct devstat		*stat;
	u_int			nfree;
};

static TAILQ_HEAD(, statspage)	pagelist = TAILQ_HEAD_INITIALIZER(pagelist);
static MALLOC_DEFINE(M_DEVSTAT, "devstat", "Device statistics");

static int
devstat_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
    int nprot, vm_memattr_t *memattr)
{
	struct statspage *spp;

	if (nprot != VM_PROT_READ)
		return (-1);
	mtx_lock(&devstat_mutex);
	TAILQ_FOREACH(spp, &pagelist, list) {
		if (offset == 0) {
			*paddr = vtophys(spp->stat);
			mtx_unlock(&devstat_mutex);
			return (0);
		}
		offset -= PAGE_SIZE;
	}
	mtx_unlock(&devstat_mutex);
	return (-1);
}

static struct devstat *
devstat_alloc(void)
{
	struct devstat *dsp;
	struct statspage *spp, *spp2;
	u_int u;
	static int once;

	mtx_assert(&devstat_mutex, MA_NOTOWNED);
	if (!once) {
		make_dev_credf(MAKEDEV_ETERNAL | MAKEDEV_CHECKNAME,
		    &devstat_cdevsw, 0, NULL, UID_ROOT, GID_WHEEL, 0444,
		    DEVSTAT_DEVICE_NAME);
		once = 1;
	}
	spp2 = NULL;
	mtx_lock(&devstat_mutex);
	for (;;) {
		TAILQ_FOREACH(spp, &pagelist, list) {
			if (spp->nfree > 0)
				break;
		}
		if (spp != NULL)
			break;
		mtx_unlock(&devstat_mutex);
		spp2 = malloc(sizeof *spp, M_DEVSTAT, M_ZERO | M_WAITOK);
		spp2->stat = malloc(PAGE_SIZE, M_DEVSTAT, M_ZERO | M_WAITOK);
		spp2->nfree = statsperpage;

		/*
		 * If free statspages were added while the lock was released
		 * just reuse them.
		 */
		mtx_lock(&devstat_mutex);
		TAILQ_FOREACH(spp, &pagelist, list)
			if (spp->nfree > 0)
				break;
		if (spp == NULL) {
			spp = spp2;

			/*
			 * It would make more sense to add the new page at
			 * the head, but the order on the list determines
			 * the sequence of the mapping, so we can't do that.
			 */
			TAILQ_INSERT_TAIL(&pagelist, spp, list);
		} else
			break;
	}
	dsp = spp->stat;
	for (u = 0; u < statsperpage; u++) {
		if (dsp->allocated == 0)
			break;
		dsp++;
	}
	spp->nfree--;
	dsp->allocated = 1;
	mtx_unlock(&devstat_mutex);
	if (spp2 != NULL && spp2 != spp) {
		free(spp2->stat, M_DEVSTAT);
		free(spp2, M_DEVSTAT);
	}
	return (dsp);
}

static void
devstat_free(struct devstat *dsp)
{
	struct statspage *spp;

	mtx_assert(&devstat_mutex, MA_OWNED);
	bzero(dsp, sizeof *dsp);
	TAILQ_FOREACH(spp, &pagelist, list) {
		if (dsp >= spp->stat && dsp < (spp->stat + statsperpage)) {
			spp->nfree++;
			return;
		}
	}
}

SYSCTL_INT(_debug_sizeof, OID_AUTO, devstat, CTLFLAG_RD,
    SYSCTL_NULL_INT_PTR, sizeof(struct devstat), "sizeof(struct devstat)");
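
/*
 * Usage note: "sysctl debug.sizeof.devstat" lets userland sanity-check
 * that its idea of the struct devstat layout matches the kernel's.
 */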