/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, Delphix. All rights reserved.
 * Copyright (c) 2013, Saso Kiselkov. All rights reserved.
 * Copyright (c) 2013, Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2020, George Amanakis. All rights reserved.
 */

#ifndef _SYS_ARC_IMPL_H
#define	_SYS_ARC_IMPL_H

#include <sys/arc.h>
#include <sys/zio_crypt.h>
#include <sys/zthr.h>
#include <sys/aggsum.h>
#include <sys/wmsum.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Note that buffers can be in one of 7 states:
 *	ARC_anon	- anonymous (discussed below)
 *	ARC_mru		- recently used, currently cached
 *	ARC_mru_ghost	- recently used, no longer in cache
 *	ARC_mfu		- frequently used, currently cached
 *	ARC_mfu_ghost	- frequently used, no longer in cache
 *	ARC_uncached	- uncacheable prefetch, to be evicted
 *	ARC_l2c_only	- exists in L2ARC but not other states
 * When there are no active references to the buffer, it is linked onto
 * a list in one of these arc states. These are the only buffers that
 * can be evicted or deleted. Within each state there are multiple
 * lists, one for meta-data and one for non-meta-data. Meta-data
 * (indirect blocks, blocks of dnodes, etc.) is tracked separately
 * so that it can be managed more explicitly: favored over data,
 * limited explicitly.
 *
 * Anonymous buffers are buffers that are not associated with
 * a DVA. These are buffers that hold dirty block copies
 * before they are written to stable storage. By definition,
 * they are "ref'd" and are considered part of arc_mru
 * that cannot be freed. Generally, they will acquire a DVA
 * as they are written and migrate onto the arc_mru list.
 *
 * The ARC_l2c_only state is for buffers that are in the second
 * level ARC but no longer in any of the ARC_m* lists. The second
 * level ARC itself may also contain buffers that are in any of
 * the ARC_m* states - meaning that a buffer can exist in two
 * places. The reason for the ARC_l2c_only state is to keep the
 * buffer header in the hash table, so that reads that hit the
 * second level ARC benefit from these fast lookups.
 */

typedef struct arc_state {
	/*
	 * list of evictable buffers
	 */
	multilist_t arcs_list[ARC_BUFC_NUMTYPES];
	/*
	 * supports the "dbufs" kstat
	 */
	arc_state_type_t arcs_state;
	/*
	 * total amount of evictable data in this state
	 */
	zfs_refcount_t arcs_esize[ARC_BUFC_NUMTYPES] ____cacheline_aligned;
	/*
	 * total amount of data in this state; this includes: evictable,
	 * non-evictable, ARC_BUFC_DATA, and ARC_BUFC_METADATA.
	 */
	zfs_refcount_t arcs_size;
} arc_state_t;
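
/*
 * Illustrative sketch (not part of this header's contract): arc.c reads the
 * counters above with zfs_refcount_count(), e.g. to see how much evictable
 * metadata and how much total data a state currently holds:
 *
 *	arc_state_t *state = arc_mru;
 *	uint64_t evictable_meta =
 *	    zfs_refcount_count(&state->arcs_esize[ARC_BUFC_METADATA]);
 *	uint64_t total = zfs_refcount_count(&state->arcs_size);
 */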

typedef struct arc_callback arc_callback_t;

struct arc_callback {
	void *acb_private;
	arc_read_done_func_t *acb_done;
	arc_buf_t *acb_buf;
	boolean_t acb_encrypted;
	boolean_t acb_compressed;
	boolean_t acb_noauth;
	boolean_t acb_nobuf;
	boolean_t acb_wait;
	int acb_wait_error;
	kmutex_t acb_wait_lock;
	kcondvar_t acb_wait_cv;
	zbookmark_phys_t acb_zb;
	zio_t *acb_zio_dummy;
	zio_t *acb_zio_head;
	arc_callback_t *acb_prev;
	arc_callback_t *acb_next;
};

typedef struct arc_write_callback arc_write_callback_t;

struct arc_write_callback {
	void *awcb_private;
	arc_write_done_func_t *awcb_ready;
	arc_write_done_func_t *awcb_children_ready;
	arc_write_done_func_t *awcb_physdone;
	arc_write_done_func_t *awcb_done;
	arc_buf_t *awcb_buf;
};

/*
 * ARC buffers are separated into multiple structs as a memory saving measure:
 *   - Common fields struct, always defined, and embedded within it:
 *       - L2-only fields, always allocated but undefined when not in L2ARC
 *       - L1-only fields, only allocated when in L1ARC
 *
 *           Buffer in L1                     Buffer only in L2
 *    +------------------------+          +------------------------+
 *    | arc_buf_hdr_t          |          | arc_buf_hdr_t          |
 *    |                        |          |                        |
 *    |                        |          |                        |
 *    |                        |          |                        |
 *    +------------------------+          +------------------------+
 *    | l2arc_buf_hdr_t        |          | l2arc_buf_hdr_t        |
 *    | (undefined if L1-only) |          |                        |
 *    +------------------------+          +------------------------+
 *    | l1arc_buf_hdr_t        |
 *    |                        |
 *    |                        |
 *    |                        |
 *    |                        |
 *    +------------------------+
 *
 * Because it's possible for the L2ARC to become extremely large, we can wind
 * up eating a lot of memory in L2ARC buffer headers, so the size of a header
 * is minimized by only allocating the fields necessary for an L1-cached buffer
 * when a header is actually in the L1 cache. The sub-headers (l1arc_buf_hdr and
 * l2arc_buf_hdr) are embedded rather than allocated separately to save a couple
 * words in pointers. arc_hdr_realloc() is used to switch a header between
 * these two allocation states.
 */
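
/*
 * For illustration of the two allocation states (a sketch, not an interface
 * exported here): arc.c sizes a full header as sizeof (arc_buf_hdr_t), while
 * an L2-only header can stop short of the trailing L1 fields, roughly:
 *
 *	size_t full_size = sizeof (arc_buf_hdr_t);
 *	size_t l2only_size = offsetof(arc_buf_hdr_t, b_l1hdr);
 */
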
typedef struct l1arc_buf_hdr {
	/* for waiting on reads to complete */
	kcondvar_t b_cv;
	uint8_t b_byteswap;

	/* protected by arc state mutex */
	arc_state_t *b_state;
	multilist_node_t b_arc_node;

	/* protected by hash lock */
	clock_t b_arc_access;
	uint32_t b_mru_hits;
	uint32_t b_mru_ghost_hits;
	uint32_t b_mfu_hits;
	uint32_t b_mfu_ghost_hits;
	uint32_t b_bufcnt;
	arc_buf_t *b_buf;

	/* self protecting */
	zfs_refcount_t b_refcnt;

	arc_callback_t *b_acb;
	abd_t *b_pabd;

#ifdef ZFS_DEBUG
	zio_cksum_t *b_freeze_cksum;
	kmutex_t b_freeze_lock;
#endif
} l1arc_buf_hdr_t;

typedef enum l2arc_dev_hdr_flags_t {
	L2ARC_DEV_HDR_EVICT_FIRST = (1 << 0)	/* mirror of l2ad_first */
} l2arc_dev_hdr_flags_t;

/*
 * Pointer used in persistent L2ARC (for pointing to log blocks).
 */
typedef struct l2arc_log_blkptr {
	/*
	 * Offset of log block within the device, in bytes
	 */
	uint64_t lbp_daddr;
	/*
	 * Aligned payload size (in bytes) of the log block
	 */
	uint64_t lbp_payload_asize;
	/*
	 * Offset in bytes of the first buffer in the payload
	 */
	uint64_t lbp_payload_start;
	/*
	 * lbp_prop has the following format:
	 *	* logical size (in bytes)
	 *	* aligned (after compression) size (in bytes)
	 *	* compression algorithm (we always LZ4-compress l2arc logs)
	 *	* checksum algorithm (used for lbp_cksum)
	 */
	uint64_t lbp_prop;
	zio_cksum_t lbp_cksum;	/* checksum of log */
} l2arc_log_blkptr_t;

/*
 * The persistent L2ARC device header.
 * Byte order of magic determines whether 64-bit bswap of fields is necessary.
 */
typedef struct l2arc_dev_hdr_phys {
	uint64_t dh_magic;	/* L2ARC_DEV_HDR_MAGIC */
	uint64_t dh_version;	/* Persistent L2ARC version */

	/*
	 * Global L2ARC device state and metadata.
	 */
	uint64_t dh_spa_guid;
	uint64_t dh_vdev_guid;
	uint64_t dh_log_entries;	/* mirror of l2ad_log_entries */
	uint64_t dh_evict;		/* evicted offset in bytes */
	uint64_t dh_flags;		/* l2arc_dev_hdr_flags_t */
	/*
	 * Used in zdb.c for determining if a log block is valid, in the same
	 * way that l2arc_rebuild() does.
	 */
	uint64_t dh_start;		/* mirror of l2ad_start */
	uint64_t dh_end;		/* mirror of l2ad_end */
	/*
	 * Start of log block chain. [0] -> newest log, [1] -> one older (used
	 * for initiating prefetch).
	 */
	l2arc_log_blkptr_t dh_start_lbps[2];
	/*
	 * Aligned size of all log blocks as accounted by vdev_space_update().
	 */
	uint64_t dh_lb_asize;		/* mirror of l2ad_lb_asize */
	uint64_t dh_lb_count;		/* mirror of l2ad_lb_count */
	/*
	 * Mirrors of vdev_trim_action_time and vdev_trim_state, used to
	 * display when the cache device was fully trimmed for the last
	 * time.
	 */
	uint64_t dh_trim_action_time;
	uint64_t dh_trim_state;
	const uint64_t dh_pad[30];	/* pad to 512 bytes */
	zio_eck_t dh_tail;
} l2arc_dev_hdr_phys_t;
_Static_assert(sizeof (l2arc_dev_hdr_phys_t) == SPA_MINBLOCKSIZE,
	"l2arc_dev_hdr_phys_t wrong size");

/*
 * A single ARC buffer header entry in a l2arc_log_blk_phys_t.
 */
typedef struct l2arc_log_ent_phys {
	dva_t le_dva;		/* dva of buffer */
	uint64_t le_birth;	/* birth txg of buffer */
	/*
	 * le_prop has the following format:
	 *	* logical size (in bytes)
	 *	* physical (compressed) size (in bytes)
	 *	* compression algorithm
	 *	* object type (used to restore arc_buf_contents_t)
	 *	* protected status (used for encryption)
	 *	* prefetch status (used in l2arc_read_done())
	 */
	uint64_t le_prop;
	uint64_t le_daddr;	/* buf location on l2dev */
	uint64_t le_complevel;
	/*
	 * We pad the size of each entry to a power of 2 so that the size of
	 * l2arc_log_blk_phys_t is power-of-2 aligned with SPA_MINBLOCKSHIFT,
	 * because of the L2BLK_SET_*SIZE macros.
	 */
	const uint64_t le_pad[2];	/* pad to 64 bytes */
} l2arc_log_ent_phys_t;

#define	L2ARC_LOG_BLK_MAX_ENTRIES	(1022)

/*
 * A log block of up to 1022 ARC buffer log entries, chained into the
 * persistent L2ARC metadata linked list. Byte order of magic determines
 * whether 64-bit bswap of fields is necessary.
 */
typedef struct l2arc_log_blk_phys {
	uint64_t lb_magic;	/* L2ARC_LOG_BLK_MAGIC */
	/*
	 * There are 2 chains (headed by dh_start_lbps[2]), and this field
	 * points back to the previous block in this chain. We alternate
	 * which chain we append to, so they are time-wise and offset-wise
	 * interleaved, but that is an optimization rather than for
	 * correctness.
	 */
	l2arc_log_blkptr_t lb_prev_lbp;	/* pointer to prev log block */
	/*
	 * Pad header section to 128 bytes
	 */
	uint64_t lb_pad[7];
	/* Payload */
	l2arc_log_ent_phys_t lb_entries[L2ARC_LOG_BLK_MAX_ENTRIES];
} l2arc_log_blk_phys_t;	/* 64K total */
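
/*
 * Size arithmetic for the block above, worked out from the definitions in
 * this file: each l2arc_log_ent_phys_t is 64 bytes (a 16-byte dva_t, four
 * uint64_t fields, and 16 bytes of padding), and the header section
 * (lb_magic + lb_prev_lbp + lb_pad[7]) is 8 + 64 + 56 = 128 bytes, so the
 * whole block is 128 + 1022 * 64 = 65536 bytes, i.e. the 64K noted above.
 */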

/*
 * The size of l2arc_log_blk_phys_t has to be power-of-2 aligned with
 * SPA_MINBLOCKSHIFT because of L2BLK_SET_*SIZE macros.
 */
_Static_assert(IS_P2ALIGNED(sizeof (l2arc_log_blk_phys_t),
	1ULL << SPA_MINBLOCKSHIFT), "l2arc_log_blk_phys_t misaligned");
_Static_assert(sizeof (l2arc_log_blk_phys_t) >= SPA_MINBLOCKSIZE,
	"l2arc_log_blk_phys_t too small");
_Static_assert(sizeof (l2arc_log_blk_phys_t) <= SPA_MAXBLOCKSIZE,
	"l2arc_log_blk_phys_t too big");

/*
 * These structures hold in-flight abd buffers for log blocks as they're being
 * written to the L2ARC device.
 */
typedef struct l2arc_lb_abd_buf {
	abd_t *abd;
	list_node_t node;
} l2arc_lb_abd_buf_t;

/*
 * These structures hold pointers to log blocks present on the L2ARC device.
 */
typedef struct l2arc_lb_ptr_buf {
	l2arc_log_blkptr_t *lb_ptr;
	list_node_t node;
} l2arc_lb_ptr_buf_t;

/* Macros for setting fields in le_prop and lbp_prop */
#define	L2BLK_GET_LSIZE(field)	\
	BF64_GET_SB((field), 0, SPA_LSIZEBITS, SPA_MINBLOCKSHIFT, 1)
#define	L2BLK_SET_LSIZE(field, x)	\
	BF64_SET_SB((field), 0, SPA_LSIZEBITS, SPA_MINBLOCKSHIFT, 1, x)
#define	L2BLK_GET_PSIZE(field)	\
	BF64_GET_SB((field), 16, SPA_PSIZEBITS, SPA_MINBLOCKSHIFT, 1)
#define	L2BLK_SET_PSIZE(field, x)	\
	BF64_SET_SB((field), 16, SPA_PSIZEBITS, SPA_MINBLOCKSHIFT, 1, x)
#define	L2BLK_GET_COMPRESS(field)	\
	BF64_GET((field), 32, SPA_COMPRESSBITS)
#define	L2BLK_SET_COMPRESS(field, x)	\
	BF64_SET((field), 32, SPA_COMPRESSBITS, x)
#define	L2BLK_GET_PREFETCH(field)	BF64_GET((field), 39, 1)
#define	L2BLK_SET_PREFETCH(field, x)	BF64_SET((field), 39, 1, x)
#define	L2BLK_GET_CHECKSUM(field)	BF64_GET((field), 40, 8)
#define	L2BLK_SET_CHECKSUM(field, x)	BF64_SET((field), 40, 8, x)
#define	L2BLK_GET_TYPE(field)		BF64_GET((field), 48, 8)
#define	L2BLK_SET_TYPE(field, x)	BF64_SET((field), 48, 8, x)
#define	L2BLK_GET_PROTECTED(field)	BF64_GET((field), 56, 1)
#define	L2BLK_SET_PROTECTED(field, x)	BF64_SET((field), 56, 1, x)
#define	L2BLK_GET_STATE(field)		BF64_GET((field), 57, 4)
#define	L2BLK_SET_STATE(field, x)	BF64_SET((field), 57, 4, x)
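
/*
 * Illustrative use of the accessors above (a sketch; the real call sites
 * live in arc.c, "asize" stands in for a caller-provided aligned size, and
 * "lbp" points at an l2arc_log_blkptr_t):
 *
 *	uint64_t prop = 0;
 *	L2BLK_SET_LSIZE(prop, sizeof (l2arc_log_blk_phys_t));
 *	L2BLK_SET_PSIZE(prop, asize);
 *	L2BLK_SET_COMPRESS(prop, ZIO_COMPRESS_LZ4);
 *	L2BLK_SET_CHECKSUM(prop, ZIO_CHECKSUM_FLETCHER_4);
 *	lbp->lbp_prop = prop;
 *
 * and later, e.g. during rebuild:
 *
 *	uint64_t psize = L2BLK_GET_PSIZE(lbp->lbp_prop);
 */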

#define	PTR_SWAP(x, y)		\
	do {			\
		void *tmp = (x);\
		x = y;		\
		y = tmp;	\
	} while (0)
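
/*
 * For illustration only: PTR_SWAP exchanges two pointer lvalues in place,
 * e.g. with two hypothetical abd_t pointers a and b:
 *
 *	PTR_SWAP(a, b);
 *
 * after which a refers to b's old target and vice versa.
 */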

#define	L2ARC_DEV_HDR_MAGIC	0x5a46534341434845LLU	/* ASCII: "ZFSCACHE" */
#define	L2ARC_LOG_BLK_MAGIC	0x4c4f47424c4b4844LLU	/* ASCII: "LOGBLKHD" */

/*
 * L2ARC Internals
 */
typedef struct l2arc_dev {
	vdev_t *l2ad_vdev;		/* vdev */
	spa_t *l2ad_spa;		/* spa */
	uint64_t l2ad_hand;		/* next write location */
	uint64_t l2ad_start;		/* first addr on device */
	uint64_t l2ad_end;		/* last addr on device */
	boolean_t l2ad_first;		/* first sweep through */
	boolean_t l2ad_writing;		/* currently writing */
	kmutex_t l2ad_mtx;		/* lock for buffer list */
	list_t l2ad_buflist;		/* buffer list */
	list_node_t l2ad_node;		/* device list node */
	zfs_refcount_t l2ad_alloc;	/* allocated bytes */
	/*
	 * Persistence-related stuff
	 */
	l2arc_dev_hdr_phys_t *l2ad_dev_hdr;	/* persistent device header */
	uint64_t l2ad_dev_hdr_asize;		/* aligned hdr size */
	l2arc_log_blk_phys_t l2ad_log_blk;	/* currently open log block */
	int l2ad_log_ent_idx;			/* index into cur log blk */
	/* Number of bytes in current log block's payload */
	uint64_t l2ad_log_blk_payload_asize;
	/*
	 * Offset (in bytes) of the first buffer in current log block's
	 * payload.
	 */
	uint64_t l2ad_log_blk_payload_start;
	/* Flag indicating whether a rebuild is scheduled or is going on */
	boolean_t l2ad_rebuild;
	boolean_t l2ad_rebuild_cancel;
	boolean_t l2ad_rebuild_began;
	uint64_t l2ad_log_entries;	/* entries per log blk */
	uint64_t l2ad_evict;		/* evicted offset in bytes */
	/* List of pointers to log blocks present in the L2ARC device */
	list_t l2ad_lbptr_list;
	/*
	 * Aligned size of all log blocks as accounted by vdev_space_update().
	 */
	zfs_refcount_t l2ad_lb_asize;
	/*
	 * Number of log blocks present on the device.
	 */
	zfs_refcount_t l2ad_lb_count;
	boolean_t l2ad_trim_all;	/* TRIM whole device */
} l2arc_dev_t;

/*
 * Encrypted blocks will need to be stored encrypted on the L2ARC
 * disk as they appear in the main pool. In order for this to work we
 * need to pass around the encryption parameters so they can be used
 * to write data to the L2ARC. This struct is only defined in the
 * arc_buf_hdr_t if the L1 header is defined and has the ARC_FLAG_ENCRYPTED
 * flag set.
 */
typedef struct arc_buf_hdr_crypt {
	abd_t *b_rabd;			/* raw encrypted data */
	dmu_object_type_t b_ot;		/* object type */
	uint32_t b_ebufcnt;		/* count of encrypted buffers */

	/* dsobj for looking up encryption key for l2arc encryption */
	uint64_t b_dsobj;

	/* encryption parameters */
	uint8_t b_salt[ZIO_DATA_SALT_LEN];
	uint8_t b_iv[ZIO_DATA_IV_LEN];

	/*
	 * Technically this could be removed since we will always be able to
	 * get the mac from the bp when we need it. However, it is inconvenient
	 * for callers of arc code to have to pass a bp in all the time. This
	 * also allows us to assert that L2ARC data is properly encrypted to
	 * match the data in the main storage pool.
	 */
	uint8_t b_mac[ZIO_DATA_MAC_LEN];
} arc_buf_hdr_crypt_t;

typedef struct l2arc_buf_hdr {
	/* protected by arc_buf_hdr mutex */
	l2arc_dev_t *b_dev;		/* L2ARC device */
	uint64_t b_daddr;		/* disk address, offset byte */
	uint32_t b_hits;
	arc_state_type_t b_arcs_state;
	list_node_t b_l2node;
} l2arc_buf_hdr_t;

typedef struct l2arc_write_callback {
	l2arc_dev_t *l2wcb_dev;		/* device info */
	arc_buf_hdr_t *l2wcb_head;	/* head of write buflist */
	/* in-flight list of log blocks */
	list_t l2wcb_abd_list;
} l2arc_write_callback_t;

struct arc_buf_hdr {
	/* protected by hash lock */
	dva_t b_dva;
	uint64_t b_birth;

	arc_buf_contents_t b_type;
	uint8_t b_complevel;
	uint8_t b_reserved1;		/* used for 4 byte alignment */
	uint16_t b_reserved2;		/* used for 4 byte alignment */
	arc_buf_hdr_t *b_hash_next;
	arc_flags_t b_flags;

	/*
	 * This field stores the size of the data buffer after
	 * compression, and is set in the arc's zio completion handlers.
	 * It is in units of SPA_MINBLOCKSIZE (e.g. 1 == 512 bytes).
	 *
	 * While the block pointers can store up to 32MB in their psize
	 * field, we can only store up to 32MB minus 512B. This is due
	 * to the bp using a bias of 1, whereas we use a bias of 0 (i.e.
	 * a field of zeros represents 512B in the bp). We can't use a
	 * bias of 1 since we need to reserve a psize of zero, here, to
	 * represent holes and embedded blocks.
	 *
	 * This isn't a problem in practice, since the maximum size of a
	 * buffer is limited to 16MB, so we never need to store 32MB in
	 * this field. Even in the upstream illumos code base, the
	 * maximum size of a buffer is limited to 16MB.
	 */
	uint16_t b_psize;

	/*
	 * This field stores the size of the data buffer before
	 * compression, and cannot change once set. It is in units
	 * of SPA_MINBLOCKSIZE (e.g. 2 == 1024 bytes)
	 */
	uint16_t b_lsize;	/* immutable */
	uint64_t b_spa;		/* immutable */

	/* L2ARC fields. Undefined when not in L2ARC. */
	l2arc_buf_hdr_t b_l2hdr;
	/* L1ARC fields. Undefined when in l2arc_only state */
	l1arc_buf_hdr_t b_l1hdr;
	/*
	 * Encryption parameters. Defined only when ARC_FLAG_ENCRYPTED
	 * is set and the L1 header exists.
	 */
	arc_buf_hdr_crypt_t b_crypt_hdr;
};
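
/*
 * Worked example of the b_psize encoding described above: with
 * SPA_MINBLOCKSHIFT == 9, a b_psize of 3 encodes 3 << 9 == 1536 bytes, and
 * the largest encodable value, 0xffff << 9, is 32MB minus 512B, which is
 * why the comment on b_psize notes the 512B shortfall relative to the block
 * pointer's psize field.
 */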

typedef struct arc_stats {
	/* Number of requests that were satisfied without I/O. */
	kstat_named_t arcstat_hits;
	/* Number of requests for which I/O was already running. */
	kstat_named_t arcstat_iohits;
	/* Number of requests for which I/O has to be issued. */
	kstat_named_t arcstat_misses;
	/* Same three, but specifically for demand data. */
	kstat_named_t arcstat_demand_data_hits;
	kstat_named_t arcstat_demand_data_iohits;
	kstat_named_t arcstat_demand_data_misses;
	/* Same three, but specifically for demand metadata. */
	kstat_named_t arcstat_demand_metadata_hits;
	kstat_named_t arcstat_demand_metadata_iohits;
	kstat_named_t arcstat_demand_metadata_misses;
	/* Same three, but specifically for prefetch data. */
	kstat_named_t arcstat_prefetch_data_hits;
	kstat_named_t arcstat_prefetch_data_iohits;
	kstat_named_t arcstat_prefetch_data_misses;
	/* Same three, but specifically for prefetch metadata. */
	kstat_named_t arcstat_prefetch_metadata_hits;
	kstat_named_t arcstat_prefetch_metadata_iohits;
	kstat_named_t arcstat_prefetch_metadata_misses;
	kstat_named_t arcstat_mru_hits;
	kstat_named_t arcstat_mru_ghost_hits;
	kstat_named_t arcstat_mfu_hits;
	kstat_named_t arcstat_mfu_ghost_hits;
	kstat_named_t arcstat_uncached_hits;
	kstat_named_t arcstat_deleted;
	/*
	 * Number of buffers that could not be evicted because the hash lock
	 * was held by another thread. The lock may not necessarily be held
	 * by something using the same buffer, since hash locks are shared
	 * by multiple buffers.
	 */
	kstat_named_t arcstat_mutex_miss;
	/*
	 * Number of buffers skipped when updating the access state due to the
	 * header having already been released after acquiring the hash lock.
	 */
	kstat_named_t arcstat_access_skip;
	/*
	 * Number of buffers skipped because they have I/O in progress, are
	 * indirect prefetch buffers that have not lived long enough, or are
	 * not from the spa we're trying to evict from.
	 */
	kstat_named_t arcstat_evict_skip;
	/*
	 * Number of times arc_evict_state() was unable to evict enough
	 * buffers to reach its target amount.
	 */
	kstat_named_t arcstat_evict_not_enough;
	kstat_named_t arcstat_evict_l2_cached;
	kstat_named_t arcstat_evict_l2_eligible;
	kstat_named_t arcstat_evict_l2_eligible_mfu;
	kstat_named_t arcstat_evict_l2_eligible_mru;
	kstat_named_t arcstat_evict_l2_ineligible;
	kstat_named_t arcstat_evict_l2_skip;
	kstat_named_t arcstat_hash_elements;
	kstat_named_t arcstat_hash_elements_max;
	kstat_named_t arcstat_hash_collisions;
	kstat_named_t arcstat_hash_chains;
	kstat_named_t arcstat_hash_chain_max;
	kstat_named_t arcstat_p;
	kstat_named_t arcstat_c;
	kstat_named_t arcstat_c_min;
	kstat_named_t arcstat_c_max;
	kstat_named_t arcstat_size;
	/*
	 * Number of compressed bytes stored in the arc_buf_hdr_t's b_pabd.
	 * Note that the compressed bytes may match the uncompressed bytes
	 * if the block is either not compressed or compressed arc is disabled.
	 */
	kstat_named_t arcstat_compressed_size;
	/*
	 * Uncompressed size of the data stored in b_pabd. If compressed
	 * arc is disabled then this value will be identical to the stat
	 * above.
	 */
	kstat_named_t arcstat_uncompressed_size;
	/*
	 * Number of bytes stored in all the arc_buf_t's. This is classified
	 * as "overhead" since this data is typically short-lived and will
	 * be evicted from the arc when it becomes unreferenced unless the
	 * zfs_keep_uncompressed_metadata or zfs_keep_uncompressed_level
	 * values have been set (see comment in dbuf.c for more information).
	 */
	kstat_named_t arcstat_overhead_size;
	/*
	 * Number of bytes consumed by internal ARC structures necessary
	 * for tracking purposes; these structures are not actually
	 * backed by ARC buffers. This includes arc_buf_hdr_t structures
	 * (allocated via arc_buf_hdr_t_full and arc_buf_hdr_t_l2only
	 * caches), and arc_buf_t structures (allocated via arc_buf_t
	 * cache).
	 */
	kstat_named_t arcstat_hdr_size;
	/*
	 * Number of bytes consumed by ARC buffers of type equal to
	 * ARC_BUFC_DATA. This is generally consumed by buffers backing
	 * on disk user data (e.g. plain file contents).
	 */
	kstat_named_t arcstat_data_size;
	/*
	 * Number of bytes consumed by ARC buffers of type equal to
	 * ARC_BUFC_METADATA. This is generally consumed by buffers
	 * backing on disk data that is used for internal ZFS
	 * structures (e.g. ZAP, dnode, indirect blocks, etc).
	 */
	kstat_named_t arcstat_metadata_size;
	/*
	 * Number of bytes consumed by dmu_buf_impl_t objects.
	 */
	kstat_named_t arcstat_dbuf_size;
	/*
	 * Number of bytes consumed by dnode_t objects.
	 */
	kstat_named_t arcstat_dnode_size;
	/*
	 * Number of bytes consumed by bonus buffers.
	 */
	kstat_named_t arcstat_bonus_size;
#if defined(COMPAT_FREEBSD11)
	/*
	 * Sum of the previous three counters, provided for compatibility.
	 */
	kstat_named_t arcstat_other_size;
#endif

	/*
	 * Total number of bytes consumed by ARC buffers residing in the
	 * arc_anon state. This includes *all* buffers in the arc_anon
	 * state; e.g. data, metadata, evictable, and unevictable buffers
	 * are all included in this value.
	 */
	kstat_named_t arcstat_anon_size;
	/*
	 * Number of bytes consumed by ARC buffers that meet the
	 * following criteria: backing buffers of type ARC_BUFC_DATA,
	 * residing in the arc_anon state, and are eligible for eviction
	 * (e.g. have no outstanding holds on the buffer).
	 */
	kstat_named_t arcstat_anon_evictable_data;
	/*
	 * Number of bytes consumed by ARC buffers that meet the
	 * following criteria: backing buffers of type ARC_BUFC_METADATA,
	 * residing in the arc_anon state, and are eligible for eviction
	 * (e.g. have no outstanding holds on the buffer).
	 */
	kstat_named_t arcstat_anon_evictable_metadata;
	/*
	 * Total number of bytes consumed by ARC buffers residing in the
	 * arc_mru state. This includes *all* buffers in the arc_mru
	 * state; e.g. data, metadata, evictable, and unevictable buffers
	 * are all included in this value.
	 */
	kstat_named_t arcstat_mru_size;
	/*
	 * Number of bytes consumed by ARC buffers that meet the
	 * following criteria: backing buffers of type ARC_BUFC_DATA,
	 * residing in the arc_mru state, and are eligible for eviction
	 * (e.g. have no outstanding holds on the buffer).
	 */
	kstat_named_t arcstat_mru_evictable_data;
	/*
	 * Number of bytes consumed by ARC buffers that meet the
	 * following criteria: backing buffers of type ARC_BUFC_METADATA,
	 * residing in the arc_mru state, and are eligible for eviction
	 * (e.g. have no outstanding holds on the buffer).
	 */
	kstat_named_t arcstat_mru_evictable_metadata;
	/*
	 * Total number of bytes that *would have been* consumed by ARC
	 * buffers in the arc_mru_ghost state. The key thing to note here
	 * is that this size doesn't actually indicate RAM consumption.
	 * The ghost lists only consist of headers and don't actually have
	 * ARC buffers linked off of these headers. Thus, *if* the headers
	 * had associated ARC buffers, these buffers *would have* consumed
	 * this number of bytes.
	 */
	kstat_named_t arcstat_mru_ghost_size;
	/*
	 * Number of bytes that *would have been* consumed by ARC
	 * buffers that are eligible for eviction, of type
	 * ARC_BUFC_DATA, and linked off the arc_mru_ghost state.
	 */
	kstat_named_t arcstat_mru_ghost_evictable_data;
	/*
	 * Number of bytes that *would have been* consumed by ARC
	 * buffers that are eligible for eviction, of type
	 * ARC_BUFC_METADATA, and linked off the arc_mru_ghost state.
	 */
	kstat_named_t arcstat_mru_ghost_evictable_metadata;
	/*
	 * Total number of bytes consumed by ARC buffers residing in the
	 * arc_mfu state. This includes *all* buffers in the arc_mfu
	 * state; e.g. data, metadata, evictable, and unevictable buffers
	 * are all included in this value.
	 */
	kstat_named_t arcstat_mfu_size;
	/*
	 * Number of bytes consumed by ARC buffers that are eligible for
	 * eviction, of type ARC_BUFC_DATA, and reside in the arc_mfu
	 * state.
	 */
	kstat_named_t arcstat_mfu_evictable_data;
	/*
	 * Number of bytes consumed by ARC buffers that are eligible for
	 * eviction, of type ARC_BUFC_METADATA, and reside in the
	 * arc_mfu state.
	 */
	kstat_named_t arcstat_mfu_evictable_metadata;
	/*
	 * Total number of bytes that *would have been* consumed by ARC
	 * buffers in the arc_mfu_ghost state. See the comment above
	 * arcstat_mru_ghost_size for more details.
	 */
	kstat_named_t arcstat_mfu_ghost_size;
	/*
	 * Number of bytes that *would have been* consumed by ARC
	 * buffers that are eligible for eviction, of type
	 * ARC_BUFC_DATA, and linked off the arc_mfu_ghost state.
	 */
	kstat_named_t arcstat_mfu_ghost_evictable_data;
	/*
	 * Number of bytes that *would have been* consumed by ARC
	 * buffers that are eligible for eviction, of type
	 * ARC_BUFC_METADATA, and linked off the arc_mfu_ghost state.
	 */
	kstat_named_t arcstat_mfu_ghost_evictable_metadata;
	/*
	 * Total number of bytes that are going to be evicted from ARC due to
	 * ARC_FLAG_UNCACHED being set.
	 */
	kstat_named_t arcstat_uncached_size;
	/*
	 * Number of data bytes that are going to be evicted from ARC due to
	 * ARC_FLAG_UNCACHED being set.
	 */
	kstat_named_t arcstat_uncached_evictable_data;
	/*
	 * Number of metadata bytes that are going to be evicted from ARC
	 * due to ARC_FLAG_UNCACHED being set.
	 */
	kstat_named_t arcstat_uncached_evictable_metadata;
	kstat_named_t arcstat_l2_hits;
	kstat_named_t arcstat_l2_misses;
	/*
	 * Allocated size (in bytes) of L2ARC cached buffers by ARC state.
	 */
	kstat_named_t arcstat_l2_prefetch_asize;
	kstat_named_t arcstat_l2_mru_asize;
	kstat_named_t arcstat_l2_mfu_asize;
	/*
	 * Allocated size (in bytes) of L2ARC cached buffers by buffer content
	 * type.
	 */
	kstat_named_t arcstat_l2_bufc_data_asize;
	kstat_named_t arcstat_l2_bufc_metadata_asize;
	kstat_named_t arcstat_l2_feeds;
	kstat_named_t arcstat_l2_rw_clash;
	kstat_named_t arcstat_l2_read_bytes;
	kstat_named_t arcstat_l2_write_bytes;
	kstat_named_t arcstat_l2_writes_sent;
	kstat_named_t arcstat_l2_writes_done;
	kstat_named_t arcstat_l2_writes_error;
	kstat_named_t arcstat_l2_writes_lock_retry;
	kstat_named_t arcstat_l2_evict_lock_retry;
	kstat_named_t arcstat_l2_evict_reading;
	kstat_named_t arcstat_l2_evict_l1cached;
	kstat_named_t arcstat_l2_free_on_write;
	kstat_named_t arcstat_l2_abort_lowmem;
	kstat_named_t arcstat_l2_cksum_bad;
	kstat_named_t arcstat_l2_io_error;
	kstat_named_t arcstat_l2_lsize;
	kstat_named_t arcstat_l2_psize;
	kstat_named_t arcstat_l2_hdr_size;
	/*
	 * Number of L2ARC log blocks written. These are used for restoring the
	 * L2ARC. Updated during writing of L2ARC log blocks.
	 */
	kstat_named_t arcstat_l2_log_blk_writes;
	/*
	 * Moving average of the aligned size of the L2ARC log blocks, in
	 * bytes. Updated during L2ARC rebuild and during writing of L2ARC
	 * log blocks.
	 */
	kstat_named_t arcstat_l2_log_blk_avg_asize;
	/* Aligned size of L2ARC log blocks on L2ARC devices. */
	kstat_named_t arcstat_l2_log_blk_asize;
	/* Number of L2ARC log blocks present on L2ARC devices. */
	kstat_named_t arcstat_l2_log_blk_count;
	/*
	 * Moving average of the ratio of the aligned size of L2ARC restored
	 * data (in bytes) to the aligned size of its metadata in L2ARC.
	 * Updated during L2ARC rebuild and during writing of L2ARC log blocks.
	 */
	kstat_named_t arcstat_l2_data_to_meta_ratio;
	/*
	 * Number of times the L2ARC rebuild was successful for an L2ARC device.
	 */
	kstat_named_t arcstat_l2_rebuild_success;
	/*
	 * Number of times the L2ARC rebuild failed because the device header
	 * was in an unsupported format or corrupted.
	 */
	kstat_named_t arcstat_l2_rebuild_abort_unsupported;
	/*
	 * Number of times the L2ARC rebuild failed because of IO errors
	 * while reading a log block.
	 */
	kstat_named_t arcstat_l2_rebuild_abort_io_errors;
	/*
	 * Number of times the L2ARC rebuild failed because of IO errors when
	 * reading the device header.
	 */
	kstat_named_t arcstat_l2_rebuild_abort_dh_errors;
	/*
	 * Number of L2ARC log blocks which failed to be restored due to
	 * checksum errors.
	 */
	kstat_named_t arcstat_l2_rebuild_abort_cksum_lb_errors;
	/*
	 * Number of times the L2ARC rebuild was aborted due to low system
	 * memory.
	 */
	kstat_named_t arcstat_l2_rebuild_abort_lowmem;
	/* Logical size of L2ARC restored data, in bytes. */
	kstat_named_t arcstat_l2_rebuild_size;
	/* Aligned size of L2ARC restored data, in bytes. */
	kstat_named_t arcstat_l2_rebuild_asize;
	/*
	 * Number of L2ARC log entries (buffers) that were successfully
	 * restored in ARC.
	 */
	kstat_named_t arcstat_l2_rebuild_bufs;
	/*
	 * Number of L2ARC log entries (buffers) already cached in ARC. These
	 * were not restored again.
	 */
	kstat_named_t arcstat_l2_rebuild_bufs_precached;
	/*
	 * Number of L2ARC log blocks that were restored successfully. Each
	 * log block may hold up to L2ARC_LOG_BLK_MAX_ENTRIES buffers.
	 */
	kstat_named_t arcstat_l2_rebuild_log_blks;
	kstat_named_t arcstat_memory_throttle_count;
	kstat_named_t arcstat_memory_direct_count;
	kstat_named_t arcstat_memory_indirect_count;
	kstat_named_t arcstat_memory_all_bytes;
	kstat_named_t arcstat_memory_free_bytes;
	kstat_named_t arcstat_memory_available_bytes;
	kstat_named_t arcstat_no_grow;
	kstat_named_t arcstat_tempreserve;
	kstat_named_t arcstat_loaned_bytes;
	kstat_named_t arcstat_prune;
	kstat_named_t arcstat_meta_used;
	kstat_named_t arcstat_meta_limit;
	kstat_named_t arcstat_dnode_limit;
	kstat_named_t arcstat_meta_max;
	kstat_named_t arcstat_meta_min;
	kstat_named_t arcstat_async_upgrade_sync;
	/* Number of predictive prefetch requests. */
	kstat_named_t arcstat_predictive_prefetch;
	/* Number of requests for which predictive prefetch has completed. */
	kstat_named_t arcstat_demand_hit_predictive_prefetch;
	/* Number of requests for which predictive prefetch was running. */
	kstat_named_t arcstat_demand_iohit_predictive_prefetch;
	/* Number of prescient prefetch requests. */
	kstat_named_t arcstat_prescient_prefetch;
	/* Number of requests for which prescient prefetch has completed. */
	kstat_named_t arcstat_demand_hit_prescient_prefetch;
	/* Number of requests for which prescient prefetch was running. */
	kstat_named_t arcstat_demand_iohit_prescient_prefetch;
	kstat_named_t arcstat_need_free;
	kstat_named_t arcstat_sys_free;
	kstat_named_t arcstat_raw_size;
	kstat_named_t arcstat_cached_only_in_progress;
	kstat_named_t arcstat_abd_chunk_waste_size;
} arc_stats_t;

typedef struct arc_sums {
	wmsum_t arcstat_hits;
	wmsum_t arcstat_iohits;
	wmsum_t arcstat_misses;
	wmsum_t arcstat_demand_data_hits;
	wmsum_t arcstat_demand_data_iohits;
	wmsum_t arcstat_demand_data_misses;
	wmsum_t arcstat_demand_metadata_hits;
	wmsum_t arcstat_demand_metadata_iohits;
	wmsum_t arcstat_demand_metadata_misses;
	wmsum_t arcstat_prefetch_data_hits;
	wmsum_t arcstat_prefetch_data_iohits;
	wmsum_t arcstat_prefetch_data_misses;
	wmsum_t arcstat_prefetch_metadata_hits;
	wmsum_t arcstat_prefetch_metadata_iohits;
	wmsum_t arcstat_prefetch_metadata_misses;
	wmsum_t arcstat_mru_hits;
	wmsum_t arcstat_mru_ghost_hits;
	wmsum_t arcstat_mfu_hits;
	wmsum_t arcstat_mfu_ghost_hits;
	wmsum_t arcstat_uncached_hits;
	wmsum_t arcstat_deleted;
	wmsum_t arcstat_mutex_miss;
	wmsum_t arcstat_access_skip;
	wmsum_t arcstat_evict_skip;
	wmsum_t arcstat_evict_not_enough;
	wmsum_t arcstat_evict_l2_cached;
	wmsum_t arcstat_evict_l2_eligible;
	wmsum_t arcstat_evict_l2_eligible_mfu;
	wmsum_t arcstat_evict_l2_eligible_mru;
	wmsum_t arcstat_evict_l2_ineligible;
	wmsum_t arcstat_evict_l2_skip;
	wmsum_t arcstat_hash_collisions;
	wmsum_t arcstat_hash_chains;
	aggsum_t arcstat_size;
	wmsum_t arcstat_compressed_size;
	wmsum_t arcstat_uncompressed_size;
	wmsum_t arcstat_overhead_size;
	wmsum_t arcstat_hdr_size;
	wmsum_t arcstat_data_size;
	wmsum_t arcstat_metadata_size;
	wmsum_t arcstat_dbuf_size;
	aggsum_t arcstat_dnode_size;
	wmsum_t arcstat_bonus_size;
	wmsum_t arcstat_l2_hits;
	wmsum_t arcstat_l2_misses;
	wmsum_t arcstat_l2_prefetch_asize;
	wmsum_t arcstat_l2_mru_asize;
	wmsum_t arcstat_l2_mfu_asize;
	wmsum_t arcstat_l2_bufc_data_asize;
	wmsum_t arcstat_l2_bufc_metadata_asize;
	wmsum_t arcstat_l2_feeds;
	wmsum_t arcstat_l2_rw_clash;
	wmsum_t arcstat_l2_read_bytes;
	wmsum_t arcstat_l2_write_bytes;
	wmsum_t arcstat_l2_writes_sent;
	wmsum_t arcstat_l2_writes_done;
	wmsum_t arcstat_l2_writes_error;
	wmsum_t arcstat_l2_writes_lock_retry;
	wmsum_t arcstat_l2_evict_lock_retry;
	wmsum_t arcstat_l2_evict_reading;
	wmsum_t arcstat_l2_evict_l1cached;
	wmsum_t arcstat_l2_free_on_write;
	wmsum_t arcstat_l2_abort_lowmem;
	wmsum_t arcstat_l2_cksum_bad;
	wmsum_t arcstat_l2_io_error;
	wmsum_t arcstat_l2_lsize;
	wmsum_t arcstat_l2_psize;
	aggsum_t arcstat_l2_hdr_size;
	wmsum_t arcstat_l2_log_blk_writes;
	wmsum_t arcstat_l2_log_blk_asize;
	wmsum_t arcstat_l2_log_blk_count;
	wmsum_t arcstat_l2_rebuild_success;
	wmsum_t arcstat_l2_rebuild_abort_unsupported;
	wmsum_t arcstat_l2_rebuild_abort_io_errors;
	wmsum_t arcstat_l2_rebuild_abort_dh_errors;
	wmsum_t arcstat_l2_rebuild_abort_cksum_lb_errors;
	wmsum_t arcstat_l2_rebuild_abort_lowmem;
	wmsum_t arcstat_l2_rebuild_size;
	wmsum_t arcstat_l2_rebuild_asize;
	wmsum_t arcstat_l2_rebuild_bufs;
	wmsum_t arcstat_l2_rebuild_bufs_precached;
	wmsum_t arcstat_l2_rebuild_log_blks;
	wmsum_t arcstat_memory_throttle_count;
	wmsum_t arcstat_memory_direct_count;
	wmsum_t arcstat_memory_indirect_count;
	wmsum_t arcstat_prune;
	aggsum_t arcstat_meta_used;
	wmsum_t arcstat_async_upgrade_sync;
	wmsum_t arcstat_predictive_prefetch;
	wmsum_t arcstat_demand_hit_predictive_prefetch;
	wmsum_t arcstat_demand_iohit_predictive_prefetch;
	wmsum_t arcstat_prescient_prefetch;
	wmsum_t arcstat_demand_hit_prescient_prefetch;
	wmsum_t arcstat_demand_iohit_prescient_prefetch;
	wmsum_t arcstat_raw_size;
	wmsum_t arcstat_cached_only_in_progress;
	wmsum_t arcstat_abd_chunk_waste_size;
} arc_sums_t;

typedef struct arc_evict_waiter {
	list_node_t aew_node;
	kcondvar_t aew_cv;
	uint64_t aew_count;
} arc_evict_waiter_t;
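
/*
 * Rough consumer-side sketch of arc_evict_waiter_t (the lock, list, and
 * target count below are hypothetical placeholders; the real ones are
 * private to arc.c):
 *
 *	arc_evict_waiter_t aw;
 *	cv_init(&aw.aew_cv, NULL, CV_DEFAULT, NULL);
 *	mutex_enter(&example_evict_lock);
 *	aw.aew_count = example_target_count;
 *	list_insert_tail(&example_evict_waiters, &aw);
 *	cv_wait(&aw.aew_cv, &example_evict_lock);
 *	mutex_exit(&example_evict_lock);
 *	cv_destroy(&aw.aew_cv);
 *
 * The eviction thread signals aew_cv once its running eviction count
 * reaches aew_count.
 */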

#define	ARCSTAT(stat)	(arc_stats.stat.value.ui64)

#define	ARCSTAT_INCR(stat, val)	\
	wmsum_add(&arc_sums.stat, (val))

#define	ARCSTAT_BUMP(stat)	ARCSTAT_INCR(stat, 1)
#define	ARCSTAT_BUMPDOWN(stat)	ARCSTAT_INCR(stat, -1)
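
/*
 * Typical usage (for illustration; call sites are in arc.c): bump an event
 * counter or add a byte delta to one of the wmsum-backed sums, where
 * "psize" stands in for whatever byte count the caller has computed:
 *
 *	ARCSTAT_BUMP(arcstat_hits);
 *	ARCSTAT_INCR(arcstat_compressed_size, psize);
 */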

#define	arc_no_grow	ARCSTAT(arcstat_no_grow) /* do not grow cache size */
#define	arc_p		ARCSTAT(arcstat_p)	/* target size of MRU */
#define	arc_c		ARCSTAT(arcstat_c)	/* target size of cache */
#define	arc_c_min	ARCSTAT(arcstat_c_min)	/* min target cache size */
#define	arc_c_max	ARCSTAT(arcstat_c_max)	/* max target cache size */
#define	arc_sys_free	ARCSTAT(arcstat_sys_free) /* target system free bytes */

#define	arc_anon	(&ARC_anon)
#define	arc_mru		(&ARC_mru)
#define	arc_mru_ghost	(&ARC_mru_ghost)
#define	arc_mfu		(&ARC_mfu)
#define	arc_mfu_ghost	(&ARC_mfu_ghost)
#define	arc_l2c_only	(&ARC_l2c_only)
#define	arc_uncached	(&ARC_uncached)

extern taskq_t *arc_prune_taskq;
extern arc_stats_t arc_stats;
extern arc_sums_t arc_sums;
extern hrtime_t arc_growtime;
extern boolean_t arc_warm;
extern uint_t arc_grow_retry;
extern uint_t arc_no_grow_shift;
extern uint_t arc_shrink_shift;
extern kmutex_t arc_prune_mtx;
extern list_t arc_prune_list;
extern arc_state_t ARC_mfu;
extern arc_state_t ARC_mru;
extern uint_t zfs_arc_pc_percent;
extern uint_t arc_lotsfree_percent;
extern uint64_t zfs_arc_min;
extern uint64_t zfs_arc_max;

extern void arc_reduce_target_size(int64_t to_free);
extern boolean_t arc_reclaim_needed(void);
extern void arc_kmem_reap_soon(void);
extern void arc_wait_for_eviction(uint64_t, boolean_t);

extern void arc_lowmem_init(void);
extern void arc_lowmem_fini(void);
extern void arc_prune_async(uint64_t);
extern int arc_memory_throttle(spa_t *spa, uint64_t reserve, uint64_t txg);
extern uint64_t arc_free_memory(void);
extern int64_t arc_available_memory(void);
extern void arc_tuning_update(boolean_t);
extern void arc_register_hotplug(void);
extern void arc_unregister_hotplug(void);

extern int param_set_arc_u64(ZFS_MODULE_PARAM_ARGS);
extern int param_set_arc_int(ZFS_MODULE_PARAM_ARGS);
extern int param_set_arc_min(ZFS_MODULE_PARAM_ARGS);
extern int param_set_arc_max(ZFS_MODULE_PARAM_ARGS);

/* used in zdb.c */
boolean_t l2arc_log_blkptr_valid(l2arc_dev_t *dev,
	const l2arc_log_blkptr_t *lbp);

/* used in vdev_trim.c */
void l2arc_dev_hdr_update(l2arc_dev_t *dev);
l2arc_dev_t *l2arc_vdev_get(vdev_t *vd);

#ifdef __cplusplus
}
#endif

#endif /* _SYS_ARC_IMPL_H */