1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or https://opensource.org/licenses/CDDL-1.0.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2013, 2017 by Delphix. All rights reserved.
25 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
26 */
27
28 #include <sys/zfs_context.h>
29 #include <sys/types.h>
30 #include <sys/param.h>
31 #include <sys/sysmacros.h>
32 #include <sys/dmu.h>
33 #include <sys/dmu_impl.h>
34 #include <sys/dmu_objset.h>
35 #include <sys/dmu_tx.h>
36 #include <sys/dbuf.h>
37 #include <sys/dnode.h>
38 #include <sys/zap.h>
39 #include <sys/sa.h>
40 #include <sys/sunddi.h>
41 #include <sys/sa_impl.h>
42 #include <sys/errno.h>
44
45 #ifdef _KERNEL
46 #include <sys/zfs_znode.h>
47 #endif
48
49 /*
50 * ZFS System attributes:
51 *
52 * A generic mechanism to allow for arbitrary attributes
53 * to be stored in a dnode. The data will be stored in the bonus buffer of
54 * the dnode and if necessary a special "spill" block will be used to handle
 * overflow situations. The spill block will be sized to fit the data,
 * from 512 bytes up to 128K. When a spill block is used, the BP (blkptr_t)
 * for the spill block is stored at the end of the current bonus buffer. Any
58 * attributes that would be in the way of the blkptr_t will be relocated
59 * into the spill block.
60 *
61 * Attribute registration:
62 *
 * A mapping between attribute "string" names and their actual attribute
 * numeric values, length, and byteswap function is stored persistently on
 * a per-dataset basis. The names are only used during registration. All
 * attributes are known by their unique attribute id value. If an attribute
 * can have a variable size then the value 0 will be used to indicate this.
69 *
70 * Attribute Layout:
71 *
72 * Attribute layouts are a way to compactly store multiple attributes, but
73 * without taking the overhead associated with managing each attribute
 * individually. Since you will typically have the same set of attributes
 * stored in the same order, a single table will be used to represent that
 * layout. The ZPL, for example, will usually have only about 10 different
 * layouts (regular files, device files, symlinks,
 * regular files + scanstamp, files/dirs with extended attributes, and then
 * all of those minus the ACL, because it would be kicked out into the
 * spill block).
81 *
82 * Layouts are simply an array of the attributes and their
 * ordering, e.g. [0, 1, 4, 5, 2].
84 *
85 * Each distinct layout is given a unique layout number and that is what's
86 * stored in the header at the beginning of the SA data buffer.
87 *
88 * A layout only covers a single dbuf (bonus or spill). If a set of
89 * attributes is split up between the bonus buffer and a spill buffer then
90 * two different layouts will be used. This allows us to byteswap the
91 * spill without looking at the bonus buffer and keeps the on disk format of
92 * the bonus and spill buffer the same.
93 *
94 * Adding a single attribute will cause the entire set of attributes to
95 * be rewritten and could result in a new layout number being constructed
96 * as part of the rewrite if no such layout exists for the new set of
97 * attributes. The new attribute will be appended to the end of the already
98 * existing attributes.
99 *
100 * Both the attribute registration and attribute layout information are
 * stored in normal ZAP attributes. There should be a small number of
102 * known layouts and the set of attributes is assumed to typically be quite
103 * small.
104 *
105 * The registered attributes and layout "table" information is maintained
106 * in core and a special "sa_os_t" is attached to the objset_t.
107 *
108 * A special interface is provided to allow for quickly applying
109 * a large set of attributes at once. sa_replace_all_by_template() is
110 * used to set an array of attributes. This is used by the ZPL when
111 * creating a brand new file. The template that is passed into the function
112 * specifies the attribute, size for variable length attributes, location of
113 * data and special "data locator" function if the data isn't in a contiguous
114 * location.
115 *
116 * Byteswap implications:
117 *
118 * Since the SA attributes are not entirely self describing we can't do
119 * the normal byteswap processing. The special ZAP layout attribute and
120 * attribute registration attributes define the byteswap function and the
 * size of each attribute, unless it is variable sized.
 * The normal ZFS byteswapping infrastructure assumes you don't need
 * to read any objects in order to do the necessary byteswapping, whereas
 * SA attributes can only be properly byteswapped once the dataset is opened
 * and the layout/attribute ZAP attributes are available. Because of this,
 * the SA attributes will be byteswapped when they are first accessed by
 * the SA code that reads the SA data.
128 */
129
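/*
 * Illustrative consumer sketch (not part of this file; shown only to tie the
 * pieces above together): register attributes for an objset, open a handle
 * on an object, and read a single attribute. The "my_attrs" registration
 * table, its MY_ATTR_COUNT size, and the MY_ATTR_SIZE index are hypothetical
 * caller-side names.
 *
 *	sa_attr_type_t *attr_table;
 *	sa_handle_t *hdl;
 *	uint64_t size;
 *
 *	VERIFY0(sa_setup(os, sa_obj, my_attrs, MY_ATTR_COUNT, &attr_table));
 *	VERIFY0(sa_handle_get(os, object, NULL, SA_HDL_PRIVATE, &hdl));
 *	VERIFY0(sa_lookup(hdl, attr_table[MY_ATTR_SIZE], &size, sizeof (size)));
 *	sa_handle_destroy(hdl);
 */
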
130 typedef void (sa_iterfunc_t)(void *hdr, void *addr, sa_attr_type_t,
131 uint16_t length, int length_idx, boolean_t, void *userp);
132
133 static int sa_build_index(sa_handle_t *hdl, sa_buf_type_t buftype);
134 static void sa_idx_tab_hold(objset_t *os, sa_idx_tab_t *idx_tab);
135 static sa_idx_tab_t *sa_find_idx_tab(objset_t *os, dmu_object_type_t bonustype,
136 sa_hdr_phys_t *hdr);
137 static void sa_idx_tab_rele(objset_t *os, void *arg);
138 static void sa_copy_data(sa_data_locator_t *func, void *start, void *target,
139 int buflen);
140 static int sa_modify_attrs(sa_handle_t *hdl, sa_attr_type_t newattr,
141 sa_data_op_t action, sa_data_locator_t *locator, void *datastart,
142 uint16_t buflen, dmu_tx_t *tx);
143
144 static arc_byteswap_func_t sa_bswap_table[] = {
145 byteswap_uint64_array,
146 byteswap_uint32_array,
147 byteswap_uint16_array,
148 byteswap_uint8_array,
149 zfs_acl_byteswap,
150 };
151
152 #ifdef HAVE_EFFICIENT_UNALIGNED_ACCESS
153 #define SA_COPY_DATA(f, s, t, l) \
154 do { \
155 if (f == NULL) { \
156 if (l == 8) { \
157 *(uint64_t *)t = *(uint64_t *)s; \
158 } else if (l == 16) { \
159 *(uint64_t *)t = *(uint64_t *)s; \
160 *(uint64_t *)((uintptr_t)t + 8) = \
161 *(uint64_t *)((uintptr_t)s + 8); \
162 } else { \
163 memcpy(t, s, l); \
164 } \
165 } else { \
166 sa_copy_data(f, s, t, l); \
167 } \
168 } while (0)
169 #else
170 #define SA_COPY_DATA(f, s, t, l) sa_copy_data(f, s, t, l)
171 #endif
172
173 /*
174 * This table is fixed and cannot be changed. Its purpose is to
175 * allow the SA code to work with both old/new ZPL file systems.
176 * It contains the list of legacy attributes. These attributes aren't
177 * stored in the "attribute" registry zap objects, since older ZPL file systems
178 * won't have the registry. Only objsets of type ZFS_TYPE_FILESYSTEM will
179 * use this static table.
180 */
181 static const sa_attr_reg_t sa_legacy_attrs[] = {
182 {"ZPL_ATIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 0},
183 {"ZPL_MTIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 1},
184 {"ZPL_CTIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 2},
185 {"ZPL_CRTIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 3},
186 {"ZPL_GEN", sizeof (uint64_t), SA_UINT64_ARRAY, 4},
187 {"ZPL_MODE", sizeof (uint64_t), SA_UINT64_ARRAY, 5},
188 {"ZPL_SIZE", sizeof (uint64_t), SA_UINT64_ARRAY, 6},
189 {"ZPL_PARENT", sizeof (uint64_t), SA_UINT64_ARRAY, 7},
190 {"ZPL_LINKS", sizeof (uint64_t), SA_UINT64_ARRAY, 8},
191 {"ZPL_XATTR", sizeof (uint64_t), SA_UINT64_ARRAY, 9},
192 {"ZPL_RDEV", sizeof (uint64_t), SA_UINT64_ARRAY, 10},
193 {"ZPL_FLAGS", sizeof (uint64_t), SA_UINT64_ARRAY, 11},
194 {"ZPL_UID", sizeof (uint64_t), SA_UINT64_ARRAY, 12},
195 {"ZPL_GID", sizeof (uint64_t), SA_UINT64_ARRAY, 13},
196 {"ZPL_PAD", sizeof (uint64_t) * 4, SA_UINT64_ARRAY, 14},
197 {"ZPL_ZNODE_ACL", 88, SA_UINT8_ARRAY, 15},
198 };
199
200 /*
201 * This is only used for objects of type DMU_OT_ZNODE
202 */
203 static const sa_attr_type_t sa_legacy_zpl_layout[] = {
204 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
205 };
206
207 /*
208 * Special dummy layout used for buffers with no attributes.
209 */
210 static const sa_attr_type_t sa_dummy_zpl_layout[] = { 0 };
211
212 static const size_t sa_legacy_attr_count = ARRAY_SIZE(sa_legacy_attrs);
213 static kmem_cache_t *sa_cache = NULL;
214
215 static int
216 sa_cache_constructor(void *buf, void *unused, int kmflag)
217 {
218 (void) unused, (void) kmflag;
219 sa_handle_t *hdl = buf;
220
221 mutex_init(&hdl->sa_lock, NULL, MUTEX_DEFAULT, NULL);
222 return (0);
223 }
224
225 static void
226 sa_cache_destructor(void *buf, void *unused)
227 {
228 (void) unused;
229 sa_handle_t *hdl = buf;
230 mutex_destroy(&hdl->sa_lock);
231 }
232
233 void
234 sa_cache_init(void)
235 {
236 sa_cache = kmem_cache_create("sa_cache",
237 sizeof (sa_handle_t), 0, sa_cache_constructor,
238 sa_cache_destructor, NULL, NULL, NULL, 0);
239 }
240
241 void
242 sa_cache_fini(void)
243 {
244 if (sa_cache)
245 kmem_cache_destroy(sa_cache);
246 }
247
248 static int
249 layout_num_compare(const void *arg1, const void *arg2)
250 {
251 const sa_lot_t *node1 = (const sa_lot_t *)arg1;
252 const sa_lot_t *node2 = (const sa_lot_t *)arg2;
253
254 return (TREE_CMP(node1->lot_num, node2->lot_num));
255 }
256
257 static int
258 layout_hash_compare(const void *arg1, const void *arg2)
259 {
260 const sa_lot_t *node1 = (const sa_lot_t *)arg1;
261 const sa_lot_t *node2 = (const sa_lot_t *)arg2;
262
263 int cmp = TREE_CMP(node1->lot_hash, node2->lot_hash);
264 if (likely(cmp))
265 return (cmp);
266
267 return (TREE_CMP(node1->lot_instance, node2->lot_instance));
268 }
269
270 static boolean_t
271 sa_layout_equal(sa_lot_t *tbf, sa_attr_type_t *attrs, int count)
272 {
273 int i;
274
275 if (count != tbf->lot_attr_count)
276 return (1);
277
278 for (i = 0; i != count; i++) {
279 if (attrs[i] != tbf->lot_attrs[i])
280 return (1);
281 }
282 return (0);
283 }
284
285 #define SA_ATTR_HASH(attr) (zfs_crc64_table[(-1ULL ^ attr) & 0xFF])
286
287 static uint64_t
288 sa_layout_info_hash(const sa_attr_type_t *attrs, int attr_count)
289 {
290 uint64_t crc = -1ULL;
291
292 for (int i = 0; i != attr_count; i++)
293 crc ^= SA_ATTR_HASH(attrs[i]);
294
295 return (crc);
296 }
297
298 static int
299 sa_get_spill(sa_handle_t *hdl)
300 {
301 int rc;
302 if (hdl->sa_spill == NULL) {
303 if ((rc = dmu_spill_hold_existing(hdl->sa_bonus, NULL,
304 &hdl->sa_spill)) == 0)
305 VERIFY(0 == sa_build_index(hdl, SA_SPILL));
306 } else {
307 rc = 0;
308 }
309
310 return (rc);
311 }
312
313 /*
 * Main attribute lookup/update function.
 * Returns 0 for success or nonzero for failures.
 *
 * Operates on a bulk array; the first failure will abort further processing.
318 */
319 static int
320 sa_attr_op(sa_handle_t *hdl, sa_bulk_attr_t *bulk, int count,
321 sa_data_op_t data_op, dmu_tx_t *tx)
322 {
323 sa_os_t *sa = hdl->sa_os->os_sa;
324 int i;
325 int error = 0;
326 sa_buf_type_t buftypes;
327
328 buftypes = 0;
329
330 ASSERT(count > 0);
331 for (i = 0; i != count; i++) {
332 ASSERT(bulk[i].sa_attr <= hdl->sa_os->os_sa->sa_num_attrs);
333
334 bulk[i].sa_addr = NULL;
335 /* First check the bonus buffer */
336
337 if (hdl->sa_bonus_tab && TOC_ATTR_PRESENT(
338 hdl->sa_bonus_tab->sa_idx_tab[bulk[i].sa_attr])) {
339 SA_ATTR_INFO(sa, hdl->sa_bonus_tab,
340 SA_GET_HDR(hdl, SA_BONUS),
341 bulk[i].sa_attr, bulk[i], SA_BONUS, hdl);
342 if (tx && !(buftypes & SA_BONUS)) {
343 dmu_buf_will_dirty(hdl->sa_bonus, tx);
344 buftypes |= SA_BONUS;
345 }
346 }
347 if (bulk[i].sa_addr == NULL &&
348 ((error = sa_get_spill(hdl)) == 0)) {
349 if (TOC_ATTR_PRESENT(
350 hdl->sa_spill_tab->sa_idx_tab[bulk[i].sa_attr])) {
351 SA_ATTR_INFO(sa, hdl->sa_spill_tab,
352 SA_GET_HDR(hdl, SA_SPILL),
353 bulk[i].sa_attr, bulk[i], SA_SPILL, hdl);
354 if (tx && !(buftypes & SA_SPILL) &&
355 bulk[i].sa_size == bulk[i].sa_length) {
356 dmu_buf_will_dirty(hdl->sa_spill, tx);
357 buftypes |= SA_SPILL;
358 }
359 }
360 }
361 if (error && error != ENOENT) {
362 return ((error == ECKSUM) ? EIO : error);
363 }
364
365 switch (data_op) {
366 case SA_LOOKUP:
367 if (bulk[i].sa_addr == NULL)
368 return (SET_ERROR(ENOENT));
369 if (bulk[i].sa_data) {
370 SA_COPY_DATA(bulk[i].sa_data_func,
371 bulk[i].sa_addr, bulk[i].sa_data,
372 bulk[i].sa_size);
373 }
374 continue;
375
376 case SA_UPDATE:
377 /* existing rewrite of attr */
378 if (bulk[i].sa_addr &&
379 bulk[i].sa_size == bulk[i].sa_length) {
380 SA_COPY_DATA(bulk[i].sa_data_func,
381 bulk[i].sa_data, bulk[i].sa_addr,
382 bulk[i].sa_length);
383 continue;
384 } else if (bulk[i].sa_addr) { /* attr size change */
385 error = sa_modify_attrs(hdl, bulk[i].sa_attr,
386 SA_REPLACE, bulk[i].sa_data_func,
387 bulk[i].sa_data, bulk[i].sa_length, tx);
388 } else { /* adding new attribute */
389 error = sa_modify_attrs(hdl, bulk[i].sa_attr,
390 SA_ADD, bulk[i].sa_data_func,
391 bulk[i].sa_data, bulk[i].sa_length, tx);
392 }
393 if (error)
394 return (error);
395 break;
396 default:
397 break;
398 }
399 }
400 return (error);
401 }
402
403 static sa_lot_t *
404 sa_add_layout_entry(objset_t *os, const sa_attr_type_t *attrs, int attr_count,
405 uint64_t lot_num, uint64_t hash, boolean_t zapadd, dmu_tx_t *tx)
406 {
407 sa_os_t *sa = os->os_sa;
408 sa_lot_t *tb, *findtb;
409 int i;
410 avl_index_t loc;
411
412 ASSERT(MUTEX_HELD(&sa->sa_lock));
413 tb = kmem_zalloc(sizeof (sa_lot_t), KM_SLEEP);
414 tb->lot_attr_count = attr_count;
415 tb->lot_attrs = kmem_alloc(sizeof (sa_attr_type_t) * attr_count,
416 KM_SLEEP);
417 memcpy(tb->lot_attrs, attrs, sizeof (sa_attr_type_t) * attr_count);
418 tb->lot_num = lot_num;
419 tb->lot_hash = hash;
420 tb->lot_instance = 0;
421
422 if (zapadd) {
423 char attr_name[8];
424
425 if (sa->sa_layout_attr_obj == 0) {
426 sa->sa_layout_attr_obj = zap_create_link(os,
427 DMU_OT_SA_ATTR_LAYOUTS,
428 sa->sa_master_obj, SA_LAYOUTS, tx);
429 }
430
431 (void) snprintf(attr_name, sizeof (attr_name),
432 "%d", (int)lot_num);
433 VERIFY(0 == zap_update(os, os->os_sa->sa_layout_attr_obj,
434 attr_name, 2, attr_count, attrs, tx));
435 }
436
437 list_create(&tb->lot_idx_tab, sizeof (sa_idx_tab_t),
438 offsetof(sa_idx_tab_t, sa_next));
439
440 for (i = 0; i != attr_count; i++) {
441 if (sa->sa_attr_table[tb->lot_attrs[i]].sa_length == 0)
442 tb->lot_var_sizes++;
443 }
444
445 avl_add(&sa->sa_layout_num_tree, tb);
446
447 /* verify we don't have a hash collision */
448 if ((findtb = avl_find(&sa->sa_layout_hash_tree, tb, &loc)) != NULL) {
449 for (; findtb && findtb->lot_hash == hash;
450 findtb = AVL_NEXT(&sa->sa_layout_hash_tree, findtb)) {
451 if (findtb->lot_instance != tb->lot_instance)
452 break;
453 tb->lot_instance++;
454 }
455 }
456 avl_add(&sa->sa_layout_hash_tree, tb);
457 return (tb);
458 }
459
460 static void
461 sa_find_layout(objset_t *os, uint64_t hash, sa_attr_type_t *attrs,
462 int count, dmu_tx_t *tx, sa_lot_t **lot)
463 {
464 sa_lot_t *tb, tbsearch;
465 avl_index_t loc;
466 sa_os_t *sa = os->os_sa;
467 boolean_t found = B_FALSE;
468
469 mutex_enter(&sa->sa_lock);
470 tbsearch.lot_hash = hash;
471 tbsearch.lot_instance = 0;
472 tb = avl_find(&sa->sa_layout_hash_tree, &tbsearch, &loc);
473 if (tb) {
474 for (; tb && tb->lot_hash == hash;
475 tb = AVL_NEXT(&sa->sa_layout_hash_tree, tb)) {
476 if (sa_layout_equal(tb, attrs, count) == 0) {
477 found = B_TRUE;
478 break;
479 }
480 }
481 }
482 if (!found) {
483 tb = sa_add_layout_entry(os, attrs, count,
484 avl_numnodes(&sa->sa_layout_num_tree), hash, B_TRUE, tx);
485 }
486 mutex_exit(&sa->sa_lock);
487 *lot = tb;
488 }
489
490 static int
491 sa_resize_spill(sa_handle_t *hdl, uint32_t size, dmu_tx_t *tx)
492 {
493 int error;
494 uint32_t blocksize;
495
496 if (size == 0) {
497 blocksize = SPA_MINBLOCKSIZE;
498 } else if (size > SPA_OLD_MAXBLOCKSIZE) {
499 ASSERT(0);
500 return (SET_ERROR(EFBIG));
501 } else {
502 blocksize = P2ROUNDUP_TYPED(size, SPA_MINBLOCKSIZE, uint32_t);
503 }
504
505 error = dbuf_spill_set_blksz(hdl->sa_spill, blocksize, tx);
506 ASSERT(error == 0);
507 return (error);
508 }
509
510 static void
511 sa_copy_data(sa_data_locator_t *func, void *datastart, void *target, int buflen)
512 {
513 if (func == NULL) {
514 memcpy(target, datastart, buflen);
515 } else {
516 boolean_t start;
517 int bytes;
518 void *dataptr;
519 void *saptr = target;
520 uint32_t length;
521
522 start = B_TRUE;
523 bytes = 0;
524 while (bytes < buflen) {
525 func(&dataptr, &length, buflen, start, datastart);
526 memcpy(saptr, dataptr, length);
527 saptr = (void *)((caddr_t)saptr + length);
528 bytes += length;
529 start = B_FALSE;
530 }
531 }
532 }
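
/*
 * A data locator lets the caller describe an attribute whose bytes are not
 * contiguous in memory: sa_copy_data() (and SA_COPY_DATA) will call it
 * repeatedly until "buflen" bytes have been produced. A minimal sketch of
 * such a locator follows; the my_chunks_t type and its fields are
 * hypothetical.
 *
 *	static void
 *	my_chunk_locator(void **dataptr, uint32_t *len, uint32_t total_len,
 *	    boolean_t start, void *userdata)
 *	{
 *		my_chunks_t *c = userdata;
 *
 *		if (start)
 *			c->next = 0;
 *		*dataptr = c->chunk[c->next].addr;
 *		*len = c->chunk[c->next].size;
 *		c->next++;
 *	}
 */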
533
534 /*
535 * Determine several different values pertaining to system attribute
536 * buffers.
537 *
538 * Return the size of the sa_hdr_phys_t header for the buffer. Each
539 * variable length attribute except the first contributes two bytes to
540 * the header size, which is then rounded up to an 8-byte boundary.
541 *
542 * The following output parameters are also computed.
543 *
544 * index - The index of the first attribute in attr_desc that will
545 * spill over. Only valid if will_spill is set.
546 *
547 * total - The total number of bytes of all system attributes described
548 * in attr_desc.
549 *
550 * will_spill - Set when spilling is necessary. It is only set when
551 * the buftype is SA_BONUS.
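 *
 * For example (a sketch, not taken from any particular dataset): a layout
 * with three variable-length attributes needs sizeof (sa_hdr_phys_t)
 * (8 bytes, which already holds one length slot) plus 2 bytes for each
 * variable-length attribute after the first, 8 + 2 + 2 = 12, which is then
 * rounded up to an 8-byte boundary, giving a 16 byte header.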
552 */
553 static int
554 sa_find_sizes(sa_os_t *sa, sa_bulk_attr_t *attr_desc, int attr_count,
555 dmu_buf_t *db, sa_buf_type_t buftype, int full_space, int *index,
556 int *total, boolean_t *will_spill)
557 {
558 int var_size_count = 0;
559 int i;
560 int hdrsize;
561 int extra_hdrsize;
562
563 if (buftype == SA_BONUS && sa->sa_force_spill) {
564 *total = 0;
565 *index = 0;
566 *will_spill = B_TRUE;
567 return (0);
568 }
569
570 *index = -1;
571 *total = 0;
572 *will_spill = B_FALSE;
573
574 extra_hdrsize = 0;
575 hdrsize = (SA_BONUSTYPE_FROM_DB(db) == DMU_OT_ZNODE) ? 0 :
576 sizeof (sa_hdr_phys_t);
577
578 ASSERT(IS_P2ALIGNED(full_space, 8));
579
580 for (i = 0; i != attr_count; i++) {
581 boolean_t is_var_sz, might_spill_here;
582 int tmp_hdrsize;
583
584 *total = P2ROUNDUP(*total, 8);
585 *total += attr_desc[i].sa_length;
586 if (*will_spill)
587 continue;
588
589 is_var_sz = (SA_REGISTERED_LEN(sa, attr_desc[i].sa_attr) == 0);
590 if (is_var_sz)
591 var_size_count++;
592
593 /*
594 * Calculate what the SA header size would be if this
595 * attribute doesn't spill.
596 */
597 tmp_hdrsize = hdrsize + ((is_var_sz && var_size_count > 1) ?
598 sizeof (uint16_t) : 0);
599
600 /*
601 * Check whether this attribute spans into the space
602 * that would be used by the spill block pointer should
603 * a spill block be needed.
604 */
605 might_spill_here =
606 buftype == SA_BONUS && *index == -1 &&
607 (*total + P2ROUNDUP(tmp_hdrsize, 8)) >
608 (full_space - sizeof (blkptr_t));
609
610 if (is_var_sz && var_size_count > 1) {
611 if (buftype == SA_SPILL ||
612 tmp_hdrsize + *total < full_space) {
613 /*
614 * Record the extra header size in case this
615 * increase needs to be reversed due to
616 * spill-over.
617 */
618 hdrsize = tmp_hdrsize;
619 if (*index != -1 || might_spill_here)
620 extra_hdrsize += sizeof (uint16_t);
621 } else {
622 ASSERT(buftype == SA_BONUS);
623 if (*index == -1)
624 *index = i;
625 *will_spill = B_TRUE;
626 continue;
627 }
628 }
629
630 /*
631 * Store index of where spill *could* occur. Then
632 * continue to count the remaining attribute sizes. The
633 * sum is used later for sizing bonus and spill buffer.
634 */
635 if (might_spill_here)
636 *index = i;
637
638 if ((*total + P2ROUNDUP(hdrsize, 8)) > full_space &&
639 buftype == SA_BONUS)
640 *will_spill = B_TRUE;
641 }
642
643 if (*will_spill)
644 hdrsize -= extra_hdrsize;
645
646 hdrsize = P2ROUNDUP(hdrsize, 8);
647 return (hdrsize);
648 }
649
650 #define BUF_SPACE_NEEDED(total, header) (total + header)
651
652 /*
 * Find the layout that corresponds to the ordering of attributes.
 * If not found, a new layout number is created and added to the
 * persistent layout tables.
656 */
657 static int
658 sa_build_layouts(sa_handle_t *hdl, sa_bulk_attr_t *attr_desc, int attr_count,
659 dmu_tx_t *tx)
660 {
661 sa_os_t *sa = hdl->sa_os->os_sa;
662 uint64_t hash;
663 sa_buf_type_t buftype;
664 sa_hdr_phys_t *sahdr;
665 void *data_start;
666 sa_attr_type_t *attrs, *attrs_start;
667 int i, lot_count;
668 int dnodesize;
669 int spill_idx;
670 int hdrsize;
671 int spillhdrsize = 0;
672 int used;
673 dmu_object_type_t bonustype;
674 sa_lot_t *lot;
675 int len_idx;
676 int spill_used;
677 int bonuslen;
678 boolean_t spilling;
679
680 dmu_buf_will_dirty(hdl->sa_bonus, tx);
681 bonustype = SA_BONUSTYPE_FROM_DB(hdl->sa_bonus);
682 dmu_object_dnsize_from_db(hdl->sa_bonus, &dnodesize);
683 bonuslen = DN_BONUS_SIZE(dnodesize);
684
685 /* first determine bonus header size and sum of all attributes */
686 hdrsize = sa_find_sizes(sa, attr_desc, attr_count, hdl->sa_bonus,
687 SA_BONUS, bonuslen, &spill_idx, &used, &spilling);
688
689 if (used > SPA_OLD_MAXBLOCKSIZE)
690 return (SET_ERROR(EFBIG));
691
692 VERIFY0(dmu_set_bonus(hdl->sa_bonus, spilling ?
693 MIN(bonuslen - sizeof (blkptr_t), used + hdrsize) :
694 used + hdrsize, tx));
695
696 ASSERT((bonustype == DMU_OT_ZNODE && spilling == 0) ||
697 bonustype == DMU_OT_SA);
698
699 /* setup and size spill buffer when needed */
700 if (spilling) {
701 boolean_t dummy;
702
703 if (hdl->sa_spill == NULL) {
704 VERIFY(dmu_spill_hold_by_bonus(hdl->sa_bonus, 0, NULL,
705 &hdl->sa_spill) == 0);
706 }
707 dmu_buf_will_dirty(hdl->sa_spill, tx);
708
709 spillhdrsize = sa_find_sizes(sa, &attr_desc[spill_idx],
710 attr_count - spill_idx, hdl->sa_spill, SA_SPILL,
711 hdl->sa_spill->db_size, &i, &spill_used, &dummy);
712
713 if (spill_used > SPA_OLD_MAXBLOCKSIZE)
714 return (SET_ERROR(EFBIG));
715
716 if (BUF_SPACE_NEEDED(spill_used, spillhdrsize) >
717 hdl->sa_spill->db_size)
718 VERIFY(0 == sa_resize_spill(hdl,
719 BUF_SPACE_NEEDED(spill_used, spillhdrsize), tx));
720 }
721
722 /* setup starting pointers to lay down data */
723 data_start = (void *)((uintptr_t)hdl->sa_bonus->db_data + hdrsize);
724 sahdr = (sa_hdr_phys_t *)hdl->sa_bonus->db_data;
725 buftype = SA_BONUS;
726
727 attrs_start = attrs = kmem_alloc(sizeof (sa_attr_type_t) * attr_count,
728 KM_SLEEP);
729 lot_count = 0;
730
731 for (i = 0, len_idx = 0, hash = -1ULL; i != attr_count; i++) {
732 uint16_t length;
733
734 ASSERT(IS_P2ALIGNED(data_start, 8));
735 attrs[i] = attr_desc[i].sa_attr;
736 length = SA_REGISTERED_LEN(sa, attrs[i]);
737 if (length == 0)
738 length = attr_desc[i].sa_length;
739
740 if (spilling && i == spill_idx) { /* switch to spill buffer */
741 VERIFY(bonustype == DMU_OT_SA);
742 if (buftype == SA_BONUS && !sa->sa_force_spill) {
743 sa_find_layout(hdl->sa_os, hash, attrs_start,
744 lot_count, tx, &lot);
745 SA_SET_HDR(sahdr, lot->lot_num, hdrsize);
746 }
747
748 buftype = SA_SPILL;
749 hash = -1ULL;
750 len_idx = 0;
751
752 sahdr = (sa_hdr_phys_t *)hdl->sa_spill->db_data;
753 sahdr->sa_magic = SA_MAGIC;
754 data_start = (void *)((uintptr_t)sahdr +
755 spillhdrsize);
756 attrs_start = &attrs[i];
757 lot_count = 0;
758 }
759 hash ^= SA_ATTR_HASH(attrs[i]);
760 attr_desc[i].sa_addr = data_start;
761 attr_desc[i].sa_size = length;
762 SA_COPY_DATA(attr_desc[i].sa_data_func, attr_desc[i].sa_data,
763 data_start, length);
764 if (sa->sa_attr_table[attrs[i]].sa_length == 0) {
765 sahdr->sa_lengths[len_idx++] = length;
766 }
767 data_start = (void *)P2ROUNDUP(((uintptr_t)data_start +
768 length), 8);
769 lot_count++;
770 }
771
772 sa_find_layout(hdl->sa_os, hash, attrs_start, lot_count, tx, &lot);
773
774 /*
775 * Verify that old znodes always have layout number 0.
776 * Must be DMU_OT_SA for arbitrary layouts
777 */
778 VERIFY((bonustype == DMU_OT_ZNODE && lot->lot_num == 0) ||
779 (bonustype == DMU_OT_SA && lot->lot_num > 1));
780
781 if (bonustype == DMU_OT_SA) {
782 SA_SET_HDR(sahdr, lot->lot_num,
783 buftype == SA_BONUS ? hdrsize : spillhdrsize);
784 }
785
786 kmem_free(attrs, sizeof (sa_attr_type_t) * attr_count);
787 if (hdl->sa_bonus_tab) {
788 sa_idx_tab_rele(hdl->sa_os, hdl->sa_bonus_tab);
789 hdl->sa_bonus_tab = NULL;
790 }
791 if (!sa->sa_force_spill)
792 VERIFY(0 == sa_build_index(hdl, SA_BONUS));
793 if (hdl->sa_spill) {
794 sa_idx_tab_rele(hdl->sa_os, hdl->sa_spill_tab);
795 if (!spilling) {
796 /*
797 * remove spill block that is no longer needed.
798 */
799 dmu_buf_rele(hdl->sa_spill, NULL);
800 hdl->sa_spill = NULL;
801 hdl->sa_spill_tab = NULL;
802 VERIFY(0 == dmu_rm_spill(hdl->sa_os,
803 sa_handle_object(hdl), tx));
804 } else {
805 VERIFY(0 == sa_build_index(hdl, SA_SPILL));
806 }
807 }
808
809 return (0);
810 }
811
812 static void
813 sa_free_attr_table(sa_os_t *sa)
814 {
815 int i;
816
817 if (sa->sa_attr_table == NULL)
818 return;
819
820 for (i = 0; i != sa->sa_num_attrs; i++) {
821 if (sa->sa_attr_table[i].sa_name)
822 kmem_free(sa->sa_attr_table[i].sa_name,
823 strlen(sa->sa_attr_table[i].sa_name) + 1);
824 }
825
826 kmem_free(sa->sa_attr_table,
827 sizeof (sa_attr_table_t) * sa->sa_num_attrs);
828
829 sa->sa_attr_table = NULL;
830 }
831
832 static int
833 sa_attr_table_setup(objset_t *os, const sa_attr_reg_t *reg_attrs, int count)
834 {
835 sa_os_t *sa = os->os_sa;
836 uint64_t sa_attr_count = 0;
837 uint64_t sa_reg_count = 0;
838 int error = 0;
839 uint64_t attr_value;
840 sa_attr_table_t *tb;
841 zap_cursor_t zc;
842 zap_attribute_t za;
843 int registered_count = 0;
844 int i;
845 dmu_objset_type_t ostype = dmu_objset_type(os);
846
847 sa->sa_user_table =
848 kmem_zalloc(count * sizeof (sa_attr_type_t), KM_SLEEP);
849 sa->sa_user_table_sz = count * sizeof (sa_attr_type_t);
850
851 if (sa->sa_reg_attr_obj != 0) {
852 error = zap_count(os, sa->sa_reg_attr_obj,
853 &sa_attr_count);
854
855 /*
856 * Make sure we retrieved a count and that it isn't zero
857 */
858 if (error || (error == 0 && sa_attr_count == 0)) {
859 if (error == 0)
860 error = SET_ERROR(EINVAL);
861 goto bail;
862 }
863 sa_reg_count = sa_attr_count;
864 }
865
866 if (ostype == DMU_OST_ZFS && sa_attr_count == 0)
867 sa_attr_count += sa_legacy_attr_count;
868
869 /* Allocate attribute numbers for attributes that aren't registered */
870 for (i = 0; i != count; i++) {
871 boolean_t found = B_FALSE;
872 int j;
873
874 if (ostype == DMU_OST_ZFS) {
875 for (j = 0; j != sa_legacy_attr_count; j++) {
876 if (strcmp(reg_attrs[i].sa_name,
877 sa_legacy_attrs[j].sa_name) == 0) {
878 sa->sa_user_table[i] =
879 sa_legacy_attrs[j].sa_attr;
880 found = B_TRUE;
881 }
882 }
883 }
884 if (found)
885 continue;
886
887 if (sa->sa_reg_attr_obj)
888 error = zap_lookup(os, sa->sa_reg_attr_obj,
889 reg_attrs[i].sa_name, 8, 1, &attr_value);
890 else
891 error = SET_ERROR(ENOENT);
892 switch (error) {
893 case ENOENT:
894 sa->sa_user_table[i] = (sa_attr_type_t)sa_attr_count;
895 sa_attr_count++;
896 break;
897 case 0:
898 sa->sa_user_table[i] = ATTR_NUM(attr_value);
899 break;
900 default:
901 goto bail;
902 }
903 }
904
905 sa->sa_num_attrs = sa_attr_count;
906 tb = sa->sa_attr_table =
907 kmem_zalloc(sizeof (sa_attr_table_t) * sa_attr_count, KM_SLEEP);
908
909 /*
910 * Attribute table is constructed from requested attribute list,
911 * previously foreign registered attributes, and also the legacy
912 * ZPL set of attributes.
913 */
914
915 if (sa->sa_reg_attr_obj) {
916 for (zap_cursor_init(&zc, os, sa->sa_reg_attr_obj);
917 (error = zap_cursor_retrieve(&zc, &za)) == 0;
918 zap_cursor_advance(&zc)) {
919 uint64_t value;
920 value = za.za_first_integer;
921
922 registered_count++;
923 tb[ATTR_NUM(value)].sa_attr = ATTR_NUM(value);
924 tb[ATTR_NUM(value)].sa_length = ATTR_LENGTH(value);
925 tb[ATTR_NUM(value)].sa_byteswap = ATTR_BSWAP(value);
926 tb[ATTR_NUM(value)].sa_registered = B_TRUE;
927
928 if (tb[ATTR_NUM(value)].sa_name) {
929 continue;
930 }
931 tb[ATTR_NUM(value)].sa_name =
932 kmem_zalloc(strlen(za.za_name) +1, KM_SLEEP);
933 (void) strlcpy(tb[ATTR_NUM(value)].sa_name, za.za_name,
934 strlen(za.za_name) +1);
935 }
936 zap_cursor_fini(&zc);
937 /*
938 * Make sure we processed the correct number of registered
939 * attributes
940 */
941 if (registered_count != sa_reg_count) {
942 ASSERT(error != 0);
943 goto bail;
944 }
945
946 }
947
948 if (ostype == DMU_OST_ZFS) {
949 for (i = 0; i != sa_legacy_attr_count; i++) {
950 if (tb[i].sa_name)
951 continue;
952 tb[i].sa_attr = sa_legacy_attrs[i].sa_attr;
953 tb[i].sa_length = sa_legacy_attrs[i].sa_length;
954 tb[i].sa_byteswap = sa_legacy_attrs[i].sa_byteswap;
955 tb[i].sa_registered = B_FALSE;
956 tb[i].sa_name =
957 kmem_zalloc(strlen(sa_legacy_attrs[i].sa_name) +1,
958 KM_SLEEP);
959 (void) strlcpy(tb[i].sa_name,
960 sa_legacy_attrs[i].sa_name,
961 strlen(sa_legacy_attrs[i].sa_name) + 1);
962 }
963 }
964
965 for (i = 0; i != count; i++) {
966 sa_attr_type_t attr_id;
967
968 attr_id = sa->sa_user_table[i];
969 if (tb[attr_id].sa_name)
970 continue;
971
972 tb[attr_id].sa_length = reg_attrs[i].sa_length;
973 tb[attr_id].sa_byteswap = reg_attrs[i].sa_byteswap;
974 tb[attr_id].sa_attr = attr_id;
975 tb[attr_id].sa_name =
976 kmem_zalloc(strlen(reg_attrs[i].sa_name) + 1, KM_SLEEP);
977 (void) strlcpy(tb[attr_id].sa_name, reg_attrs[i].sa_name,
978 strlen(reg_attrs[i].sa_name) + 1);
979 }
980
981 sa->sa_need_attr_registration =
982 (sa_attr_count != registered_count);
983
984 return (0);
985 bail:
986 kmem_free(sa->sa_user_table, count * sizeof (sa_attr_type_t));
987 sa->sa_user_table = NULL;
988 sa_free_attr_table(sa);
989 ASSERT(error != 0);
990 return (error);
991 }
992
993 int
994 sa_setup(objset_t *os, uint64_t sa_obj, const sa_attr_reg_t *reg_attrs,
995 int count, sa_attr_type_t **user_table)
996 {
997 zap_cursor_t zc;
998 zap_attribute_t za;
999 sa_os_t *sa;
1000 dmu_objset_type_t ostype = dmu_objset_type(os);
1001 sa_attr_type_t *tb;
1002 int error;
1003
1004 mutex_enter(&os->os_user_ptr_lock);
1005 if (os->os_sa) {
1006 mutex_enter(&os->os_sa->sa_lock);
1007 mutex_exit(&os->os_user_ptr_lock);
1008 tb = os->os_sa->sa_user_table;
1009 mutex_exit(&os->os_sa->sa_lock);
1010 *user_table = tb;
1011 return (0);
1012 }
1013
1014 sa = kmem_zalloc(sizeof (sa_os_t), KM_SLEEP);
1015 mutex_init(&sa->sa_lock, NULL, MUTEX_NOLOCKDEP, NULL);
1016 sa->sa_master_obj = sa_obj;
1017
1018 os->os_sa = sa;
1019 mutex_enter(&sa->sa_lock);
1020 mutex_exit(&os->os_user_ptr_lock);
1021 avl_create(&sa->sa_layout_num_tree, layout_num_compare,
1022 sizeof (sa_lot_t), offsetof(sa_lot_t, lot_num_node));
1023 avl_create(&sa->sa_layout_hash_tree, layout_hash_compare,
1024 sizeof (sa_lot_t), offsetof(sa_lot_t, lot_hash_node));
1025
1026 if (sa_obj) {
1027 error = zap_lookup(os, sa_obj, SA_LAYOUTS,
1028 8, 1, &sa->sa_layout_attr_obj);
1029 if (error != 0 && error != ENOENT)
1030 goto fail;
1031 error = zap_lookup(os, sa_obj, SA_REGISTRY,
1032 8, 1, &sa->sa_reg_attr_obj);
1033 if (error != 0 && error != ENOENT)
1034 goto fail;
1035 }
1036
1037 if ((error = sa_attr_table_setup(os, reg_attrs, count)) != 0)
1038 goto fail;
1039
1040 if (sa->sa_layout_attr_obj != 0) {
1041 uint64_t layout_count;
1042
1043 error = zap_count(os, sa->sa_layout_attr_obj,
1044 &layout_count);
1045
1046 /*
1047 * Layout number count should be > 0
1048 */
1049 if (error || (error == 0 && layout_count == 0)) {
1050 if (error == 0)
1051 error = SET_ERROR(EINVAL);
1052 goto fail;
1053 }
1054
1055 for (zap_cursor_init(&zc, os, sa->sa_layout_attr_obj);
1056 (error = zap_cursor_retrieve(&zc, &za)) == 0;
1057 zap_cursor_advance(&zc)) {
1058 sa_attr_type_t *lot_attrs;
1059 uint64_t lot_num;
1060
1061 lot_attrs = kmem_zalloc(sizeof (sa_attr_type_t) *
1062 za.za_num_integers, KM_SLEEP);
1063
1064 if ((error = (zap_lookup(os, sa->sa_layout_attr_obj,
1065 za.za_name, 2, za.za_num_integers,
1066 lot_attrs))) != 0) {
1067 kmem_free(lot_attrs, sizeof (sa_attr_type_t) *
1068 za.za_num_integers);
1069 break;
1070 }
1071 VERIFY0(ddi_strtoull(za.za_name, NULL, 10,
1072 (unsigned long long *)&lot_num));
1073
1074 (void) sa_add_layout_entry(os, lot_attrs,
1075 za.za_num_integers, lot_num,
1076 sa_layout_info_hash(lot_attrs,
1077 za.za_num_integers), B_FALSE, NULL);
1078 kmem_free(lot_attrs, sizeof (sa_attr_type_t) *
1079 za.za_num_integers);
1080 }
1081 zap_cursor_fini(&zc);
1082
1083 /*
1084 * Make sure layout count matches number of entries added
1085 * to AVL tree
1086 */
1087 if (avl_numnodes(&sa->sa_layout_num_tree) != layout_count) {
1088 ASSERT(error != 0);
1089 goto fail;
1090 }
1091 }
1092
1093 /* Add special layout number for old ZNODES */
1094 if (ostype == DMU_OST_ZFS) {
1095 (void) sa_add_layout_entry(os, sa_legacy_zpl_layout,
1096 sa_legacy_attr_count, 0,
1097 sa_layout_info_hash(sa_legacy_zpl_layout,
1098 sa_legacy_attr_count), B_FALSE, NULL);
1099
1100 (void) sa_add_layout_entry(os, sa_dummy_zpl_layout, 0, 1,
1101 0, B_FALSE, NULL);
1102 }
1103 *user_table = os->os_sa->sa_user_table;
1104 mutex_exit(&sa->sa_lock);
1105 return (0);
1106 fail:
1107 os->os_sa = NULL;
1108 sa_free_attr_table(sa);
1109 if (sa->sa_user_table)
1110 kmem_free(sa->sa_user_table, sa->sa_user_table_sz);
1111 mutex_exit(&sa->sa_lock);
1112 avl_destroy(&sa->sa_layout_hash_tree);
1113 avl_destroy(&sa->sa_layout_num_tree);
1114 mutex_destroy(&sa->sa_lock);
1115 kmem_free(sa, sizeof (sa_os_t));
1116 return ((error == ECKSUM) ? EIO : error);
1117 }
1118
1119 void
1120 sa_tear_down(objset_t *os)
1121 {
1122 sa_os_t *sa = os->os_sa;
1123 sa_lot_t *layout;
1124 void *cookie;
1125
1126 kmem_free(sa->sa_user_table, sa->sa_user_table_sz);
1127
1128 /* Free up attr table */
1129
1130 sa_free_attr_table(sa);
1131
1132 cookie = NULL;
1133 while ((layout =
1134 avl_destroy_nodes(&sa->sa_layout_hash_tree, &cookie))) {
1135 sa_idx_tab_t *tab;
1136 while ((tab = list_head(&layout->lot_idx_tab))) {
1137 ASSERT(zfs_refcount_count(&tab->sa_refcount));
1138 sa_idx_tab_rele(os, tab);
1139 }
1140 }
1141
1142 cookie = NULL;
1143 while ((layout = avl_destroy_nodes(&sa->sa_layout_num_tree, &cookie))) {
1144 kmem_free(layout->lot_attrs,
1145 sizeof (sa_attr_type_t) * layout->lot_attr_count);
1146 kmem_free(layout, sizeof (sa_lot_t));
1147 }
1148
1149 avl_destroy(&sa->sa_layout_hash_tree);
1150 avl_destroy(&sa->sa_layout_num_tree);
1151 mutex_destroy(&sa->sa_lock);
1152
1153 kmem_free(sa, sizeof (sa_os_t));
1154 os->os_sa = NULL;
1155 }
1156
1157 static void
1158 sa_build_idx_tab(void *hdr, void *attr_addr, sa_attr_type_t attr,
1159 uint16_t length, int length_idx, boolean_t var_length, void *userp)
1160 {
1161 sa_idx_tab_t *idx_tab = userp;
1162
1163 if (var_length) {
1164 ASSERT(idx_tab->sa_variable_lengths);
1165 idx_tab->sa_variable_lengths[length_idx] = length;
1166 }
1167 TOC_ATTR_ENCODE(idx_tab->sa_idx_tab[attr], length_idx,
1168 (uint32_t)((uintptr_t)attr_addr - (uintptr_t)hdr));
1169 }
1170
1171 static void
1172 sa_attr_iter(objset_t *os, sa_hdr_phys_t *hdr, dmu_object_type_t type,
1173 sa_iterfunc_t func, sa_lot_t *tab, void *userp)
1174 {
1175 void *data_start;
1176 sa_lot_t *tb = tab;
1177 sa_lot_t search;
1178 avl_index_t loc;
1179 sa_os_t *sa = os->os_sa;
1180 int i;
1181 uint16_t *length_start = NULL;
1182 uint8_t length_idx = 0;
1183
1184 if (tab == NULL) {
1185 search.lot_num = SA_LAYOUT_NUM(hdr, type);
1186 tb = avl_find(&sa->sa_layout_num_tree, &search, &loc);
1187 ASSERT(tb);
1188 }
1189
1190 if (IS_SA_BONUSTYPE(type)) {
1191 data_start = (void *)P2ROUNDUP(((uintptr_t)hdr +
1192 offsetof(sa_hdr_phys_t, sa_lengths) +
1193 (sizeof (uint16_t) * tb->lot_var_sizes)), 8);
1194 length_start = hdr->sa_lengths;
1195 } else {
1196 data_start = hdr;
1197 }
1198
1199 for (i = 0; i != tb->lot_attr_count; i++) {
1200 int attr_length, reg_length;
1201 uint8_t idx_len;
1202
1203 reg_length = sa->sa_attr_table[tb->lot_attrs[i]].sa_length;
1204 if (reg_length) {
1205 attr_length = reg_length;
1206 idx_len = 0;
1207 } else {
1208 attr_length = length_start[length_idx];
1209 idx_len = length_idx++;
1210 }
1211
1212 func(hdr, data_start, tb->lot_attrs[i], attr_length,
1213 idx_len, reg_length == 0 ? B_TRUE : B_FALSE, userp);
1214
1215 data_start = (void *)P2ROUNDUP(((uintptr_t)data_start +
1216 attr_length), 8);
1217 }
1218 }
1219
1220 static void
1221 sa_byteswap_cb(void *hdr, void *attr_addr, sa_attr_type_t attr,
1222 uint16_t length, int length_idx, boolean_t variable_length, void *userp)
1223 {
1224 (void) hdr, (void) length_idx, (void) variable_length;
1225 sa_handle_t *hdl = userp;
1226 sa_os_t *sa = hdl->sa_os->os_sa;
1227
1228 sa_bswap_table[sa->sa_attr_table[attr].sa_byteswap](attr_addr, length);
1229 }
1230
1231 static void
1232 sa_byteswap(sa_handle_t *hdl, sa_buf_type_t buftype)
1233 {
1234 sa_hdr_phys_t *sa_hdr_phys = SA_GET_HDR(hdl, buftype);
1235 dmu_buf_impl_t *db;
1236 int num_lengths = 1;
1237 int i;
1238 sa_os_t *sa __maybe_unused = hdl->sa_os->os_sa;
1239
1240 ASSERT(MUTEX_HELD(&sa->sa_lock));
1241 if (sa_hdr_phys->sa_magic == SA_MAGIC)
1242 return;
1243
1244 db = SA_GET_DB(hdl, buftype);
1245
1246 if (buftype == SA_SPILL) {
1247 arc_release(db->db_buf, NULL);
1248 arc_buf_thaw(db->db_buf);
1249 }
1250
1251 sa_hdr_phys->sa_magic = BSWAP_32(sa_hdr_phys->sa_magic);
1252 sa_hdr_phys->sa_layout_info = BSWAP_16(sa_hdr_phys->sa_layout_info);
1253
1254 /*
	 * Determine the number of variable lengths in the header.
	 * The standard 8-byte header has one for free; a
	 * 16-byte header would have 4 + 1 = 5.
1258 */
1259 if (SA_HDR_SIZE(sa_hdr_phys) > 8)
1260 num_lengths += (SA_HDR_SIZE(sa_hdr_phys) - 8) >> 1;
1261 for (i = 0; i != num_lengths; i++)
1262 sa_hdr_phys->sa_lengths[i] =
1263 BSWAP_16(sa_hdr_phys->sa_lengths[i]);
1264
1265 sa_attr_iter(hdl->sa_os, sa_hdr_phys, DMU_OT_SA,
1266 sa_byteswap_cb, NULL, hdl);
1267
1268 if (buftype == SA_SPILL)
1269 arc_buf_freeze(((dmu_buf_impl_t *)hdl->sa_spill)->db_buf);
1270 }
1271
1272 static int
1273 sa_build_index(sa_handle_t *hdl, sa_buf_type_t buftype)
1274 {
1275 sa_hdr_phys_t *sa_hdr_phys;
1276 dmu_buf_impl_t *db = SA_GET_DB(hdl, buftype);
1277 dmu_object_type_t bonustype = SA_BONUSTYPE_FROM_DB(db);
1278 sa_os_t *sa = hdl->sa_os->os_sa;
1279 sa_idx_tab_t *idx_tab;
1280
1281 sa_hdr_phys = SA_GET_HDR(hdl, buftype);
1282
1283 mutex_enter(&sa->sa_lock);
1284
1285 /* Do we need to byteswap? */
1286
1287 /* only check if not old znode */
1288 if (IS_SA_BONUSTYPE(bonustype) && sa_hdr_phys->sa_magic != SA_MAGIC &&
1289 sa_hdr_phys->sa_magic != 0) {
1290 if (BSWAP_32(sa_hdr_phys->sa_magic) != SA_MAGIC) {
1291 mutex_exit(&sa->sa_lock);
1292 zfs_dbgmsg("Buffer Header: %x != SA_MAGIC:%x "
1293 "object=%#llx\n", sa_hdr_phys->sa_magic, SA_MAGIC,
1294 (u_longlong_t)db->db.db_object);
1295 return (SET_ERROR(EIO));
1296 }
1297 sa_byteswap(hdl, buftype);
1298 }
1299
1300 idx_tab = sa_find_idx_tab(hdl->sa_os, bonustype, sa_hdr_phys);
1301
1302 if (buftype == SA_BONUS)
1303 hdl->sa_bonus_tab = idx_tab;
1304 else
1305 hdl->sa_spill_tab = idx_tab;
1306
1307 mutex_exit(&sa->sa_lock);
1308 return (0);
1309 }
1310
1311 static void
1312 sa_evict_sync(void *dbu)
1313 {
1314 (void) dbu;
1315 panic("evicting sa dbuf\n");
1316 }
1317
1318 static void
1319 sa_idx_tab_rele(objset_t *os, void *arg)
1320 {
1321 sa_os_t *sa = os->os_sa;
1322 sa_idx_tab_t *idx_tab = arg;
1323
1324 if (idx_tab == NULL)
1325 return;
1326
1327 mutex_enter(&sa->sa_lock);
1328 if (zfs_refcount_remove(&idx_tab->sa_refcount, NULL) == 0) {
1329 list_remove(&idx_tab->sa_layout->lot_idx_tab, idx_tab);
1330 if (idx_tab->sa_variable_lengths)
1331 kmem_free(idx_tab->sa_variable_lengths,
1332 sizeof (uint16_t) *
1333 idx_tab->sa_layout->lot_var_sizes);
1334 zfs_refcount_destroy(&idx_tab->sa_refcount);
1335 kmem_free(idx_tab->sa_idx_tab,
1336 sizeof (uint32_t) * sa->sa_num_attrs);
1337 kmem_free(idx_tab, sizeof (sa_idx_tab_t));
1338 }
1339 mutex_exit(&sa->sa_lock);
1340 }
1341
1342 static void
1343 sa_idx_tab_hold(objset_t *os, sa_idx_tab_t *idx_tab)
1344 {
1345 sa_os_t *sa __maybe_unused = os->os_sa;
1346
1347 ASSERT(MUTEX_HELD(&sa->sa_lock));
1348 (void) zfs_refcount_add(&idx_tab->sa_refcount, NULL);
1349 }
1350
1351 void
1352 sa_spill_rele(sa_handle_t *hdl)
1353 {
1354 mutex_enter(&hdl->sa_lock);
1355 if (hdl->sa_spill) {
1356 sa_idx_tab_rele(hdl->sa_os, hdl->sa_spill_tab);
1357 dmu_buf_rele(hdl->sa_spill, NULL);
1358 hdl->sa_spill = NULL;
1359 hdl->sa_spill_tab = NULL;
1360 }
1361 mutex_exit(&hdl->sa_lock);
1362 }
1363
1364 void
1365 sa_handle_destroy(sa_handle_t *hdl)
1366 {
1367 dmu_buf_t *db = hdl->sa_bonus;
1368
1369 mutex_enter(&hdl->sa_lock);
1370 (void) dmu_buf_remove_user(db, &hdl->sa_dbu);
1371
1372 if (hdl->sa_bonus_tab)
1373 sa_idx_tab_rele(hdl->sa_os, hdl->sa_bonus_tab);
1374
1375 if (hdl->sa_spill_tab)
1376 sa_idx_tab_rele(hdl->sa_os, hdl->sa_spill_tab);
1377
1378 dmu_buf_rele(hdl->sa_bonus, NULL);
1379
1380 if (hdl->sa_spill)
1381 dmu_buf_rele(hdl->sa_spill, NULL);
1382 mutex_exit(&hdl->sa_lock);
1383
1384 kmem_cache_free(sa_cache, hdl);
1385 }
1386
1387 int
1388 sa_handle_get_from_db(objset_t *os, dmu_buf_t *db, void *userp,
1389 sa_handle_type_t hdl_type, sa_handle_t **handlepp)
1390 {
1391 int error = 0;
1392 sa_handle_t *handle = NULL;
1393 #ifdef ZFS_DEBUG
1394 dmu_object_info_t doi;
1395
1396 dmu_object_info_from_db(db, &doi);
1397 ASSERT(doi.doi_bonus_type == DMU_OT_SA ||
1398 doi.doi_bonus_type == DMU_OT_ZNODE);
1399 #endif
1400 /* find handle, if it exists */
1401 /* if one doesn't exist then create a new one, and initialize it */
1402
1403 if (hdl_type == SA_HDL_SHARED)
1404 handle = dmu_buf_get_user(db);
1405
1406 if (handle == NULL) {
1407 sa_handle_t *winner = NULL;
1408
1409 handle = kmem_cache_alloc(sa_cache, KM_SLEEP);
1410 handle->sa_dbu.dbu_evict_func_sync = NULL;
1411 handle->sa_dbu.dbu_evict_func_async = NULL;
1412 handle->sa_userp = userp;
1413 handle->sa_bonus = db;
1414 handle->sa_os = os;
1415 handle->sa_spill = NULL;
1416 handle->sa_bonus_tab = NULL;
1417 handle->sa_spill_tab = NULL;
1418
1419 error = sa_build_index(handle, SA_BONUS);
1420
1421 if (hdl_type == SA_HDL_SHARED) {
1422 dmu_buf_init_user(&handle->sa_dbu, sa_evict_sync, NULL,
1423 NULL);
1424 winner = dmu_buf_set_user_ie(db, &handle->sa_dbu);
1425 }
1426
1427 if (winner != NULL) {
1428 kmem_cache_free(sa_cache, handle);
1429 handle = winner;
1430 }
1431 }
1432 *handlepp = handle;
1433
1434 return (error);
1435 }
1436
1437 int
1438 sa_handle_get(objset_t *objset, uint64_t objid, void *userp,
1439 sa_handle_type_t hdl_type, sa_handle_t **handlepp)
1440 {
1441 dmu_buf_t *db;
1442 int error;
1443
1444 if ((error = dmu_bonus_hold(objset, objid, NULL, &db)))
1445 return (error);
1446
1447 return (sa_handle_get_from_db(objset, db, userp, hdl_type,
1448 handlepp));
1449 }
1450
1451 int
1452 sa_buf_hold(objset_t *objset, uint64_t obj_num, const void *tag, dmu_buf_t **db)
1453 {
1454 return (dmu_bonus_hold(objset, obj_num, tag, db));
1455 }
1456
1457 void
1458 sa_buf_rele(dmu_buf_t *db, const void *tag)
1459 {
1460 dmu_buf_rele(db, tag);
1461 }
1462
1463 static int
1464 sa_lookup_impl(sa_handle_t *hdl, sa_bulk_attr_t *bulk, int count)
1465 {
1466 ASSERT(hdl);
1467 ASSERT(MUTEX_HELD(&hdl->sa_lock));
1468 return (sa_attr_op(hdl, bulk, count, SA_LOOKUP, NULL));
1469 }
1470
1471 static int
1472 sa_lookup_locked(sa_handle_t *hdl, sa_attr_type_t attr, void *buf,
1473 uint32_t buflen)
1474 {
1475 int error;
1476 sa_bulk_attr_t bulk;
1477
1478 VERIFY3U(buflen, <=, SA_ATTR_MAX_LEN);
1479
1480 bulk.sa_attr = attr;
1481 bulk.sa_data = buf;
1482 bulk.sa_length = buflen;
1483 bulk.sa_data_func = NULL;
1484
1485 ASSERT(hdl);
1486 error = sa_lookup_impl(hdl, &bulk, 1);
1487 return (error);
1488 }
1489
1490 int
1491 sa_lookup(sa_handle_t *hdl, sa_attr_type_t attr, void *buf, uint32_t buflen)
1492 {
1493 int error;
1494
1495 mutex_enter(&hdl->sa_lock);
1496 error = sa_lookup_locked(hdl, attr, buf, buflen);
1497 mutex_exit(&hdl->sa_lock);
1498
1499 return (error);
1500 }
1501
1502 #ifdef _KERNEL
1503 int
1504 sa_lookup_uio(sa_handle_t *hdl, sa_attr_type_t attr, zfs_uio_t *uio)
1505 {
1506 int error;
1507 sa_bulk_attr_t bulk;
1508
1509 bulk.sa_data = NULL;
1510 bulk.sa_attr = attr;
1511 bulk.sa_data_func = NULL;
1512
1513 ASSERT(hdl);
1514
1515 mutex_enter(&hdl->sa_lock);
1516 if ((error = sa_attr_op(hdl, &bulk, 1, SA_LOOKUP, NULL)) == 0) {
1517 error = zfs_uiomove((void *)bulk.sa_addr, MIN(bulk.sa_size,
1518 zfs_uio_resid(uio)), UIO_READ, uio);
1519 }
1520 mutex_exit(&hdl->sa_lock);
1521 return (error);
1522 }
1523
1524 /*
 * For an existing object that was upgraded from an old system, the on-disk
 * layout has no slot for the project ID attribute. But the quota accounting
 * logic needs to access related slots by offset directly. So we need to
 * adjust these old objects' layouts to put the project ID at a unified and
 * fixed offset.
1529 */
1530 int
1531 sa_add_projid(sa_handle_t *hdl, dmu_tx_t *tx, uint64_t projid)
1532 {
1533 znode_t *zp = sa_get_userdata(hdl);
1534 dmu_buf_t *db = sa_get_db(hdl);
1535 zfsvfs_t *zfsvfs = ZTOZSB(zp);
1536 int count = 0, err = 0;
1537 sa_bulk_attr_t *bulk, *attrs;
1538 zfs_acl_locator_cb_t locate = { 0 };
1539 uint64_t uid, gid, mode, rdev, xattr = 0, parent, gen, links;
1540 uint64_t crtime[2], mtime[2], ctime[2], atime[2];
1541 zfs_acl_phys_t znode_acl = { 0 };
1542 char scanstamp[AV_SCANSTAMP_SZ];
1543
1544 if (zp->z_acl_cached == NULL) {
1545 zfs_acl_t *aclp;
1546
1547 mutex_enter(&zp->z_acl_lock);
1548 err = zfs_acl_node_read(zp, B_FALSE, &aclp, B_FALSE);
1549 mutex_exit(&zp->z_acl_lock);
1550 if (err != 0 && err != ENOENT)
1551 return (err);
1552 }
1553
1554 bulk = kmem_zalloc(sizeof (sa_bulk_attr_t) * ZPL_END, KM_SLEEP);
1555 attrs = kmem_zalloc(sizeof (sa_bulk_attr_t) * ZPL_END, KM_SLEEP);
1556 mutex_enter(&hdl->sa_lock);
1557 mutex_enter(&zp->z_lock);
1558
1559 err = sa_lookup_locked(hdl, SA_ZPL_PROJID(zfsvfs), &projid,
1560 sizeof (uint64_t));
1561 if (unlikely(err == 0))
		/* Someone else added the project ID attr concurrently. */
1563 err = EEXIST;
1564 if (err != ENOENT)
1565 goto out;
1566
1567 /* First do a bulk query of the attributes that aren't cached */
1568 if (zp->z_is_sa) {
1569 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
1570 &mode, 8);
1571 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL,
1572 &gen, 8);
1573 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
1574 &uid, 8);
1575 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
1576 &gid, 8);
1577 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL,
1578 &parent, 8);
1579 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
1580 &atime, 16);
1581 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
1582 &mtime, 16);
1583 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
1584 &ctime, 16);
1585 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zfsvfs), NULL,
1586 &crtime, 16);
1587 if (Z_ISBLK(ZTOTYPE(zp)) || Z_ISCHR(ZTOTYPE(zp)))
1588 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_RDEV(zfsvfs), NULL,
1589 &rdev, 8);
1590 } else {
1591 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
1592 &atime, 16);
1593 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
1594 &mtime, 16);
1595 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
1596 &ctime, 16);
1597 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zfsvfs), NULL,
1598 &crtime, 16);
1599 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL,
1600 &gen, 8);
1601 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
1602 &mode, 8);
1603 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL,
1604 &parent, 8);
1605 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_XATTR(zfsvfs), NULL,
1606 &xattr, 8);
1607 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_RDEV(zfsvfs), NULL,
1608 &rdev, 8);
1609 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
1610 &uid, 8);
1611 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
1612 &gid, 8);
1613 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ZNODE_ACL(zfsvfs), NULL,
1614 &znode_acl, 88);
1615 }
1616 err = sa_bulk_lookup_locked(hdl, bulk, count);
1617 if (err != 0)
1618 goto out;
1619
1620 err = sa_lookup_locked(hdl, SA_ZPL_XATTR(zfsvfs), &xattr, 8);
1621 if (err != 0 && err != ENOENT)
1622 goto out;
1623
1624 zp->z_projid = projid;
1625 zp->z_pflags |= ZFS_PROJID;
1626 links = ZTONLNK(zp);
1627 count = 0;
1628 err = 0;
1629
1630 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
1631 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_SIZE(zfsvfs), NULL,
1632 &zp->z_size, 8);
1633 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_GEN(zfsvfs), NULL, &gen, 8);
1634 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_UID(zfsvfs), NULL, &uid, 8);
1635 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_GID(zfsvfs), NULL, &gid, 8);
1636 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_PARENT(zfsvfs), NULL, &parent, 8);
1637 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_FLAGS(zfsvfs), NULL,
1638 &zp->z_pflags, 8);
1639 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16);
1640 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
1641 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
1642 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_CRTIME(zfsvfs), NULL,
1643 &crtime, 16);
1644 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_LINKS(zfsvfs), NULL, &links, 8);
1645 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_PROJID(zfsvfs), NULL, &projid, 8);
1646
1647 if (Z_ISBLK(ZTOTYPE(zp)) || Z_ISCHR(ZTOTYPE(zp)))
1648 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_RDEV(zfsvfs), NULL,
1649 &rdev, 8);
1650
1651 if (zp->z_acl_cached != NULL) {
1652 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_DACL_COUNT(zfsvfs), NULL,
1653 &zp->z_acl_cached->z_acl_count, 8);
1654 if (zp->z_acl_cached->z_version < ZFS_ACL_VERSION_FUID)
1655 zfs_acl_xform(zp, zp->z_acl_cached, CRED());
1656 locate.cb_aclp = zp->z_acl_cached;
1657 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_DACL_ACES(zfsvfs),
1658 zfs_acl_data_locator, &locate,
1659 zp->z_acl_cached->z_acl_bytes);
1660 }
1661
1662 if (xattr)
1663 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_XATTR(zfsvfs), NULL,
1664 &xattr, 8);
1665
1666 if (zp->z_pflags & ZFS_BONUS_SCANSTAMP) {
1667 memcpy(scanstamp,
1668 (caddr_t)db->db_data + ZFS_OLD_ZNODE_PHYS_SIZE,
1669 AV_SCANSTAMP_SZ);
1670 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_SCANSTAMP(zfsvfs), NULL,
1671 scanstamp, AV_SCANSTAMP_SZ);
1672 zp->z_pflags &= ~ZFS_BONUS_SCANSTAMP;
1673 }
1674
1675 VERIFY(dmu_set_bonustype(db, DMU_OT_SA, tx) == 0);
1676 VERIFY(sa_replace_all_by_template_locked(hdl, attrs, count, tx) == 0);
1677 if (znode_acl.z_acl_extern_obj) {
1678 VERIFY(0 == dmu_object_free(zfsvfs->z_os,
1679 znode_acl.z_acl_extern_obj, tx));
1680 }
1681
1682 zp->z_is_sa = B_TRUE;
1683
1684 out:
1685 mutex_exit(&zp->z_lock);
1686 mutex_exit(&hdl->sa_lock);
1687 kmem_free(attrs, sizeof (sa_bulk_attr_t) * ZPL_END);
1688 kmem_free(bulk, sizeof (sa_bulk_attr_t) * ZPL_END);
1689 return (err);
1690 }
1691 #endif
1692
1693 static sa_idx_tab_t *
1694 sa_find_idx_tab(objset_t *os, dmu_object_type_t bonustype, sa_hdr_phys_t *hdr)
1695 {
1696 sa_idx_tab_t *idx_tab;
1697 sa_os_t *sa = os->os_sa;
1698 sa_lot_t *tb, search;
1699 avl_index_t loc;
1700
1701 /*
	 * Determine layout number. If SA node and header == 0 then
1703 * force the index table to the dummy "1" empty layout.
1704 *
1705 * The layout number would only be zero for a newly created file
1706 * that has not added any attributes yet, or with crypto enabled which
1707 * doesn't write any attributes to the bonus buffer.
1708 */
1709
1710 search.lot_num = SA_LAYOUT_NUM(hdr, bonustype);
1711
1712 tb = avl_find(&sa->sa_layout_num_tree, &search, &loc);
1713
1714 /* Verify header size is consistent with layout information */
1715 ASSERT(tb);
1716 ASSERT((IS_SA_BONUSTYPE(bonustype) &&
1717 SA_HDR_SIZE_MATCH_LAYOUT(hdr, tb)) || !IS_SA_BONUSTYPE(bonustype) ||
1718 (IS_SA_BONUSTYPE(bonustype) && hdr->sa_layout_info == 0));
1719
1720 /*
	 * See if any of the already existing TOC entries can be reused.
1722 */
1723
1724 for (idx_tab = list_head(&tb->lot_idx_tab); idx_tab;
1725 idx_tab = list_next(&tb->lot_idx_tab, idx_tab)) {
1726 boolean_t valid_idx = B_TRUE;
1727 int i;
1728
1729 if (tb->lot_var_sizes != 0 &&
1730 idx_tab->sa_variable_lengths != NULL) {
1731 for (i = 0; i != tb->lot_var_sizes; i++) {
1732 if (hdr->sa_lengths[i] !=
1733 idx_tab->sa_variable_lengths[i]) {
1734 valid_idx = B_FALSE;
1735 break;
1736 }
1737 }
1738 }
1739 if (valid_idx) {
1740 sa_idx_tab_hold(os, idx_tab);
1741 return (idx_tab);
1742 }
1743 }
1744
1745 /* No such luck, create a new entry */
1746 idx_tab = kmem_zalloc(sizeof (sa_idx_tab_t), KM_SLEEP);
1747 idx_tab->sa_idx_tab =
1748 kmem_zalloc(sizeof (uint32_t) * sa->sa_num_attrs, KM_SLEEP);
1749 idx_tab->sa_layout = tb;
1750 zfs_refcount_create(&idx_tab->sa_refcount);
1751 if (tb->lot_var_sizes)
1752 idx_tab->sa_variable_lengths = kmem_alloc(sizeof (uint16_t) *
1753 tb->lot_var_sizes, KM_SLEEP);
1754
1755 sa_attr_iter(os, hdr, bonustype, sa_build_idx_tab,
1756 tb, idx_tab);
1757 sa_idx_tab_hold(os, idx_tab); /* one hold for consumer */
1758 sa_idx_tab_hold(os, idx_tab); /* one for layout */
1759 list_insert_tail(&tb->lot_idx_tab, idx_tab);
1760 return (idx_tab);
1761 }
1762
1763 void
1764 sa_default_locator(void **dataptr, uint32_t *len, uint32_t total_len,
1765 boolean_t start, void *userdata)
1766 {
1767 ASSERT(start);
1768
1769 *dataptr = userdata;
1770 *len = total_len;
1771 }
1772
1773 static void
1774 sa_attr_register_sync(sa_handle_t *hdl, dmu_tx_t *tx)
1775 {
1776 uint64_t attr_value = 0;
1777 sa_os_t *sa = hdl->sa_os->os_sa;
1778 sa_attr_table_t *tb = sa->sa_attr_table;
1779 int i;
1780
1781 mutex_enter(&sa->sa_lock);
1782
1783 if (!sa->sa_need_attr_registration || sa->sa_master_obj == 0) {
1784 mutex_exit(&sa->sa_lock);
1785 return;
1786 }
1787
1788 if (sa->sa_reg_attr_obj == 0) {
1789 sa->sa_reg_attr_obj = zap_create_link(hdl->sa_os,
1790 DMU_OT_SA_ATTR_REGISTRATION,
1791 sa->sa_master_obj, SA_REGISTRY, tx);
1792 }
1793 for (i = 0; i != sa->sa_num_attrs; i++) {
1794 if (sa->sa_attr_table[i].sa_registered)
1795 continue;
1796 ATTR_ENCODE(attr_value, tb[i].sa_attr, tb[i].sa_length,
1797 tb[i].sa_byteswap);
1798 VERIFY(0 == zap_update(hdl->sa_os, sa->sa_reg_attr_obj,
1799 tb[i].sa_name, 8, 1, &attr_value, tx));
1800 tb[i].sa_registered = B_TRUE;
1801 }
1802 sa->sa_need_attr_registration = B_FALSE;
1803 mutex_exit(&sa->sa_lock);
1804 }
1805
1806 /*
1807 * Replace all attributes with attributes specified in template.
 * If the dnode had a spill buffer then those attributes will
 * also be replaced, possibly with just an empty spill block.
 *
 * This interface is intended to only be used for bulk adding of
 * attributes for a new file. It will also be used by the ZPL
 * when converting an old-format znode to native SA support.
1814 */
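/*
 * An illustrative template build for sa_replace_all_by_template() (a sketch;
 * "sa_attrs" is the table returned by sa_setup() and the MY_ATTR_* indexes
 * are hypothetical caller-side names):
 *
 *	sa_bulk_attr_t tmpl[2];
 *	int cnt = 0;
 *
 *	SA_ADD_BULK_ATTR(tmpl, cnt, sa_attrs[MY_ATTR_MODE], NULL, &mode, 8);
 *	SA_ADD_BULK_ATTR(tmpl, cnt, sa_attrs[MY_ATTR_SIZE], NULL, &size, 8);
 *	VERIFY0(sa_replace_all_by_template(hdl, tmpl, cnt, tx));
 */
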
1815 int
1816 sa_replace_all_by_template_locked(sa_handle_t *hdl, sa_bulk_attr_t *attr_desc,
1817 int attr_count, dmu_tx_t *tx)
1818 {
1819 sa_os_t *sa = hdl->sa_os->os_sa;
1820
1821 if (sa->sa_need_attr_registration)
1822 sa_attr_register_sync(hdl, tx);
1823 return (sa_build_layouts(hdl, attr_desc, attr_count, tx));
1824 }
1825
1826 int
1827 sa_replace_all_by_template(sa_handle_t *hdl, sa_bulk_attr_t *attr_desc,
1828 int attr_count, dmu_tx_t *tx)
1829 {
1830 int error;
1831
1832 mutex_enter(&hdl->sa_lock);
1833 error = sa_replace_all_by_template_locked(hdl, attr_desc,
1834 attr_count, tx);
1835 mutex_exit(&hdl->sa_lock);
1836 return (error);
1837 }
1838
1839 /*
1840 * Add/remove a single attribute or replace a variable-sized attribute value
1841 * with a value of a different size, and then rewrite the entire set
1842 * of attributes.
1843 * Same-length attribute value replacement (including fixed-length attributes)
1844 * is handled more efficiently by the upper layers.
1845 */
1846 static int
1847 sa_modify_attrs(sa_handle_t *hdl, sa_attr_type_t newattr,
1848 sa_data_op_t action, sa_data_locator_t *locator, void *datastart,
1849 uint16_t buflen, dmu_tx_t *tx)
1850 {
1851 sa_os_t *sa = hdl->sa_os->os_sa;
1852 dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
1853 dnode_t *dn;
1854 sa_bulk_attr_t *attr_desc;
1855 void *old_data[2];
1856 int bonus_attr_count = 0;
1857 int bonus_data_size = 0;
1858 int spill_data_size = 0;
1859 int spill_attr_count = 0;
1860 int error;
1861 uint16_t length, reg_length;
1862 int i, j, k, length_idx;
1863 sa_hdr_phys_t *hdr;
1864 sa_idx_tab_t *idx_tab;
1865 int attr_count;
1866 int count;
1867
1868 ASSERT(MUTEX_HELD(&hdl->sa_lock));
1869
1870 /* First make a copy of the old data */
1871
1872 DB_DNODE_ENTER(db);
1873 dn = DB_DNODE(db);
1874 if (dn->dn_bonuslen != 0) {
1875 bonus_data_size = hdl->sa_bonus->db_size;
1876 old_data[0] = kmem_alloc(bonus_data_size, KM_SLEEP);
1877 memcpy(old_data[0], hdl->sa_bonus->db_data,
1878 hdl->sa_bonus->db_size);
1879 bonus_attr_count = hdl->sa_bonus_tab->sa_layout->lot_attr_count;
1880 } else {
1881 old_data[0] = NULL;
1882 }
1883 DB_DNODE_EXIT(db);
1884
1885 /* Bring the spill buffer online if it isn't already */
1886
1887 if ((error = sa_get_spill(hdl)) == 0) {
1888 spill_data_size = hdl->sa_spill->db_size;
1889 old_data[1] = vmem_alloc(spill_data_size, KM_SLEEP);
1890 memcpy(old_data[1], hdl->sa_spill->db_data,
1891 hdl->sa_spill->db_size);
1892 spill_attr_count =
1893 hdl->sa_spill_tab->sa_layout->lot_attr_count;
1894 } else if (error && error != ENOENT) {
1895 if (old_data[0])
1896 kmem_free(old_data[0], bonus_data_size);
1897 return (error);
1898 } else {
1899 old_data[1] = NULL;
1900 }
1901
1902 /* build descriptor of all attributes */
1903
1904 attr_count = bonus_attr_count + spill_attr_count;
1905 if (action == SA_ADD)
1906 attr_count++;
1907 else if (action == SA_REMOVE)
1908 attr_count--;
1909
1910 attr_desc = kmem_zalloc(sizeof (sa_bulk_attr_t) * attr_count, KM_SLEEP);
1911
1912 /*
1913 * Loop through the bonus and spill buffer (if it exists), and
1914 * build up a new attribute descriptor to reset the attributes.
1915 */
1916 k = j = 0;
1917 count = bonus_attr_count;
1918 hdr = SA_GET_HDR(hdl, SA_BONUS);
1919 idx_tab = SA_IDX_TAB_GET(hdl, SA_BONUS);
1920 for (; k != 2; k++) {
1921 /*
1922 * Iterate over each attribute in layout. Fetch the
1923 * size of variable-length attributes needing rewrite
1924 * from sa_lengths[].
1925 */
1926 for (i = 0, length_idx = 0; i != count; i++) {
1927 sa_attr_type_t attr;
1928
1929 attr = idx_tab->sa_layout->lot_attrs[i];
1930 reg_length = SA_REGISTERED_LEN(sa, attr);
1931 if (reg_length == 0) {
1932 length = hdr->sa_lengths[length_idx];
1933 length_idx++;
1934 } else {
1935 length = reg_length;
1936 }
1937 if (attr == newattr) {
1938 /*
1939 * There is nothing to do for SA_REMOVE,
1940 * so it is just skipped.
1941 */
1942 if (action == SA_REMOVE)
1943 continue;
1944
1945 /*
1946 * Duplicate attributes are not allowed, so the
1947 * action cannot be SA_ADD here.
1948 */
1949 ASSERT3S(action, ==, SA_REPLACE);
1950
1951 /*
1952 * Only a variable-sized attribute can be
1953 * replaced here, and its size must be changing.
1954 */
1955 ASSERT3U(reg_length, ==, 0);
1956 ASSERT3U(length, !=, buflen);
1957 SA_ADD_BULK_ATTR(attr_desc, j, attr,
1958 locator, datastart, buflen);
1959 } else {
1960 SA_ADD_BULK_ATTR(attr_desc, j, attr,
1961 NULL, (void *)
1962 (TOC_OFF(idx_tab->sa_idx_tab[attr]) +
1963 (uintptr_t)old_data[k]), length);
1964 }
1965 }
1966 if (k == 0 && hdl->sa_spill) {
1967 hdr = SA_GET_HDR(hdl, SA_SPILL);
1968 idx_tab = SA_IDX_TAB_GET(hdl, SA_SPILL);
1969 count = spill_attr_count;
1970 } else {
1971 break;
1972 }
1973 }
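/*
 * For SA_ADD, append the new attribute to the end of the rebuilt
 * descriptor.  A fixed-length (registered) attribute must be added
 * with exactly its registered length.
 */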
1974 if (action == SA_ADD) {
1975 reg_length = SA_REGISTERED_LEN(sa, newattr);
1976 IMPLY(reg_length != 0, reg_length == buflen);
1977 SA_ADD_BULK_ATTR(attr_desc, j, newattr, locator,
1978 datastart, buflen);
1979 }
1980 ASSERT3U(j, ==, attr_count);
1981
1982 error = sa_build_layouts(hdl, attr_desc, attr_count, tx);
1983
1984 if (old_data[0])
1985 kmem_free(old_data[0], bonus_data_size);
1986 if (old_data[1])
1987 vmem_free(old_data[1], spill_data_size);
1988 kmem_free(attr_desc, sizeof (sa_bulk_attr_t) * attr_count);
1989
1990 return (error);
1991 }
1992
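/*
 * Common bulk-update path: sync out the attribute registration table if
 * needed, apply the updates, invoke the registered update callback for
 * non-SA bonus types, and drop any spill buffer hold acquired during
 * this call so the buffer is not copied in dbuf_sync_leaf().
 */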
1993 static int
1994 sa_bulk_update_impl(sa_handle_t *hdl, sa_bulk_attr_t *bulk, int count,
1995 dmu_tx_t *tx)
1996 {
1997 int error;
1998 sa_os_t *sa = hdl->sa_os->os_sa;
1999 dmu_object_type_t bonustype;
2000 dmu_buf_t *saved_spill;
2001
2002 ASSERT(hdl);
2003 ASSERT(MUTEX_HELD(&hdl->sa_lock));
2004
2005 bonustype = SA_BONUSTYPE_FROM_DB(SA_GET_DB(hdl, SA_BONUS));
2006 saved_spill = hdl->sa_spill;
2007
2008 /* sync out registration table if necessary */
2009 if (sa->sa_need_attr_registration)
2010 sa_attr_register_sync(hdl, tx);
2011
2012 error = sa_attr_op(hdl, bulk, count, SA_UPDATE, tx);
2013 if (error == 0 && !IS_SA_BONUSTYPE(bonustype) && sa->sa_update_cb)
2014 sa->sa_update_cb(hdl, tx);
2015
2016 /*
2017 * If saved_spill is NULL and the current sa_spill is not NULL,
2018 * then we increased the refcount of the spill buffer through
2019 * sa_get_spill() or dmu_spill_hold_by_dnode(). Therefore we
2020 * must release the hold before calling dmu_tx_commit() to avoid
2021 * making a copy of this buffer in dbuf_sync_leaf() due to the
2022 * reference count now being greater than 1.
2023 */
2024 if (!saved_spill && hdl->sa_spill) {
2025 if (hdl->sa_spill_tab) {
2026 sa_idx_tab_rele(hdl->sa_os, hdl->sa_spill_tab);
2027 hdl->sa_spill_tab = NULL;
2028 }
2029
2030 dmu_buf_rele(hdl->sa_spill, NULL);
2031 hdl->sa_spill = NULL;
2032 }
2033
2034 return (error);
2035 }
2036
2037 /*
2038 * Update an existing attribute or add a new one.
2039 */
2040 int
2041 sa_update(sa_handle_t *hdl, sa_attr_type_t type,
2042 void *buf, uint32_t buflen, dmu_tx_t *tx)
2043 {
2044 int error;
2045 sa_bulk_attr_t bulk;
2046
2047 VERIFY3U(buflen, <=, SA_ATTR_MAX_LEN);
2048
2049 bulk.sa_attr = type;
2050 bulk.sa_data_func = NULL;
2051 bulk.sa_length = buflen;
2052 bulk.sa_data = buf;
2053
2054 mutex_enter(&hdl->sa_lock);
2055 error = sa_bulk_update_impl(hdl, &bulk, 1, tx);
2056 mutex_exit(&hdl->sa_lock);
2057 return (error);
2058 }
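
/*
 * Illustrative sketch of a typical sa_update() caller (hypothetical
 * handle and attribute id; shown only as an assumption about how the
 * surrounding transaction is usually managed):
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	uint64_t newsize = 4096;
 *	int error;
 *
 *	dmu_tx_hold_sa(tx, hdl, B_FALSE);
 *	error = dmu_tx_assign(tx, TXG_WAIT);
 *	if (error != 0) {
 *		dmu_tx_abort(tx);
 *		return (error);
 *	}
 *	error = sa_update(hdl, sa_attr_size, &newsize, sizeof (newsize), tx);
 *	dmu_tx_commit(tx);
 */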
2059
2060 /*
2061 * Return size of an attribute
2062 */
2063
2064 int
2065 sa_size(sa_handle_t *hdl, sa_attr_type_t attr, int *size)
2066 {
2067 sa_bulk_attr_t bulk;
2068 int error;
2069
2070 bulk.sa_data = NULL;
2071 bulk.sa_attr = attr;
2072 bulk.sa_data_func = NULL;
2073
2074 ASSERT(hdl);
2075 mutex_enter(&hdl->sa_lock);
2076 if ((error = sa_attr_op(hdl, &bulk, 1, SA_LOOKUP, NULL)) != 0) {
2077 mutex_exit(&hdl->sa_lock);
2078 return (error);
2079 }
2080 *size = bulk.sa_size;
2081
2082 mutex_exit(&hdl->sa_lock);
2083 return (0);
2084 }
2085
2086 int
2087 sa_bulk_lookup_locked(sa_handle_t *hdl, sa_bulk_attr_t *attrs, int count)
2088 {
2089 ASSERT(hdl);
2090 ASSERT(MUTEX_HELD(&hdl->sa_lock));
2091 return (sa_lookup_impl(hdl, attrs, count));
2092 }
2093
2094 int
2095 sa_bulk_lookup(sa_handle_t *hdl, sa_bulk_attr_t *attrs, int count)
2096 {
2097 int error;
2098
2099 ASSERT(hdl);
2100 mutex_enter(&hdl->sa_lock);
2101 error = sa_bulk_lookup_locked(hdl, attrs, count);
2102 mutex_exit(&hdl->sa_lock);
2103 return (error);
2104 }
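
/*
 * Illustrative sketch of a bulk lookup (hypothetical attribute ids):
 *
 *	sa_bulk_attr_t attrs[2];
 *	int count = 0;
 *	uint64_t mode, size;
 *
 *	SA_ADD_BULK_ATTR(attrs, count, sa_attr_mode, NULL, &mode,
 *	    sizeof (mode));
 *	SA_ADD_BULK_ATTR(attrs, count, sa_attr_size, NULL, &size,
 *	    sizeof (size));
 *	error = sa_bulk_lookup(hdl, attrs, count);
 *
 * A single lock/unlock of the handle covers all of the attributes.
 */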
2105
2106 int
2107 sa_bulk_update(sa_handle_t *hdl, sa_bulk_attr_t *attrs, int count, dmu_tx_t *tx)
2108 {
2109 int error;
2110
2111 ASSERT(hdl);
2112 mutex_enter(&hdl->sa_lock);
2113 error = sa_bulk_update_impl(hdl, attrs, count, tx);
2114 mutex_exit(&hdl->sa_lock);
2115 return (error);
2116 }
2117
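/*
 * Remove a single attribute.  This goes through sa_modify_attrs(), which
 * rewrites the remaining attributes and may relayout the bonus and spill
 * buffers as a side effect.
 */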
2118 int
2119 sa_remove(sa_handle_t *hdl, sa_attr_type_t attr, dmu_tx_t *tx)
2120 {
2121 int error;
2122
2123 mutex_enter(&hdl->sa_lock);
2124 error = sa_modify_attrs(hdl, attr, SA_REMOVE, NULL,
2125 NULL, 0, tx);
2126 mutex_exit(&hdl->sa_lock);
2127 return (error);
2128 }
2129
2130 void
2131 sa_object_info(sa_handle_t *hdl, dmu_object_info_t *doi)
2132 {
2133 dmu_object_info_from_db(hdl->sa_bonus, doi);
2134 }
2135
2136 void
2137 sa_object_size(sa_handle_t *hdl, uint32_t *blksize, u_longlong_t *nblocks)
2138 {
2139 dmu_object_size_from_db(hdl->sa_bonus,
2140 blksize, nblocks);
2141 }
2142
2143 void
2144 sa_set_userp(sa_handle_t *hdl, void *ptr)
2145 {
2146 hdl->sa_userp = ptr;
2147 }
2148
2149 dmu_buf_t *
2150 sa_get_db(sa_handle_t *hdl)
2151 {
2152 return (hdl->sa_bonus);
2153 }
2154
2155 void *
2156 sa_get_userdata(sa_handle_t *hdl)
2157 {
2158 return (hdl->sa_userp);
2159 }
2160
2161 void
2162 sa_register_update_callback_locked(objset_t *os, sa_update_cb_t *func)
2163 {
2164 ASSERT(MUTEX_HELD(&os->os_sa->sa_lock));
2165 os->os_sa->sa_update_cb = func;
2166 }
2167
2168 void
2169 sa_register_update_callback(objset_t *os, sa_update_cb_t *func)
2170 {
2171
2172 mutex_enter(&os->os_sa->sa_lock);
2173 sa_register_update_callback_locked(os, func);
2174 mutex_exit(&os->os_sa->sa_lock);
2175 }
2176
2177 uint64_t
2178 sa_handle_object(sa_handle_t *hdl)
2179 {
2180 return (hdl->sa_bonus->db_object);
2181 }
2182
2183 boolean_t
2184 sa_enabled(objset_t *os)
2185 {
2186 return (os->os_sa == NULL);
2187 }
2188
2189 int
2190 sa_set_sa_object(objset_t *os, uint64_t sa_object)
2191 {
2192 sa_os_t *sa = os->os_sa;
2193
2194 if (sa->sa_master_obj)
2195 return (1);
2196
2197 sa->sa_master_obj = sa_object;
2198
2199 return (0);
2200 }
2201
2202 int
2203 sa_hdrsize(void *arg)
2204 {
2205 sa_hdr_phys_t *hdr = arg;
2206
2207 return (SA_HDR_SIZE(hdr));
2208 }
2209
2210 void
2211 sa_handle_lock(sa_handle_t *hdl)
2212 {
2213 ASSERT(hdl);
2214 mutex_enter(&hdl->sa_lock);
2215 }
2216
2217 void
2218 sa_handle_unlock(sa_handle_t *hdl)
2219 {
2220 ASSERT(hdl);
2221 mutex_exit(&hdl->sa_lock);
2222 }
2223
2224 #ifdef _KERNEL
2225 EXPORT_SYMBOL(sa_handle_get);
2226 EXPORT_SYMBOL(sa_handle_get_from_db);
2227 EXPORT_SYMBOL(sa_handle_destroy);
2228 EXPORT_SYMBOL(sa_buf_hold);
2229 EXPORT_SYMBOL(sa_buf_rele);
2230 EXPORT_SYMBOL(sa_spill_rele);
2231 EXPORT_SYMBOL(sa_lookup);
2232 EXPORT_SYMBOL(sa_update);
2233 EXPORT_SYMBOL(sa_remove);
2234 EXPORT_SYMBOL(sa_bulk_lookup);
2235 EXPORT_SYMBOL(sa_bulk_lookup_locked);
2236 EXPORT_SYMBOL(sa_bulk_update);
2237 EXPORT_SYMBOL(sa_size);
2238 EXPORT_SYMBOL(sa_object_info);
2239 EXPORT_SYMBOL(sa_object_size);
2240 EXPORT_SYMBOL(sa_get_userdata);
2241 EXPORT_SYMBOL(sa_set_userp);
2242 EXPORT_SYMBOL(sa_get_db);
2243 EXPORT_SYMBOL(sa_handle_object);
2244 EXPORT_SYMBOL(sa_register_update_callback);
2245 EXPORT_SYMBOL(sa_setup);
2246 EXPORT_SYMBOL(sa_replace_all_by_template);
2247 EXPORT_SYMBOL(sa_replace_all_by_template_locked);
2248 EXPORT_SYMBOL(sa_enabled);
2249 EXPORT_SYMBOL(sa_cache_init);
2250 EXPORT_SYMBOL(sa_cache_fini);
2251 EXPORT_SYMBOL(sa_set_sa_object);
2252 EXPORT_SYMBOL(sa_hdrsize);
2253 EXPORT_SYMBOL(sa_handle_lock);
2254 EXPORT_SYMBOL(sa_handle_unlock);
2255 EXPORT_SYMBOL(sa_lookup_uio);
2256 EXPORT_SYMBOL(sa_add_projid);
2257 #endif /* _KERNEL */