FreeBSD/Linux Kernel Cross Reference
sys/fs/jfs/xattr.c
1 /*
2 * Copyright (c) International Business Machines Corp., 2000-2002
3 * Copyright (c) Christoph Hellwig, 2002
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
13 * the GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20 #include <linux/fs.h>
21 #include <linux/xattr.h>
22 #include "jfs_incore.h"
23 #include "jfs_dmap.h"
24 #include "jfs_debug.h"
25 #include "jfs_dinode.h"
26 #include "jfs_extent.h"
27 #include "jfs_metapage.h"
28 #include "jfs_xattr.h"
29
30 /*
31 * jfs_xattr.c: extended attribute service
32 *
33 * Overall design --
34 *
35 * Format:
36 *
37 * Extended attribute lists (jfs_ea_list) consist of an overall size (32 bit
38 * value) and a variable (0 or more) number of extended attribute
39 * entries. Each extended attribute entry (jfs_ea) is a <name,value> double
40 * where <name> is constructed from a null-terminated ascii string
41 * (1 ... 255 bytes in the name) and <value> is arbitrary 8 bit data
42 * (1 ... 65535 bytes). The in-memory format is
43 *
44 * 0 1 2 4 4 + namelen + 1
45 * +-------+--------+--------+----------------+-------------------+
46 * | Flags | Name | Value | Name String \0 | Data . . . . |
47 * | | Length | Length | | |
48 * +-------+--------+--------+----------------+-------------------+
49 *
50 * A jfs_ea_list then is structured as
51 *
52 * 0 4 4 + EA_SIZE(ea1)
53 * +------------+-------------------+--------------------+-----
54 * | Overall EA | First FEA Element | Second FEA Element | .....
55 * | List Size | | |
56 * +------------+-------------------+--------------------+-----
57 *
58 * On-disk:
59 *
60 * FEALISTs are stored on disk using blocks allocated by dbAlloc() and
61 * written directly. An EA list may be in-lined in the inode if there is
62 * sufficient room available.
63 */
64
/*
 * In-memory handle for an inode's EA list.  Filled in by ea_get(), which
 * decides (via ->flag) whether ->xattr points into the inode's inline
 * area, a metapage, or a kmalloc'd buffer; released via ea_release()
 * or committed via ea_put().
 */
struct ea_buffer {
	int flag;		/* Indicates what storage xattr points to */
	int max_size;		/* largest xattr that fits in current buffer */
	dxd_t new_ea;		/* dxd to replace ea when modifying xattr */
	struct metapage *mp;	/* metapage containing ea list */
	struct jfs_ea_list *xattr;	/* buffer containing ea list */
};
72
/*
 * ea_buffer.flag values
 */
#define EA_INLINE 0x0001	/* xattr points at the inode's inline area */
#define EA_EXTENT 0x0002	/* xattr points at a metapage (ea_buffer.mp) */
#define EA_NEW 0x0004		/* backing storage freshly allocated by ea_get() */
#define EA_MALLOC 0x0008	/* xattr is a kmalloc'd contiguous copy */

/* Namespaces */
#define XATTR_SYSTEM_PREFIX "system."
#define XATTR_SYSTEM_PREFIX_LEN (sizeof (XATTR_SYSTEM_PREFIX) - 1)

#define XATTR_USER_PREFIX "user."
#define XATTR_USER_PREFIX_LEN (sizeof (XATTR_USER_PREFIX) - 1)

#define XATTR_OS2_PREFIX "os2."
#define XATTR_OS2_PREFIX_LEN (sizeof (XATTR_OS2_PREFIX) - 1)
90
91 /*
92 * These three routines are used to recognize on-disk extended attributes
93 * that are in a recognized namespace. If the attribute is not recognized,
94 * "os2." is prepended to the name
95 */
96 static inline int is_os2_xattr(struct jfs_ea *ea)
97 {
98 /*
99 * Check for "system."
100 */
101 if ((ea->namelen >= XATTR_SYSTEM_PREFIX_LEN) &&
102 !strncmp(ea->name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
103 return FALSE;
104 /*
105 * Check for "user."
106 */
107 if ((ea->namelen >= XATTR_USER_PREFIX_LEN) &&
108 !strncmp(ea->name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
109 return FALSE;
110 /*
111 * Add any other valid namespace prefixes here
112 */
113
114 /*
115 * We assume it's OS/2's flat namespace
116 */
117 return TRUE;
118 }
119
120 static inline int name_size(struct jfs_ea *ea)
121 {
122 if (is_os2_xattr(ea))
123 return ea->namelen + XATTR_OS2_PREFIX_LEN;
124 else
125 return ea->namelen;
126 }
127
128 static inline int copy_name(char *buffer, struct jfs_ea *ea)
129 {
130 int len = ea->namelen;
131
132 if (is_os2_xattr(ea)) {
133 memcpy(buffer, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN);
134 buffer += XATTR_OS2_PREFIX_LEN;
135 len += XATTR_OS2_PREFIX_LEN;
136 }
137 memcpy(buffer, ea->name, ea->namelen);
138 buffer[ea->namelen] = 0;
139
140 return len;
141 }
142
143 /* Forward references */
144 static void ea_release(struct inode *inode, struct ea_buffer *ea_buf);
145
146 /*
147 * NAME: ea_write_inline
148 *
149 * FUNCTION: Attempt to write an EA inline if area is available
150 *
151 * PRE CONDITIONS:
152 * Already verified that the specified EA is small enough to fit inline
153 *
154 * PARAMETERS:
155 * ip - Inode pointer
156 * ealist - EA list pointer
157 * size - size of ealist in bytes
158 * ea - dxd_t structure to be filled in with necessary EA information
159 * if we successfully copy the EA inline
160 *
161 * NOTES:
162 * Checks if the inode's inline area is available. If so, copies EA inline
163 * and sets <ea> fields appropriately. Otherwise, returns failure, EA will
164 * have to be put into an extent.
165 *
166 * RETURNS: 0 for successful copy to inline area; -1 if area not available
167 */
168 static int ea_write_inline(struct inode *ip, struct jfs_ea_list *ealist,
169 int size, dxd_t * ea)
170 {
171 struct jfs_inode_info *ji = JFS_IP(ip);
172
173 /*
174 * Make sure we have an EA -- the NULL EA list is valid, but you
175 * can't copy it!
176 */
177 if (ealist && size > sizeof (struct jfs_ea_list)) {
178 assert(size <= sizeof (ji->i_inline_ea));
179
180 /*
181 * See if the space is available or if it is already being
182 * used for an inline EA.
183 */
184 if (!(ji->mode2 & INLINEEA) && !(ji->ea.flag & DXD_INLINE))
185 return -1;
186
187 DXDsize(ea, size);
188 DXDlength(ea, 0);
189 DXDaddress(ea, 0);
190 memcpy(ji->i_inline_ea, ealist, size);
191 ea->flag = DXD_INLINE;
192 ji->mode2 &= ~INLINEEA;
193 } else {
194 ea->flag = 0;
195 DXDsize(ea, 0);
196 DXDlength(ea, 0);
197 DXDaddress(ea, 0);
198
199 /* Free up INLINE area */
200 if (ji->ea.flag & DXD_INLINE)
201 ji->mode2 |= INLINEEA;
202 }
203
204 mark_inode_dirty(ip);
205 return 0;
206 }
207
/*
 * NAME: ea_write
 *
 * FUNCTION: Write an EA for an inode
 *
 * PRE CONDITIONS: EA has been verified
 *
 * PARAMETERS:
 *	ip	- Inode pointer
 *	ealist	- EA list pointer
 *	size	- size of ealist in bytes
 *	ea	- dxd_t structure to be filled in appropriately with where the
 *		  EA was copied
 *
 * NOTES: Will write EA inline if able to, otherwise allocates blocks for an
 *	extent and synchronously writes it to those blocks.
 *
 * RETURNS: 0 for success; Anything else indicates failure
 */
static int ea_write(struct inode *ip, struct jfs_ea_list *ealist, int size,
		    dxd_t * ea)
{
	struct super_block *sb = ip->i_sb;
	struct jfs_inode_info *ji = JFS_IP(ip);
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	int nblocks;
	s64 blkno;
	int rc = 0, i;
	char *cp;
	s32 nbytes, nb;
	s32 bytes_to_write;
	struct metapage *mp;

	/*
	 * Quick check to see if this is an in-linable EA.  Short EAs
	 * and empty EAs are all in-linable, provided the space exists.
	 * If ea_write_inline() fails (inline area unavailable) we fall
	 * through to extent storage.
	 */
	if (!ealist || size <= sizeof (ji->i_inline_ea)) {
		if (!ea_write_inline(ip, ealist, size, ea))
			return 0;
	}

	/* figure out how many blocks we need (round size up) */
	nblocks = (size + (sb->s_blocksize - 1)) >> sb->s_blocksize_bits;

	rc = dbAlloc(ip, INOHINT(ip), nblocks, &blkno);
	if (rc)
		return -rc;

	/*
	 * Now have nblocks worth of storage to stuff into the FEALIST.
	 * loop over the FEALIST copying data into the buffer one page at
	 * a time.
	 */
	cp = (char *) ealist;
	nbytes = size;
	for (i = 0; i < nblocks; i += sbi->nbperpage) {
		/*
		 * Determine how many bytes for this request, and round up to
		 * the nearest aggregate block size
		 */
		nb = min(PSIZE, nbytes);
		bytes_to_write =
		    ((((nb + sb->s_blocksize - 1)) >> sb->s_blocksize_bits))
		    << sb->s_blocksize_bits;

		if (!(mp = get_metapage(ip, blkno + i, bytes_to_write, 1))) {
			rc = -EIO;
			goto failed;
		}

		memcpy(mp->data, cp, nb);

		/*
		 * We really need a way to propagate errors for
		 * forced writes like this one.  --hch
		 *
		 * (__write_metapage => release_metapage => flush_metapage)
		 */
#ifdef _JFS_FIXME
		if ((rc = flush_metapage(mp))) {
			/*
			 * the write failed -- this means that the buffer
			 * is still assigned and the blocks are not being
			 * used.  this seems like the best error recovery
			 * we can get ...
			 */
			goto failed;
		}
#else
		flush_metapage(mp);
#endif

		cp += PSIZE;
		nbytes -= nb;
	}

	/* record where the list now lives */
	ea->flag = DXD_EXTENT;
	DXDsize(ea, le32_to_cpu(ealist->size));
	DXDlength(ea, nblocks);
	DXDaddress(ea, blkno);

	/* Free up INLINE area */
	if (ji->ea.flag & DXD_INLINE)
		ji->mode2 |= INLINEEA;

	return 0;

      failed:
	/* give back the blocks we never committed */
	dbFree(ip, blkno, nblocks);
	return rc;
}
320
321 /*
322 * NAME: ea_read_inline
323 *
324 * FUNCTION: Read an inlined EA into user's buffer
325 *
326 * PARAMETERS:
327 * ip - Inode pointer
328 * ealist - Pointer to buffer to fill in with EA
329 *
330 * RETURNS: 0
331 */
332 static int ea_read_inline(struct inode *ip, struct jfs_ea_list *ealist)
333 {
334 struct jfs_inode_info *ji = JFS_IP(ip);
335 int ea_size = sizeDXD(&ji->ea);
336
337 if (ea_size == 0) {
338 ealist->size = 0;
339 return 0;
340 }
341
342 /* Sanity Check */
343 if ((sizeDXD(&ji->ea) > sizeof (ji->i_inline_ea)))
344 return -EIO;
345 if (le32_to_cpu(((struct jfs_ea_list *) &ji->i_inline_ea)->size)
346 != ea_size)
347 return -EIO;
348
349 memcpy(ealist, ji->i_inline_ea, ea_size);
350 return 0;
351 }
352
/*
 * NAME: ea_read
 *
 * FUNCTION: copy EA data into user's buffer
 *
 * PARAMETERS:
 *	ip	- Inode pointer
 *	ealist	- Pointer to buffer to fill in with EA
 *
 * NOTES: If EA is inline calls ea_read_inline() to copy EA.
 *
 * RETURNS: 0 for success; other indicates failure
 */
static int ea_read(struct inode *ip, struct jfs_ea_list *ealist)
{
	struct super_block *sb = ip->i_sb;
	struct jfs_inode_info *ji = JFS_IP(ip);
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	int nblocks;
	s64 blkno;
	char *cp = (char *) ealist;
	int i;
	int nbytes, nb;
	s32 bytes_to_read;
	struct metapage *mp;

	/* quick check for in-line EA */
	if (ji->ea.flag & DXD_INLINE)
		return ea_read_inline(ip, ealist);

	nbytes = sizeDXD(&ji->ea);
	assert(nbytes);

	/*
	 * Figure out how many blocks were allocated when this EA list was
	 * originally written to disk.
	 */
	nblocks = lengthDXD(&ji->ea) << sbi->l2nbperpage;
	blkno = addressDXD(&ji->ea) << sbi->l2nbperpage;

	/*
	 * I have found the disk blocks which were originally used to store
	 * the FEALIST.  now i loop over each contiguous block copying the
	 * data into the buffer.
	 */
	for (i = 0; i < nblocks; i += sbi->nbperpage) {
		/*
		 * Determine how many bytes for this request, and round up to
		 * the nearest aggregate block size
		 */
		nb = min(PSIZE, nbytes);
		bytes_to_read =
		    ((((nb + sb->s_blocksize - 1)) >> sb->s_blocksize_bits))
		    << sb->s_blocksize_bits;

		if (!(mp = read_metapage(ip, blkno + i, bytes_to_read, 1)))
			return -EIO;

		memcpy(cp, mp->data, nb);
		release_metapage(mp);

		cp += PSIZE;
		nbytes -= nb;
	}

	return 0;
}
420
421 /*
422 * NAME: ea_get
423 *
424 * FUNCTION: Returns buffer containing existing extended attributes.
425 * The size of the buffer will be the larger of the existing
426 * attributes size, or min_size.
427 *
428 * The buffer, which may be inlined in the inode or in the
429 * page cache must be release by calling ea_release or ea_put
430 *
431 * PARAMETERS:
432 * inode - Inode pointer
433 * ea_buf - Structure to be populated with ealist and its metadata
434 * min_size- minimum size of buffer to be returned
435 *
436 * RETURNS: 0 for success; Other indicates failure
437 */
438 static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
439 {
440 struct jfs_inode_info *ji = JFS_IP(inode);
441 struct super_block *sb = inode->i_sb;
442 int size;
443 int ea_size = sizeDXD(&ji->ea);
444 int blocks_needed, current_blocks;
445 s64 blkno;
446 int rc;
447
448 /* When fsck.jfs clears a bad ea, it doesn't clear the size */
449 if (ji->ea.flag == 0)
450 ea_size = 0;
451
452 if (ea_size == 0) {
453 if (min_size == 0) {
454 ea_buf->flag = 0;
455 ea_buf->max_size = 0;
456 ea_buf->xattr = NULL;
457 return 0;
458 }
459 if ((min_size <= sizeof (ji->i_inline_ea)) &&
460 (ji->mode2 & INLINEEA)) {
461 ea_buf->flag = EA_INLINE | EA_NEW;
462 ea_buf->max_size = sizeof (ji->i_inline_ea);
463 ea_buf->xattr = (struct jfs_ea_list *) ji->i_inline_ea;
464 DXDlength(&ea_buf->new_ea, 0);
465 DXDaddress(&ea_buf->new_ea, 0);
466 ea_buf->new_ea.flag = DXD_INLINE;
467 DXDsize(&ea_buf->new_ea, min_size);
468 return 0;
469 }
470 current_blocks = 0;
471 } else if (ji->ea.flag & DXD_INLINE) {
472 if (min_size <= sizeof (ji->i_inline_ea)) {
473 ea_buf->flag = EA_INLINE;
474 ea_buf->max_size = sizeof (ji->i_inline_ea);
475 ea_buf->xattr = (struct jfs_ea_list *) ji->i_inline_ea;
476 goto size_check;
477 }
478 current_blocks = 0;
479 } else {
480 assert(ji->ea.flag & DXD_EXTENT);
481 current_blocks = (ea_size + sb->s_blocksize - 1) >>
482 sb->s_blocksize_bits;
483 }
484 size = max(min_size, ea_size);
485
486 if (size > PSIZE) {
487 /*
488 * To keep the rest of the code simple. Allocate a
489 * contiguous buffer to work with
490 */
491 ea_buf->xattr = kmalloc(size, GFP_KERNEL);
492 if (ea_buf->xattr == NULL)
493 return -ENOMEM;
494
495 ea_buf->flag |= EA_MALLOC;
496 ea_buf->max_size = (size + sb->s_blocksize - 1) &
497 ~(sb->s_blocksize - 1);
498
499 if (ea_size == 0)
500 return 0;
501
502 if ((rc = ea_read(inode, ea_buf->xattr))) {
503 kfree(ea_buf->xattr);
504 ea_buf->xattr = NULL;
505 return rc;
506 }
507 goto size_check;
508 }
509 blocks_needed = (min_size + sb->s_blocksize - 1) >>
510 sb->s_blocksize_bits;
511
512 if (blocks_needed > current_blocks) {
513 rc = dbAlloc(inode, INOHINT(inode), (s64) blocks_needed,
514 &blkno);
515 if (rc)
516 return -rc;
517
518 DXDlength(&ea_buf->new_ea, blocks_needed);
519 DXDaddress(&ea_buf->new_ea, blkno);
520 ea_buf->new_ea.flag = DXD_EXTENT;
521 DXDsize(&ea_buf->new_ea, min_size);
522
523 ea_buf->flag = EA_EXTENT | EA_NEW;
524
525 ea_buf->mp = get_metapage(inode, blkno,
526 blocks_needed << sb->s_blocksize_bits,
527 1);
528 if (ea_buf->mp == NULL) {
529 dbFree(inode, blkno, (s64) blocks_needed);
530 return -EIO;
531 }
532 ea_buf->xattr = ea_buf->mp->data;
533 ea_buf->max_size = (min_size + sb->s_blocksize - 1) &
534 ~(sb->s_blocksize - 1);
535 if (ea_size == 0)
536 return 0;
537 if ((rc = ea_read(inode, ea_buf->xattr))) {
538 discard_metapage(ea_buf->mp);
539 dbFree(inode, blkno, (s64) blocks_needed);
540 return rc;
541 }
542 goto size_check;
543 }
544 ea_buf->flag = EA_EXTENT;
545 ea_buf->mp = read_metapage(inode, addressDXD(&ji->ea),
546 lengthDXD(&ji->ea), 1);
547 if (ea_buf->mp == NULL)
548 return -EIO;
549 ea_buf->xattr = ea_buf->mp->data;
550 ea_buf->max_size = (ea_size + sb->s_blocksize - 1) &
551 ~(sb->s_blocksize - 1);
552
553 size_check:
554 if (EALIST_SIZE(ea_buf->xattr) != ea_size) {
555 printk(KERN_ERR "ea_get: invalid extended attribute\n");
556 dump_mem("xattr", ea_buf->xattr, ea_size);
557 ea_release(inode, ea_buf);
558 return -EIO;
559 }
560
561 return ea_size;
562 }
563
564 static void ea_release(struct inode *inode, struct ea_buffer *ea_buf)
565 {
566 if (ea_buf->flag & EA_MALLOC)
567 kfree(ea_buf->xattr);
568 else if (ea_buf->flag & EA_EXTENT) {
569 assert(ea_buf->mp);
570 release_metapage(ea_buf->mp);
571
572 if (ea_buf->flag & EA_NEW)
573 dbFree(inode, addressDXD(&ea_buf->new_ea),
574 lengthDXD(&ea_buf->new_ea));
575 }
576 }
577
/*
 * Commit a modified EA list (built up in ea_buf by the caller) to the
 * inode.  First flushes the data to its final storage (inline, malloc'd
 * copy via ea_write, new extent, or rewritten old extent), then updates
 * the inode's EA descriptor under a transaction.  new_size == 0 removes
 * the EA list entirely.  Returns 0 on success or a negative errno.
 */
static int ea_put(struct inode *inode, struct ea_buffer *ea_buf, int new_size)
{
	struct jfs_inode_info *ji = JFS_IP(inode);
	unsigned long old_blocks, new_blocks;
	int rc = 0;
	tid_t tid;

	if (new_size == 0) {
		/* removing: drop the buffer; NULL ea_buf selects the
		 * "no EA" branch of the transaction below */
		ea_release(inode, ea_buf);
		ea_buf = 0;
	} else if (ea_buf->flag & EA_INLINE) {
		assert(new_size <= sizeof (ji->i_inline_ea));
		ji->mode2 &= ~INLINEEA;
		ea_buf->new_ea.flag = DXD_INLINE;
		DXDsize(&ea_buf->new_ea, new_size);
		DXDaddress(&ea_buf->new_ea, 0);
		DXDlength(&ea_buf->new_ea, 0);
	} else if (ea_buf->flag & EA_MALLOC) {
		rc = ea_write(inode, ea_buf->xattr, new_size, &ea_buf->new_ea);
		kfree(ea_buf->xattr);
	} else if (ea_buf->flag & EA_NEW) {
		/* We have already allocated a new dxd */
		flush_metapage(ea_buf->mp);
	} else {
		/* ->xattr must point to original ea's metapage */
		rc = ea_write(inode, ea_buf->xattr, new_size, &ea_buf->new_ea);
		discard_metapage(ea_buf->mp);
	}
	if (rc)
		return rc;

	tid = txBegin(inode->i_sb, 0);
	down(&ji->commit_sem);

	old_blocks = new_blocks = 0;

	if (ji->ea.flag & DXD_EXTENT) {
		/* old extent's cached pages are now stale */
		invalidate_dxd_metapages(inode, ji->ea);
		old_blocks = lengthDXD(&ji->ea);
	}

	if (ea_buf) {
		/* log the descriptor change and install the new dxd */
		txEA(tid, inode, &ji->ea, &ea_buf->new_ea);
		if (ea_buf->new_ea.flag & DXD_EXTENT) {
			new_blocks = lengthDXD(&ea_buf->new_ea);
			if (ji->ea.flag & DXD_INLINE)
				ji->mode2 |= INLINEEA;
		}
		ji->ea = ea_buf->new_ea;
	} else {
		/* EA list removed: clear the descriptor */
		txEA(tid, inode, &ji->ea, 0);
		if (ji->ea.flag & DXD_INLINE)
			ji->mode2 |= INLINEEA;
		ji->ea.flag = 0;
		ji->ea.size = 0;
	}

	/* account for the block-count delta */
	inode->i_blocks += LBLK2PBLK(inode->i_sb, new_blocks - old_blocks);
	rc = txCommit(tid, 1, &inode, 0);
	txEnd(tid);
	up(&ji->commit_sem);

	return rc;
}
642
643 static int can_set_xattr(struct inode *inode, const char *name,
644 void *value, size_t value_len)
645 {
646 if (IS_RDONLY(inode))
647 return -EROFS;
648
649 if (IS_IMMUTABLE(inode) || IS_APPEND(inode) || S_ISLNK(inode->i_mode))
650 return -EPERM;
651
652 if((strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) != 0) &&
653 (strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) != 0))
654 return -EOPNOTSUPP;
655
656 if (!S_ISREG(inode->i_mode) &&
657 (!S_ISDIR(inode->i_mode) || inode->i_mode &S_ISVTX))
658 return -EPERM;
659
660 return permission(inode, MAY_WRITE);
661 }
662
/*
 * Set (create, replace, or — with value == NULL — remove) one extended
 * attribute on an inode.  The existing EA list is fetched with ea_get(),
 * rebuilt in place (old entry of the same name removed, new entry
 * appended), then committed with ea_put().  "os2."-prefixed names are
 * stored with the prefix stripped.  Returns 0 or a negative errno.
 */
int __jfs_setxattr(struct inode *inode, const char *name, void *value,
		   size_t value_len, int flags)
{
	struct jfs_ea_list *ealist;
	struct jfs_ea *ea, *old_ea = NULL, *next_ea = NULL;
	struct ea_buffer ea_buf;
	int old_ea_size = 0;
	int xattr_size;
	int new_size;
	int namelen = strlen(name);
	char *os2name = NULL;
	int found = 0;
	int rc;
	int length;

	if ((rc = can_set_xattr(inode, name, value, value_len)))
		return rc;

	/* strip "os2." — it is implied for names on disk */
	if (strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) == 0) {
		os2name = kmalloc(namelen - XATTR_OS2_PREFIX_LEN + 1,
				  GFP_KERNEL);
		if (!os2name)
			return -ENOMEM;
		strcpy(os2name, name + XATTR_OS2_PREFIX_LEN);
		name = os2name;
		namelen -= XATTR_OS2_PREFIX_LEN;
	}

	xattr_size = ea_get(inode, &ea_buf, 0);
	if (xattr_size < 0) {
		rc = xattr_size;
		goto out;
	}

      again:
	ealist = (struct jfs_ea_list *) ea_buf.xattr;
	new_size = sizeof (struct jfs_ea_list);

	/*
	 * Walk the existing list: locate an entry with the target name
	 * (remembering its bounds so it can be spliced out) while
	 * accumulating the size of all entries that will be kept.
	 */
	if (xattr_size) {
		for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist);
		     ea = NEXT_EA(ea)) {
			if ((namelen == ea->namelen) &&
			    (memcmp(name, ea->name, namelen) == 0)) {
				found = 1;
				if (flags & XATTR_CREATE) {
					rc = -EEXIST;
					goto release;
				}
				old_ea = ea;
				old_ea_size = EA_SIZE(ea);
				next_ea = NEXT_EA(ea);
			} else
				new_size += EA_SIZE(ea);
		}
	}

	if (!found) {
		if (flags & XATTR_REPLACE) {
			rc = -ENODATA;
			goto release;
		}
		if (value == NULL) {
			/* removing a nonexistent attribute is a no-op */
			rc = 0;
			goto release;
		}
	}
	if (value)
		new_size += sizeof (struct jfs_ea) + namelen + 1 + value_len;

	if (new_size > ea_buf.max_size) {
		/*
		 * We need to allocate more space for merged ea list.
		 * We should only have loop to again: once.
		 */
		ea_release(inode, &ea_buf);
		xattr_size = ea_get(inode, &ea_buf, new_size);
		if (xattr_size < 0) {
			rc = xattr_size;
			goto out;
		}
		goto again;
	}

	/* Remove old ea of the same name */
	if (found) {
		/* number of bytes following target EA */
		length = (char *) END_EALIST(ealist) - (char *) next_ea;
		if (length > 0)
			memmove(old_ea, next_ea, length);
		xattr_size -= old_ea_size;
	}

	/* Add new entry to the end */
	if (value) {
		if (xattr_size == 0)
			/* Completely new ea list */
			xattr_size = sizeof (struct jfs_ea_list);

		ea = (struct jfs_ea *) ((char *) ealist + xattr_size);
		ea->flag = 0;
		ea->namelen = namelen;
		ea->valuelen = (cpu_to_le16(value_len));
		memcpy(ea->name, name, namelen);
		ea->name[namelen] = 0;
		if (value_len)
			memcpy(&ea->name[namelen + 1], value, value_len);
		xattr_size += EA_SIZE(ea);
	}

	/* DEBUG - If we did this right, these number match */
	if (xattr_size != new_size) {
		printk(KERN_ERR
		       "jfs_xsetattr: xattr_size = %d, new_size = %d\n",
		       xattr_size, new_size);

		rc = -EINVAL;
		goto release;
	}

	/*
	 * If we're left with an empty list, there's no ea
	 */
	if (new_size == sizeof (struct jfs_ea_list))
		new_size = 0;

	ealist->size = cpu_to_le32(new_size);

	/* ea_put consumes ea_buf, so skip the release label */
	rc = ea_put(inode, &ea_buf, new_size);

	goto out;
      release:
	ea_release(inode, &ea_buf);
      out:
	if (os2name)
		kfree(os2name);

	return rc;
}
801
802 int jfs_setxattr(struct dentry *dentry, const char *name, void *value,
803 size_t value_len, int flags)
804 {
805 if (value == NULL) { /* empty EA, do not remove */
806 value = "";
807 value_len = 0;
808 }
809
810 return __jfs_setxattr(dentry->d_inode, name, value, value_len, flags);
811 }
812
/*
 * Policy check for reading an extended attribute: plain read permission
 * on the inode suffices, regardless of namespace.
 */
static inline int can_get_xattr(struct inode *inode, const char *name)
{
	return permission(inode, MAY_READ);
}
817
/*
 * Look up one extended attribute by name and copy its value into 'data'.
 * With data == NULL, only the value's size is reported (the query idiom
 * of getxattr(2)).  "os2."-prefixed names are translated to their
 * on-disk (prefix-stripped) form before the search.  Returns the value
 * length, -ENODATA if absent, -ERANGE if buf_size is too small, or
 * another negative errno.
 */
ssize_t __jfs_getxattr(struct inode *inode, const char *name, void *data,
		       size_t buf_size)
{
	struct jfs_ea_list *ealist;
	struct jfs_ea *ea;
	struct ea_buffer ea_buf;
	int xattr_size;
	ssize_t size;
	int namelen = strlen(name);
	char *os2name = NULL;
	int rc;
	char *value;

	if ((rc = can_get_xattr(inode, name)))
		return rc;

	/* strip "os2." — names are stored without it */
	if (strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) == 0) {
		os2name = kmalloc(namelen - XATTR_OS2_PREFIX_LEN + 1,
				  GFP_KERNEL);
		if (!os2name)
			return -ENOMEM;
		strcpy(os2name, name + XATTR_OS2_PREFIX_LEN);
		name = os2name;
		namelen -= XATTR_OS2_PREFIX_LEN;
	}

	xattr_size = ea_get(inode, &ea_buf, 0);
	if (xattr_size < 0) {
		size = xattr_size;
		goto out;
	}

	if (xattr_size == 0)
		goto not_found;

	ealist = (struct jfs_ea_list *) ea_buf.xattr;

	/* Find the named attribute */
	for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea))
		if ((namelen == ea->namelen) &&
		    memcmp(name, ea->name, namelen) == 0) {
			/* Found it */
			size = le16_to_cpu(ea->valuelen);
			if (!data)
				/* size query only */
				goto release;
			else if (size > buf_size) {
				size = -ERANGE;
				goto release;
			}
			/* value bytes follow the NUL-terminated name */
			value = ((char *) &ea->name) + ea->namelen + 1;
			memcpy(data, value, size);
			goto release;
		}
      not_found:
	size = -ENODATA;
      release:
	ea_release(inode, &ea_buf);
      out:
	if (os2name)
		kfree(os2name);

	return size;
}
881
882 ssize_t jfs_getxattr(struct dentry *dentry, const char *name, void *data,
883 size_t buf_size)
884 {
885 return __jfs_getxattr(dentry->d_inode, name, data, buf_size);
886 }
887
888 ssize_t jfs_listxattr(struct dentry * dentry, char *data, size_t buf_size)
889 {
890 struct inode *inode = dentry->d_inode;
891 char *buffer;
892 ssize_t size = 0;
893 int xattr_size;
894 struct jfs_ea_list *ealist;
895 struct jfs_ea *ea;
896 struct ea_buffer ea_buf;
897
898 xattr_size = ea_get(inode, &ea_buf, 0);
899 if (xattr_size < 0) {
900 size = xattr_size;
901 goto out;
902 }
903
904 if (xattr_size == 0)
905 goto release;
906
907 ealist = (struct jfs_ea_list *) ea_buf.xattr;
908
909 /* compute required size of list */
910 for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea))
911 size += name_size(ea) + 1;
912
913 if (!data)
914 goto release;
915
916 if (size > buf_size) {
917 size = -ERANGE;
918 goto release;
919 }
920
921 /* Copy attribute names to buffer */
922 buffer = data;
923 for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea)) {
924 int namelen = copy_name(buffer, ea);
925 buffer += namelen + 1;
926 }
927
928 release:
929 ea_release(inode, &ea_buf);
930 out:
931 return size;
932 }
933
934 int jfs_removexattr(struct dentry *dentry, const char *name)
935 {
936 return __jfs_setxattr(dentry->d_inode, name, 0, 0, XATTR_REPLACE);
937 }
Cache object: eaf3d3c97f694c0451e12c6dcb3bbe17
|