Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * linux/drivers/staging/erofs/xattr.c
4 *
5 * Copyright (C) 2017-2018 HUAWEI, Inc.
6 * http://www.huawei.com/
7 * Created by Gao Xiang <gaoxiang25@huawei.com>
8 *
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file COPYING in the main directory of the Linux
11 * distribution for more details.
12 */
13#include <linux/security.h>
14#include "xattr.h"
15
/*
 * Cursor over on-disk xattr metadata: 'page'/'kaddr' hold the currently
 * mapped metadata page, while 'blkaddr' plus the byte offset 'ofs'
 * identify the current position within the filesystem's xattr area.
 */
struct xattr_iter {
	struct super_block *sb;
	struct page *page;	/* currently mapped meta page (NULL if none) */
	void *kaddr;		/* kernel mapping of 'page' */

	erofs_blk_t blkaddr;	/* block address of 'page' */
	unsigned int ofs;	/* byte offset inside the current block */
};
24
25static inline void xattr_iter_end(struct xattr_iter *it, bool atomic)
26{
27 /* the only user of kunmap() is 'init_inode_xattrs' */
28 if (unlikely(!atomic))
29 kunmap(it->page);
30 else
31 kunmap_atomic(it->kaddr);
32
33 unlock_page(it->page);
34 put_page(it->page);
35}
36
37static inline void xattr_iter_end_final(struct xattr_iter *it)
38{
39 if (it->page == NULL)
40 return;
41
42 xattr_iter_end(it, true);
43}
44
45static int init_inode_xattrs(struct inode *inode)
46{
47 struct erofs_vnode *const vi = EROFS_V(inode);
48 struct xattr_iter it;
49 unsigned int i;
50 struct erofs_xattr_ibody_header *ih;
51 struct super_block *sb;
52 struct erofs_sb_info *sbi;
53 bool atomic_map;
54 int ret = 0;
55
56 /* the most case is that xattrs of this inode are initialized. */
57 if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags))
58 return 0;
59
60 if (wait_on_bit_lock(&vi->flags, EROFS_V_BL_XATTR_BIT, TASK_KILLABLE))
61 return -ERESTARTSYS;
62
63 /* someone has initialized xattrs for us? */
64 if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags))
65 goto out_unlock;
66
67 /*
68 * bypass all xattr operations if ->xattr_isize is not greater than
69 * sizeof(struct erofs_xattr_ibody_header), in detail:
70 * 1) it is not enough to contain erofs_xattr_ibody_header then
71 * ->xattr_isize should be 0 (it means no xattr);
72 * 2) it is just to contain erofs_xattr_ibody_header, which is on-disk
73 * undefined right now (maybe use later with some new sb feature).
74 */
75 if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) {
76 errln("xattr_isize %d of nid %llu is not supported yet",
77 vi->xattr_isize, vi->nid);
78 ret = -ENOTSUPP;
79 goto out_unlock;
80 } else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) {
81 if (unlikely(vi->xattr_isize)) {
82 DBG_BUGON(1);
83 ret = -EIO;
84 goto out_unlock; /* xattr ondisk layout error */
85 }
86 ret = -ENOATTR;
87 goto out_unlock;
88 }
89
90 sb = inode->i_sb;
91 sbi = EROFS_SB(sb);
92 it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize);
93 it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize);
94
95 it.page = erofs_get_inline_page(inode, it.blkaddr);
96 if (IS_ERR(it.page)) {
97 ret = PTR_ERR(it.page);
98 goto out_unlock;
99 }
100
101 /* read in shared xattr array (non-atomic, see kmalloc below) */
102 it.kaddr = kmap(it.page);
103 atomic_map = false;
104
105 ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs);
106
107 vi->xattr_shared_count = ih->h_shared_count;
108 vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count,
109 sizeof(uint), GFP_KERNEL);
110 if (vi->xattr_shared_xattrs == NULL) {
111 xattr_iter_end(&it, atomic_map);
112 ret = -ENOMEM;
113 goto out_unlock;
114 }
115
116 /* let's skip ibody header */
117 it.ofs += sizeof(struct erofs_xattr_ibody_header);
118
119 for (i = 0; i < vi->xattr_shared_count; ++i) {
120 if (unlikely(it.ofs >= EROFS_BLKSIZ)) {
121 /* cannot be unaligned */
122 BUG_ON(it.ofs != EROFS_BLKSIZ);
123 xattr_iter_end(&it, atomic_map);
124
125 it.page = erofs_get_meta_page(sb,
126 ++it.blkaddr, S_ISDIR(inode->i_mode));
127 if (IS_ERR(it.page)) {
128 kfree(vi->xattr_shared_xattrs);
129 vi->xattr_shared_xattrs = NULL;
130 ret = PTR_ERR(it.page);
131 goto out_unlock;
132 }
133
134 it.kaddr = kmap_atomic(it.page);
135 atomic_map = true;
136 it.ofs = 0;
137 }
138 vi->xattr_shared_xattrs[i] =
139 le32_to_cpu(*(__le32 *)(it.kaddr + it.ofs));
140 it.ofs += sizeof(__le32);
141 }
142 xattr_iter_end(&it, atomic_map);
143
144 set_bit(EROFS_V_EA_INITED_BIT, &vi->flags);
145
146out_unlock:
147 clear_and_wake_up_bit(EROFS_V_BL_XATTR_BIT, &vi->flags);
148 return ret;
149}
150
/*
 * the general idea for these return values is
 * if 0 is returned, go on processing the current xattr;
 * 1 (> 0) is returned, skip this round to process the next xattr;
 * -err (< 0) is returned, an error (maybe ENOATTR) occurred
 * and need to be handled
 */
struct xattr_iter_handlers {
	/* inspect one entry header; may reject the whole entry */
	int (*entry)(struct xattr_iter *_it, struct erofs_xattr_entry *entry);
	/* consume one in-page slice of the entry name */
	int (*name)(struct xattr_iter *_it, unsigned int processed, char *buf,
		    unsigned int len);
	/* prepare for a value of 'value_sz' bytes; > 0 return skips the value */
	int (*alloc_buffer)(struct xattr_iter *_it, unsigned int value_sz);
	/* consume one in-page slice of the value (unused when value is skipped) */
	void (*value)(struct xattr_iter *_it, unsigned int processed, char *buf,
		      unsigned int len);
};
166
/*
 * If 'ofs' has run past the currently mapped block, advance the iterator
 * to the block that actually contains it and remap (atomically).
 * On mapping failure it->page is cleared so a later
 * xattr_iter_end_final() stays safe.
 */
static inline int xattr_iter_fixup(struct xattr_iter *it)
{
	if (it->ofs < EROFS_BLKSIZ)
		return 0;	/* still inside the mapped block, nothing to do */

	/* release the old mapping before switching blocks */
	xattr_iter_end(it, true);

	it->blkaddr += erofs_blknr(it->ofs);

	it->page = erofs_get_meta_page(it->sb, it->blkaddr, false);
	if (IS_ERR(it->page)) {
		int err = PTR_ERR(it->page);

		it->page = NULL;
		return err;
	}

	it->kaddr = kmap_atomic(it->page);
	it->ofs = erofs_blkoff(it->ofs);
	return 0;
}
188
/*
 * Position the iterator at the first inline xattr entry of @inode (just
 * past the ibody header) and map its page atomically.
 *
 * Returns the number of inline xattr bytes available on success,
 * -ENOATTR when the inode has no inline xattrs, or a negative errno.
 */
static int inline_xattr_iter_begin(struct xattr_iter *it,
	struct inode *inode)
{
	struct erofs_vnode *const vi = EROFS_V(inode);
	struct erofs_sb_info *const sbi = EROFS_SB(inode->i_sb);
	unsigned int xattr_header_sz, inline_xattr_ofs;

	xattr_header_sz = inlinexattr_header_size(inode);
	if (unlikely(xattr_header_sz >= vi->xattr_isize)) {
		/* no room for any entry beyond the header */
		BUG_ON(xattr_header_sz > vi->xattr_isize);
		return -ENOATTR;
	}

	/* inline xattrs immediately follow the on-disk inode */
	inline_xattr_ofs = vi->inode_isize + xattr_header_sz;

	it->blkaddr = erofs_blknr(iloc(sbi, vi->nid) + inline_xattr_ofs);
	it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs);

	it->page = erofs_get_inline_page(inode, it->blkaddr);
	if (IS_ERR(it->page))
		return PTR_ERR(it->page);

	it->kaddr = kmap_atomic(it->page);
	return vi->xattr_isize - xattr_header_sz;
}
214
/*
 * Regardless of success or failure, `xattr_foreach' will end up with
 * `ofs' pointing to the next xattr item rather than an arbitrary position.
 *
 * Processes exactly one xattr entry at the iterator's current position,
 * feeding its header, name slices and value slices to @op.  When @tlimit
 * is non-NULL it tracks the remaining inline xattr bytes and is
 * decremented by the full entry size.
 */
static int xattr_foreach(struct xattr_iter *it,
	const struct xattr_iter_handlers *op, unsigned int *tlimit)
{
	struct erofs_xattr_entry entry;
	unsigned int value_sz, processed, slice;
	int err;

	/* 0. fixup blkaddr, ofs, ipage */
	err = xattr_iter_fixup(it);
	if (err)
		return err;

	/*
	 * 1. read xattr entry to the memory,
	 * since we do EROFS_XATTR_ALIGN
	 * therefore entry should be in the page
	 */
	entry = *(struct erofs_xattr_entry *)(it->kaddr + it->ofs);
	if (tlimit != NULL) {
		unsigned int entry_sz = EROFS_XATTR_ENTRY_SIZE(&entry);

		/* an on-disk entry must never overrun the inline area */
		BUG_ON(*tlimit < entry_sz);
		*tlimit -= entry_sz;
	}

	it->ofs += sizeof(struct erofs_xattr_entry);
	value_sz = le16_to_cpu(entry.e_value_size);

	/* handle entry */
	err = op->entry(it, &entry);
	if (err) {
		/* skip past name + value so 'ofs' lands on the next entry */
		it->ofs += entry.e_name_len + value_sz;
		goto out;
	}

	/* 2. handle xattr name (ofs will finally be at the end of name) */
	processed = 0;

	while (processed < entry.e_name_len) {
		if (it->ofs >= EROFS_BLKSIZ) {
			BUG_ON(it->ofs > EROFS_BLKSIZ);

			err = xattr_iter_fixup(it);
			if (err)
				goto out;
			it->ofs = 0;
		}

		/* NOTE(review): assumes EROFS_BLKSIZ == PAGE_SIZE — confirm */
		slice = min_t(unsigned int, PAGE_SIZE - it->ofs,
			entry.e_name_len - processed);

		/* handle name */
		err = op->name(it, processed, it->kaddr + it->ofs, slice);
		if (err) {
			/* name rejected: skip the rest of name plus the value */
			it->ofs += entry.e_name_len - processed + value_sz;
			goto out;
		}

		it->ofs += slice;
		processed += slice;
	}

	/* 3. handle xattr value */
	processed = 0;

	if (op->alloc_buffer != NULL) {
		/* a positive return means the value is deliberately skipped */
		err = op->alloc_buffer(it, value_sz);
		if (err) {
			it->ofs += value_sz;
			goto out;
		}
	}

	while (processed < value_sz) {
		if (it->ofs >= EROFS_BLKSIZ) {
			BUG_ON(it->ofs > EROFS_BLKSIZ);

			err = xattr_iter_fixup(it);
			if (err)
				goto out;
			it->ofs = 0;
		}

		slice = min_t(unsigned int, PAGE_SIZE - it->ofs,
			value_sz - processed);
		op->value(it, processed, it->kaddr + it->ofs, slice);
		it->ofs += slice;
		processed += slice;
	}

out:
	/* xattrs should be 4-byte aligned (on-disk constraint) */
	it->ofs = EROFS_XATTR_ALIGN(it->ofs);
	return err < 0 ? err : 0;
}
314
/* per-call state of erofs_getxattr(), embedding the page cursor */
struct getxattr_iter {
	struct xattr_iter it;

	char *buffer;		/* destination; NULL means "probe size only" */
	int buffer_size, index;	/* capacity in / value size out; name index */
	struct qstr name;	/* xattr name to look up */
};
322
323static int xattr_entrymatch(struct xattr_iter *_it,
324 struct erofs_xattr_entry *entry)
325{
326 struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
327
328 return (it->index != entry->e_name_index ||
329 it->name.len != entry->e_name_len) ? -ENOATTR : 0;
330}
331
332static int xattr_namematch(struct xattr_iter *_it,
333 unsigned int processed, char *buf, unsigned int len)
334{
335 struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
336
337 return memcmp(buf, it->name.name + processed, len) ? -ENOATTR : 0;
338}
339
340static int xattr_checkbuffer(struct xattr_iter *_it,
341 unsigned int value_sz)
342{
343 struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
344 int err = it->buffer_size < value_sz ? -ERANGE : 0;
345
346 it->buffer_size = value_sz;
347 return it->buffer == NULL ? 1 : err;
348}
349
350static void xattr_copyvalue(struct xattr_iter *_it,
351 unsigned int processed, char *buf, unsigned int len)
352{
353 struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
354
355 memcpy(it->buffer + processed, buf, len);
356}
357
/* callbacks matching one entry against the requested name, copying its value */
static const struct xattr_iter_handlers find_xattr_handlers = {
	.entry = xattr_entrymatch,
	.name = xattr_namematch,
	.alloc_buffer = xattr_checkbuffer,
	.value = xattr_copyvalue
};
364
365static int inline_getxattr(struct inode *inode, struct getxattr_iter *it)
366{
367 int ret;
368 unsigned int remaining;
369
370 ret = inline_xattr_iter_begin(&it->it, inode);
371 if (ret < 0)
372 return ret;
373
374 remaining = ret;
375 while (remaining) {
376 ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining);
377 if (ret != -ENOATTR)
378 break;
379 }
380 xattr_iter_end_final(&it->it);
381
382 return ret ? ret : it->buffer_size;
383}
384
/*
 * Look up the xattr in the shared xattr area.  Consecutive shared xattr
 * ids often resolve to the same meta block, so the current mapping is
 * reused until a different block address is met.
 */
static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
{
	struct erofs_vnode *const vi = EROFS_V(inode);
	struct super_block *const sb = inode->i_sb;
	struct erofs_sb_info *const sbi = EROFS_SB(sb);
	unsigned int i;
	int ret = -ENOATTR;

	for (i = 0; i < vi->xattr_shared_count; ++i) {
		erofs_blk_t blkaddr =
			xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);

		it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);

		/* switch meta pages only when entering a different block */
		if (!i || blkaddr != it->it.blkaddr) {
			if (i)
				xattr_iter_end(&it->it, true);

			it->it.page = erofs_get_meta_page(sb, blkaddr, false);
			if (IS_ERR(it->it.page))
				return PTR_ERR(it->it.page);

			it->it.kaddr = kmap_atomic(it->it.page);
			it->it.blkaddr = blkaddr;
		}

		/* 0 means matched; -ENOATTR means try the next shared xattr */
		ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL);
		if (ret != -ENOATTR)
			break;
	}
	if (vi->xattr_shared_count)
		xattr_iter_end_final(&it->it);

	return ret ? ret : it->buffer_size;
}
420
421static bool erofs_xattr_user_list(struct dentry *dentry)
422{
423 return test_opt(EROFS_SB(dentry->d_sb), XATTR_USER);
424}
425
/* "trusted." xattrs are only listed for CAP_SYS_ADMIN-capable callers */
static bool erofs_xattr_trusted_list(struct dentry *dentry)
{
	return capable(CAP_SYS_ADMIN);
}
430
431int erofs_getxattr(struct inode *inode, int index,
432 const char *name,
433 void *buffer, size_t buffer_size)
434{
435 int ret;
436 struct getxattr_iter it;
437
438 if (unlikely(name == NULL))
439 return -EINVAL;
440
441 ret = init_inode_xattrs(inode);
442 if (ret)
443 return ret;
444
445 it.index = index;
446
447 it.name.len = strlen(name);
448 if (it.name.len > EROFS_NAME_LEN)
449 return -ERANGE;
450 it.name.name = name;
451
452 it.buffer = buffer;
453 it.buffer_size = buffer_size;
454
455 it.it.sb = inode->i_sb;
456 ret = inline_getxattr(inode, &it);
457 if (ret == -ENOATTR)
458 ret = shared_getxattr(inode, &it);
459 return ret;
460}
461
462static int erofs_xattr_generic_get(const struct xattr_handler *handler,
463 struct dentry *unused, struct inode *inode,
464 const char *name, void *buffer, size_t size)
465{
466 struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
467
468 switch (handler->flags) {
469 case EROFS_XATTR_INDEX_USER:
470 if (!test_opt(sbi, XATTR_USER))
471 return -EOPNOTSUPP;
472 break;
473 case EROFS_XATTR_INDEX_TRUSTED:
474 if (!capable(CAP_SYS_ADMIN))
475 return -EPERM;
476 break;
477 case EROFS_XATTR_INDEX_SECURITY:
478 break;
479 default:
480 return -EINVAL;
481 }
482
483 return erofs_getxattr(inode, handler->flags, name, buffer, size);
484}
485
/* "user." namespace, gated by the XATTR_USER mount option */
const struct xattr_handler erofs_xattr_user_handler = {
	.prefix = XATTR_USER_PREFIX,
	.flags = EROFS_XATTR_INDEX_USER,
	.list = erofs_xattr_user_list,
	.get = erofs_xattr_generic_get,
};
492
/* "trusted." namespace, restricted to CAP_SYS_ADMIN */
const struct xattr_handler erofs_xattr_trusted_handler = {
	.prefix = XATTR_TRUSTED_PREFIX,
	.flags = EROFS_XATTR_INDEX_TRUSTED,
	.list = erofs_xattr_trusted_list,
	.get = erofs_xattr_generic_get,
};
499
#ifdef CONFIG_EROFS_FS_SECURITY
/* "security." namespace; no extra gating at this layer */
const struct xattr_handler __maybe_unused erofs_xattr_security_handler = {
	.prefix = XATTR_SECURITY_PREFIX,
	.flags = EROFS_XATTR_INDEX_SECURITY,
	.get = erofs_xattr_generic_get,
};
#endif
507
/* all xattr handlers supported by erofs; NULL-terminated */
const struct xattr_handler *erofs_xattr_handlers[] = {
	&erofs_xattr_user_handler,
#ifdef CONFIG_EROFS_FS_POSIX_ACL
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
#endif
	&erofs_xattr_trusted_handler,
#ifdef CONFIG_EROFS_FS_SECURITY
	&erofs_xattr_security_handler,
#endif
	NULL,
};
520
/* per-call state of erofs_listxattr(), embedding the page cursor */
struct listxattr_iter {
	struct xattr_iter it;

	struct dentry *dentry;
	char *buffer;			/* NULL means "compute required size only" */
	int buffer_size, buffer_ofs;	/* capacity / bytes emitted so far */
};
528
529static int xattr_entrylist(struct xattr_iter *_it,
530 struct erofs_xattr_entry *entry)
531{
532 struct listxattr_iter *it =
533 container_of(_it, struct listxattr_iter, it);
534 unsigned int prefix_len;
535 const char *prefix;
536
537 const struct xattr_handler *h =
538 erofs_xattr_handler(entry->e_name_index);
539
540 if (h == NULL || (h->list != NULL && !h->list(it->dentry)))
541 return 1;
542
543 prefix = xattr_prefix(h);
544 prefix_len = strlen(prefix);
545
546 if (it->buffer == NULL) {
547 it->buffer_ofs += prefix_len + entry->e_name_len + 1;
548 return 1;
549 }
550
551 if (it->buffer_ofs + prefix_len
552 + entry->e_name_len + 1 > it->buffer_size)
553 return -ERANGE;
554
555 memcpy(it->buffer + it->buffer_ofs, prefix, prefix_len);
556 it->buffer_ofs += prefix_len;
557 return 0;
558}
559
560static int xattr_namelist(struct xattr_iter *_it,
561 unsigned int processed, char *buf, unsigned int len)
562{
563 struct listxattr_iter *it =
564 container_of(_it, struct listxattr_iter, it);
565
566 memcpy(it->buffer + it->buffer_ofs, buf, len);
567 it->buffer_ofs += len;
568 return 0;
569}
570
571static int xattr_skipvalue(struct xattr_iter *_it,
572 unsigned int value_sz)
573{
574 struct listxattr_iter *it =
575 container_of(_it, struct listxattr_iter, it);
576
577 it->buffer[it->buffer_ofs++] = '\0';
578 return 1;
579}
580
/* callbacks building the listxattr output; values are always skipped */
static const struct xattr_iter_handlers list_xattr_handlers = {
	.entry = xattr_entrylist,
	.name = xattr_namelist,
	.alloc_buffer = xattr_skipvalue,
	.value = NULL
};
587
588static int inline_listxattr(struct listxattr_iter *it)
589{
590 int ret;
591 unsigned int remaining;
592
593 ret = inline_xattr_iter_begin(&it->it, d_inode(it->dentry));
594 if (ret < 0)
595 return ret;
596
597 remaining = ret;
598 while (remaining) {
599 ret = xattr_foreach(&it->it, &list_xattr_handlers, &remaining);
600 if (ret)
601 break;
602 }
603 xattr_iter_end_final(&it->it);
604 return ret ? ret : it->buffer_ofs;
605}
606
/*
 * List every xattr in the shared xattr area, reusing the current meta
 * page mapping while consecutive shared xattro ids stay within the same
 * block (mirrors shared_getxattr()).
 */
static int shared_listxattr(struct listxattr_iter *it)
{
	struct inode *const inode = d_inode(it->dentry);
	struct erofs_vnode *const vi = EROFS_V(inode);
	struct super_block *const sb = inode->i_sb;
	struct erofs_sb_info *const sbi = EROFS_SB(sb);
	unsigned int i;
	int ret = 0;

	for (i = 0; i < vi->xattr_shared_count; ++i) {
		erofs_blk_t blkaddr =
			xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);

		it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);
		/* switch meta pages only when entering a different block */
		if (!i || blkaddr != it->it.blkaddr) {
			if (i)
				xattr_iter_end(&it->it, true);

			it->it.page = erofs_get_meta_page(sb, blkaddr, false);
			if (IS_ERR(it->it.page))
				return PTR_ERR(it->it.page);

			it->it.kaddr = kmap_atomic(it->it.page);
			it->it.blkaddr = blkaddr;
		}

		ret = xattr_foreach(&it->it, &list_xattr_handlers, NULL);
		if (ret)
			break;
	}
	if (vi->xattr_shared_count)
		xattr_iter_end_final(&it->it);

	return ret ? ret : it->buffer_ofs;
}
642
643ssize_t erofs_listxattr(struct dentry *dentry,
644 char *buffer, size_t buffer_size)
645{
646 int ret;
647 struct listxattr_iter it;
648
649 ret = init_inode_xattrs(d_inode(dentry));
650 if (ret)
651 return ret;
652
653 it.dentry = dentry;
654 it.buffer = buffer;
655 it.buffer_size = buffer_size;
656 it.buffer_ofs = 0;
657
658 it.it.sb = dentry->d_sb;
659
660 ret = inline_listxattr(&it);
661 if (ret < 0 && ret != -ENOATTR)
662 return ret;
663 return shared_listxattr(&it);
664}
665
#ifdef CONFIG_EROFS_FS_POSIX_ACL
/*
 * Read a POSIX ACL (access or default) stored as an unnamed xattr:
 * probe the size first, then fetch and decode the raw value.
 */
struct posix_acl *erofs_get_acl(struct inode *inode, int type)
{
	struct posix_acl *acl;
	char *value = NULL;
	int prefix, rc;

	if (type == ACL_TYPE_ACCESS)
		prefix = EROFS_XATTR_INDEX_POSIX_ACL_ACCESS;
	else if (type == ACL_TYPE_DEFAULT)
		prefix = EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT;
	else
		return ERR_PTR(-EINVAL);

	/* first call probes the value size, second call reads it */
	rc = erofs_getxattr(inode, prefix, "", NULL, 0);
	if (rc > 0) {
		value = kmalloc(rc, GFP_KERNEL);
		if (value == NULL)
			return ERR_PTR(-ENOMEM);
		rc = erofs_getxattr(inode, prefix, "", value, rc);
	}

	if (rc == -ENOATTR) {
		acl = NULL;	/* no ACL stored at all */
	} else if (rc < 0) {
		acl = ERR_PTR(rc);
	} else {
		acl = posix_acl_from_xattr(&init_user_ns, value, rc);
	}

	kfree(value);
	return acl;
}
#endif
702