Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

landlock: Fix same-layer rule unions

The original behavior was to check if the full set of requested accesses
was allowed by at least one rule of every relevant layer. This didn't
take into account requests for multiple accesses and same-layer rules
allowing the union of these accesses in a complementary way. As a
result, multiple accesses requested on a file hierarchy matching rules
that, together, allowed these accesses, but without a unique rule
allowing all of them, were illegitimately denied. This case should be
rare in practice and it can only be triggered by the path_rename or
file_open hook implementations.

For instance, if, for the same layer, a rule allows execution
beneath /a/b and another rule allows read beneath /a, requesting access
to read and execute at the same time for /a/b should be allowed for this
layer.

This was an inconsistency because the union of same-layer rule accesses
was already allowed if requested once at a time anyway.

This fix changes the way allowed accesses are gathered over a path walk.
To take into account all these rule accesses, we store in a matrix all
layers granting the set of requested accesses, according to the handled
accesses. To avoid heap allocation, we use an array on the stack which
is 2*13 bytes. A following commit bringing the LANDLOCK_ACCESS_FS_REFER
access right will increase this size to reach 112 bytes (2*14*4) in case
of link or rename actions.

Add a new layout1.layer_rule_unions test to check that accesses from
different rules pertaining to the same layer are ORed in a file
hierarchy. Also test that it is not the case for rules from different
layers.

Reviewed-by: Paul Moore <paul@paul-moore.com>
Link: https://lore.kernel.org/r/20220506161102.525323-5-mic@digikod.net
Cc: stable@vger.kernel.org
Signed-off-by: Mickaël Salaün <mic@digikod.net>

+161 -26
+52 -26
security/landlock/fs.c
··· 207 207 return rule; 208 208 } 209 209 210 - static inline layer_mask_t unmask_layers(const struct landlock_rule *const rule, 211 - const access_mask_t access_request, 212 - layer_mask_t layer_mask) 210 + /* 211 + * @layer_masks is read and may be updated according to the access request and 212 + * the matching rule. 213 + * 214 + * Returns true if the request is allowed (i.e. relevant layer masks for the 215 + * request are empty). 216 + */ 217 + static inline bool 218 + unmask_layers(const struct landlock_rule *const rule, 219 + const access_mask_t access_request, 220 + layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS]) 213 221 { 214 222 size_t layer_level; 215 223 224 + if (!access_request || !layer_masks) 225 + return true; 216 226 if (!rule) 217 - return layer_mask; 227 + return false; 218 228 219 229 /* 220 230 * An access is granted if, for each policy layer, at least one rule 221 - * encountered on the pathwalk grants the requested accesses, 222 - * regardless of their position in the layer stack. We must then check 231 + * encountered on the pathwalk grants the requested access, 232 + * regardless of its position in the layer stack. We must then check 223 233 * the remaining layers for each inode, from the first added layer to 224 - * the last one. 234 + * the last one. When there is multiple requested accesses, for each 235 + * policy layer, the full set of requested accesses may not be granted 236 + * by only one rule, but by the union (binary OR) of multiple rules. 237 + * E.g. 
/a/b <execute> + /a <read> => /a/b <execute + read> 225 238 */ 226 239 for (layer_level = 0; layer_level < rule->num_layers; layer_level++) { 227 240 const struct landlock_layer *const layer = 228 241 &rule->layers[layer_level]; 229 242 const layer_mask_t layer_bit = BIT_ULL(layer->level - 1); 243 + const unsigned long access_req = access_request; 244 + unsigned long access_bit; 245 + bool is_empty; 230 246 231 - /* Checks that the layer grants access to the full request. */ 232 - if ((layer->access & access_request) == access_request) { 233 - layer_mask &= ~layer_bit; 234 - 235 - if (layer_mask == 0) 236 - return layer_mask; 247 + /* 248 + * Records in @layer_masks which layer grants access to each 249 + * requested access. 250 + */ 251 + is_empty = true; 252 + for_each_set_bit(access_bit, &access_req, 253 + ARRAY_SIZE(*layer_masks)) { 254 + if (layer->access & BIT_ULL(access_bit)) 255 + (*layer_masks)[access_bit] &= ~layer_bit; 256 + is_empty = is_empty && !(*layer_masks)[access_bit]; 237 257 } 258 + if (is_empty) 259 + return true; 238 260 } 239 - return layer_mask; 261 + return false; 240 262 } 241 263 242 264 static int check_access_path(const struct landlock_ruleset *const domain, 243 265 const struct path *const path, 244 266 const access_mask_t access_request) 245 267 { 246 - bool allowed = false; 268 + layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {}; 269 + bool allowed = false, has_access = false; 247 270 struct path walker_path; 248 - layer_mask_t layer_mask; 249 271 size_t i; 250 272 251 273 if (!access_request) ··· 287 265 return -EACCES; 288 266 289 267 /* Saves all layers handling a subset of requested accesses. 
*/ 290 - layer_mask = 0; 291 268 for (i = 0; i < domain->num_layers; i++) { 292 - if (domain->fs_access_masks[i] & access_request) 293 - layer_mask |= BIT_ULL(i); 269 + const unsigned long access_req = access_request; 270 + unsigned long access_bit; 271 + 272 + for_each_set_bit(access_bit, &access_req, 273 + ARRAY_SIZE(layer_masks)) { 274 + if (domain->fs_access_masks[i] & BIT_ULL(access_bit)) { 275 + layer_masks[access_bit] |= BIT_ULL(i); 276 + has_access = true; 277 + } 278 + } 294 279 } 295 280 /* An access request not handled by the domain is allowed. */ 296 - if (layer_mask == 0) 281 + if (!has_access) 297 282 return 0; 298 283 299 284 walker_path = *path; ··· 312 283 while (true) { 313 284 struct dentry *parent_dentry; 314 285 315 - layer_mask = 316 - unmask_layers(find_rule(domain, walker_path.dentry), 317 - access_request, layer_mask); 318 - if (layer_mask == 0) { 286 + allowed = unmask_layers(find_rule(domain, walker_path.dentry), 287 + access_request, &layer_masks); 288 + if (allowed) 319 289 /* Stops when a rule from each layer grants access. */ 320 - allowed = true; 321 290 break; 322 - } 323 291 324 292 jump_up: 325 293 if (walker_path.dentry == walker_path.mnt->mnt_root) {
+2
security/landlock/ruleset.h
··· 22 22 typedef u16 access_mask_t; 23 23 /* Makes sure all filesystem access rights can be stored. */ 24 24 static_assert(BITS_PER_TYPE(access_mask_t) >= LANDLOCK_NUM_ACCESS_FS); 25 + /* Makes sure for_each_set_bit() and for_each_clear_bit() calls are OK. */ 26 + static_assert(sizeof(unsigned long) >= sizeof(access_mask_t)); 25 27 26 28 typedef u16 layer_mask_t; 27 29 /* Makes sure all layers can be checked. */
+107
tools/testing/selftests/landlock/fs_test.c
··· 758 758 ASSERT_EQ(0, test_open(dir_s1d3, O_RDONLY | O_DIRECTORY)); 759 759 } 760 760 761 + TEST_F_FORK(layout1, layer_rule_unions) 762 + { 763 + const struct rule layer1[] = { 764 + { 765 + .path = dir_s1d2, 766 + .access = LANDLOCK_ACCESS_FS_READ_FILE, 767 + }, 768 + /* dir_s1d3 should allow READ_FILE and WRITE_FILE (O_RDWR). */ 769 + { 770 + .path = dir_s1d3, 771 + .access = LANDLOCK_ACCESS_FS_WRITE_FILE, 772 + }, 773 + {}, 774 + }; 775 + const struct rule layer2[] = { 776 + /* Doesn't change anything from layer1. */ 777 + { 778 + .path = dir_s1d2, 779 + .access = LANDLOCK_ACCESS_FS_READ_FILE | 780 + LANDLOCK_ACCESS_FS_WRITE_FILE, 781 + }, 782 + {}, 783 + }; 784 + const struct rule layer3[] = { 785 + /* Only allows write (but not read) to dir_s1d3. */ 786 + { 787 + .path = dir_s1d2, 788 + .access = LANDLOCK_ACCESS_FS_WRITE_FILE, 789 + }, 790 + {}, 791 + }; 792 + int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer1); 793 + 794 + ASSERT_LE(0, ruleset_fd); 795 + enforce_ruleset(_metadata, ruleset_fd); 796 + ASSERT_EQ(0, close(ruleset_fd)); 797 + 798 + /* Checks s1d1 hierarchy with layer1. */ 799 + ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDONLY)); 800 + ASSERT_EQ(EACCES, test_open(file1_s1d1, O_WRONLY)); 801 + ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDWR)); 802 + ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY)); 803 + 804 + /* Checks s1d2 hierarchy with layer1. */ 805 + ASSERT_EQ(0, test_open(file1_s1d2, O_RDONLY)); 806 + ASSERT_EQ(EACCES, test_open(file1_s1d2, O_WRONLY)); 807 + ASSERT_EQ(EACCES, test_open(file1_s1d2, O_RDWR)); 808 + ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY)); 809 + 810 + /* Checks s1d3 hierarchy with layer1. */ 811 + ASSERT_EQ(0, test_open(file1_s1d3, O_RDONLY)); 812 + ASSERT_EQ(0, test_open(file1_s1d3, O_WRONLY)); 813 + /* dir_s1d3 should allow READ_FILE and WRITE_FILE (O_RDWR). 
*/ 814 + ASSERT_EQ(0, test_open(file1_s1d3, O_RDWR)); 815 + ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY)); 816 + 817 + /* Doesn't change anything from layer1. */ 818 + ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer2); 819 + ASSERT_LE(0, ruleset_fd); 820 + enforce_ruleset(_metadata, ruleset_fd); 821 + ASSERT_EQ(0, close(ruleset_fd)); 822 + 823 + /* Checks s1d1 hierarchy with layer2. */ 824 + ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDONLY)); 825 + ASSERT_EQ(EACCES, test_open(file1_s1d1, O_WRONLY)); 826 + ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDWR)); 827 + ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY)); 828 + 829 + /* Checks s1d2 hierarchy with layer2. */ 830 + ASSERT_EQ(0, test_open(file1_s1d2, O_RDONLY)); 831 + ASSERT_EQ(EACCES, test_open(file1_s1d2, O_WRONLY)); 832 + ASSERT_EQ(EACCES, test_open(file1_s1d2, O_RDWR)); 833 + ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY)); 834 + 835 + /* Checks s1d3 hierarchy with layer2. */ 836 + ASSERT_EQ(0, test_open(file1_s1d3, O_RDONLY)); 837 + ASSERT_EQ(0, test_open(file1_s1d3, O_WRONLY)); 838 + /* dir_s1d3 should allow READ_FILE and WRITE_FILE (O_RDWR). */ 839 + ASSERT_EQ(0, test_open(file1_s1d3, O_RDWR)); 840 + ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY)); 841 + 842 + /* Only allows write (but not read) to dir_s1d3. */ 843 + ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer3); 844 + ASSERT_LE(0, ruleset_fd); 845 + enforce_ruleset(_metadata, ruleset_fd); 846 + ASSERT_EQ(0, close(ruleset_fd)); 847 + 848 + /* Checks s1d1 hierarchy with layer3. */ 849 + ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDONLY)); 850 + ASSERT_EQ(EACCES, test_open(file1_s1d1, O_WRONLY)); 851 + ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDWR)); 852 + ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY)); 853 + 854 + /* Checks s1d2 hierarchy with layer3. 
*/ 855 + ASSERT_EQ(EACCES, test_open(file1_s1d2, O_RDONLY)); 856 + ASSERT_EQ(EACCES, test_open(file1_s1d2, O_WRONLY)); 857 + ASSERT_EQ(EACCES, test_open(file1_s1d2, O_RDWR)); 858 + ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY)); 859 + 860 + /* Checks s1d3 hierarchy with layer3. */ 861 + ASSERT_EQ(EACCES, test_open(file1_s1d3, O_RDONLY)); 862 + ASSERT_EQ(0, test_open(file1_s1d3, O_WRONLY)); 863 + /* dir_s1d3 should now deny READ_FILE and WRITE_FILE (O_RDWR). */ 864 + ASSERT_EQ(EACCES, test_open(file1_s1d3, O_RDWR)); 865 + ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY)); 866 + } 867 + 761 868 TEST_F_FORK(layout1, non_overlapping_accesses) 762 869 { 763 870 const struct rule layer1[] = {