// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hfsplus/bnode.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle basic btree node operations
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/fs.h>
#include <linux/swap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"


/* Copy a specified range of bytes from the raw data of a node */
void hfs_bnode_read(struct hfs_bnode *node, void *buf, u32 off, u32 len)
{
	struct page **pagep;
	u32 l;

	if (!is_bnode_offset_valid(node, off))
		return;

	if (len == 0) {
		pr_err("requested zero length: "
		       "NODE: id %u, type %#x, height %u, "
		       "node_size %u, offset %u, len %u\n",
		       node->this, node->type, node->height,
		       node->tree->node_size, off, len);
		return;
	}

	len = check_and_correct_requested_length(node, off, len);

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;

	l = min_t(u32, len, PAGE_SIZE - off);
	memcpy_from_page(buf, *pagep, off, l);

	while ((len -= l) != 0) {
		buf += l;
		l = min_t(u32, len, PAGE_SIZE);
		memcpy_from_page(buf, *++pagep, 0, l);
	}
}

u16 hfs_bnode_read_u16(struct hfs_bnode *node, u32 off)
{
	__be16 data;
	/* TODO: optimize later... */
	hfs_bnode_read(node, &data, off, 2);
	return be16_to_cpu(data);
}

u8 hfs_bnode_read_u8(struct hfs_bnode *node, u32 off)
{
	u8 data;
	/* TODO: optimize later... */
	hfs_bnode_read(node, &data, off, 1);
	return data;
}

void hfs_bnode_read_key(struct hfs_bnode *node, void *key, u32 off)
{
	struct hfs_btree *tree;
	u32 key_len;

	tree = node->tree;
	if (node->type == HFS_NODE_LEAF ||
	    tree->attributes & HFS_TREE_VARIDXKEYS ||
	    node->tree->cnid == HFSPLUS_ATTR_CNID)
		key_len = hfs_bnode_read_u16(node, off) + 2;
	else
		key_len = tree->max_key_len + 2;

	if (key_len > sizeof(hfsplus_btree_key) || key_len < 1) {
		memset(key, 0, sizeof(hfsplus_btree_key));
		pr_err("hfsplus: Invalid key length: %u\n", key_len);
		return;
	}

	hfs_bnode_read(node, key, off, key_len);
}

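/* Copy a specified range of bytes into the raw data of a node */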
void hfs_bnode_write(struct hfs_bnode *node, void *buf, u32 off, u32 len)
{
	struct page **pagep;
	u32 l;

	if (!is_bnode_offset_valid(node, off))
		return;

	if (len == 0) {
		pr_err("requested zero length: "
		       "NODE: id %u, type %#x, height %u, "
		       "node_size %u, offset %u, len %u\n",
		       node->this, node->type, node->height,
		       node->tree->node_size, off, len);
		return;
	}

	len = check_and_correct_requested_length(node, off, len);

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;

	l = min_t(u32, len, PAGE_SIZE - off);
	memcpy_to_page(*pagep, off, buf, l);
	set_page_dirty(*pagep);

	while ((len -= l) != 0) {
		buf += l;
		l = min_t(u32, len, PAGE_SIZE);
		memcpy_to_page(*++pagep, 0, buf, l);
		set_page_dirty(*pagep);
	}
}

void hfs_bnode_write_u16(struct hfs_bnode *node, u32 off, u16 data)
{
	__be16 v = cpu_to_be16(data);
	/* TODO: optimize later... */
	hfs_bnode_write(node, &v, off, 2);
}

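/* Zero a specified range of bytes in the raw data of a node */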
void hfs_bnode_clear(struct hfs_bnode *node, u32 off, u32 len)
{
	struct page **pagep;
	u32 l;

	if (!is_bnode_offset_valid(node, off))
		return;

	if (len == 0) {
		pr_err("requested zero length: "
		       "NODE: id %u, type %#x, height %u, "
		       "node_size %u, offset %u, len %u\n",
		       node->this, node->type, node->height,
		       node->tree->node_size, off, len);
		return;
	}

	len = check_and_correct_requested_length(node, off, len);

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;

	l = min_t(u32, len, PAGE_SIZE - off);
	memzero_page(*pagep, off, l);
	set_page_dirty(*pagep);

	while ((len -= l) != 0) {
		l = min_t(u32, len, PAGE_SIZE);
		memzero_page(*++pagep, 0, l);
		set_page_dirty(*pagep);
	}
}

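/* Copy a range of bytes from the raw data of one node to another */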
void hfs_bnode_copy(struct hfs_bnode *dst_node, u32 dst,
		    struct hfs_bnode *src_node, u32 src, u32 len)
{
	struct page **src_page, **dst_page;
	u32 l;

	hfs_dbg("dst %u, src %u, len %u\n", dst, src, len);
	if (!len)
		return;

	len = check_and_correct_requested_length(src_node, src, len);
	len = check_and_correct_requested_length(dst_node, dst, len);

	src += src_node->page_offset;
	dst += dst_node->page_offset;
	src_page = src_node->page + (src >> PAGE_SHIFT);
	src &= ~PAGE_MASK;
	dst_page = dst_node->page + (dst >> PAGE_SHIFT);
	dst &= ~PAGE_MASK;

	if (src == dst) {
		l = min_t(u32, len, PAGE_SIZE - src);
		memcpy_page(*dst_page, src, *src_page, src, l);
		set_page_dirty(*dst_page);

		while ((len -= l) != 0) {
			l = min_t(u32, len, PAGE_SIZE);
			memcpy_page(*++dst_page, 0, *++src_page, 0, l);
			set_page_dirty(*dst_page);
		}
	} else {
		void *src_ptr, *dst_ptr;

		do {
			dst_ptr = kmap_local_page(*dst_page) + dst;
			src_ptr = kmap_local_page(*src_page) + src;
			if (PAGE_SIZE - src < PAGE_SIZE - dst) {
				l = PAGE_SIZE - src;
				src = 0;
				dst += l;
			} else {
				l = PAGE_SIZE - dst;
				src += l;
				dst = 0;
			}
			l = min(len, l);
			memcpy(dst_ptr, src_ptr, l);
			kunmap_local(src_ptr);
			set_page_dirty(*dst_page);
			kunmap_local(dst_ptr);
			if (!dst)
				dst_page++;
			else
				src_page++;
		} while ((len -= l));
	}
}

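/* Move a range of bytes within a node; source and destination may overlap */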
void hfs_bnode_move(struct hfs_bnode *node, u32 dst, u32 src, u32 len)
{
	struct page **src_page, **dst_page;
	void *src_ptr, *dst_ptr;
	u32 l;

	hfs_dbg("dst %u, src %u, len %u\n", dst, src, len);
	if (!len)
		return;

	len = check_and_correct_requested_length(node, src, len);
	len = check_and_correct_requested_length(node, dst, len);

	src += node->page_offset;
	dst += node->page_offset;
	if (dst > src) {
		src += len - 1;
		src_page = node->page + (src >> PAGE_SHIFT);
		src = (src & ~PAGE_MASK) + 1;
		dst += len - 1;
		dst_page = node->page + (dst >> PAGE_SHIFT);
		dst = (dst & ~PAGE_MASK) + 1;

		if (src == dst) {
			while (src < len) {
				dst_ptr = kmap_local_page(*dst_page);
				src_ptr = kmap_local_page(*src_page);
				memmove(dst_ptr, src_ptr, src);
				kunmap_local(src_ptr);
				set_page_dirty(*dst_page);
				kunmap_local(dst_ptr);
				len -= src;
				src = PAGE_SIZE;
				src_page--;
				dst_page--;
			}
			src -= len;
			dst_ptr = kmap_local_page(*dst_page);
			src_ptr = kmap_local_page(*src_page);
			memmove(dst_ptr + src, src_ptr + src, len);
			kunmap_local(src_ptr);
			set_page_dirty(*dst_page);
			kunmap_local(dst_ptr);
		} else {
			do {
				dst_ptr = kmap_local_page(*dst_page) + dst;
				src_ptr = kmap_local_page(*src_page) + src;
				if (src < dst) {
					l = src;
					src = PAGE_SIZE;
					dst -= l;
				} else {
					l = dst;
					src -= l;
					dst = PAGE_SIZE;
				}
				l = min(len, l);
				memmove(dst_ptr - l, src_ptr - l, l);
				kunmap_local(src_ptr);
				set_page_dirty(*dst_page);
				kunmap_local(dst_ptr);
				if (dst == PAGE_SIZE)
					dst_page--;
				else
					src_page--;
			} while ((len -= l));
		}
	} else {
		src_page = node->page + (src >> PAGE_SHIFT);
		src &= ~PAGE_MASK;
		dst_page = node->page + (dst >> PAGE_SHIFT);
		dst &= ~PAGE_MASK;

		if (src == dst) {
			l = min_t(u32, len, PAGE_SIZE - src);

			dst_ptr = kmap_local_page(*dst_page) + src;
			src_ptr = kmap_local_page(*src_page) + src;
			memmove(dst_ptr, src_ptr, l);
			kunmap_local(src_ptr);
			set_page_dirty(*dst_page);
			kunmap_local(dst_ptr);

			while ((len -= l) != 0) {
				l = min_t(u32, len, PAGE_SIZE);
				dst_ptr = kmap_local_page(*++dst_page);
				src_ptr = kmap_local_page(*++src_page);
				memmove(dst_ptr, src_ptr, l);
				kunmap_local(src_ptr);
				set_page_dirty(*dst_page);
				kunmap_local(dst_ptr);
			}
		} else {
			do {
				dst_ptr = kmap_local_page(*dst_page) + dst;
				src_ptr = kmap_local_page(*src_page) + src;
				if (PAGE_SIZE - src <
						PAGE_SIZE - dst) {
					l = PAGE_SIZE - src;
					src = 0;
					dst += l;
				} else {
					l = PAGE_SIZE - dst;
					src += l;
					dst = 0;
				}
				l = min(len, l);
				memmove(dst_ptr, src_ptr, l);
				kunmap_local(src_ptr);
				set_page_dirty(*dst_page);
				kunmap_local(dst_ptr);
				if (!dst)
					dst_page++;
				else
					src_page++;
			} while ((len -= l));
		}
	}
}

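/* Dump the node descriptor and record offsets for debugging */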
void hfs_bnode_dump(struct hfs_bnode *node)
{
	struct hfs_bnode_desc desc;
	__be32 cnid;
	int i, off, key_off;

	hfs_dbg("node %d\n", node->this);
	hfs_bnode_read(node, &desc, 0, sizeof(desc));
	hfs_dbg("next %d, prev %d, type %d, height %d, num_recs %d\n",
		be32_to_cpu(desc.next), be32_to_cpu(desc.prev),
		desc.type, desc.height, be16_to_cpu(desc.num_recs));

	off = node->tree->node_size - 2;
	for (i = be16_to_cpu(desc.num_recs); i >= 0; off -= 2, i--) {
		key_off = hfs_bnode_read_u16(node, off);
		hfs_dbg(" key_off %d", key_off);
		if (i && node->type == HFS_NODE_INDEX) {
			int tmp;

			if (node->tree->attributes & HFS_TREE_VARIDXKEYS ||
			    node->tree->cnid == HFSPLUS_ATTR_CNID)
				tmp = hfs_bnode_read_u16(node, key_off) + 2;
			else
				tmp = node->tree->max_key_len + 2;
			hfs_dbg(" (%d", tmp);
			hfs_bnode_read(node, &cnid, key_off + tmp, 4);
			hfs_dbg(", cnid %d)", be32_to_cpu(cnid));
		} else if (i && node->type == HFS_NODE_LEAF) {
			int tmp;

			tmp = hfs_bnode_read_u16(node, key_off);
			hfs_dbg(" (%d)", tmp);
		}
	}
	hfs_dbg("\n");
}

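/* Unlink a node from the sibling chain at its level of the tree */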
void hfs_bnode_unlink(struct hfs_bnode *node)
{
	struct hfs_btree *tree;
	struct hfs_bnode *tmp;
	__be32 cnid;

	tree = node->tree;
	if (node->prev) {
		tmp = hfs_bnode_find(tree, node->prev);
		if (IS_ERR(tmp))
			return;
		tmp->next = node->next;
		cnid = cpu_to_be32(tmp->next);
		hfs_bnode_write(tmp, &cnid,
			offsetof(struct hfs_bnode_desc, next), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_head = node->next;

	if (node->next) {
		tmp = hfs_bnode_find(tree, node->next);
		if (IS_ERR(tmp))
			return;
		tmp->prev = node->prev;
		cnid = cpu_to_be32(tmp->prev);
		hfs_bnode_write(tmp, &cnid,
			offsetof(struct hfs_bnode_desc, prev), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_tail = node->prev;

	/* move down? */
	if (!node->prev && !node->next)
		hfs_dbg("btree delete level\n");
	if (!node->parent) {
		tree->root = 0;
		tree->depth = 0;
	}
	set_bit(HFS_BNODE_DELETED, &node->flags);
}

static inline int hfs_bnode_hash(u32 num)
{
	num = (num >> 16) + num;
	num += num >> 8;
	return num & (NODE_HASH_SIZE - 1);
}

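/* Look up a node in the in-memory node hash; callers hold tree->hash_lock */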
struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
{
	struct hfs_bnode *node;

	if (cnid >= tree->node_count) {
		pr_err("request for non-existent node %d in B*Tree\n",
		       cnid);
		return NULL;
	}

	for (node = tree->node_hash[hfs_bnode_hash(cnid)];
	     node; node = node->next_hash)
		if (node->this == cnid)
			return node;
	return NULL;
}

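/* Allocate a node, insert it into the node hash and read in its backing pages */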
static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
{
	struct hfs_bnode *node, *node2;
	struct address_space *mapping;
	struct page *page;
	int size, block, i, hash;
	loff_t off;

	if (cnid >= tree->node_count) {
		pr_err("request for non-existent node %d in B*Tree\n",
		       cnid);
		return NULL;
	}

	size = sizeof(struct hfs_bnode) + tree->pages_per_bnode *
		sizeof(struct page *);
	node = kzalloc(size, GFP_KERNEL);
	if (!node)
		return NULL;
	node->tree = tree;
	node->this = cnid;
	set_bit(HFS_BNODE_NEW, &node->flags);
	atomic_set(&node->refcnt, 1);
	hfs_dbg("cnid %d, node %d, refcnt 1\n",
		node->tree->cnid, node->this);
	init_waitqueue_head(&node->lock_wq);
	spin_lock(&tree->hash_lock);
	node2 = hfs_bnode_findhash(tree, cnid);
	if (!node2) {
		hash = hfs_bnode_hash(cnid);
		node->next_hash = tree->node_hash[hash];
		tree->node_hash[hash] = node;
		tree->node_hash_cnt++;
	} else {
		hfs_bnode_get(node2);
		spin_unlock(&tree->hash_lock);
		kfree(node);
		wait_event(node2->lock_wq,
			   !test_bit(HFS_BNODE_NEW, &node2->flags));
		return node2;
	}
	spin_unlock(&tree->hash_lock);

	mapping = tree->inode->i_mapping;
	off = (loff_t)cnid << tree->node_size_shift;
	block = off >> PAGE_SHIFT;
	node->page_offset = off & ~PAGE_MASK;
	for (i = 0; i < tree->pages_per_bnode; block++, i++) {
		page = read_mapping_page(mapping, block, NULL);
		if (IS_ERR(page))
			goto fail;
		node->page[i] = page;
	}

	return node;
fail:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	return node;
}

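/* Remove a node from the in-memory node hash */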
void hfs_bnode_unhash(struct hfs_bnode *node)
{
	struct hfs_bnode **p;

	hfs_dbg("cnid %d, node %d, refcnt %d\n",
		node->tree->cnid, node->this, atomic_read(&node->refcnt));
	for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
	     *p && *p != node; p = &(*p)->next_hash)
		;
	BUG_ON(!*p);
	*p = node->next_hash;
	node->tree->node_hash_cnt--;
}

/* Load a particular node out of a tree */
struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct hfs_bnode_desc *desc;
	int i, rec_off, off, next_off;
	int entry_size, key_size;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	if (node) {
		hfs_bnode_get(node);
		spin_unlock(&tree->hash_lock);
		wait_event(node->lock_wq,
			   !test_bit(HFS_BNODE_NEW, &node->flags));
		if (test_bit(HFS_BNODE_ERROR, &node->flags))
			goto node_error;
		return node;
	}
	spin_unlock(&tree->hash_lock);
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags))
		goto node_error;
	if (!test_bit(HFS_BNODE_NEW, &node->flags))
		return node;

	desc = (struct hfs_bnode_desc *)(kmap_local_page(node->page[0]) +
					 node->page_offset);
	node->prev = be32_to_cpu(desc->prev);
	node->next = be32_to_cpu(desc->next);
	node->num_recs = be16_to_cpu(desc->num_recs);
	node->type = desc->type;
	node->height = desc->height;
	kunmap_local(desc);

	switch (node->type) {
	case HFS_NODE_HEADER:
	case HFS_NODE_MAP:
		if (node->height != 0)
			goto node_error;
		break;
	case HFS_NODE_LEAF:
		if (node->height != 1)
			goto node_error;
		break;
	case HFS_NODE_INDEX:
		if (node->height <= 1 || node->height > tree->depth)
			goto node_error;
		break;
	default:
		goto node_error;
	}

	rec_off = tree->node_size - 2;
	off = hfs_bnode_read_u16(node, rec_off);
	if (off != sizeof(struct hfs_bnode_desc))
		goto node_error;
	for (i = 1; i <= node->num_recs; off = next_off, i++) {
		rec_off -= 2;
		next_off = hfs_bnode_read_u16(node, rec_off);
		if (next_off <= off ||
		    next_off > tree->node_size ||
		    next_off & 1)
			goto node_error;
		entry_size = next_off - off;
		if (node->type != HFS_NODE_INDEX &&
		    node->type != HFS_NODE_LEAF)
			continue;
		key_size = hfs_bnode_read_u16(node, off) + 2;
		if (key_size >= entry_size || key_size & 1)
			goto node_error;
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	return node;

node_error:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	hfs_bnode_put(node);
	return ERR_PTR(-EIO);
}

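/* Release the backing pages and free the in-memory node */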
void hfs_bnode_free(struct hfs_bnode *node)
{
	int i;

	for (i = 0; i < node->tree->pages_per_bnode; i++)
		if (node->page[i])
			put_page(node->page[i]);
	kfree(node);
}

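/* Create and zero a new node that is not expected to be in the hash yet */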
struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct page **pagep;
	int i;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	spin_unlock(&tree->hash_lock);
	if (node) {
		pr_crit("new node %u already hashed?\n", num);
		WARN_ON(1);
		return node;
	}
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags)) {
		hfs_bnode_put(node);
		return ERR_PTR(-EIO);
	}

	pagep = node->page;
	memzero_page(*pagep, node->page_offset,
		     min_t(int, PAGE_SIZE, tree->node_size));
	set_page_dirty(*pagep);
	for (i = 1; i < tree->pages_per_bnode; i++) {
		memzero_page(*++pagep, 0, PAGE_SIZE);
		set_page_dirty(*pagep);
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);

	return node;
}

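/* Take an additional reference on a node */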
void hfs_bnode_get(struct hfs_bnode *node)
{
	if (node) {
		atomic_inc(&node->refcnt);
		hfs_dbg("cnid %d, node %d, refcnt %d\n",
			node->tree->cnid, node->this,
			atomic_read(&node->refcnt));
	}
}

/* Dispose of resources used by a node */
void hfs_bnode_put(struct hfs_bnode *node)
{
	if (node) {
		struct hfs_btree *tree = node->tree;
		int i;

		hfs_dbg("cnid %d, node %d, refcnt %d\n",
			node->tree->cnid, node->this,
			atomic_read(&node->refcnt));
		BUG_ON(!atomic_read(&node->refcnt));
		if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
			return;
		for (i = 0; i < tree->pages_per_bnode; i++) {
			if (!node->page[i])
				continue;
			mark_page_accessed(node->page[i]);
		}

		if (test_bit(HFS_BNODE_DELETED, &node->flags)) {
			hfs_bnode_unhash(node);
			spin_unlock(&tree->hash_lock);
			if (hfs_bnode_need_zeroout(tree))
				hfs_bnode_clear(node, 0, tree->node_size);
			hfs_bmap_free(node);
			hfs_bnode_free(node);
			return;
		}
		spin_unlock(&tree->hash_lock);
	}
}

/*
 * Unused nodes have to be zeroed if this is the catalog tree and
 * a corresponding flag in the volume header is set.
 */
bool hfs_bnode_need_zeroout(struct hfs_btree *tree)
{
	struct super_block *sb = tree->inode->i_sb;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	const u32 volume_attr = be32_to_cpu(sbi->s_vhdr->attributes);

	return volume_attr & HFSPLUS_VOL_UNUSED_NODE_FIX;
}