Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

fs/bcachefs/checksum.h at v6.7-rc3 (213 lines, 5.4 kB)
1/* SPDX-License-Identifier: GPL-2.0 */ 2#ifndef _BCACHEFS_CHECKSUM_H 3#define _BCACHEFS_CHECKSUM_H 4 5#include "bcachefs.h" 6#include "extents_types.h" 7#include "super-io.h" 8 9#include <linux/crc64.h> 10#include <crypto/chacha.h> 11 12static inline bool bch2_checksum_mergeable(unsigned type) 13{ 14 15 switch (type) { 16 case BCH_CSUM_none: 17 case BCH_CSUM_crc32c: 18 case BCH_CSUM_crc64: 19 return true; 20 default: 21 return false; 22 } 23} 24 25struct bch_csum bch2_checksum_merge(unsigned, struct bch_csum, 26 struct bch_csum, size_t); 27 28#define BCH_NONCE_EXTENT cpu_to_le32(1 << 28) 29#define BCH_NONCE_BTREE cpu_to_le32(2 << 28) 30#define BCH_NONCE_JOURNAL cpu_to_le32(3 << 28) 31#define BCH_NONCE_PRIO cpu_to_le32(4 << 28) 32#define BCH_NONCE_POLY cpu_to_le32(1 << 31) 33 34struct bch_csum bch2_checksum(struct bch_fs *, unsigned, struct nonce, 35 const void *, size_t); 36 37/* 38 * This is used for various on disk data structures - bch_sb, prio_set, bset, 39 * jset: The checksum is _always_ the first field of these structs 40 */ 41#define csum_vstruct(_c, _type, _nonce, _i) \ 42({ \ 43 const void *_start = ((const void *) (_i)) + sizeof((_i)->csum);\ 44 \ 45 bch2_checksum(_c, _type, _nonce, _start, vstruct_end(_i) - _start);\ 46}) 47 48int bch2_chacha_encrypt_key(struct bch_key *, struct nonce, void *, size_t); 49int bch2_request_key(struct bch_sb *, struct bch_key *); 50#ifndef __KERNEL__ 51int bch2_revoke_key(struct bch_sb *); 52#endif 53 54int bch2_encrypt(struct bch_fs *, unsigned, struct nonce, 55 void *data, size_t); 56 57struct bch_csum bch2_checksum_bio(struct bch_fs *, unsigned, 58 struct nonce, struct bio *); 59 60int bch2_rechecksum_bio(struct bch_fs *, struct bio *, struct bversion, 61 struct bch_extent_crc_unpacked, 62 struct bch_extent_crc_unpacked *, 63 struct bch_extent_crc_unpacked *, 64 unsigned, unsigned, unsigned); 65 66int __bch2_encrypt_bio(struct bch_fs *, unsigned, 67 struct nonce, struct bio *); 68 69static inline int 
bch2_encrypt_bio(struct bch_fs *c, unsigned type, 70 struct nonce nonce, struct bio *bio) 71{ 72 return bch2_csum_type_is_encryption(type) 73 ? __bch2_encrypt_bio(c, type, nonce, bio) 74 : 0; 75} 76 77extern const struct bch_sb_field_ops bch_sb_field_ops_crypt; 78 79int bch2_decrypt_sb_key(struct bch_fs *, struct bch_sb_field_crypt *, 80 struct bch_key *); 81 82int bch2_disable_encryption(struct bch_fs *); 83int bch2_enable_encryption(struct bch_fs *, bool); 84 85void bch2_fs_encryption_exit(struct bch_fs *); 86int bch2_fs_encryption_init(struct bch_fs *); 87 88static inline enum bch_csum_type bch2_csum_opt_to_type(enum bch_csum_opts type, 89 bool data) 90{ 91 switch (type) { 92 case BCH_CSUM_OPT_none: 93 return BCH_CSUM_none; 94 case BCH_CSUM_OPT_crc32c: 95 return data ? BCH_CSUM_crc32c : BCH_CSUM_crc32c_nonzero; 96 case BCH_CSUM_OPT_crc64: 97 return data ? BCH_CSUM_crc64 : BCH_CSUM_crc64_nonzero; 98 case BCH_CSUM_OPT_xxhash: 99 return BCH_CSUM_xxhash; 100 default: 101 BUG(); 102 } 103} 104 105static inline enum bch_csum_type bch2_data_checksum_type(struct bch_fs *c, 106 struct bch_io_opts opts) 107{ 108 if (opts.nocow) 109 return 0; 110 111 if (c->sb.encryption_type) 112 return c->opts.wide_macs 113 ? 
BCH_CSUM_chacha20_poly1305_128 114 : BCH_CSUM_chacha20_poly1305_80; 115 116 return bch2_csum_opt_to_type(opts.data_checksum, true); 117} 118 119static inline enum bch_csum_type bch2_meta_checksum_type(struct bch_fs *c) 120{ 121 if (c->sb.encryption_type) 122 return BCH_CSUM_chacha20_poly1305_128; 123 124 return bch2_csum_opt_to_type(c->opts.metadata_checksum, false); 125} 126 127static inline bool bch2_checksum_type_valid(const struct bch_fs *c, 128 unsigned type) 129{ 130 if (type >= BCH_CSUM_NR) 131 return false; 132 133 if (bch2_csum_type_is_encryption(type) && !c->chacha20) 134 return false; 135 136 return true; 137} 138 139/* returns true if not equal */ 140static inline bool bch2_crc_cmp(struct bch_csum l, struct bch_csum r) 141{ 142 /* 143 * XXX: need some way of preventing the compiler from optimizing this 144 * into a form that isn't constant time.. 145 */ 146 return ((l.lo ^ r.lo) | (l.hi ^ r.hi)) != 0; 147} 148 149/* for skipping ahead and encrypting/decrypting at an offset: */ 150static inline struct nonce nonce_add(struct nonce nonce, unsigned offset) 151{ 152 EBUG_ON(offset & (CHACHA_BLOCK_SIZE - 1)); 153 154 le32_add_cpu(&nonce.d[0], offset / CHACHA_BLOCK_SIZE); 155 return nonce; 156} 157 158static inline struct nonce null_nonce(void) 159{ 160 struct nonce ret; 161 162 memset(&ret, 0, sizeof(ret)); 163 return ret; 164} 165 166static inline struct nonce extent_nonce(struct bversion version, 167 struct bch_extent_crc_unpacked crc) 168{ 169 unsigned compression_type = crc_is_compressed(crc) 170 ? crc.compression_type 171 : 0; 172 unsigned size = compression_type ? 
crc.uncompressed_size : 0; 173 struct nonce nonce = (struct nonce) {{ 174 [0] = cpu_to_le32(size << 22), 175 [1] = cpu_to_le32(version.lo), 176 [2] = cpu_to_le32(version.lo >> 32), 177 [3] = cpu_to_le32(version.hi| 178 (compression_type << 24))^BCH_NONCE_EXTENT, 179 }}; 180 181 return nonce_add(nonce, crc.nonce << 9); 182} 183 184static inline bool bch2_key_is_encrypted(struct bch_encrypted_key *key) 185{ 186 return le64_to_cpu(key->magic) != BCH_KEY_MAGIC; 187} 188 189static inline struct nonce __bch2_sb_key_nonce(struct bch_sb *sb) 190{ 191 __le64 magic = __bch2_sb_magic(sb); 192 193 return (struct nonce) {{ 194 [0] = 0, 195 [1] = 0, 196 [2] = ((__le32 *) &magic)[0], 197 [3] = ((__le32 *) &magic)[1], 198 }}; 199} 200 201static inline struct nonce bch2_sb_key_nonce(struct bch_fs *c) 202{ 203 __le64 magic = bch2_sb_magic(c); 204 205 return (struct nonce) {{ 206 [0] = 0, 207 [1] = 0, 208 [2] = ((__le32 *) &magic)[0], 209 [3] = ((__le32 *) &magic)[1], 210 }}; 211} 212 213#endif /* _BCACHEFS_CHECKSUM_H */