Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: nx - Fixing SHA update bug

The bug happens when a data size less than the SHA block size is passed.
Since the first attempt is saved in a buffer, the second attempt goes
through two steps to calculate op.inlen and op.outlen. The issue
resides in these steps: wrong values of op.inlen and op.outlen were
being calculated.

This patch fixes this by eliminating nx_sha_build_sg_list, which is
useless in SHA's algorithm context. Instead we call nx_build_sg_list
directly and pass a previously calculated max_sg_len to it.

Signed-off-by: Leonidas S. Barbosa <leosilva@linux.vnet.ibm.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

authored by

Leonidas Da Silva Barbosa and committed by
Herbert Xu
10d87b73 c3365ce1

+101 -121
+50 -36
drivers/crypto/nx/nx-sha256.c
··· 33 33 { 34 34 struct sha256_state *sctx = shash_desc_ctx(desc); 35 35 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 36 + struct nx_sg *out_sg; 36 37 int len; 37 - int rc; 38 + u32 max_sg_len; 38 39 39 40 nx_ctx_init(nx_ctx, HCOP_FC_SHA); 40 41 ··· 45 44 46 45 NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256); 47 46 48 - len = SHA256_DIGEST_SIZE; 49 - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg, 50 - &nx_ctx->op.outlen, 51 - &len, 52 - (u8 *) sctx->state, 53 - NX_DS_SHA256); 47 + max_sg_len = min_t(u64, nx_ctx->ap->sglen, 48 + nx_driver.of.max_sg_len/sizeof(struct nx_sg)); 49 + max_sg_len = min_t(u64, max_sg_len, 50 + nx_ctx->ap->databytelen/NX_PAGE_SIZE); 54 51 55 - if (rc) 56 - goto out; 52 + len = SHA256_DIGEST_SIZE; 53 + out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state, 54 + &len, max_sg_len); 55 + nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); 56 + 57 + if (len != SHA256_DIGEST_SIZE) 58 + return -EINVAL; 57 59 58 60 sctx->state[0] = __cpu_to_be32(SHA256_H0); 59 61 sctx->state[1] = __cpu_to_be32(SHA256_H1); ··· 68 64 sctx->state[7] = __cpu_to_be32(SHA256_H7); 69 65 sctx->count = 0; 70 66 71 - out: 72 67 return 0; 73 68 } 74 69 ··· 77 74 struct sha256_state *sctx = shash_desc_ctx(desc); 78 75 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 79 76 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; 77 + struct nx_sg *in_sg; 80 78 u64 to_process = 0, leftover, total; 81 79 unsigned long irq_flags; 82 80 int rc = 0; 83 81 int data_len; 82 + u32 max_sg_len; 84 83 u64 buf_len = (sctx->count % SHA256_BLOCK_SIZE); 85 84 86 85 spin_lock_irqsave(&nx_ctx->lock, irq_flags); ··· 102 97 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; 103 98 NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; 104 99 100 + in_sg = nx_ctx->in_sg; 101 + max_sg_len = min_t(u64, nx_ctx->ap->sglen, 102 + nx_driver.of.max_sg_len/sizeof(struct nx_sg)); 103 + max_sg_len = min_t(u64, max_sg_len, 104 + 
nx_ctx->ap->databytelen/NX_PAGE_SIZE); 105 + 105 106 do { 106 107 /* 107 108 * to_process: the SHA256_BLOCK_SIZE data chunk to process in ··· 119 108 120 109 if (buf_len) { 121 110 data_len = buf_len; 122 - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, 123 - &nx_ctx->op.inlen, 124 - &data_len, 125 - (u8 *) sctx->buf, 126 - NX_DS_SHA256); 111 + in_sg = nx_build_sg_list(nx_ctx->in_sg, 112 + (u8 *) sctx->buf, 113 + &data_len, 114 + max_sg_len); 127 115 128 - if (rc || data_len != buf_len) 116 + if (data_len != buf_len) { 117 + rc = -EINVAL; 129 118 goto out; 119 + } 130 120 } 131 121 132 122 data_len = to_process - buf_len; 133 - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, 134 - &nx_ctx->op.inlen, 135 - &data_len, 136 - (u8 *) data, 137 - NX_DS_SHA256); 123 + in_sg = nx_build_sg_list(in_sg, (u8 *) data, 124 + &data_len, max_sg_len); 138 125 139 - if (rc) 140 - goto out; 126 + nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); 141 127 142 128 to_process = (data_len + buf_len); 143 129 leftover = total - to_process; ··· 181 173 struct sha256_state *sctx = shash_desc_ctx(desc); 182 174 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 183 175 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; 176 + struct nx_sg *in_sg, *out_sg; 184 177 unsigned long irq_flags; 185 - int rc; 178 + u32 max_sg_len; 179 + int rc = 0; 186 180 int len; 187 181 188 182 spin_lock_irqsave(&nx_ctx->lock, irq_flags); 183 + 184 + max_sg_len = min_t(u64, nx_ctx->ap->sglen, 185 + nx_driver.of.max_sg_len/sizeof(struct nx_sg)); 186 + max_sg_len = min_t(u64, max_sg_len, 187 + nx_ctx->ap->databytelen/NX_PAGE_SIZE); 189 188 190 189 /* final is represented by continuing the operation and indicating that 191 190 * this is not an intermediate operation */ ··· 210 195 csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8); 211 196 212 197 len = sctx->count & (SHA256_BLOCK_SIZE - 1); 213 - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, 214 - 
&nx_ctx->op.inlen, 215 - &len, 216 - (u8 *) sctx->buf, 217 - NX_DS_SHA256); 198 + in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) sctx->buf, 199 + &len, max_sg_len); 218 200 219 - if (rc || len != (sctx->count & (SHA256_BLOCK_SIZE - 1))) 201 + if (len != (sctx->count & (SHA256_BLOCK_SIZE - 1))) { 202 + rc = -EINVAL; 220 203 goto out; 204 + } 221 205 222 206 len = SHA256_DIGEST_SIZE; 223 - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg, 224 - &nx_ctx->op.outlen, 225 - &len, 226 - out, 227 - NX_DS_SHA256); 207 + out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len, max_sg_len); 228 208 229 - if (rc || len != SHA256_DIGEST_SIZE) 209 + if (len != SHA256_DIGEST_SIZE) { 210 + rc = -EINVAL; 230 211 goto out; 212 + } 231 213 214 + nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); 215 + nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); 232 216 if (!nx_ctx->op.outlen) { 233 217 rc = -EINVAL; 234 218 goto out;
+51 -36
drivers/crypto/nx/nx-sha512.c
··· 32 32 { 33 33 struct sha512_state *sctx = shash_desc_ctx(desc); 34 34 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 35 + struct nx_sg *out_sg; 35 36 int len; 36 - int rc; 37 + u32 max_sg_len; 37 38 38 39 nx_ctx_init(nx_ctx, HCOP_FC_SHA); 39 40 ··· 44 43 45 44 NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512); 46 45 47 - len = SHA512_DIGEST_SIZE; 48 - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg, 49 - &nx_ctx->op.outlen, 50 - &len, 51 - (u8 *)sctx->state, 52 - NX_DS_SHA512); 46 + max_sg_len = min_t(u64, nx_ctx->ap->sglen, 47 + nx_driver.of.max_sg_len/sizeof(struct nx_sg)); 48 + max_sg_len = min_t(u64, max_sg_len, 49 + nx_ctx->ap->databytelen/NX_PAGE_SIZE); 53 50 54 - if (rc || len != SHA512_DIGEST_SIZE) 55 - goto out; 51 + len = SHA512_DIGEST_SIZE; 52 + out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state, 53 + &len, max_sg_len); 54 + nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); 55 + 56 + if (len != SHA512_DIGEST_SIZE) 57 + return -EINVAL; 56 58 57 59 sctx->state[0] = __cpu_to_be64(SHA512_H0); 58 60 sctx->state[1] = __cpu_to_be64(SHA512_H1); ··· 67 63 sctx->state[7] = __cpu_to_be64(SHA512_H7); 68 64 sctx->count[0] = 0; 69 65 70 - out: 71 66 return 0; 72 67 } 73 68 ··· 76 73 struct sha512_state *sctx = shash_desc_ctx(desc); 77 74 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 78 75 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; 76 + struct nx_sg *in_sg; 79 77 u64 to_process, leftover = 0, total; 80 78 unsigned long irq_flags; 81 79 int rc = 0; 82 80 int data_len; 81 + u32 max_sg_len; 83 82 u64 buf_len = (sctx->count[0] % SHA512_BLOCK_SIZE); 84 83 85 84 spin_lock_irqsave(&nx_ctx->lock, irq_flags); ··· 101 96 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; 102 97 NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; 103 98 99 + in_sg = nx_ctx->in_sg; 100 + max_sg_len = min_t(u64, nx_ctx->ap->sglen, 101 + nx_driver.of.max_sg_len/sizeof(struct nx_sg)); 102 + max_sg_len = min_t(u64, 
max_sg_len, 103 + nx_ctx->ap->databytelen/NX_PAGE_SIZE); 104 + 104 105 do { 105 106 /* 106 107 * to_process: the SHA512_BLOCK_SIZE data chunk to process in ··· 119 108 120 109 if (buf_len) { 121 110 data_len = buf_len; 122 - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, 123 - &nx_ctx->op.inlen, 124 - &data_len, 125 - (u8 *) sctx->buf, 126 - NX_DS_SHA512); 111 + in_sg = nx_build_sg_list(nx_ctx->in_sg, 112 + (u8 *) sctx->buf, 113 + &data_len, max_sg_len); 127 114 128 - if (rc || data_len != buf_len) 115 + if (data_len != buf_len) { 116 + rc = -EINVAL; 129 117 goto out; 118 + } 130 119 } 131 120 132 121 data_len = to_process - buf_len; 133 - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, 134 - &nx_ctx->op.inlen, 135 - &data_len, 136 - (u8 *) data, 137 - NX_DS_SHA512); 122 + in_sg = nx_build_sg_list(in_sg, (u8 *) data, 123 + &data_len, max_sg_len); 138 124 139 - if (rc || data_len != (to_process - buf_len)) 125 + nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); 126 + 127 + if (data_len != (to_process - buf_len)) { 128 + rc = -EINVAL; 140 129 goto out; 130 + } 141 131 142 132 to_process = (data_len + buf_len); 143 133 leftover = total - to_process; ··· 184 172 struct sha512_state *sctx = shash_desc_ctx(desc); 185 173 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 186 174 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; 175 + struct nx_sg *in_sg, *out_sg; 176 + u32 max_sg_len; 187 177 u64 count0; 188 178 unsigned long irq_flags; 189 - int rc; 179 + int rc = 0; 190 180 int len; 191 181 192 182 spin_lock_irqsave(&nx_ctx->lock, irq_flags); 183 + 184 + max_sg_len = min_t(u64, nx_ctx->ap->sglen, 185 + nx_driver.of.max_sg_len/sizeof(struct nx_sg)); 186 + max_sg_len = min_t(u64, max_sg_len, 187 + nx_ctx->ap->databytelen/NX_PAGE_SIZE); 193 188 194 189 /* final is represented by continuing the operation and indicating that 195 190 * this is not an intermediate operation */ ··· 219 200 csbcpb->cpb.sha512.message_bit_length_lo 
= count0; 220 201 221 202 len = sctx->count[0] & (SHA512_BLOCK_SIZE - 1); 222 - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, 223 - &nx_ctx->op.inlen, 224 - &len, 225 - (u8 *)sctx->buf, 226 - NX_DS_SHA512); 203 + in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, &len, 204 + max_sg_len); 227 205 228 - if (rc || len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1))) 206 + if (len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1))) { 207 + rc = -EINVAL; 229 208 goto out; 209 + } 230 210 231 211 len = SHA512_DIGEST_SIZE; 232 - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg, 233 - &nx_ctx->op.outlen, 234 - &len, 235 - out, 236 - NX_DS_SHA512); 212 + out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len, 213 + max_sg_len); 237 214 238 - if (rc) 239 - goto out; 215 + nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); 216 + nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); 240 217 241 218 if (!nx_ctx->op.outlen) { 242 219 rc = -EINVAL;
-47
drivers/crypto/nx/nx.c
··· 252 252 } 253 253 254 254 /** 255 - * nx_sha_build_sg_list - walk and build sg list to sha modes 256 - * using right bounds and limits. 257 - * @nx_ctx: NX crypto context for the lists we're building 258 - * @nx_sg: current sg list in or out list 259 - * @op_len: current op_len to be used in order to build a sg list 260 - * @nbytes: number or bytes to be processed 261 - * @offset: buf offset 262 - * @mode: SHA256 or SHA512 263 - */ 264 - int nx_sha_build_sg_list(struct nx_crypto_ctx *nx_ctx, 265 - struct nx_sg *nx_in_outsg, 266 - s64 *op_len, 267 - unsigned int *nbytes, 268 - u8 *offset, 269 - u32 mode) 270 - { 271 - unsigned int delta = 0; 272 - unsigned int total = *nbytes; 273 - struct nx_sg *nx_insg = nx_in_outsg; 274 - unsigned int max_sg_len; 275 - 276 - max_sg_len = min_t(u64, nx_ctx->ap->sglen, 277 - nx_driver.of.max_sg_len/sizeof(struct nx_sg)); 278 - max_sg_len = min_t(u64, max_sg_len, 279 - nx_ctx->ap->databytelen/NX_PAGE_SIZE); 280 - 281 - *nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen); 282 - nx_insg = nx_build_sg_list(nx_insg, offset, nbytes, max_sg_len); 283 - 284 - switch (mode) { 285 - case NX_DS_SHA256: 286 - if (*nbytes < total) 287 - delta = *nbytes - (*nbytes & ~(SHA256_BLOCK_SIZE - 1)); 288 - break; 289 - case NX_DS_SHA512: 290 - if (*nbytes < total) 291 - delta = *nbytes - (*nbytes & ~(SHA512_BLOCK_SIZE - 1)); 292 - break; 293 - default: 294 - return -EINVAL; 295 - } 296 - *op_len = trim_sg_list(nx_in_outsg, nx_insg, delta); 297 - 298 - return 0; 299 - } 300 - 301 - /** 302 255 * nx_build_sg_lists - walk the input scatterlists and build arrays of NX 303 256 * scatterlists based on them. 304 257 *
-2
drivers/crypto/nx/nx.h
··· 153 153 void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function); 154 154 int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op, 155 155 u32 may_sleep); 156 - int nx_sha_build_sg_list(struct nx_crypto_ctx *, struct nx_sg *, 157 - s64 *, unsigned int *, u8 *, u32); 158 156 struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int *, u32); 159 157 int nx_build_sg_lists(struct nx_crypto_ctx *, struct blkcipher_desc *, 160 158 struct scatterlist *, struct scatterlist *, unsigned int *,