Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: nx - fix concurrency issue

The NX driver uses the transformation context to store several fields
containing data related to the state of the operations in progress.
Since a single tfm can be used by different kernel threads at the same
time, we need to protect the data stored in the context.

This patch makes use of spin locks to protect the data wherever a race
condition could occur.

Reviewed-by: Fionnuala Gunter <fin@linux.vnet.ibm.com>
Reviewed-by: Joy Latten <jmlatten@linux.vnet.ibm.com>
Signed-off-by: Marcelo Cerri <mhcerri@linux.vnet.ibm.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

authored by

Marcelo Cerri and committed by
Herbert Xu
c849163b f22d0811

+87 -12
+8 -2
drivers/crypto/nx/nx-aes-cbc.c
··· 70 70 { 71 71 struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm); 72 72 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; 73 + unsigned long irq_flags; 73 74 int rc; 74 75 75 - if (nbytes > nx_ctx->ap->databytelen) 76 - return -EINVAL; 76 + spin_lock_irqsave(&nx_ctx->lock, irq_flags); 77 + 78 + if (nbytes > nx_ctx->ap->databytelen) { 79 + rc = -EINVAL; 80 + goto out; 81 + } 77 82 78 83 if (enc) 79 84 NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; ··· 105 100 atomic64_add(csbcpb->csb.processed_byte_count, 106 101 &(nx_ctx->stats->aes_bytes)); 107 102 out: 103 + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); 108 104 return rc; 109 105 } 110 106
+16 -4
drivers/crypto/nx/nx-aes-ccm.c
··· 271 271 unsigned int nbytes = req->cryptlen; 272 272 unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req)); 273 273 struct nx_ccm_priv *priv = &nx_ctx->priv.ccm; 274 + unsigned long irq_flags; 274 275 int rc = -1; 275 276 276 - if (nbytes > nx_ctx->ap->databytelen) 277 - return -EINVAL; 277 + spin_lock_irqsave(&nx_ctx->lock, irq_flags); 278 + 279 + if (nbytes > nx_ctx->ap->databytelen) { 280 + rc = -EINVAL; 281 + goto out; 282 + } 278 283 279 284 nbytes -= authsize; 280 285 ··· 313 308 rc = memcmp(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag, 314 309 authsize) ? -EBADMSG : 0; 315 310 out: 311 + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); 316 312 return rc; 317 313 } 318 314 ··· 324 318 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; 325 319 unsigned int nbytes = req->cryptlen; 326 320 unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req)); 321 + unsigned long irq_flags; 327 322 int rc = -1; 328 323 329 - if (nbytes > nx_ctx->ap->databytelen) 330 - return -EINVAL; 324 + spin_lock_irqsave(&nx_ctx->lock, irq_flags); 325 + 326 + if (nbytes > nx_ctx->ap->databytelen) { 327 + rc = -EINVAL; 328 + goto out; 329 + } 331 330 332 331 rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, 333 332 csbcpb->cpb.aes_ccm.in_pat_or_b0); ··· 361 350 req->dst, nbytes, authsize, 362 351 SCATTERWALK_TO_SG); 363 352 out: 353 + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); 364 354 return rc; 365 355 } 366 356
+8 -2
drivers/crypto/nx/nx-aes-ctr.c
··· 88 88 { 89 89 struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm); 90 90 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; 91 + unsigned long irq_flags; 91 92 int rc; 92 93 93 - if (nbytes > nx_ctx->ap->databytelen) 94 - return -EINVAL; 94 + spin_lock_irqsave(&nx_ctx->lock, irq_flags); 95 + 96 + if (nbytes > nx_ctx->ap->databytelen) { 97 + rc = -EINVAL; 98 + goto out; 99 + } 95 100 96 101 rc = nx_build_sg_lists(nx_ctx, desc, dst, src, nbytes, 97 102 csbcpb->cpb.aes_ctr.iv); ··· 117 112 atomic64_add(csbcpb->csb.processed_byte_count, 118 113 &(nx_ctx->stats->aes_bytes)); 119 114 out: 115 + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); 120 116 return rc; 121 117 } 122 118
+8 -2
drivers/crypto/nx/nx-aes-ecb.c
··· 70 70 { 71 71 struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm); 72 72 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; 73 + unsigned long irq_flags; 73 74 int rc; 74 75 75 - if (nbytes > nx_ctx->ap->databytelen) 76 - return -EINVAL; 76 + spin_lock_irqsave(&nx_ctx->lock, irq_flags); 77 + 78 + if (nbytes > nx_ctx->ap->databytelen) { 79 + rc = -EINVAL; 80 + goto out; 81 + } 77 82 78 83 if (enc) 79 84 NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; ··· 103 98 atomic64_add(csbcpb->csb.processed_byte_count, 104 99 &(nx_ctx->stats->aes_bytes)); 105 100 out: 101 + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); 106 102 return rc; 107 103 } 108 104
+4
drivers/crypto/nx/nx-aes-gcm.c
··· 166 166 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; 167 167 struct blkcipher_desc desc; 168 168 unsigned int nbytes = req->cryptlen; 169 + unsigned long irq_flags; 169 170 int rc = -EINVAL; 171 + 172 + spin_lock_irqsave(&nx_ctx->lock, irq_flags); 170 173 171 174 if (nbytes > nx_ctx->ap->databytelen) 172 175 goto out; ··· 258 255 -EBADMSG : 0; 259 256 } 260 257 out: 258 + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); 261 259 return rc; 262 260 } 263 261
+8
drivers/crypto/nx/nx-aes-xcbc.c
··· 89 89 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; 90 90 struct nx_sg *in_sg; 91 91 u32 to_process, leftover; 92 + unsigned long irq_flags; 92 93 int rc = 0; 94 + 95 + spin_lock_irqsave(&nx_ctx->lock, irq_flags); 93 96 94 97 if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { 95 98 /* we've hit the nx chip previously and we're updating again, ··· 161 158 /* everything after the first update is continuation */ 162 159 NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; 163 160 out: 161 + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); 164 162 return rc; 165 163 } 166 164 ··· 171 167 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 172 168 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; 173 169 struct nx_sg *in_sg, *out_sg; 170 + unsigned long irq_flags; 174 171 int rc = 0; 172 + 173 + spin_lock_irqsave(&nx_ctx->lock, irq_flags); 175 174 176 175 if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { 177 176 /* we've hit the nx chip previously, now we're finalizing, ··· 218 211 219 212 memcpy(out, csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE); 220 213 out: 214 + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); 221 215 return rc; 222 216 } 223 217
+16
drivers/crypto/nx/nx-sha256.c
··· 57 57 struct nx_sg *in_sg; 58 58 u64 to_process, leftover, total; 59 59 u32 max_sg_len; 60 + unsigned long irq_flags; 60 61 int rc = 0; 62 + 63 + spin_lock_irqsave(&nx_ctx->lock, irq_flags); 61 64 62 65 /* 2 cases for total data len: 63 66 * 1: < SHA256_BLOCK_SIZE: copy into state, return 0 ··· 139 136 memcpy(sctx->buf, data, leftover); 140 137 sctx->count = leftover; 141 138 out: 139 + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); 142 140 return rc; 143 141 } 144 142 ··· 150 146 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; 151 147 struct nx_sg *in_sg, *out_sg; 152 148 u32 max_sg_len; 149 + unsigned long irq_flags; 153 150 int rc; 151 + 152 + spin_lock_irqsave(&nx_ctx->lock, irq_flags); 154 153 155 154 max_sg_len = min_t(u32, nx_driver.of.max_sg_len, nx_ctx->ap->sglen); 156 155 ··· 193 186 &(nx_ctx->stats->sha256_bytes)); 194 187 memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE); 195 188 out: 189 + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); 196 190 return rc; 197 191 } 198 192 ··· 203 195 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 204 196 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; 205 197 struct sha256_state *octx = out; 198 + unsigned long irq_flags; 199 + 200 + spin_lock_irqsave(&nx_ctx->lock, irq_flags); 206 201 207 202 octx->count = sctx->count + 208 203 (csbcpb->cpb.sha256.message_bit_length / 8); ··· 228 217 octx->state[7] = SHA256_H7; 229 218 } 230 219 220 + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); 231 221 return 0; 232 222 } 233 223 ··· 238 226 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 239 227 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; 240 228 const struct sha256_state *ictx = in; 229 + unsigned long irq_flags; 230 + 231 + spin_lock_irqsave(&nx_ctx->lock, irq_flags); 241 232 242 233 memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf)); 243 234 ··· 255 240 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; 256 241 } 257 242 243 + 
spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); 258 244 return 0; 259 245 } 260 246
+16
drivers/crypto/nx/nx-sha512.c
··· 57 57 struct nx_sg *in_sg; 58 58 u64 to_process, leftover, total, spbc_bits; 59 59 u32 max_sg_len; 60 + unsigned long irq_flags; 60 61 int rc = 0; 62 + 63 + spin_lock_irqsave(&nx_ctx->lock, irq_flags); 61 64 62 65 /* 2 cases for total data len: 63 66 * 1: < SHA512_BLOCK_SIZE: copy into state, return 0 ··· 141 138 memcpy(sctx->buf, data, leftover); 142 139 sctx->count[0] = leftover; 143 140 out: 141 + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); 144 142 return rc; 145 143 } 146 144 ··· 153 149 struct nx_sg *in_sg, *out_sg; 154 150 u32 max_sg_len; 155 151 u64 count0; 152 + unsigned long irq_flags; 156 153 int rc; 154 + 155 + spin_lock_irqsave(&nx_ctx->lock, irq_flags); 157 156 158 157 max_sg_len = min_t(u32, nx_driver.of.max_sg_len, nx_ctx->ap->sglen); 159 158 ··· 200 193 201 194 memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE); 202 195 out: 196 + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); 203 197 return rc; 204 198 } 205 199 ··· 210 202 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 211 203 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; 212 204 struct sha512_state *octx = out; 205 + unsigned long irq_flags; 206 + 207 + spin_lock_irqsave(&nx_ctx->lock, irq_flags); 213 208 214 209 /* move message_bit_length (128 bits) into count and convert its value 215 210 * to bytes */ ··· 244 233 octx->state[7] = SHA512_H7; 245 234 } 246 235 236 + spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); 247 237 return 0; 248 238 } 249 239 ··· 254 242 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 255 243 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; 256 244 const struct sha512_state *ictx = in; 245 + unsigned long irq_flags; 246 + 247 + spin_lock_irqsave(&nx_ctx->lock, irq_flags); 257 248 258 249 memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf)); 259 250 sctx->count[0] = ictx->count[0] & 0x3f; ··· 274 259 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; 275 260 } 276 261 262 + 
spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); 277 263 return 0; 278 264 } 279 265
+2 -2
drivers/crypto/nx/nx.c
··· 61 61 62 62 do { 63 63 rc = vio_h_cop_sync(viodev, op); 64 - } while ((rc == -EBUSY && !may_sleep && retries--) || 65 - (rc == -EBUSY && may_sleep && cond_resched())); 64 + } while (rc == -EBUSY && !may_sleep && retries--); 66 65 67 66 if (rc) { 68 67 dev_dbg(&viodev->dev, "vio_h_cop_sync failed: rc: %d " ··· 250 251 */ 251 252 void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function) 252 253 { 254 + spin_lock_init(&nx_ctx->lock); 253 255 memset(nx_ctx->kmem, 0, nx_ctx->kmem_len); 254 256 nx_ctx->csbcpb->csb.valid |= NX_CSB_VALID_BIT; 255 257
+1
drivers/crypto/nx/nx.h
··· 117 117 }; 118 118 119 119 struct nx_crypto_ctx { 120 + spinlock_t lock; /* synchronize access to the context */ 120 121 void *kmem; /* unaligned, kmalloc'd buffer */ 121 122 size_t kmem_len; /* length of kmem */ 122 123 struct nx_csbcpb *csbcpb; /* aligned page given to phyp @ hcall time */