Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: stm32/crc32 - protect from concurrent accesses

Protect STM32 CRC device from concurrent accesses.

As we create a spinlocked section whose duration increases with buffer size,
we provide a module parameter to release the pressure by splitting the
critical section into chunks.

Size of each chunk is defined in burst_size module parameter.
By default burst_size=0, i.e. don't split the incoming buffer.

Signed-off-by: Nicolas Toromanoff <nicolas.toromanoff@st.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

authored by

Nicolas Toromanoff and committed by
Herbert Xu
7795c0ba 100f84be

+45 -2
+45 -2
drivers/crypto/stm32/stm32-crc32.c
··· 35 35 36 36 #define CRC_AUTOSUSPEND_DELAY 50 37 37 38 + static unsigned int burst_size; 39 + module_param(burst_size, uint, 0644); 40 + MODULE_PARM_DESC(burst_size, "Select burst byte size (0 unlimited)"); 41 + 38 42 struct stm32_crc { 39 43 struct list_head list; 40 44 struct device *dev; 41 45 void __iomem *regs; 42 46 struct clk *clk; 47 + spinlock_t lock; 43 48 }; 44 49 45 50 struct stm32_crc_list { ··· 114 109 struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc); 115 110 struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm); 116 111 struct stm32_crc *crc; 112 + unsigned long flags; 117 113 118 114 crc = stm32_crc_get_next_crc(); 119 115 if (!crc) 120 116 return -ENODEV; 121 117 122 118 pm_runtime_get_sync(crc->dev); 119 + 120 + spin_lock_irqsave(&crc->lock, flags); 123 121 124 122 /* Reset, set key, poly and configure in bit reverse mode */ 125 123 writel_relaxed(bitrev32(mctx->key), crc->regs + CRC_INIT); ··· 133 125 /* Store partial result */ 134 126 ctx->partial = readl_relaxed(crc->regs + CRC_DR); 135 127 128 + spin_unlock_irqrestore(&crc->lock, flags); 129 + 136 130 pm_runtime_mark_last_busy(crc->dev); 137 131 pm_runtime_put_autosuspend(crc->dev); 138 132 139 133 return 0; 140 134 } 141 135 142 - static int stm32_crc_update(struct shash_desc *desc, const u8 *d8, 143 - unsigned int length) 136 + static int burst_update(struct shash_desc *desc, const u8 *d8, 137 + size_t length) 144 138 { 145 139 struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc); 146 140 struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm); 147 141 struct stm32_crc *crc; 142 + unsigned long flags; 148 143 149 144 crc = stm32_crc_get_next_crc(); 150 145 if (!crc) 151 146 return -ENODEV; 152 147 153 148 pm_runtime_get_sync(crc->dev); 149 + 150 + spin_lock_irqsave(&crc->lock, flags); 154 151 155 152 /* 156 153 * Restore previously calculated CRC for this context as init value ··· 195 182 /* Store partial result */ 196 183 ctx->partial = readl_relaxed(crc->regs + CRC_DR); 197 
184 185 + spin_unlock_irqrestore(&crc->lock, flags); 186 + 198 187 pm_runtime_mark_last_busy(crc->dev); 199 188 pm_runtime_put_autosuspend(crc->dev); 189 + 190 + return 0; 191 + } 192 + 193 + static int stm32_crc_update(struct shash_desc *desc, const u8 *d8, 194 + unsigned int length) 195 + { 196 + const unsigned int burst_sz = burst_size; 197 + unsigned int rem_sz; 198 + const u8 *cur; 199 + size_t size; 200 + int ret; 201 + 202 + if (!burst_sz) 203 + return burst_update(desc, d8, length); 204 + 205 + /* Digest first bytes not 32bit aligned at first pass in the loop */ 206 + size = min(length, 207 + burst_sz + (unsigned int)d8 - ALIGN_DOWN((unsigned int)d8, 208 + sizeof(u32))); 209 + for (rem_sz = length, cur = d8; rem_sz; 210 + rem_sz -= size, cur += size, size = min(rem_sz, burst_sz)) { 211 + ret = burst_update(desc, cur, size); 212 + if (ret) 213 + return ret; 214 + } 200 215 201 216 return 0; 202 217 } ··· 340 299 pm_runtime_set_active(dev); 341 300 pm_runtime_irq_safe(dev); 342 301 pm_runtime_enable(dev); 302 + 303 + spin_lock_init(&crc->lock); 343 304 344 305 platform_set_drvdata(pdev, crc); 345 306