Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
drivers/crypto/marvell/tdma.c at v4.18-rc3 (353 lines, 9.2 kB)
/*
 * Provide TDMA helper functions used by cipher and hash algorithm
 * implementations.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include "cesa.h"

bool mv_cesa_req_dma_iter_next_transfer(struct mv_cesa_dma_iter *iter,
                                        struct mv_cesa_sg_dma_iter *sgiter,
                                        unsigned int len)
{
        if (!sgiter->sg)
                return false;

        sgiter->op_offset += len;
        sgiter->offset += len;
        if (sgiter->offset == sg_dma_len(sgiter->sg)) {
                if (sg_is_last(sgiter->sg))
                        return false;
                sgiter->offset = 0;
                sgiter->sg = sg_next(sgiter->sg);
        }

        if (sgiter->op_offset == iter->op_len)
                return false;

        return true;
}

void mv_cesa_dma_step(struct mv_cesa_req *dreq)
{
        struct mv_cesa_engine *engine = dreq->engine;

        writel_relaxed(0, engine->regs + CESA_SA_CFG);

        mv_cesa_set_int_mask(engine, CESA_SA_INT_ACC0_IDMA_DONE);
        writel_relaxed(CESA_TDMA_DST_BURST_128B | CESA_TDMA_SRC_BURST_128B |
                       CESA_TDMA_NO_BYTE_SWAP | CESA_TDMA_EN,
                       engine->regs + CESA_TDMA_CONTROL);

        writel_relaxed(CESA_SA_CFG_ACT_CH0_IDMA | CESA_SA_CFG_MULTI_PKT |
                       CESA_SA_CFG_CH0_W_IDMA | CESA_SA_CFG_PARA_DIS,
                       engine->regs + CESA_SA_CFG);
        writel_relaxed(dreq->chain.first->cur_dma,
                       engine->regs + CESA_TDMA_NEXT_ADDR);
        BUG_ON(readl(engine->regs + CESA_SA_CMD) &
               CESA_SA_CMD_EN_CESA_SA_ACCL0);
        writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

void mv_cesa_dma_cleanup(struct mv_cesa_req *dreq)
{
        struct mv_cesa_tdma_desc *tdma;

        for (tdma = dreq->chain.first; tdma;) {
                struct mv_cesa_tdma_desc *old_tdma = tdma;
                u32 type = tdma->flags & CESA_TDMA_TYPE_MSK;

                if (type == CESA_TDMA_OP)
                        dma_pool_free(cesa_dev->dma->op_pool, tdma->op,
                                      le32_to_cpu(tdma->src));

                tdma = tdma->next;
                dma_pool_free(cesa_dev->dma->tdma_desc_pool, old_tdma,
                              old_tdma->cur_dma);
        }

        dreq->chain.first = NULL;
        dreq->chain.last = NULL;
}

void mv_cesa_dma_prepare(struct mv_cesa_req *dreq,
                         struct mv_cesa_engine *engine)
{
        struct mv_cesa_tdma_desc *tdma;

        for (tdma = dreq->chain.first; tdma; tdma = tdma->next) {
                if (tdma->flags & CESA_TDMA_DST_IN_SRAM)
                        tdma->dst = cpu_to_le32(tdma->dst + engine->sram_dma);

                if (tdma->flags & CESA_TDMA_SRC_IN_SRAM)
                        tdma->src = cpu_to_le32(tdma->src + engine->sram_dma);

                if ((tdma->flags & CESA_TDMA_TYPE_MSK) == CESA_TDMA_OP)
                        mv_cesa_adjust_op(engine, tdma->op);
        }
}

void mv_cesa_tdma_chain(struct mv_cesa_engine *engine,
                        struct mv_cesa_req *dreq)
{
        if (engine->chain.first == NULL && engine->chain.last == NULL) {
                engine->chain.first = dreq->chain.first;
                engine->chain.last = dreq->chain.last;
        } else {
                struct mv_cesa_tdma_desc *last;

                last = engine->chain.last;
                last->next = dreq->chain.first;
                engine->chain.last = dreq->chain.last;

                /*
                 * Break the DMA chain if the CESA_TDMA_BREAK_CHAIN is set on
                 * the last element of the current chain, or if the request
                 * being queued needs the IV regs to be set before launching
                 * the request.
                 */
                if (!(last->flags & CESA_TDMA_BREAK_CHAIN) &&
                    !(dreq->chain.first->flags & CESA_TDMA_SET_STATE))
                        last->next_dma = dreq->chain.first->cur_dma;
        }
}
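/*
 * Walk the engine-level descriptor chain after an interrupt: every
 * CESA_TDMA_END_OF_REQ descriptor that the hardware has already passed
 * (i.e. that comes before the descriptor currently held in CESA_TDMA_CUR)
 * marks a finished request, which is processed and completed here.
 * Iteration stops at the first error or at the in-flight descriptor.
 */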
int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)
{
        struct crypto_async_request *req = NULL;
        struct mv_cesa_tdma_desc *tdma = NULL, *next = NULL;
        dma_addr_t tdma_cur;
        int res = 0;

        tdma_cur = readl(engine->regs + CESA_TDMA_CUR);

        for (tdma = engine->chain.first; tdma; tdma = next) {
                spin_lock_bh(&engine->lock);
                next = tdma->next;
                spin_unlock_bh(&engine->lock);

                if (tdma->flags & CESA_TDMA_END_OF_REQ) {
                        struct crypto_async_request *backlog = NULL;
                        struct mv_cesa_ctx *ctx;
                        u32 current_status;

                        spin_lock_bh(&engine->lock);
                        /*
                         * If req is NULL, this means we're processing the
                         * request in engine->req.
                         */
                        if (!req)
                                req = engine->req;
                        else
                                req = mv_cesa_dequeue_req_locked(engine,
                                                                 &backlog);

                        /* Re-chaining to the next request */
                        engine->chain.first = tdma->next;
                        tdma->next = NULL;

                        /* If this is the last request, clear the chain */
                        if (engine->chain.first == NULL)
                                engine->chain.last = NULL;
                        spin_unlock_bh(&engine->lock);

                        ctx = crypto_tfm_ctx(req->tfm);
                        current_status = (tdma->cur_dma == tdma_cur) ?
                                         status : CESA_SA_INT_ACC0_IDMA_DONE;
                        res = ctx->ops->process(req, current_status);
                        ctx->ops->complete(req);

                        if (res == 0)
                                mv_cesa_engine_enqueue_complete_request(engine,
                                                                        req);

                        if (backlog)
                                backlog->complete(backlog, -EINPROGRESS);
                }

                if (res || tdma->cur_dma == tdma_cur)
                        break;
        }

        /*
         * Save the last request in error to engine->req, so that the core
         * knows which request was faulty.
         */
        if (res) {
                spin_lock_bh(&engine->lock);
                engine->req = req;
                spin_unlock_bh(&engine->lock);
        }

        return res;
}

static struct mv_cesa_tdma_desc *
mv_cesa_dma_add_desc(struct mv_cesa_tdma_chain *chain, gfp_t flags)
{
        struct mv_cesa_tdma_desc *new_tdma = NULL;
        dma_addr_t dma_handle;

        new_tdma = dma_pool_zalloc(cesa_dev->dma->tdma_desc_pool, flags,
                                   &dma_handle);
        if (!new_tdma)
                return ERR_PTR(-ENOMEM);

        new_tdma->cur_dma = dma_handle;
        if (chain->last) {
                chain->last->next_dma = cpu_to_le32(dma_handle);
                chain->last->next = new_tdma;
        } else {
                chain->first = new_tdma;
        }

        chain->last = new_tdma;

        return new_tdma;
}

int mv_cesa_dma_add_result_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
                              u32 size, u32 flags, gfp_t gfp_flags)
{
        struct mv_cesa_tdma_desc *tdma, *op_desc;

        tdma = mv_cesa_dma_add_desc(chain, gfp_flags);
        if (IS_ERR(tdma))
                return PTR_ERR(tdma);

        /*
         * We re-use an existing op_desc object to retrieve the context
         * and result instead of allocating a new one.
         * There is at least one object of this type in a CESA crypto
         * req, just pick the first one in the chain.
         */
        for (op_desc = chain->first; op_desc; op_desc = op_desc->next) {
                u32 type = op_desc->flags & CESA_TDMA_TYPE_MSK;

                if (type == CESA_TDMA_OP)
                        break;
        }

        if (!op_desc)
                return -EIO;

        tdma->byte_cnt = cpu_to_le32(size | BIT(31));
        tdma->src = src;
        tdma->dst = op_desc->src;
        tdma->op = op_desc->op;

        flags &= (CESA_TDMA_DST_IN_SRAM | CESA_TDMA_SRC_IN_SRAM);
        tdma->flags = flags | CESA_TDMA_RESULT;
        return 0;
}
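/*
 * Allocate a new operation context from the op pool, initialize it from
 * op_templ, and append a TDMA descriptor that copies it into the engine
 * SRAM at CESA_SA_CFG_SRAM_OFFSET. When skip_ctx is true, only the op
 * descriptor part is transferred and the context area in SRAM is left
 * untouched.
 */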
struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain,
                                          const struct mv_cesa_op_ctx *op_templ,
                                          bool skip_ctx,
                                          gfp_t flags)
{
        struct mv_cesa_tdma_desc *tdma;
        struct mv_cesa_op_ctx *op;
        dma_addr_t dma_handle;
        unsigned int size;

        tdma = mv_cesa_dma_add_desc(chain, flags);
        if (IS_ERR(tdma))
                return ERR_CAST(tdma);

        op = dma_pool_alloc(cesa_dev->dma->op_pool, flags, &dma_handle);
        if (!op)
                return ERR_PTR(-ENOMEM);

        *op = *op_templ;

        size = skip_ctx ? sizeof(op->desc) : sizeof(*op);

        tdma = chain->last;
        tdma->op = op;
        tdma->byte_cnt = cpu_to_le32(size | BIT(31));
        tdma->src = cpu_to_le32(dma_handle);
        tdma->dst = CESA_SA_CFG_SRAM_OFFSET;
        tdma->flags = CESA_TDMA_DST_IN_SRAM | CESA_TDMA_OP;

        return op;
}

int mv_cesa_dma_add_data_transfer(struct mv_cesa_tdma_chain *chain,
                                  dma_addr_t dst, dma_addr_t src, u32 size,
                                  u32 flags, gfp_t gfp_flags)
{
        struct mv_cesa_tdma_desc *tdma;

        tdma = mv_cesa_dma_add_desc(chain, gfp_flags);
        if (IS_ERR(tdma))
                return PTR_ERR(tdma);

        tdma->byte_cnt = cpu_to_le32(size | BIT(31));
        tdma->src = src;
        tdma->dst = dst;

        flags &= (CESA_TDMA_DST_IN_SRAM | CESA_TDMA_SRC_IN_SRAM);
        tdma->flags = flags | CESA_TDMA_DATA;

        return 0;
}

int mv_cesa_dma_add_dummy_launch(struct mv_cesa_tdma_chain *chain, gfp_t flags)
{
        struct mv_cesa_tdma_desc *tdma;

        tdma = mv_cesa_dma_add_desc(chain, flags);
        return PTR_ERR_OR_ZERO(tdma);
}

int mv_cesa_dma_add_dummy_end(struct mv_cesa_tdma_chain *chain, gfp_t flags)
{
        struct mv_cesa_tdma_desc *tdma;

        tdma = mv_cesa_dma_add_desc(chain, flags);
        if (IS_ERR(tdma))
                return PTR_ERR(tdma);

        tdma->byte_cnt = cpu_to_le32(BIT(31));

        return 0;
}

int mv_cesa_dma_add_op_transfers(struct mv_cesa_tdma_chain *chain,
                                 struct mv_cesa_dma_iter *dma_iter,
                                 struct mv_cesa_sg_dma_iter *sgiter,
                                 gfp_t gfp_flags)
{
        u32 flags = sgiter->dir == DMA_TO_DEVICE ?
                    CESA_TDMA_DST_IN_SRAM : CESA_TDMA_SRC_IN_SRAM;
        unsigned int len;

        do {
                dma_addr_t dst, src;
                int ret;

                len = mv_cesa_req_dma_iter_transfer_len(dma_iter, sgiter);
                if (sgiter->dir == DMA_TO_DEVICE) {
                        dst = CESA_SA_DATA_SRAM_OFFSET + sgiter->op_offset;
                        src = sg_dma_address(sgiter->sg) + sgiter->offset;
                } else {
                        dst = sg_dma_address(sgiter->sg) + sgiter->offset;
                        src = CESA_SA_DATA_SRAM_OFFSET + sgiter->op_offset;
                }

                ret = mv_cesa_dma_add_data_transfer(chain, dst, src, len,
                                                    flags, gfp_flags);
                if (ret)
                        return ret;

        } while (mv_cesa_req_dma_iter_next_transfer(dma_iter, sgiter, len));

        return 0;
}
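For orientation, below is a minimal sketch of how a request implementation might compose the helpers above to build and launch a TDMA chain. It is an illustration only, not part of tdma.c: the function name example_build_chain is invented, the dma_iter/sgiter arguments are assumed to have been set up by the caller, and the real cipher/hash code in this driver interleaves these steps per operation and leaves mv_cesa_dma_step() to the engine core rather than calling it directly.

/* Illustrative sketch; not from the kernel tree. */
static int example_build_chain(struct mv_cesa_req *dreq,
                               struct mv_cesa_engine *engine,
                               const struct mv_cesa_op_ctx *tmpl,
                               struct mv_cesa_dma_iter *iter,
                               struct mv_cesa_sg_dma_iter *sgiter)
{
        gfp_t flags = GFP_KERNEL;
        struct mv_cesa_op_ctx *op;
        int ret;

        /* Descriptor copying the operation context into the engine SRAM. */
        op = mv_cesa_dma_add_op(&dreq->chain, tmpl, false, flags);
        if (IS_ERR(op))
                return PTR_ERR(op);

        /* Data transfers between the scatterlist and the SRAM. */
        ret = mv_cesa_dma_add_op_transfers(&dreq->chain, iter, sgiter, flags);
        if (ret)
                goto err_cleanup;

        /* Dummy descriptor on which the crypto operation is launched. */
        ret = mv_cesa_dma_add_dummy_launch(&dreq->chain, flags);
        if (ret)
                goto err_cleanup;

        /* Null descriptor terminating the chain. */
        ret = mv_cesa_dma_add_dummy_end(&dreq->chain, flags);
        if (ret)
                goto err_cleanup;

        /* Rebase SRAM-relative addresses, then start the TDMA engine. */
        dreq->engine = engine;
        mv_cesa_dma_prepare(dreq, engine);
        mv_cesa_dma_step(dreq);
        return 0;

err_cleanup:
        mv_cesa_dma_cleanup(dreq);
        return ret;
}

Note that every descriptor is allocated from a dma_pool, so the next_dma links hold DMA addresses the TDMA engine can follow on its own: the CPU only writes the first descriptor's address to CESA_TDMA_NEXT_ADDR in mv_cesa_dma_step(), and the hardware walks the rest of the chain.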