Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: caam - Add APIs to allocate/free Job Rings

With each of the Job Rings available as a platform device, the
Job Ring driver needs to take care of allocation/deallocation
of the Job Rings to the above interface layers. Added APIs
in the Job Ring driver to allocate/free Job Rings.

Signed-off-by: Ruchika Gupta <ruchika.gupta@freescale.com>
Reviewed-by: Garg Vakul-B16394 <vakul@freescale.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

authored by

Ruchika Gupta and committed by
Herbert Xu
07defbfb 313ea293

+62 -3
+3
drivers/crypto/caam/intern.h
··· 44 44 struct tasklet_struct irqtask; 45 45 int irq; /* One per queue */ 46 46 47 + /* Number of scatterlist crypt transforms active on the JobR */ 48 + atomic_t tfm_count ____cacheline_aligned; 49 + 47 50 /* Job ring info */ 48 51 int ringsize; /* Size of rings (assume input = output) */ 49 52 struct caam_jrentry_info *entinfo; /* Alloc'ed 1 per ring entry */
+57 -3
drivers/crypto/caam/jr.c
··· 97 97 jrpriv = dev_get_drvdata(jrdev); 98 98 99 99 /* 100 - * Make sure ring is empty before release 100 + * Return EBUSY if job ring already allocated. 101 101 */ 102 - if (rd_reg32(&jrpriv->rregs->outring_used) || 103 - (rd_reg32(&jrpriv->rregs->inpring_avail) != JOBR_DEPTH)) { 102 + if (atomic_read(&jrpriv->tfm_count)) { 104 103 dev_err(jrdev, "Device is busy\n"); 105 104 return -EBUSY; 106 105 } ··· 231 232 /* reenable / unmask IRQs */ 232 233 clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK); 233 234 } 235 + 236 + /** 237 + * caam_jr_alloc() - Alloc a job ring for someone to use as needed. 238 + * 239 + * returns : pointer to the newly allocated physical 240 + * JobR dev can be written to if successful. 241 + **/ 242 + struct device *caam_jr_alloc(void) 243 + { 244 + struct caam_drv_private_jr *jrpriv, *min_jrpriv = NULL; 245 + struct device *dev = NULL; 246 + int min_tfm_cnt = INT_MAX; 247 + int tfm_cnt; 248 + 249 + spin_lock(&driver_data.jr_alloc_lock); 250 + 251 + if (list_empty(&driver_data.jr_list)) { 252 + spin_unlock(&driver_data.jr_alloc_lock); 253 + return ERR_PTR(-ENODEV); 254 + } 255 + 256 + list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) { 257 + tfm_cnt = atomic_read(&jrpriv->tfm_count); 258 + if (tfm_cnt < min_tfm_cnt) { 259 + min_tfm_cnt = tfm_cnt; 260 + min_jrpriv = jrpriv; 261 + } 262 + if (!min_tfm_cnt) 263 + break; 264 + } 265 + 266 + if (min_jrpriv) { 267 + atomic_inc(&min_jrpriv->tfm_count); 268 + dev = min_jrpriv->dev; 269 + } 270 + spin_unlock(&driver_data.jr_alloc_lock); 271 + 272 + return dev; 273 + } 274 + EXPORT_SYMBOL(caam_jr_alloc); 275 + 276 + /** 277 + * caam_jr_free() - Free the Job Ring 278 + * @rdev - points to the dev that identifies the Job ring to 279 + * be released. 
280 + **/ 281 + void caam_jr_free(struct device *rdev) 282 + { 283 + struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev); 284 + 285 + atomic_dec(&jrpriv->tfm_count); 286 + } 287 + EXPORT_SYMBOL(caam_jr_free); 234 288 235 289 /** 236 290 * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK, ··· 493 441 spin_lock(&driver_data.jr_alloc_lock); 494 442 list_add_tail(&jrpriv->list_node, &driver_data.jr_list); 495 443 spin_unlock(&driver_data.jr_alloc_lock); 444 + 445 + atomic_set(&jrpriv->tfm_count, 0); 496 446 497 447 return 0; 498 448 }
+2
drivers/crypto/caam/jr.h
··· 8 8 #define JR_H 9 9 10 10 /* Prototypes for backend-level services exposed to APIs */ 11 + struct device *caam_jr_alloc(void); 12 + void caam_jr_free(struct device *rdev); 11 13 int caam_jr_enqueue(struct device *dev, u32 *desc, 12 14 void (*cbk)(struct device *dev, u32 *desc, u32 status, 13 15 void *areq),