Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: chcr - Select device in Round Robin fashion

When multiple devices are present in the system, select the device
in round-robin fashion for crypto operations.

Signed-off-by: Atul Gupta <atul.gupta@chelsio.com>
Reviewed-by: Ganesh Goudar <ganeshgr@chelsio.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

authored by

Harsh Jain and committed by
Herbert Xu
14c19b17 738bff48

+44 -21
+4 -4
drivers/crypto/chelsio/chcr_algo.c
··· 1216 1216 1217 1217 static int chcr_device_init(struct chcr_context *ctx) 1218 1218 { 1219 - struct uld_ctx *u_ctx; 1219 + struct uld_ctx *u_ctx = NULL; 1220 1220 struct adapter *adap; 1221 1221 unsigned int id; 1222 1222 int txq_perchan, txq_idx, ntxq; ··· 1224 1224 1225 1225 id = smp_processor_id(); 1226 1226 if (!ctx->dev) { 1227 - err = assign_chcr_device(&ctx->dev); 1228 - if (err) { 1227 + u_ctx = assign_chcr_device(); 1228 + if (!u_ctx) { 1229 1229 pr_err("chcr device assignment fails\n"); 1230 1230 goto out; 1231 1231 } 1232 - u_ctx = ULD_CTX(ctx); 1232 + ctx->dev = u_ctx->dev; 1233 1233 adap = padap(ctx->dev); 1234 1234 ntxq = min_not_zero((unsigned int)u_ctx->lldi.nrxq, 1235 1235 adap->vres.ncrypto_fc);
+37 -16
drivers/crypto/chelsio/chcr_core.c
··· 29 29 static LIST_HEAD(uld_ctx_list); 30 30 static DEFINE_MUTEX(dev_mutex); 31 31 static atomic_t dev_count; 32 + static struct uld_ctx *ctx_rr; 32 33 33 34 typedef int (*chcr_handler_func)(struct chcr_dev *dev, unsigned char *input); 34 35 static int cpl_fw6_pld_handler(struct chcr_dev *dev, unsigned char *input); ··· 50 49 .rx_handler = chcr_uld_rx_handler, 51 50 }; 52 51 53 - int assign_chcr_device(struct chcr_dev **dev) 52 + struct uld_ctx *assign_chcr_device(void) 54 53 { 55 - struct uld_ctx *u_ctx; 56 - int ret = -ENXIO; 54 + struct uld_ctx *u_ctx = NULL; 57 55 58 56 /* 59 - * Which device to use if multiple devices are available TODO 60 - * May be select the device based on round robin. One session 61 - * must go to the same device to maintain the ordering. 57 + * When multiple devices are present in system select 58 + * device in round-robin fashion for crypto operations 59 + * Although One session must use the same device to 60 + * maintain request-response ordering. 62 61 */ 63 - mutex_lock(&dev_mutex); /* TODO ? 
*/ 64 - list_for_each_entry(u_ctx, &uld_ctx_list, entry) 65 - if (u_ctx->dev) { 66 - *dev = u_ctx->dev; 67 - ret = 0; 68 - break; 62 + mutex_lock(&dev_mutex); 63 + if (!list_empty(&uld_ctx_list)) { 64 + u_ctx = ctx_rr; 65 + if (list_is_last(&ctx_rr->entry, &uld_ctx_list)) 66 + ctx_rr = list_first_entry(&uld_ctx_list, 67 + struct uld_ctx, 68 + entry); 69 + else 70 + ctx_rr = list_next_entry(ctx_rr, entry); 69 71 } 70 72 mutex_unlock(&dev_mutex); 71 - return ret; 73 + return u_ctx; 72 74 } 73 75 74 76 static int chcr_dev_add(struct uld_ctx *u_ctx) ··· 86 82 u_ctx->dev = dev; 87 83 dev->u_ctx = u_ctx; 88 84 atomic_inc(&dev_count); 85 + mutex_lock(&dev_mutex); 86 + list_add_tail(&u_ctx->entry, &uld_ctx_list); 87 + if (!ctx_rr) 88 + ctx_rr = u_ctx; 89 + mutex_unlock(&dev_mutex); 89 90 return 0; 90 91 } 91 92 92 93 static int chcr_dev_remove(struct uld_ctx *u_ctx) 93 94 { 95 + if (ctx_rr == u_ctx) { 96 + if (list_is_last(&ctx_rr->entry, &uld_ctx_list)) 97 + ctx_rr = list_first_entry(&uld_ctx_list, 98 + struct uld_ctx, 99 + entry); 100 + else 101 + ctx_rr = list_next_entry(ctx_rr, entry); 102 + } 103 + list_del(&u_ctx->entry); 104 + if (list_empty(&uld_ctx_list)) 105 + ctx_rr = NULL; 94 106 kfree(u_ctx->dev); 95 107 u_ctx->dev = NULL; 96 108 atomic_dec(&dev_count); ··· 159 139 u_ctx = ERR_PTR(-ENOMEM); 160 140 goto out; 161 141 } 142 + if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE)) { 143 + u_ctx = ERR_PTR(-ENOMEM); 144 + goto out; 145 + } 162 146 u_ctx->lldi = *lld; 163 - mutex_lock(&dev_mutex); 164 - list_add_tail(&u_ctx->entry, &uld_ctx_list); 165 - mutex_unlock(&dev_mutex); 166 147 out: 167 148 return u_ctx; 168 149 }
+1 -1
drivers/crypto/chelsio/chcr_core.h
··· 89 89 struct chcr_dev *dev; 90 90 }; 91 91 92 - int assign_chcr_device(struct chcr_dev **dev); 92 + struct uld_ctx * assign_chcr_device(void); 93 93 int chcr_send_wr(struct sk_buff *skb); 94 94 int start_crypto(void); 95 95 int stop_crypto(void);
+1
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
··· 642 642 lld->sge_ingpadboundary = adap->sge.fl_align; 643 643 lld->sge_egrstatuspagesize = adap->sge.stat_len; 644 644 lld->sge_pktshift = adap->sge.pktshift; 645 + lld->ulp_crypto = adap->params.crypto; 645 646 lld->enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN; 646 647 lld->max_ordird_qp = adap->params.max_ordird_qp; 647 648 lld->max_ird_adapter = adap->params.max_ird_adapter;
+1
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
··· 331 331 unsigned int iscsi_tagmask; /* iscsi ddp tag mask */ 332 332 unsigned int iscsi_pgsz_order; /* iscsi ddp page size orders */ 333 333 unsigned int iscsi_llimit; /* chip's iscsi region llimit */ 334 + unsigned int ulp_crypto; /* crypto lookaside support */ 334 335 void **iscsi_ppm; /* iscsi page pod manager */ 335 336 int nodeid; /* device numa node id */ 336 337 bool fr_nsmr_tpte_wr_support; /* FW supports FR_NSMR_TPTE_WR */