Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

cxl: Refactor cxl_load_segment() and find_free_sste()

This moves the segment table hash calculation from cxl_load_segment()
into find_free_sste() since that is the only place it is actually used.

Signed-off-by: Ian Munsie <imunsie@au1.ibm.com>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>

Authored by Ian Munsie and committed by Michael Ellerman
b03a7f57 5100a9d6

+18 -16
drivers/misc/cxl/fault.c
@@ -21,20 +21,30 @@
 
 #include "cxl.h"
 
-static struct cxl_sste* find_free_sste(struct cxl_sste *primary_group,
-				       unsigned int *lru)
+/* This finds a free SSTE for the given SLB */
+static struct cxl_sste* find_free_sste(struct cxl_context *ctx,
+				       struct copro_slb *slb)
 {
+	struct cxl_sste *primary, *sste;
+	unsigned int mask = (ctx->sst_size >> 7) - 1; /* SSTP0[SegTableSize] */
 	unsigned int entry;
-	struct cxl_sste *sste, *group = primary_group;
+	unsigned int hash;
 
-	for (entry = 0; entry < 8; entry++) {
-		sste = group + entry;
+	if (slb->vsid & SLB_VSID_B_1T)
+		hash = (slb->esid >> SID_SHIFT_1T) & mask;
+	else /* 256M */
+		hash = (slb->esid >> SID_SHIFT) & mask;
+
+	primary = ctx->sstp + (hash << 3);
+
+	for (entry = 0, sste = primary; entry < 8; entry++, sste++) {
 		if (!(be64_to_cpu(sste->esid_data) & SLB_ESID_V))
 			return sste;
 	}
+
 	/* Nothing free, select an entry to cast out */
-	sste = primary_group + *lru;
-	*lru = (*lru + 1) & 0x7;
+	sste = primary + ctx->sst_lru;
+	ctx->sst_lru = (ctx->sst_lru + 1) & 0x7;
 
 	return sste;
 }
@@ -52,19 +42,11 @@
 static void cxl_load_segment(struct cxl_context *ctx, struct copro_slb *slb)
 {
 	/* mask is the group index, we search primary and secondary here. */
-	unsigned int mask = (ctx->sst_size >> 7)-1; /* SSTP0[SegTableSize] */
 	struct cxl_sste *sste;
-	unsigned int hash;
 	unsigned long flags;
 
-
-	if (slb->vsid & SLB_VSID_B_1T)
-		hash = (slb->esid >> SID_SHIFT_1T) & mask;
-	else /* 256M */
-		hash = (slb->esid >> SID_SHIFT) & mask;
-
 	spin_lock_irqsave(&ctx->sste_lock, flags);
-	sste = find_free_sste(ctx->sstp + (hash << 3), &ctx->sst_lru);
+	sste = find_free_sste(ctx, slb);
 
 	pr_devel("CXL Populating SST[%li]: %#llx %#llx\n",
 		 sste - ctx->sstp, slb->vsid, slb->esid);