Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dmaengine: idxd: add support for readonly config mode

The read-only configuration mode is defined by the DSA spec as a mode of
the device WQ configuration. When GENCAP register bit 31 is set to 0,
the device is in RO mode: the group configuration and some fields of the
workqueue configuration registers are read-only and reflect the fixed
configuration of the device. Add support for RO mode. The driver will
load the values from the registers directly and set up all the internally
cached data structures based on the device configuration.

Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Link: https://lore.kernel.org/r/161894438847.3202472.6317563824045432727.stgit@djiang5-desk3.ch.intel.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>

authored by

Dave Jiang and committed by
Vinod Koul
8c66bbdc 93a40a6d

+137 -8
+116
drivers/dma/idxd/device.c
··· 884 884 885 885 return 0; 886 886 } 887 + 888 + static int idxd_wq_load_config(struct idxd_wq *wq) 889 + { 890 + struct idxd_device *idxd = wq->idxd; 891 + struct device *dev = &idxd->pdev->dev; 892 + int wqcfg_offset; 893 + int i; 894 + 895 + wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, 0); 896 + memcpy_fromio(wq->wqcfg, idxd->reg_base + wqcfg_offset, idxd->wqcfg_size); 897 + 898 + wq->size = wq->wqcfg->wq_size; 899 + wq->threshold = wq->wqcfg->wq_thresh; 900 + if (wq->wqcfg->priv) 901 + wq->type = IDXD_WQT_KERNEL; 902 + 903 + /* The driver does not support shared WQ mode in read-only config yet */ 904 + if (wq->wqcfg->mode == 0 || wq->wqcfg->pasid_en) 905 + return -EOPNOTSUPP; 906 + 907 + set_bit(WQ_FLAG_DEDICATED, &wq->flags); 908 + 909 + wq->priority = wq->wqcfg->priority; 910 + 911 + for (i = 0; i < WQCFG_STRIDES(idxd); i++) { 912 + wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, i); 913 + dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n", wq->id, i, wqcfg_offset, wq->wqcfg->bits[i]); 914 + } 915 + 916 + return 0; 917 + } 918 + 919 + static void idxd_group_load_config(struct idxd_group *group) 920 + { 921 + struct idxd_device *idxd = group->idxd; 922 + struct device *dev = &idxd->pdev->dev; 923 + int i, j, grpcfg_offset; 924 + 925 + /* 926 + * Load WQS bit fields 927 + * Iterate through all 256 bits 64 bits at a time 928 + */ 929 + for (i = 0; i < GRPWQCFG_STRIDES; i++) { 930 + struct idxd_wq *wq; 931 + 932 + grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i); 933 + group->grpcfg.wqs[i] = ioread64(idxd->reg_base + grpcfg_offset); 934 + dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n", 935 + group->id, i, grpcfg_offset, group->grpcfg.wqs[i]); 936 + 937 + if (i * 64 >= idxd->max_wqs) 938 + break; 939 + 940 + /* Iterate through all 64 bits and check for wq set */ 941 + for (j = 0; j < 64; j++) { 942 + int id = i * 64 + j; 943 + 944 + /* No need to check beyond max wqs */ 945 + if (id >= idxd->max_wqs) 946 + break; 947 + 948 + /* Set group assignment for wq if wq bit is set */ 949 + 
if (group->grpcfg.wqs[i] & BIT(j)) { 950 + wq = idxd->wqs[id]; 951 + wq->group = group; 952 + } 953 + } 954 + } 955 + 956 + grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id); 957 + group->grpcfg.engines = ioread64(idxd->reg_base + grpcfg_offset); 958 + dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id, 959 + grpcfg_offset, group->grpcfg.engines); 960 + 961 + /* Iterate through all 64 bits to check engines set */ 962 + for (i = 0; i < 64; i++) { 963 + if (i >= idxd->max_engines) 964 + break; 965 + 966 + if (group->grpcfg.engines & BIT(i)) { 967 + struct idxd_engine *engine = idxd->engines[i]; 968 + 969 + engine->group = group; 970 + } 971 + } 972 + 973 + grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id); 974 + group->grpcfg.flags.bits = ioread32(idxd->reg_base + grpcfg_offset); 975 + dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n", 976 + group->id, grpcfg_offset, group->grpcfg.flags.bits); 977 + } 978 + 979 + int idxd_device_load_config(struct idxd_device *idxd) 980 + { 981 + union gencfg_reg reg; 982 + int i, rc; 983 + 984 + reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET); 985 + idxd->token_limit = reg.token_limit; 986 + 987 + for (i = 0; i < idxd->max_groups; i++) { 988 + struct idxd_group *group = idxd->groups[i]; 989 + 990 + idxd_group_load_config(group); 991 + } 992 + 993 + for (i = 0; i < idxd->max_wqs; i++) { 994 + struct idxd_wq *wq = idxd->wqs[i]; 995 + 996 + rc = idxd_wq_load_config(wq); 997 + if (rc < 0) 998 + return rc; 999 + } 1000 + 1001 + return 0; 1002 + }
+1
drivers/dma/idxd/idxd.h
··· 384 384 int idxd_device_config(struct idxd_device *idxd); 385 385 void idxd_device_wqs_clear_state(struct idxd_device *idxd); 386 386 void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid); 387 + int idxd_device_load_config(struct idxd_device *idxd); 387 388 388 389 /* work queue control */ 389 390 int idxd_wq_alloc_resources(struct idxd_wq *wq);
+8
drivers/dma/idxd/init.c
··· 482 482 if (rc) 483 483 goto err; 484 484 485 + /* If the configs are readonly, then load them from device */ 486 + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) { 487 + dev_dbg(dev, "Loading RO device config\n"); 488 + rc = idxd_device_load_config(idxd); 489 + if (rc < 0) 490 + goto err; 491 + } 492 + 485 493 rc = idxd_setup_interrupts(idxd); 486 494 if (rc) 487 495 goto err;
+12 -8
drivers/dma/idxd/sysfs.c
··· 110 110 } 111 111 112 112 spin_lock_irqsave(&idxd->dev_lock, flags); 113 - rc = idxd_device_config(idxd); 113 + if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) 114 + rc = idxd_device_config(idxd); 114 115 spin_unlock_irqrestore(&idxd->dev_lock, flags); 115 116 if (rc < 0) { 116 117 mutex_unlock(&wq->wq_lock); ··· 171 170 172 171 static int idxd_config_bus_probe(struct device *dev) 173 172 { 174 - int rc; 173 + int rc = 0; 175 174 unsigned long flags; 176 175 177 176 dev_dbg(dev, "%s called\n", __func__); ··· 189 188 190 189 /* Perform IDXD configuration and enabling */ 191 190 spin_lock_irqsave(&idxd->dev_lock, flags); 192 - rc = idxd_device_config(idxd); 191 + if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) 192 + rc = idxd_device_config(idxd); 193 193 spin_unlock_irqrestore(&idxd->dev_lock, flags); 194 194 if (rc < 0) { 195 195 module_put(THIS_MODULE); ··· 289 287 290 288 idxd_unregister_dma_device(idxd); 291 289 rc = idxd_device_disable(idxd); 292 - for (i = 0; i < idxd->max_wqs; i++) { 293 - struct idxd_wq *wq = idxd->wqs[i]; 290 + if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) { 291 + for (i = 0; i < idxd->max_wqs; i++) { 292 + struct idxd_wq *wq = idxd->wqs[i]; 294 293 295 - mutex_lock(&wq->wq_lock); 296 - idxd_wq_disable_cleanup(wq); 297 - mutex_unlock(&wq->wq_lock); 294 + mutex_lock(&wq->wq_lock); 295 + idxd_wq_disable_cleanup(wq); 296 + mutex_unlock(&wq->wq_lock); 297 + } 298 298 } 299 299 module_put(THIS_MODULE); 300 300 if (rc < 0)