Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: iaa - Add Intel IAA Compression Accelerator crypto driver core

The Intel Analytics Accelerator (IAA) is a hardware accelerator that
provides very high throughput compression/decompression compatible with
the DEFLATE compression standard described in RFC 1951, which is the
compression/decompression algorithm exported by this module.

Users can select IAA compress/decompress acceleration by specifying
one of the deflate-iaa* algorithms as the compression algorithm to use
by whatever facility allows asynchronous compression algorithms to be
selected.

For example, zswap can select the IAA fixed deflate algorithm
'deflate-iaa' via:

# echo deflate-iaa > /sys/module/zswap/parameters/compressor

This patch adds iaa_crypto as an idxd sub-driver and tracks iaa
devices and workqueues as they are probed or removed.

[ Based on work originally by George Powley, Jing Lin and Kyung Min
Park ]

Signed-off-by: Tom Zanussi <tom.zanussi@linux.intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Authored by Tom Zanussi and committed by Herbert Xu (commit ea7a5cbb, parent 8ccc257b).

+382
+7
MAINTAINERS
@@ MAINTAINERS (context: after the INTEL I/OAT DMA DRIVER entry) @@
 Q:	https://patchwork.kernel.org/project/linux-dmaengine/list/
 F:	drivers/dma/ioat*

+INTEL IAA CRYPTO DRIVER
+M:	Tom Zanussi <tom.zanussi@linux.intel.com>
+L:	linux-crypto@vger.kernel.org
+S:	Supported
+F:	Documentation/driver-api/crypto/iaa/iaa-crypto.rst
+F:	drivers/crypto/intel/iaa/*
+
 INTEL IDLE DRIVER
 M:	Jacob Pan <jacob.jun.pan@linux.intel.com>
 M:	Len Brown <lenb@kernel.org>
+1
drivers/crypto/intel/Kconfig
@@ drivers/crypto/intel/Kconfig @@
 source "drivers/crypto/intel/keembay/Kconfig"
 source "drivers/crypto/intel/ixp4xx/Kconfig"
 source "drivers/crypto/intel/qat/Kconfig"
+source "drivers/crypto/intel/iaa/Kconfig"
+1
drivers/crypto/intel/Makefile
@@ drivers/crypto/intel/Makefile @@
 obj-y += keembay/
 obj-y += ixp4xx/
 obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
+obj-$(CONFIG_CRYPTO_DEV_IAA_CRYPTO) += iaa/
+10
drivers/crypto/intel/iaa/Kconfig
config CRYPTO_DEV_IAA_CRYPTO
	tristate "Support for Intel(R) IAA Compression Accelerator"
	depends on CRYPTO_DEFLATE
	depends on INTEL_IDXD
	default n
	help
	  This driver supports acceleration for compression and
	  decompression with the Intel Analytics Accelerator (IAA)
	  hardware using the cryptographic API. If you choose 'M'
	  here, the module will be called iaa_crypto.
+10
drivers/crypto/intel/iaa/Makefile
# SPDX-License-Identifier: GPL-2.0
#
# Makefile for IAA crypto device drivers
#

# Pick up the idxd driver's private headers ("idxd.h") and tag symbols
# with the IDXD symbol namespace, matching MODULE_IMPORT_NS(IDXD) in
# iaa_crypto_main.c.
ccflags-y += -I $(srctree)/drivers/dma/idxd -DDEFAULT_SYMBOL_NAMESPACE=IDXD

obj-$(CONFIG_CRYPTO_DEV_IAA_CRYPTO) := iaa_crypto.o

iaa_crypto-y := iaa_crypto_main.o
+30
drivers/crypto/intel/iaa/iaa_crypto.h
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 Intel Corporation. All rights rsvd. */

#ifndef __IAA_CRYPTO_H__
#define __IAA_CRYPTO_H__

#include <linux/crypto.h>
#include <linux/idxd.h>
#include <uapi/linux/idxd.h>

/* Name this sub-driver registers on the idxd bus (matched against wq driver_name) */
#define IDXD_SUBDRIVER_NAME		"crypto"

/* Representation of IAA workqueue */
struct iaa_wq {
	struct list_head	list;		/* link in iaa_device->wqs */
	struct idxd_wq		*wq;		/* underlying idxd workqueue */

	struct iaa_device	*iaa_device;	/* owning IAA device */
};

/* Representation of IAA device with wqs, populated by probe */
struct iaa_device {
	struct list_head	list;		/* link in global iaa_devices list */
	struct idxd_device	*idxd;		/* underlying idxd device */

	int			n_wq;		/* number of wqs on the wqs list */
	struct list_head	wqs;		/* list of struct iaa_wq */
};

#endif	/* __IAA_CRYPTO_H__ */
+323
drivers/crypto/intel/iaa/iaa_crypto_main.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright(c) 2021 Intel Corporation. All rights rsvd. */ 3 + 4 + #include <linux/init.h> 5 + #include <linux/kernel.h> 6 + #include <linux/module.h> 7 + #include <linux/pci.h> 8 + #include <linux/device.h> 9 + #include <linux/iommu.h> 10 + #include <uapi/linux/idxd.h> 11 + #include <linux/highmem.h> 12 + #include <linux/sched/smt.h> 13 + 14 + #include "idxd.h" 15 + #include "iaa_crypto.h" 16 + 17 + #ifdef pr_fmt 18 + #undef pr_fmt 19 + #endif 20 + 21 + #define pr_fmt(fmt) "idxd: " IDXD_SUBDRIVER_NAME ": " fmt 22 + 23 + /* number of iaa instances probed */ 24 + static unsigned int nr_iaa; 25 + 26 + static LIST_HEAD(iaa_devices); 27 + static DEFINE_MUTEX(iaa_devices_lock); 28 + 29 + static struct iaa_device *iaa_device_alloc(void) 30 + { 31 + struct iaa_device *iaa_device; 32 + 33 + iaa_device = kzalloc(sizeof(*iaa_device), GFP_KERNEL); 34 + if (!iaa_device) 35 + return NULL; 36 + 37 + INIT_LIST_HEAD(&iaa_device->wqs); 38 + 39 + return iaa_device; 40 + } 41 + 42 + static void iaa_device_free(struct iaa_device *iaa_device) 43 + { 44 + struct iaa_wq *iaa_wq, *next; 45 + 46 + list_for_each_entry_safe(iaa_wq, next, &iaa_device->wqs, list) { 47 + list_del(&iaa_wq->list); 48 + kfree(iaa_wq); 49 + } 50 + 51 + kfree(iaa_device); 52 + } 53 + 54 + static bool iaa_has_wq(struct iaa_device *iaa_device, struct idxd_wq *wq) 55 + { 56 + struct iaa_wq *iaa_wq; 57 + 58 + list_for_each_entry(iaa_wq, &iaa_device->wqs, list) { 59 + if (iaa_wq->wq == wq) 60 + return true; 61 + } 62 + 63 + return false; 64 + } 65 + 66 + static struct iaa_device *add_iaa_device(struct idxd_device *idxd) 67 + { 68 + struct iaa_device *iaa_device; 69 + 70 + iaa_device = iaa_device_alloc(); 71 + if (!iaa_device) 72 + return NULL; 73 + 74 + iaa_device->idxd = idxd; 75 + 76 + list_add_tail(&iaa_device->list, &iaa_devices); 77 + 78 + nr_iaa++; 79 + 80 + return iaa_device; 81 + } 82 + 83 + static void del_iaa_device(struct iaa_device *iaa_device) 84 + { 85 + 
list_del(&iaa_device->list); 86 + 87 + iaa_device_free(iaa_device); 88 + 89 + nr_iaa--; 90 + } 91 + 92 + static int add_iaa_wq(struct iaa_device *iaa_device, struct idxd_wq *wq, 93 + struct iaa_wq **new_wq) 94 + { 95 + struct idxd_device *idxd = iaa_device->idxd; 96 + struct pci_dev *pdev = idxd->pdev; 97 + struct device *dev = &pdev->dev; 98 + struct iaa_wq *iaa_wq; 99 + 100 + iaa_wq = kzalloc(sizeof(*iaa_wq), GFP_KERNEL); 101 + if (!iaa_wq) 102 + return -ENOMEM; 103 + 104 + iaa_wq->wq = wq; 105 + iaa_wq->iaa_device = iaa_device; 106 + idxd_wq_set_private(wq, iaa_wq); 107 + 108 + list_add_tail(&iaa_wq->list, &iaa_device->wqs); 109 + 110 + iaa_device->n_wq++; 111 + 112 + if (new_wq) 113 + *new_wq = iaa_wq; 114 + 115 + dev_dbg(dev, "added wq %d to iaa device %d, n_wq %d\n", 116 + wq->id, iaa_device->idxd->id, iaa_device->n_wq); 117 + 118 + return 0; 119 + } 120 + 121 + static void del_iaa_wq(struct iaa_device *iaa_device, struct idxd_wq *wq) 122 + { 123 + struct idxd_device *idxd = iaa_device->idxd; 124 + struct pci_dev *pdev = idxd->pdev; 125 + struct device *dev = &pdev->dev; 126 + struct iaa_wq *iaa_wq; 127 + 128 + list_for_each_entry(iaa_wq, &iaa_device->wqs, list) { 129 + if (iaa_wq->wq == wq) { 130 + list_del(&iaa_wq->list); 131 + iaa_device->n_wq--; 132 + 133 + dev_dbg(dev, "removed wq %d from iaa_device %d, n_wq %d, nr_iaa %d\n", 134 + wq->id, iaa_device->idxd->id, 135 + iaa_device->n_wq, nr_iaa); 136 + 137 + if (iaa_device->n_wq == 0) 138 + del_iaa_device(iaa_device); 139 + break; 140 + } 141 + } 142 + } 143 + 144 + static int save_iaa_wq(struct idxd_wq *wq) 145 + { 146 + struct iaa_device *iaa_device, *found = NULL; 147 + struct idxd_device *idxd; 148 + struct pci_dev *pdev; 149 + struct device *dev; 150 + int ret = 0; 151 + 152 + list_for_each_entry(iaa_device, &iaa_devices, list) { 153 + if (iaa_device->idxd == wq->idxd) { 154 + idxd = iaa_device->idxd; 155 + pdev = idxd->pdev; 156 + dev = &pdev->dev; 157 + /* 158 + * Check to see that we don't already 
have this wq. 159 + * Shouldn't happen but we don't control probing. 160 + */ 161 + if (iaa_has_wq(iaa_device, wq)) { 162 + dev_dbg(dev, "same wq probed multiple times for iaa_device %p\n", 163 + iaa_device); 164 + goto out; 165 + } 166 + 167 + found = iaa_device; 168 + 169 + ret = add_iaa_wq(iaa_device, wq, NULL); 170 + if (ret) 171 + goto out; 172 + 173 + break; 174 + } 175 + } 176 + 177 + if (!found) { 178 + struct iaa_device *new_device; 179 + struct iaa_wq *new_wq; 180 + 181 + new_device = add_iaa_device(wq->idxd); 182 + if (!new_device) { 183 + ret = -ENOMEM; 184 + goto out; 185 + } 186 + 187 + ret = add_iaa_wq(new_device, wq, &new_wq); 188 + if (ret) { 189 + del_iaa_device(new_device); 190 + goto out; 191 + } 192 + } 193 + 194 + if (WARN_ON(nr_iaa == 0)) 195 + return -EINVAL; 196 + out: 197 + return 0; 198 + } 199 + 200 + static void remove_iaa_wq(struct idxd_wq *wq) 201 + { 202 + struct iaa_device *iaa_device; 203 + 204 + list_for_each_entry(iaa_device, &iaa_devices, list) { 205 + if (iaa_has_wq(iaa_device, wq)) { 206 + del_iaa_wq(iaa_device, wq); 207 + break; 208 + } 209 + } 210 + } 211 + 212 + static int iaa_crypto_probe(struct idxd_dev *idxd_dev) 213 + { 214 + struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev); 215 + struct idxd_device *idxd = wq->idxd; 216 + struct idxd_driver_data *data = idxd->data; 217 + struct device *dev = &idxd_dev->conf_dev; 218 + int ret = 0; 219 + 220 + if (idxd->state != IDXD_DEV_ENABLED) 221 + return -ENXIO; 222 + 223 + if (data->type != IDXD_TYPE_IAX) 224 + return -ENODEV; 225 + 226 + mutex_lock(&wq->wq_lock); 227 + 228 + if (!idxd_wq_driver_name_match(wq, dev)) { 229 + dev_dbg(dev, "wq %d.%d driver_name match failed: wq driver_name %s, dev driver name %s\n", 230 + idxd->id, wq->id, wq->driver_name, dev->driver->name); 231 + idxd->cmd_status = IDXD_SCMD_WQ_NO_DRV_NAME; 232 + ret = -ENODEV; 233 + goto err; 234 + } 235 + 236 + wq->type = IDXD_WQT_KERNEL; 237 + 238 + ret = idxd_drv_enable_wq(wq); 239 + if (ret < 0) { 240 + 
dev_dbg(dev, "enable wq %d.%d failed: %d\n", 241 + idxd->id, wq->id, ret); 242 + ret = -ENXIO; 243 + goto err; 244 + } 245 + 246 + mutex_lock(&iaa_devices_lock); 247 + 248 + ret = save_iaa_wq(wq); 249 + if (ret) 250 + goto err_save; 251 + 252 + mutex_unlock(&iaa_devices_lock); 253 + out: 254 + mutex_unlock(&wq->wq_lock); 255 + 256 + return ret; 257 + 258 + err_save: 259 + idxd_drv_disable_wq(wq); 260 + err: 261 + wq->type = IDXD_WQT_NONE; 262 + 263 + goto out; 264 + } 265 + 266 + static void iaa_crypto_remove(struct idxd_dev *idxd_dev) 267 + { 268 + struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev); 269 + 270 + idxd_wq_quiesce(wq); 271 + 272 + mutex_lock(&wq->wq_lock); 273 + mutex_lock(&iaa_devices_lock); 274 + 275 + remove_iaa_wq(wq); 276 + idxd_drv_disable_wq(wq); 277 + 278 + mutex_unlock(&iaa_devices_lock); 279 + mutex_unlock(&wq->wq_lock); 280 + } 281 + 282 + static enum idxd_dev_type dev_types[] = { 283 + IDXD_DEV_WQ, 284 + IDXD_DEV_NONE, 285 + }; 286 + 287 + static struct idxd_device_driver iaa_crypto_driver = { 288 + .probe = iaa_crypto_probe, 289 + .remove = iaa_crypto_remove, 290 + .name = IDXD_SUBDRIVER_NAME, 291 + .type = dev_types, 292 + }; 293 + 294 + static int __init iaa_crypto_init_module(void) 295 + { 296 + int ret = 0; 297 + 298 + ret = idxd_driver_register(&iaa_crypto_driver); 299 + if (ret) { 300 + pr_debug("IAA wq sub-driver registration failed\n"); 301 + goto out; 302 + } 303 + 304 + pr_debug("initialized\n"); 305 + out: 306 + return ret; 307 + } 308 + 309 + static void __exit iaa_crypto_cleanup_module(void) 310 + { 311 + idxd_driver_unregister(&iaa_crypto_driver); 312 + 313 + pr_debug("cleaned up\n"); 314 + } 315 + 316 + MODULE_IMPORT_NS(IDXD); 317 + MODULE_LICENSE("GPL"); 318 + MODULE_ALIAS_IDXD_DEVICE(0); 319 + MODULE_AUTHOR("Intel Corporation"); 320 + MODULE_DESCRIPTION("IAA Compression Accelerator Crypto Driver"); 321 + 322 + module_init(iaa_crypto_init_module); 323 + module_exit(iaa_crypto_cleanup_module);