Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: qat - Add support for SRIOV

Add code that enables SRIOV on dh895xcc devices.

Signed-off-by: Tadeusz Struk <tadeusz.struk@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

authored by

Tadeusz Struk and committed by
Herbert Xu
ed8ccaef a5733139

+1425 -93
+1
drivers/crypto/qat/qat_common/Makefile
··· 19 19 qat_hal.o 20 20 21 21 intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o 22 + intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_pf2vf_msg.o
+37
drivers/crypto/qat/qat_common/adf_accel_devices.h
··· 46 46 */ 47 47 #ifndef ADF_ACCEL_DEVICES_H_ 48 48 #define ADF_ACCEL_DEVICES_H_ 49 + #include <linux/interrupt.h> 49 50 #include <linux/module.h> 50 51 #include <linux/list.h> 51 52 #include <linux/io.h> 53 + #include <linux/ratelimit.h> 52 54 #include "adf_cfg_common.h" 53 55 54 56 #define ADF_DH895XCC_DEVICE_NAME "dh895xcc" 57 + #define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf" 55 58 #define ADF_DH895XCC_PCI_DEVICE_ID 0x435 59 + #define ADF_DH895XCCIOV_PCI_DEVICE_ID 0x443 56 60 #define ADF_PCI_MAX_BARS 3 57 61 #define ADF_DEVICE_NAME_LENGTH 32 58 62 #define ADF_ETR_MAX_RINGS_PER_BANK 16 ··· 83 79 struct adf_accel_msix { 84 80 struct msix_entry *entries; 85 81 char **names; 82 + u32 num_entries; 86 83 } __packed; 87 84 88 85 struct adf_accel_pci { ··· 104 99 DEV_SKU_2, 105 100 DEV_SKU_3, 106 101 DEV_SKU_4, 102 + DEV_SKU_VF, 107 103 DEV_SKU_UNKNOWN, 108 104 }; 109 105 ··· 119 113 return "SKU3"; 120 114 case DEV_SKU_4: 121 115 return "SKU4"; 116 + case DEV_SKU_VF: 117 + return "SKUVF"; 122 118 case DEV_SKU_UNKNOWN: 123 119 default: 124 120 break; ··· 148 140 uint32_t (*get_etr_bar_id)(struct adf_hw_device_data *self); 149 141 uint32_t (*get_num_aes)(struct adf_hw_device_data *self); 150 142 uint32_t (*get_num_accels)(struct adf_hw_device_data *self); 143 + uint32_t (*get_pf2vf_offset)(uint32_t i); 144 + uint32_t (*get_vintmsk_offset)(uint32_t i); 151 145 enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self); 152 146 int (*alloc_irq)(struct adf_accel_dev *accel_dev); 153 147 void (*free_irq)(struct adf_accel_dev *accel_dev); ··· 161 151 void (*exit_arb)(struct adf_accel_dev *accel_dev); 162 152 void (*get_arb_mapping)(struct adf_accel_dev *accel_dev, 163 153 const uint32_t **cfg); 154 + void (*disable_iov)(struct adf_accel_dev *accel_dev); 164 155 void (*enable_ints)(struct adf_accel_dev *accel_dev); 156 + int (*enable_vf2pf_comms)(struct adf_accel_dev *accel_dev); 165 157 const char *fw_name; 166 158 const char *fw_mmp_name; 167 159 uint32_t fuses; ··· 177 
165 uint8_t num_accel; 178 166 uint8_t num_logical_accel; 179 167 uint8_t num_engines; 168 + uint8_t min_iov_compat_ver; 180 169 } __packed; 181 170 182 171 /* CSR write macro */ ··· 202 189 const struct firmware *mmp_fw; 203 190 }; 204 191 192 + struct adf_accel_vf_info { 193 + struct adf_accel_dev *accel_dev; 194 + struct tasklet_struct vf2pf_bh_tasklet; 195 + struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */ 196 + struct ratelimit_state vf2pf_ratelimit; 197 + u32 vf_nr; 198 + bool init; 199 + }; 200 + 205 201 struct adf_accel_dev { 206 202 struct adf_etr_data *transport; 207 203 struct adf_hw_device_data *hw_device; ··· 224 202 struct list_head list; 225 203 struct module *owner; 226 204 struct adf_accel_pci accel_pci_dev; 205 + union { 206 + struct { 207 + /* vf_info is non-zero when SR-IOV is init'ed */ 208 + struct adf_accel_vf_info *vf_info; 209 + } pf; 210 + struct { 211 + char *irq_name; 212 + struct tasklet_struct pf2vf_bh_tasklet; 213 + struct mutex vf2pf_lock; /* protect CSR access */ 214 + struct completion iov_msg_completion; 215 + uint8_t compatible; 216 + uint8_t pf_version; 217 + } vf; 218 + }; 219 + bool is_vf; 227 220 uint8_t accel_id; 228 221 } __packed; 229 222 #endif
+3
drivers/crypto/qat/qat_common/adf_aer.c
··· 91 91 dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n", 92 92 accel_dev->accel_id); 93 93 94 + if (!parent) 95 + parent = pdev; 96 + 94 97 if (!pci_wait_for_pending_transaction(pdev)) 95 98 dev_info(&GET_DEV(accel_dev), 96 99 "Transaction still in progress. Proceeding\n");
+3
drivers/crypto/qat/qat_common/adf_cfg.c
··· 178 178 { 179 179 struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg; 180 180 181 + if (!dev_cfg_data) 182 + return; 183 + 181 184 down_write(&dev_cfg_data->lock); 182 185 adf_cfg_section_del_all(&dev_cfg_data->sec_list); 183 186 up_write(&dev_cfg_data->lock);
+2 -1
drivers/crypto/qat/qat_common/adf_cfg_common.h
··· 60 60 #define ADF_CFG_NO_DEVICE 0xFF 61 61 #define ADF_CFG_AFFINITY_WHATEVER 0xFF 62 62 #define MAX_DEVICE_NAME_SIZE 32 63 - #define ADF_MAX_DEVICES 32 63 + #define ADF_MAX_DEVICES (32 * 32) 64 64 65 65 enum adf_cfg_val_type { 66 66 ADF_DEC, ··· 71 71 enum adf_device_type { 72 72 DEV_UNKNOWN = 0, 73 73 DEV_DH895XCC, 74 + DEV_DH895XCCVF, 74 75 }; 75 76 76 77 struct adf_dev_status_info {
+32 -5
drivers/crypto/qat/qat_common/adf_common_drv.h
··· 54 54 #include "icp_qat_hal.h" 55 55 56 56 #define ADF_MAJOR_VERSION 0 57 - #define ADF_MINOR_VERSION 1 58 - #define ADF_BUILD_VERSION 4 57 + #define ADF_MINOR_VERSION 2 58 + #define ADF_BUILD_VERSION 0 59 59 #define ADF_DRV_VERSION __stringify(ADF_MAJOR_VERSION) "." \ 60 60 __stringify(ADF_MINOR_VERSION) "." \ 61 61 __stringify(ADF_BUILD_VERSION) ··· 95 95 96 96 static inline int get_current_node(void) 97 97 { 98 - return cpu_data(current_thread_info()->cpu).phys_proc_id; 98 + return topology_physical_package_id(smp_processor_id()); 99 99 } 100 100 101 101 int adf_service_register(struct service_hndl *service); ··· 106 106 int adf_dev_stop(struct adf_accel_dev *accel_dev); 107 107 void adf_dev_shutdown(struct adf_accel_dev *accel_dev); 108 108 109 + void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev); 110 + void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev); 111 + int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr); 112 + void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev); 113 + int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev); 114 + void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data); 115 + void adf_clean_vf_map(bool); 116 + 109 117 int adf_ctl_dev_register(void); 110 118 void adf_ctl_dev_unregister(void); 111 119 int adf_processes_dev_register(void); 112 120 void adf_processes_dev_unregister(void); 113 121 114 - int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev); 115 - void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev); 122 + int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev, 123 + struct adf_accel_dev *pf); 124 + void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev, 125 + struct adf_accel_dev *pf); 116 126 struct list_head *adf_devmgr_get_head(void); 117 127 struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id); 118 128 struct adf_accel_dev *adf_devmgr_get_first(void); ··· 221 211 void *addr_ptr, int mem_size); 222 212 void 
qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle, 223 213 void *addr_ptr, int mem_size); 214 + #if defined(CONFIG_PCI_IOV) 215 + int adf_sriov_configure(struct pci_dev *pdev, int numvfs); 216 + void adf_disable_sriov(struct adf_accel_dev *accel_dev); 217 + void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, 218 + uint32_t vf_mask); 219 + void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, 220 + uint32_t vf_mask); 221 + #else 222 + static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs) 223 + { 224 + return 0; 225 + } 226 + 227 + static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev) 228 + { 229 + } 230 + #endif 224 231 #endif
+3 -3
drivers/crypto/qat/qat_common/adf_ctl_drv.c
··· 398 398 } 399 399 400 400 accel_dev = adf_devmgr_get_dev_by_id(dev_info.accel_id); 401 - if (!accel_dev) { 402 - pr_err("QAT: Device %d not found\n", dev_info.accel_id); 401 + if (!accel_dev) 403 402 return -ENODEV; 404 - } 403 + 405 404 hw_data = accel_dev->hw_device; 406 405 dev_info.state = adf_dev_started(accel_dev) ? DEV_UP : DEV_DOWN; 407 406 dev_info.num_ae = hw_data->get_num_aes(hw_data); ··· 494 495 adf_exit_aer(); 495 496 qat_crypto_unregister(); 496 497 qat_algs_exit(); 498 + adf_clean_vf_map(false); 497 499 mutex_destroy(&adf_ctl_lock); 498 500 } 499 501
+270 -15
drivers/crypto/qat/qat_common/adf_dev_mgr.c
··· 50 50 #include "adf_common_drv.h" 51 51 52 52 static LIST_HEAD(accel_table); 53 + static LIST_HEAD(vfs_table); 53 54 static DEFINE_MUTEX(table_lock); 54 55 static uint32_t num_devices; 56 + 57 + struct vf_id_map { 58 + u32 bdf; 59 + u32 id; 60 + u32 fake_id; 61 + bool attached; 62 + struct list_head list; 63 + }; 64 + 65 + static int adf_get_vf_id(struct adf_accel_dev *vf) 66 + { 67 + return ((7 * (PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1)) + 68 + PCI_FUNC(accel_to_pci_dev(vf)->devfn) + 69 + (PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1)); 70 + } 71 + 72 + static int adf_get_vf_num(struct adf_accel_dev *vf) 73 + { 74 + return (accel_to_pci_dev(vf)->bus->number << 8) | adf_get_vf_id(vf); 75 + } 76 + 77 + static struct vf_id_map *adf_find_vf(u32 bdf) 78 + { 79 + struct list_head *itr; 80 + 81 + list_for_each(itr, &vfs_table) { 82 + struct vf_id_map *ptr = 83 + list_entry(itr, struct vf_id_map, list); 84 + 85 + if (ptr->bdf == bdf) 86 + return ptr; 87 + } 88 + return NULL; 89 + } 90 + 91 + static int adf_get_vf_real_id(u32 fake) 92 + { 93 + struct list_head *itr; 94 + 95 + list_for_each(itr, &vfs_table) { 96 + struct vf_id_map *ptr = 97 + list_entry(itr, struct vf_id_map, list); 98 + if (ptr->fake_id == fake) 99 + return ptr->id; 100 + } 101 + return -1; 102 + } 103 + 104 + /** 105 + * adf_clean_vf_map() - Cleans VF id mapings 106 + * 107 + * Function cleans internal ids for virtual functions. 
108 + * @vf: flag indicating whether mappings is cleaned 109 + * for vfs only or for vfs and pfs 110 + */ 111 + void adf_clean_vf_map(bool vf) 112 + { 113 + struct vf_id_map *map; 114 + struct list_head *ptr, *tmp; 115 + 116 + mutex_lock(&table_lock); 117 + list_for_each_safe(ptr, tmp, &vfs_table) { 118 + map = list_entry(ptr, struct vf_id_map, list); 119 + if (map->bdf != -1) 120 + num_devices--; 121 + 122 + if (vf && map->bdf == -1) 123 + continue; 124 + 125 + list_del(ptr); 126 + kfree(map); 127 + } 128 + mutex_unlock(&table_lock); 129 + } 130 + EXPORT_SYMBOL_GPL(adf_clean_vf_map); 131 + 132 + /** 133 + * adf_devmgr_update_class_index() - Update internal index 134 + * @hw_data: Pointer to internal device data. 135 + * 136 + * Function updates internal dev index for VFs 137 + */ 138 + void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data) 139 + { 140 + struct adf_hw_device_class *class = hw_data->dev_class; 141 + struct list_head *itr; 142 + int i = 0; 143 + 144 + list_for_each(itr, &accel_table) { 145 + struct adf_accel_dev *ptr = 146 + list_entry(itr, struct adf_accel_dev, list); 147 + 148 + if (ptr->hw_device->dev_class == class) 149 + ptr->hw_device->instance_id = i++; 150 + 151 + if (i == class->instances) 152 + break; 153 + } 154 + } 155 + EXPORT_SYMBOL_GPL(adf_devmgr_update_class_index); 55 156 56 157 /** 57 158 * adf_devmgr_add_dev() - Add accel_dev to the acceleration framework 58 159 * @accel_dev: Pointer to acceleration device. 160 + * @pf: Corresponding PF if the accel_dev is a VF 59 161 * 60 162 * Function adds acceleration device to the acceleration framework. 61 163 * To be used by QAT device specific drivers. 62 164 * 63 165 * Return: 0 on success, error code otherwise. 
64 166 */ 65 - int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev) 167 + int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev, 168 + struct adf_accel_dev *pf) 66 169 { 67 170 struct list_head *itr; 171 + int ret = 0; 68 172 69 173 if (num_devices == ADF_MAX_DEVICES) { 70 174 dev_err(&GET_DEV(accel_dev), "Only support up to %d devices\n", ··· 177 73 } 178 74 179 75 mutex_lock(&table_lock); 180 - list_for_each(itr, &accel_table) { 181 - struct adf_accel_dev *ptr = 76 + atomic_set(&accel_dev->ref_count, 0); 77 + 78 + /* PF on host or VF on guest */ 79 + if (!accel_dev->is_vf || (accel_dev->is_vf && !pf)) { 80 + struct vf_id_map *map; 81 + 82 + list_for_each(itr, &accel_table) { 83 + struct adf_accel_dev *ptr = 182 84 list_entry(itr, struct adf_accel_dev, list); 183 85 184 - if (ptr == accel_dev) { 185 - mutex_unlock(&table_lock); 186 - return -EEXIST; 86 + if (ptr == accel_dev) { 87 + ret = -EEXIST; 88 + goto unlock; 89 + } 187 90 } 91 + 92 + list_add_tail(&accel_dev->list, &accel_table); 93 + accel_dev->accel_id = num_devices++; 94 + 95 + map = kzalloc(sizeof(*map), GFP_KERNEL); 96 + if (!map) { 97 + ret = -ENOMEM; 98 + goto unlock; 99 + } 100 + map->bdf = ~0; 101 + map->id = accel_dev->accel_id; 102 + map->fake_id = map->id; 103 + map->attached = true; 104 + list_add_tail(&map->list, &vfs_table); 105 + } else if (accel_dev->is_vf && pf) { 106 + /* VF on host */ 107 + struct adf_accel_vf_info *vf_info; 108 + struct vf_id_map *map; 109 + 110 + vf_info = pf->pf.vf_info + adf_get_vf_id(accel_dev); 111 + 112 + map = adf_find_vf(adf_get_vf_num(accel_dev)); 113 + if (map) { 114 + struct vf_id_map *next; 115 + 116 + accel_dev->accel_id = map->id; 117 + list_add_tail(&accel_dev->list, &accel_table); 118 + map->fake_id++; 119 + map->attached = true; 120 + next = list_next_entry(map, list); 121 + while (next && &next->list != &vfs_table) { 122 + next->fake_id++; 123 + next = list_next_entry(next, list); 124 + } 125 + 126 + ret = 0; 127 + goto unlock; 128 + } 129 + 130 + 
map = kzalloc(sizeof(*map), GFP_KERNEL); 131 + if (!map) { 132 + ret = -ENOMEM; 133 + goto unlock; 134 + } 135 + 136 + accel_dev->accel_id = num_devices++; 137 + list_add_tail(&accel_dev->list, &accel_table); 138 + map->bdf = adf_get_vf_num(accel_dev); 139 + map->id = accel_dev->accel_id; 140 + map->fake_id = map->id; 141 + map->attached = true; 142 + list_add_tail(&map->list, &vfs_table); 188 143 } 189 - atomic_set(&accel_dev->ref_count, 0); 190 - list_add_tail(&accel_dev->list, &accel_table); 191 - accel_dev->accel_id = num_devices++; 144 + unlock: 192 145 mutex_unlock(&table_lock); 193 - return 0; 146 + return ret; 194 147 } 195 148 EXPORT_SYMBOL_GPL(adf_devmgr_add_dev); 196 149 ··· 259 98 /** 260 99 * adf_devmgr_rm_dev() - Remove accel_dev from the acceleration framework. 261 100 * @accel_dev: Pointer to acceleration device. 101 + * @pf: Corresponding PF if the accel_dev is a VF 262 102 * 263 103 * Function removes acceleration device from the acceleration framework. 264 104 * To be used by QAT device specific drivers. 
265 105 * 266 106 * Return: void 267 107 */ 268 - void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev) 108 + void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev, 109 + struct adf_accel_dev *pf) 269 110 { 270 111 mutex_lock(&table_lock); 112 + if (!accel_dev->is_vf || (accel_dev->is_vf && !pf)) { 113 + num_devices--; 114 + } else if (accel_dev->is_vf && pf) { 115 + struct vf_id_map *map, *next; 116 + 117 + map = adf_find_vf(adf_get_vf_num(accel_dev)); 118 + if (!map) { 119 + dev_err(&GET_DEV(accel_dev), "Failed to find VF map\n"); 120 + goto unlock; 121 + } 122 + map->fake_id--; 123 + map->attached = false; 124 + next = list_next_entry(map, list); 125 + while (next && &next->list != &vfs_table) { 126 + next->fake_id--; 127 + next = list_next_entry(next, list); 128 + } 129 + } 130 + unlock: 271 131 list_del(&accel_dev->list); 272 - num_devices--; 273 132 mutex_unlock(&table_lock); 274 133 } 275 134 EXPORT_SYMBOL_GPL(adf_devmgr_rm_dev); ··· 335 154 struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id) 336 155 { 337 156 struct list_head *itr; 157 + int real_id; 338 158 339 159 mutex_lock(&table_lock); 160 + real_id = adf_get_vf_real_id(id); 161 + if (real_id < 0) 162 + goto unlock; 163 + 164 + id = real_id; 165 + 340 166 list_for_each(itr, &accel_table) { 341 167 struct adf_accel_dev *ptr = 342 168 list_entry(itr, struct adf_accel_dev, list); 343 - 344 169 if (ptr->accel_id == id) { 345 170 mutex_unlock(&table_lock); 346 171 return ptr; 347 172 } 348 173 } 174 + unlock: 349 175 mutex_unlock(&table_lock); 350 176 return NULL; 351 177 } ··· 368 180 return -ENODEV; 369 181 } 370 182 371 - void adf_devmgr_get_num_dev(uint32_t *num) 183 + static int adf_get_num_dettached_vfs(void) 372 184 { 373 - *num = num_devices; 185 + struct list_head *itr; 186 + int vfs = 0; 187 + 188 + mutex_lock(&table_lock); 189 + list_for_each(itr, &vfs_table) { 190 + struct vf_id_map *ptr = 191 + list_entry(itr, struct vf_id_map, list); 192 + if (ptr->bdf != ~0 && !ptr->attached) 193 + 
vfs++; 194 + } 195 + mutex_unlock(&table_lock); 196 + return vfs; 374 197 } 375 198 199 + void adf_devmgr_get_num_dev(uint32_t *num) 200 + { 201 + *num = num_devices - adf_get_num_dettached_vfs(); 202 + } 203 + 204 + /** 205 + * adf_dev_in_use() - Check whether accel_dev is currently in use 206 + * @accel_dev: Pointer to acceleration device. 207 + * 208 + * To be used by QAT device specific drivers. 209 + * 210 + * Return: 1 when device is in use, 0 otherwise. 211 + */ 376 212 int adf_dev_in_use(struct adf_accel_dev *accel_dev) 377 213 { 378 214 return atomic_read(&accel_dev->ref_count) != 0; 379 215 } 216 + EXPORT_SYMBOL_GPL(adf_dev_in_use); 380 217 218 + /** 219 + * adf_dev_get() - Increment accel_dev reference count 220 + * @accel_dev: Pointer to acceleration device. 221 + * 222 + * Increment the accel_dev refcount and if this is the first time 223 + * incrementing it during this period the accel_dev is in use, 224 + * increment the module refcount too. 225 + * To be used by QAT device specific drivers. 226 + * 227 + * Return: 0 when successful, EFAULT when fail to bump module refcount 228 + */ 381 229 int adf_dev_get(struct adf_accel_dev *accel_dev) 382 230 { 383 231 if (atomic_add_return(1, &accel_dev->ref_count) == 1) ··· 421 197 return -EFAULT; 422 198 return 0; 423 199 } 200 + EXPORT_SYMBOL_GPL(adf_dev_get); 424 201 202 + /** 203 + * adf_dev_put() - Decrement accel_dev reference count 204 + * @accel_dev: Pointer to acceleration device. 205 + * 206 + * Decrement the accel_dev refcount and if this is the last time 207 + * decrementing it during this period the accel_dev is in use, 208 + * decrement the module refcount too. 209 + * To be used by QAT device specific drivers. 
210 + * 211 + * Return: void 212 + */ 425 213 void adf_dev_put(struct adf_accel_dev *accel_dev) 426 214 { 427 215 if (atomic_sub_return(1, &accel_dev->ref_count) == 0) 428 216 module_put(accel_dev->owner); 429 217 } 218 + EXPORT_SYMBOL_GPL(adf_dev_put); 430 219 220 + /** 221 + * adf_devmgr_in_reset() - Check whether device is in reset 222 + * @accel_dev: Pointer to acceleration device. 223 + * 224 + * To be used by QAT device specific drivers. 225 + * 226 + * Return: 1 when the device is being reset, 0 otherwise. 227 + */ 431 228 int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev) 432 229 { 433 230 return test_bit(ADF_STATUS_RESTARTING, &accel_dev->status); 434 231 } 232 + EXPORT_SYMBOL_GPL(adf_devmgr_in_reset); 435 233 234 + /** 235 + * adf_dev_started() - Check whether device has started 236 + * @accel_dev: Pointer to acceleration device. 237 + * 238 + * To be used by QAT device specific drivers. 239 + * 240 + * Return: 1 when the device has started, 0 otherwise 241 + */ 436 242 int adf_dev_started(struct adf_accel_dev *accel_dev) 437 243 { 438 244 return test_bit(ADF_STATUS_STARTED, &accel_dev->status); 439 245 } 246 + EXPORT_SYMBOL_GPL(adf_dev_started);
+7 -3
drivers/crypto/qat/qat_common/adf_init.c
··· 187 187 } 188 188 189 189 hw_data->enable_error_correction(accel_dev); 190 + hw_data->enable_vf2pf_comms(accel_dev); 190 191 191 192 return 0; 192 193 } ··· 236 235 clear_bit(ADF_STATUS_STARTING, &accel_dev->status); 237 236 set_bit(ADF_STATUS_STARTED, &accel_dev->status); 238 237 239 - if (qat_algs_register() || qat_asym_algs_register()) { 238 + if (!list_empty(&accel_dev->crypto_list) && 239 + (qat_algs_register() || qat_asym_algs_register())) { 240 240 dev_err(&GET_DEV(accel_dev), 241 241 "Failed to register crypto algs\n"); 242 242 set_bit(ADF_STATUS_STARTING, &accel_dev->status); ··· 272 270 clear_bit(ADF_STATUS_STARTING, &accel_dev->status); 273 271 clear_bit(ADF_STATUS_STARTED, &accel_dev->status); 274 272 275 - if (qat_algs_unregister()) 273 + if (!list_empty(&accel_dev->crypto_list) && qat_algs_unregister()) 276 274 dev_err(&GET_DEV(accel_dev), 277 275 "Failed to unregister crypto algs\n"); 278 276 279 - qat_asym_algs_unregister(); 277 + if (!list_empty(&accel_dev->crypto_list)) 278 + qat_asym_algs_unregister(); 280 279 281 280 list_for_each(list_itr, &service_table) { 282 281 service = list_entry(list_itr, struct service_hndl, list); ··· 366 363 if (hw_data->exit_admin_comms) 367 364 hw_data->exit_admin_comms(accel_dev); 368 365 366 + hw_data->disable_iov(accel_dev); 369 367 adf_cleanup_etr_data(accel_dev); 370 368 } 371 369 EXPORT_SYMBOL_GPL(adf_dev_shutdown);
+336
drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
··· 1 + /* 2 + This file is provided under a dual BSD/GPLv2 license. When using or 3 + redistributing this file, you may do so under either license. 4 + 5 + GPL LICENSE SUMMARY 6 + Copyright(c) 2015 Intel Corporation. 7 + This program is free software; you can redistribute it and/or modify 8 + it under the terms of version 2 of the GNU General Public License as 9 + published by the Free Software Foundation. 10 + 11 + This program is distributed in the hope that it will be useful, but 12 + WITHOUT ANY WARRANTY; without even the implied warranty of 13 + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 + General Public License for more details. 15 + 16 + Contact Information: 17 + qat-linux@intel.com 18 + 19 + BSD LICENSE 20 + Copyright(c) 2015 Intel Corporation. 21 + Redistribution and use in source and binary forms, with or without 22 + modification, are permitted provided that the following conditions 23 + are met: 24 + 25 + * Redistributions of source code must retain the above copyright 26 + notice, this list of conditions and the following disclaimer. 27 + * Redistributions in binary form must reproduce the above copyright 28 + notice, this list of conditions and the following disclaimer in 29 + the documentation and/or other materials provided with the 30 + distribution. 31 + * Neither the name of Intel Corporation nor the names of its 32 + contributors may be used to endorse or promote products derived 33 + from this software without specific prior written permission. 34 + 35 + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 36 + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 37 + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 38 + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 39 + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 40 + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 41 + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 42 + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 43 + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 44 + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 45 + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 46 + */ 47 + 48 + #include <linux/pci.h> 49 + #include <linux/mutex.h> 50 + #include <linux/delay.h> 51 + #include "adf_accel_devices.h" 52 + #include "adf_common_drv.h" 53 + #include "adf_pf2vf_msg.h" 54 + 55 + #define ADF_DH895XCC_EP_OFFSET 0x3A000 56 + #define ADF_DH895XCC_ERRMSK3 (ADF_DH895XCC_EP_OFFSET + 0x1C) 57 + #define ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask) ((vf_mask & 0xFFFF) << 9) 58 + #define ADF_DH895XCC_ERRMSK5 (ADF_DH895XCC_EP_OFFSET + 0xDC) 59 + #define ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask) (vf_mask >> 16) 60 + 61 + /** 62 + * adf_enable_pf2vf_interrupts() - Enable PF to VF interrupts 63 + * @accel_dev: Pointer to acceleration device. 64 + * 65 + * Function enables PF to VF interrupts 66 + */ 67 + void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev) 68 + { 69 + struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev; 70 + struct adf_hw_device_data *hw_data = accel_dev->hw_device; 71 + void __iomem *pmisc_bar_addr = 72 + pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr; 73 + 74 + ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x0); 75 + } 76 + EXPORT_SYMBOL_GPL(adf_enable_pf2vf_interrupts); 77 + 78 + /** 79 + * adf_disable_pf2vf_interrupts() - Disable PF to VF interrupts 80 + * @accel_dev: Pointer to acceleration device. 
81 + * 82 + * Function disables PF to VF interrupts 83 + */ 84 + void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev) 85 + { 86 + struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev; 87 + struct adf_hw_device_data *hw_data = accel_dev->hw_device; 88 + void __iomem *pmisc_bar_addr = 89 + pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr; 90 + 91 + ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x2); 92 + } 93 + EXPORT_SYMBOL_GPL(adf_disable_pf2vf_interrupts); 94 + 95 + void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, 96 + u32 vf_mask) 97 + { 98 + struct adf_hw_device_data *hw_data = accel_dev->hw_device; 99 + struct adf_bar *pmisc = 100 + &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)]; 101 + void __iomem *pmisc_addr = pmisc->virt_addr; 102 + u32 reg; 103 + 104 + /* Enable VF2PF Messaging Ints - VFs 1 through 16 per vf_mask[15:0] */ 105 + if (vf_mask & 0xFFFF) { 106 + reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3); 107 + reg &= ~ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask); 108 + ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg); 109 + } 110 + 111 + /* Enable VF2PF Messaging Ints - VFs 17 through 32 per vf_mask[31:16] */ 112 + if (vf_mask >> 16) { 113 + reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5); 114 + reg &= ~ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask); 115 + ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg); 116 + } 117 + } 118 + 119 + /** 120 + * adf_disable_pf2vf_interrupts() - Disable VF to PF interrupts 121 + * @accel_dev: Pointer to acceleration device. 
122 + * 123 + * Function disables VF to PF interrupts 124 + */ 125 + void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask) 126 + { 127 + struct adf_hw_device_data *hw_data = accel_dev->hw_device; 128 + struct adf_bar *pmisc = 129 + &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)]; 130 + void __iomem *pmisc_addr = pmisc->virt_addr; 131 + u32 reg; 132 + 133 + /* Disable VF2PF interrupts for VFs 1 through 16 per vf_mask[15:0] */ 134 + if (vf_mask & 0xFFFF) { 135 + reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3) | 136 + ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask); 137 + ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg); 138 + } 139 + 140 + /* Disable VF2PF interrupts for VFs 17 through 32 per vf_mask[31:16] */ 141 + if (vf_mask >> 16) { 142 + reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5) | 143 + ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask); 144 + ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg); 145 + } 146 + } 147 + EXPORT_SYMBOL_GPL(adf_disable_vf2pf_interrupts); 148 + 149 + static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr) 150 + { 151 + struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev; 152 + struct adf_hw_device_data *hw_data = accel_dev->hw_device; 153 + void __iomem *pmisc_bar_addr = 154 + pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr; 155 + u32 val, pf2vf_offset, count = 0; 156 + u32 local_in_use_mask, local_in_use_pattern; 157 + u32 remote_in_use_mask, remote_in_use_pattern; 158 + struct mutex *lock; /* lock preventing concurrent acces of CSR */ 159 + u32 int_bit; 160 + int ret = 0; 161 + 162 + if (accel_dev->is_vf) { 163 + pf2vf_offset = hw_data->get_pf2vf_offset(0); 164 + lock = &accel_dev->vf.vf2pf_lock; 165 + local_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK; 166 + local_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF; 167 + remote_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK; 168 + remote_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF; 169 + int_bit = ADF_VF2PF_INT; 170 + } else { 
171 + pf2vf_offset = hw_data->get_pf2vf_offset(vf_nr); 172 + lock = &accel_dev->pf.vf_info[vf_nr].pf2vf_lock; 173 + local_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK; 174 + local_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF; 175 + remote_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK; 176 + remote_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF; 177 + int_bit = ADF_PF2VF_INT; 178 + } 179 + 180 + mutex_lock(lock); 181 + 182 + /* Check if PF2VF CSR is in use by remote function */ 183 + val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset); 184 + if ((val & remote_in_use_mask) == remote_in_use_pattern) { 185 + dev_dbg(&GET_DEV(accel_dev), 186 + "PF2VF CSR in use by remote function\n"); 187 + ret = -EBUSY; 188 + goto out; 189 + } 190 + 191 + /* Attempt to get ownership of PF2VF CSR */ 192 + msg &= ~local_in_use_mask; 193 + msg |= local_in_use_pattern; 194 + ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg); 195 + 196 + /* Wait in case remote func also attempting to get ownership */ 197 + msleep(ADF_IOV_MSG_COLLISION_DETECT_DELAY); 198 + 199 + val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset); 200 + if ((val & local_in_use_mask) != local_in_use_pattern) { 201 + dev_dbg(&GET_DEV(accel_dev), 202 + "PF2VF CSR in use by remote - collision detected\n"); 203 + ret = -EBUSY; 204 + goto out; 205 + } 206 + 207 + /* 208 + * This function now owns the PV2VF CSR. The IN_USE_BY pattern must 209 + * remain in the PF2VF CSR for all writes including ACK from remote 210 + * until this local function relinquishes the CSR. Send the message 211 + * by interrupting the remote. 
212 + */ 213 + ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg | int_bit); 214 + 215 + /* Wait for confirmation from remote func it received the message */ 216 + do { 217 + msleep(ADF_IOV_MSG_ACK_DELAY); 218 + val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset); 219 + } while ((val & int_bit) && (count++ < ADF_IOV_MSG_ACK_MAX_RETRY)); 220 + 221 + if (val & int_bit) { 222 + dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n"); 223 + val &= ~int_bit; 224 + ret = -EIO; 225 + } 226 + 227 + /* Finished with PF2VF CSR; relinquish it and leave msg in CSR */ 228 + ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, val & ~local_in_use_mask); 229 + out: 230 + mutex_unlock(lock); 231 + return ret; 232 + } 233 + 234 + /** 235 + * adf_iov_putmsg() - send PF2VF message 236 + * @accel_dev: Pointer to acceleration device. 237 + * @msg: Message to send 238 + * @vf_nr: VF number to which the message will be sent 239 + * 240 + * Function sends a messge from the PF to a VF 241 + * 242 + * Return: 0 on success, error code otherwise. 
243 + */ 244 + int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr) 245 + { 246 + u32 count = 0; 247 + int ret; 248 + 249 + do { 250 + ret = __adf_iov_putmsg(accel_dev, msg, vf_nr); 251 + if (ret) 252 + msleep(ADF_IOV_MSG_RETRY_DELAY); 253 + } while (ret && (count++ < ADF_IOV_MSG_MAX_RETRIES)); 254 + 255 + return ret; 256 + } 257 + EXPORT_SYMBOL_GPL(adf_iov_putmsg); 258 + 259 + void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev) 260 + { 261 + struct adf_accel_vf_info *vf; 262 + u32 msg = (ADF_PF2VF_MSGORIGIN_SYSTEM | 263 + (ADF_PF2VF_MSGTYPE_RESTARTING << ADF_PF2VF_MSGTYPE_SHIFT)); 264 + int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev)); 265 + 266 + for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) { 267 + if (vf->init && adf_iov_putmsg(accel_dev, msg, i)) 268 + dev_err(&GET_DEV(accel_dev), 269 + "Failed to send restarting msg to VF%d\n", i); 270 + } 271 + } 272 + 273 + static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev) 274 + { 275 + unsigned long timeout = msecs_to_jiffies(ADF_IOV_MSG_RESP_TIMEOUT); 276 + struct adf_hw_device_data *hw_data = accel_dev->hw_device; 277 + u32 msg = 0; 278 + int ret; 279 + 280 + msg = ADF_VF2PF_MSGORIGIN_SYSTEM; 281 + msg |= ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ << ADF_VF2PF_MSGTYPE_SHIFT; 282 + msg |= ADF_PFVF_COMPATIBILITY_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT; 283 + BUILD_BUG_ON(ADF_PFVF_COMPATIBILITY_VERSION > 255); 284 + 285 + /* Send request from VF to PF */ 286 + ret = adf_iov_putmsg(accel_dev, msg, 0); 287 + if (ret) { 288 + dev_err(&GET_DEV(accel_dev), 289 + "Failed to send Compatibility Version Request.\n"); 290 + return ret; 291 + } 292 + 293 + /* Wait for response */ 294 + if (!wait_for_completion_timeout(&accel_dev->vf.iov_msg_completion, 295 + timeout)) { 296 + dev_err(&GET_DEV(accel_dev), 297 + "IOV request/response message timeout expired\n"); 298 + return -EIO; 299 + } 300 + 301 + /* Response from PF received, check compatibility */ 302 + switch 
(accel_dev->vf.compatible) { 303 + case ADF_PF2VF_VF_COMPATIBLE: 304 + break; 305 + case ADF_PF2VF_VF_COMPAT_UNKNOWN: 306 + /* VF is newer than PF and decides whether it is compatible */ 307 + if (accel_dev->vf.pf_version >= hw_data->min_iov_compat_ver) 308 + break; 309 + /* fall through */ 310 + case ADF_PF2VF_VF_INCOMPATIBLE: 311 + dev_err(&GET_DEV(accel_dev), 312 + "PF (vers %d) and VF (vers %d) are not compatible\n", 313 + accel_dev->vf.pf_version, 314 + ADF_PFVF_COMPATIBILITY_VERSION); 315 + return -EINVAL; 316 + default: 317 + dev_err(&GET_DEV(accel_dev), 318 + "Invalid response from PF; assume not compatible\n"); 319 + return -EINVAL; 320 + } 321 + return ret; 322 + } 323 + 324 + /** 325 + * adf_enable_vf2pf_comms() - Function enables communication from vf to pf 326 + * 327 + * @accel_dev: Pointer to acceleration device virtual function. 328 + * 329 + * Return: 0 on success, error code otherwise. 330 + */ 331 + int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev) 332 + { 333 + adf_enable_pf2vf_interrupts(accel_dev); 334 + return adf_vf2pf_request_version(accel_dev); 335 + } 336 + EXPORT_SYMBOL_GPL(adf_enable_vf2pf_comms);
+144
drivers/crypto/qat/qat_common/adf_pf2vf_msg.h
··· 1 + /* 2 + This file is provided under a dual BSD/GPLv2 license. When using or 3 + redistributing this file, you may do so under either license. 4 + 5 + GPL LICENSE SUMMARY 6 + Copyright(c) 2015 Intel Corporation. 7 + This program is free software; you can redistribute it and/or modify 8 + it under the terms of version 2 of the GNU General Public License as 9 + published by the Free Software Foundation. 10 + 11 + This program is distributed in the hope that it will be useful, but 12 + WITHOUT ANY WARRANTY; without even the implied warranty of 13 + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 + General Public License for more details. 15 + 16 + Contact Information: 17 + qat-linux@intel.com 18 + 19 + BSD LICENSE 20 + Copyright(c) 2015 Intel Corporation. 21 + Redistribution and use in source and binary forms, with or without 22 + modification, are permitted provided that the following conditions 23 + are met: 24 + 25 + * Redistributions of source code must retain the above copyright 26 + notice, this list of conditions and the following disclaimer. 27 + * Redistributions in binary form must reproduce the above copyright 28 + notice, this list of conditions and the following disclaimer in 29 + the documentation and/or other materials provided with the 30 + distribution. 31 + * Neither the name of Intel Corporation nor the names of its 32 + contributors may be used to endorse or promote products derived 33 + from this software without specific prior written permission. 34 + 35 + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 36 + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 37 + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 38 + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 39 + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 40 + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 41 + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 42 + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 43 + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 44 + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 45 + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 46 + */ 47 + #ifndef ADF_PF2VF_MSG_H 48 + #define ADF_PF2VF_MSG_H 49 + 50 + /* 51 + * PF<->VF Messaging 52 + * The PF has an array of 32-bit PF2VF registers, one for each VF. The 53 + * PF can access all these registers; each VF can access only the one 54 + * register associated with that particular VF. 55 + * 56 + * The register functionally is split into two parts: 57 + * The bottom half is for PF->VF messages. In particular when the first 58 + * bit of this register (bit 0) gets set an interrupt will be triggered 59 + * in the respective VF. 60 + * The top half is for VF->PF messages. In particular when the first bit 61 + * of this half of register (bit 16) gets set an interrupt will be triggered 62 + * in the PF. 63 + * 64 + * The remaining bits within this register are available to encode messages. 65 + * and implement a collision control mechanism to prevent concurrent use of 66 + * the PF2VF register by both the PF and VF. 
67 + * 68 + * 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 69 + * _______________________________________________ 70 + * | | | | | | | | | | | | | | | | | 71 + * +-----------------------------------------------+ 72 + * \___________________________/ \_________/ ^ ^ 73 + * ^ ^ | | 74 + * | | | VF2PF Int 75 + * | | Message Origin 76 + * | Message Type 77 + * Message-specific Data/Reserved 78 + * 79 + * 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0 80 + * _______________________________________________ 81 + * | | | | | | | | | | | | | | | | | 82 + * +-----------------------------------------------+ 83 + * \___________________________/ \_________/ ^ ^ 84 + * ^ ^ | | 85 + * | | | PF2VF Int 86 + * | | Message Origin 87 + * | Message Type 88 + * Message-specific Data/Reserved 89 + * 90 + * Message Origin (Should always be 1) 91 + * A legacy out-of-tree QAT driver allowed for a set of messages not supported 92 + * by this driver; these had a Msg Origin of 0 and are ignored by this driver. 93 + * 94 + * When a PF or VF attempts to send a message in the lower or upper 16 bits, 95 + * respectively, the other 16 bits are written to first with a defined 96 + * IN_USE_BY pattern as part of a collision control scheme (see adf_iov_putmsg). 
97 + */ 98 + 99 + #define ADF_PFVF_COMPATIBILITY_VERSION 0x1 /* PF<->VF compat */ 100 + 101 + /* PF->VF messages */ 102 + #define ADF_PF2VF_INT BIT(0) 103 + #define ADF_PF2VF_MSGORIGIN_SYSTEM BIT(1) 104 + #define ADF_PF2VF_MSGTYPE_MASK 0x0000003C 105 + #define ADF_PF2VF_MSGTYPE_SHIFT 2 106 + #define ADF_PF2VF_MSGTYPE_RESTARTING 0x01 107 + #define ADF_PF2VF_MSGTYPE_VERSION_RESP 0x02 108 + #define ADF_PF2VF_IN_USE_BY_PF 0x6AC20000 109 + #define ADF_PF2VF_IN_USE_BY_PF_MASK 0xFFFE0000 110 + 111 + /* PF->VF Version Response */ 112 + #define ADF_PF2VF_VERSION_RESP_VERS_MASK 0x00003FC0 113 + #define ADF_PF2VF_VERSION_RESP_VERS_SHIFT 6 114 + #define ADF_PF2VF_VERSION_RESP_RESULT_MASK 0x0000C000 115 + #define ADF_PF2VF_VERSION_RESP_RESULT_SHIFT 14 116 + #define ADF_PF2VF_VF_COMPATIBLE 1 117 + #define ADF_PF2VF_VF_INCOMPATIBLE 2 118 + #define ADF_PF2VF_VF_COMPAT_UNKNOWN 3 119 + 120 + /* VF->PF messages */ 121 + #define ADF_VF2PF_IN_USE_BY_VF 0x00006AC2 122 + #define ADF_VF2PF_IN_USE_BY_VF_MASK 0x0000FFFE 123 + #define ADF_VF2PF_INT BIT(16) 124 + #define ADF_VF2PF_MSGORIGIN_SYSTEM BIT(17) 125 + #define ADF_VF2PF_MSGTYPE_MASK 0x003C0000 126 + #define ADF_VF2PF_MSGTYPE_SHIFT 18 127 + #define ADF_VF2PF_MSGTYPE_INIT 0x3 128 + #define ADF_VF2PF_MSGTYPE_SHUTDOWN 0x4 129 + #define ADF_VF2PF_MSGTYPE_VERSION_REQ 0x5 130 + #define ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ 0x6 131 + 132 + /* VF->PF Compatible Version Request */ 133 + #define ADF_VF2PF_COMPAT_VER_REQ_SHIFT 22 134 + 135 + /* Collision detection */ 136 + #define ADF_IOV_MSG_COLLISION_DETECT_DELAY 10 137 + #define ADF_IOV_MSG_ACK_DELAY 2 138 + #define ADF_IOV_MSG_ACK_MAX_RETRY 100 139 + #define ADF_IOV_MSG_RETRY_DELAY 5 140 + #define ADF_IOV_MSG_MAX_RETRIES 3 141 + #define ADF_IOV_MSG_RESP_TIMEOUT (ADF_IOV_MSG_ACK_DELAY * \ 142 + ADF_IOV_MSG_ACK_MAX_RETRY + \ 143 + ADF_IOV_MSG_COLLISION_DETECT_DELAY) 144 + #endif /* ADF_PF2VF_MSG_H */
+406
drivers/crypto/qat/qat_common/adf_sriov.c
··· 1 + /* 2 + This file is provided under a dual BSD/GPLv2 license. When using or 3 + redistributing this file, you may do so under either license. 4 + 5 + GPL LICENSE SUMMARY 6 + Copyright(c) 2015 Intel Corporation. 7 + This program is free software; you can redistribute it and/or modify 8 + it under the terms of version 2 of the GNU General Public License as 9 + published by the Free Software Foundation. 10 + 11 + This program is distributed in the hope that it will be useful, but 12 + WITHOUT ANY WARRANTY; without even the implied warranty of 13 + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 + General Public License for more details. 15 + 16 + Contact Information: 17 + qat-linux@intel.com 18 + 19 + BSD LICENSE 20 + Copyright(c) 2015 Intel Corporation. 21 + Redistribution and use in source and binary forms, with or without 22 + modification, are permitted provided that the following conditions 23 + are met: 24 + 25 + * Redistributions of source code must retain the above copyright 26 + notice, this list of conditions and the following disclaimer. 27 + * Redistributions in binary form must reproduce the above copyright 28 + notice, this list of conditions and the following disclaimer in 29 + the documentation and/or other materials provided with the 30 + distribution. 31 + * Neither the name of Intel Corporation nor the names of its 32 + contributors may be used to endorse or promote products derived 33 + from this software without specific prior written permission. 34 + 35 + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 36 + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 37 + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 38 + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 39 + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 40 + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 41 + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 42 + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 43 + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 44 + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 45 + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 46 + */ 47 + #include <linux/workqueue.h> 48 + #include <linux/pci.h> 49 + #include <linux/device.h> 50 + #include <linux/iommu.h> 51 + #include "adf_common_drv.h" 52 + #include "adf_cfg.h" 53 + #include "adf_pf2vf_msg.h" 54 + 55 + static struct workqueue_struct *pf2vf_resp_wq; 56 + 57 + #define ME2FUNCTION_MAP_A_OFFSET (0x3A400 + 0x190) 58 + #define ME2FUNCTION_MAP_A_NUM_REGS 96 59 + 60 + #define ME2FUNCTION_MAP_B_OFFSET (0x3A400 + 0x310) 61 + #define ME2FUNCTION_MAP_B_NUM_REGS 12 62 + 63 + #define ME2FUNCTION_MAP_REG_SIZE 4 64 + #define ME2FUNCTION_MAP_VALID BIT(7) 65 + 66 + #define READ_CSR_ME2FUNCTION_MAP_A(pmisc_bar_addr, index) \ 67 + ADF_CSR_RD(pmisc_bar_addr, ME2FUNCTION_MAP_A_OFFSET + \ 68 + ME2FUNCTION_MAP_REG_SIZE * index) 69 + 70 + #define WRITE_CSR_ME2FUNCTION_MAP_A(pmisc_bar_addr, index, value) \ 71 + ADF_CSR_WR(pmisc_bar_addr, ME2FUNCTION_MAP_A_OFFSET + \ 72 + ME2FUNCTION_MAP_REG_SIZE * index, value) 73 + 74 + #define READ_CSR_ME2FUNCTION_MAP_B(pmisc_bar_addr, index) \ 75 + ADF_CSR_RD(pmisc_bar_addr, ME2FUNCTION_MAP_B_OFFSET + \ 76 + ME2FUNCTION_MAP_REG_SIZE * index) 77 + 78 + #define WRITE_CSR_ME2FUNCTION_MAP_B(pmisc_bar_addr, index, value) \ 79 + ADF_CSR_WR(pmisc_bar_addr, ME2FUNCTION_MAP_B_OFFSET + \ 80 + ME2FUNCTION_MAP_REG_SIZE * index, value) 81 + 82 + struct adf_pf2vf_resp_data { 83 + struct work_struct pf2vf_resp_work; 84 + struct adf_accel_dev *accel_dev; 85 + u32 resp; 86 + u8 vf_nr; 87 + }; 88 + 89 + 
static void adf_iov_send_resp(struct work_struct *work) 90 + { 91 + struct adf_pf2vf_resp_data *pf2vf_resp_data = 92 + container_of(work, struct adf_pf2vf_resp_data, pf2vf_resp_work); 93 + 94 + if (adf_iov_putmsg(pf2vf_resp_data->accel_dev, pf2vf_resp_data->resp, 95 + pf2vf_resp_data->vf_nr)) { 96 + dev_err(&GET_DEV(pf2vf_resp_data->accel_dev), 97 + "Failed to send response\n"); 98 + } 99 + 100 + kfree(pf2vf_resp_data); 101 + } 102 + 103 + static void adf_vf2pf_bh_handler(void *data) 104 + { 105 + struct adf_accel_vf_info *vf_info = (struct adf_accel_vf_info *)data; 106 + struct adf_accel_dev *accel_dev = vf_info->accel_dev; 107 + struct adf_hw_device_data *hw_data = accel_dev->hw_device; 108 + struct adf_bar *pmisc = 109 + &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)]; 110 + void __iomem *pmisc_addr = pmisc->virt_addr; 111 + u32 msg; 112 + 113 + /* Read message from the VF */ 114 + msg = ADF_CSR_RD(pmisc_addr, hw_data->get_pf2vf_offset(vf_info->vf_nr)); 115 + 116 + if (!(msg & ADF_VF2PF_MSGORIGIN_SYSTEM)) 117 + /* Ignore legacy non-system (non-kernel) VF2PF messages */ 118 + goto err; 119 + 120 + switch ((msg & ADF_VF2PF_MSGTYPE_MASK) >> ADF_VF2PF_MSGTYPE_SHIFT) { 121 + case ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ: 122 + { 123 + u8 vf_compat_ver = msg >> ADF_VF2PF_COMPAT_VER_REQ_SHIFT; 124 + struct adf_pf2vf_resp_data *pf2vf_resp_data; 125 + u32 resp = (ADF_PF2VF_MSGORIGIN_SYSTEM | 126 + (ADF_PF2VF_MSGTYPE_VERSION_RESP << 127 + ADF_PF2VF_MSGTYPE_SHIFT) | 128 + (ADF_PFVF_COMPATIBILITY_VERSION << 129 + ADF_PF2VF_VERSION_RESP_VERS_SHIFT)); 130 + 131 + dev_dbg(&GET_DEV(accel_dev), 132 + "Compatibility Version Request from VF%d vers=%u\n", 133 + vf_info->vf_nr + 1, vf_compat_ver); 134 + 135 + if (vf_compat_ver < hw_data->min_iov_compat_ver) { 136 + dev_err(&GET_DEV(accel_dev), 137 + "VF (vers %d) incompatible with PF (vers %d)\n", 138 + vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION); 139 + resp |= ADF_PF2VF_VF_INCOMPATIBLE << 140 + 
ADF_PF2VF_VERSION_RESP_RESULT_SHIFT; 141 + } else if (vf_compat_ver > ADF_PFVF_COMPATIBILITY_VERSION) { 142 + dev_err(&GET_DEV(accel_dev), 143 + "VF (vers %d) compat with PF (vers %d) unkn.\n", 144 + vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION); 145 + resp |= ADF_PF2VF_VF_COMPAT_UNKNOWN << 146 + ADF_PF2VF_VERSION_RESP_RESULT_SHIFT; 147 + } else { 148 + dev_dbg(&GET_DEV(accel_dev), 149 + "VF (vers %d) compatible with PF (vers %d)\n", 150 + vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION); 151 + resp |= ADF_PF2VF_VF_COMPATIBLE << 152 + ADF_PF2VF_VERSION_RESP_RESULT_SHIFT; 153 + } 154 + 155 + pf2vf_resp_data = kzalloc(sizeof(*pf2vf_resp_data), GFP_ATOMIC); 156 + if (!pf2vf_resp_data) 157 + return; 158 + 159 + pf2vf_resp_data->accel_dev = accel_dev; 160 + pf2vf_resp_data->vf_nr = vf_info->vf_nr; 161 + pf2vf_resp_data->resp = resp; 162 + INIT_WORK(&pf2vf_resp_data->pf2vf_resp_work, adf_iov_send_resp); 163 + queue_work(pf2vf_resp_wq, &pf2vf_resp_data->pf2vf_resp_work); 164 + } 165 + break; 166 + case ADF_VF2PF_MSGTYPE_INIT: 167 + { 168 + dev_dbg(&GET_DEV(accel_dev), 169 + "Init message received from VF%d 0x%x\n", 170 + vf_info->vf_nr + 1, msg); 171 + vf_info->init = true; 172 + } 173 + break; 174 + case ADF_VF2PF_MSGTYPE_SHUTDOWN: 175 + { 176 + dev_dbg(&GET_DEV(accel_dev), 177 + "Shutdown message received from VF%d 0x%x\n", 178 + vf_info->vf_nr + 1, msg); 179 + vf_info->init = false; 180 + } 181 + break; 182 + case ADF_VF2PF_MSGTYPE_VERSION_REQ: 183 + dev_err(&GET_DEV(accel_dev), 184 + "Incompatible VersionRequest received from VF%d 0x%x\n", 185 + vf_info->vf_nr + 1, msg); 186 + break; 187 + default: 188 + goto err; 189 + } 190 + 191 + /* To ACK, clear the VF2PFINT bit */ 192 + msg &= ~ADF_VF2PF_INT; 193 + ADF_CSR_WR(pmisc_addr, hw_data->get_pf2vf_offset(vf_info->vf_nr), msg); 194 + 195 + /* re-enable interrupt on PF from this VF */ 196 + adf_enable_vf2pf_interrupts(accel_dev, (1 << vf_info->vf_nr)); 197 + return; 198 + err: 199 + dev_err(&GET_DEV(accel_dev), "Unknown 
message from VF%d (0x%x);\n", 200 + vf_info->vf_nr + 1, msg); 201 + } 202 + 203 + static int adf_enable_sriov(struct adf_accel_dev *accel_dev) 204 + { 205 + struct pci_dev *pdev = accel_to_pci_dev(accel_dev); 206 + int totalvfs = pci_sriov_get_totalvfs(pdev); 207 + struct adf_hw_device_data *hw_data = accel_dev->hw_device; 208 + struct adf_bar *pmisc = 209 + &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)]; 210 + void __iomem *pmisc_addr = pmisc->virt_addr; 211 + struct adf_accel_vf_info *vf_info; 212 + int i, ret; 213 + u32 reg; 214 + 215 + /* Workqueue for PF2VF responses */ 216 + pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq"); 217 + if (!pf2vf_resp_wq) 218 + return -ENOMEM; 219 + 220 + for (i = 0, vf_info = accel_dev->pf.vf_info; i < totalvfs; 221 + i++, vf_info++) { 222 + /* This ptr will be populated when VFs will be created */ 223 + vf_info->accel_dev = accel_dev; 224 + vf_info->vf_nr = i; 225 + 226 + tasklet_init(&vf_info->vf2pf_bh_tasklet, 227 + (void *)adf_vf2pf_bh_handler, 228 + (unsigned long)vf_info); 229 + mutex_init(&vf_info->pf2vf_lock); 230 + ratelimit_state_init(&vf_info->vf2pf_ratelimit, 231 + DEFAULT_RATELIMIT_INTERVAL, 232 + DEFAULT_RATELIMIT_BURST); 233 + } 234 + 235 + /* Set Valid bits in ME Thread to PCIe Function Mapping Group A */ 236 + for (i = 0; i < ME2FUNCTION_MAP_A_NUM_REGS; i++) { 237 + reg = READ_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i); 238 + reg |= ME2FUNCTION_MAP_VALID; 239 + WRITE_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i, reg); 240 + } 241 + 242 + /* Set Valid bits in ME Thread to PCIe Function Mapping Group B */ 243 + for (i = 0; i < ME2FUNCTION_MAP_B_NUM_REGS; i++) { 244 + reg = READ_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i); 245 + reg |= ME2FUNCTION_MAP_VALID; 246 + WRITE_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i, reg); 247 + } 248 + 249 + /* Enable VF to PF interrupts for all VFs */ 250 + adf_enable_vf2pf_interrupts(accel_dev, GENMASK_ULL(totalvfs - 1, 0)); 251 + 252 + /* 253 + * Due to the hardware design, when SR-IOV and the ring 
arbiter 254 + * are enabled all the VFs supported in hardware must be enabled in 255 + * order for all the hardware resources (i.e. bundles) to be usable. 256 + * When SR-IOV is enabled, each of the VFs will own one bundle. 257 + */ 258 + ret = pci_enable_sriov(pdev, totalvfs); 259 + if (ret) 260 + return ret; 261 + 262 + return 0; 263 + } 264 + 265 + /** 266 + * adf_disable_sriov() - Disable SRIOV for the device 267 + * @accel_dev: Pointer to acceleration device. 268 + * 269 + * Function disables SRIOV for the device. 270 + * 271 + * Return: void 272 + */ 273 + void adf_disable_sriov(struct adf_accel_dev *accel_dev) 274 + { 275 + struct adf_hw_device_data *hw_data = accel_dev->hw_device; 276 + struct adf_bar *pmisc = 277 + &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)]; 278 + void __iomem *pmisc_addr = pmisc->virt_addr; 279 + int totalvfs = pci_sriov_get_totalvfs(accel_to_pci_dev(accel_dev)); 280 + struct adf_accel_vf_info *vf; 281 + u32 reg; 282 + int i; 283 + 284 + if (!accel_dev->pf.vf_info) 285 + return; 286 + 287 + adf_pf2vf_notify_restarting(accel_dev); 288 + 289 + pci_disable_sriov(accel_to_pci_dev(accel_dev)); 290 + 291 + /* Disable VF to PF interrupts */ 292 + adf_disable_vf2pf_interrupts(accel_dev, 0xFFFFFFFF); 293 + 294 + /* Clear Valid bits in ME Thread to PCIe Function Mapping Group A */ 295 + for (i = 0; i < ME2FUNCTION_MAP_A_NUM_REGS; i++) { 296 + reg = READ_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i); 297 + reg &= ~ME2FUNCTION_MAP_VALID; 298 + WRITE_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i, reg); 299 + } 300 + 301 + /* Clear Valid bits in ME Thread to PCIe Function Mapping Group B */ 302 + for (i = 0; i < ME2FUNCTION_MAP_B_NUM_REGS; i++) { 303 + reg = READ_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i); 304 + reg &= ~ME2FUNCTION_MAP_VALID; 305 + WRITE_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i, reg); 306 + } 307 + 308 + for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++) { 309 + tasklet_disable(&vf->vf2pf_bh_tasklet); 310 + 
tasklet_kill(&vf->vf2pf_bh_tasklet); 311 + mutex_destroy(&vf->pf2vf_lock); 312 + } 313 + 314 + kfree(accel_dev->pf.vf_info); 315 + accel_dev->pf.vf_info = NULL; 316 + 317 + if (pf2vf_resp_wq) { 318 + destroy_workqueue(pf2vf_resp_wq); 319 + pf2vf_resp_wq = NULL; 320 + } 321 + } 322 + EXPORT_SYMBOL_GPL(adf_disable_sriov); 323 + 324 + /** 325 + * adf_sriov_configure() - Enable SRIOV for the device 326 + * @pdev: Pointer to pci device. 327 + * 328 + * Function enables SRIOV for the pci device. 329 + * 330 + * Return: 0 on success, error code otherwise. 331 + */ 332 + int adf_sriov_configure(struct pci_dev *pdev, int numvfs) 333 + { 334 + struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); 335 + int totalvfs = pci_sriov_get_totalvfs(pdev); 336 + unsigned long val; 337 + int ret; 338 + 339 + if (!accel_dev) { 340 + dev_err(&pdev->dev, "Failed to find accel_dev\n"); 341 + return -EFAULT; 342 + } 343 + 344 + if (!iommu_present(&pci_bus_type)) { 345 + dev_err(&pdev->dev, 346 + "IOMMU must be enabled for SR-IOV to work\n"); 347 + return -EINVAL; 348 + } 349 + 350 + if (accel_dev->pf.vf_info) { 351 + dev_info(&pdev->dev, "Already enabled for this device\n"); 352 + return -EINVAL; 353 + } 354 + 355 + if (adf_dev_started(accel_dev)) { 356 + if (adf_devmgr_in_reset(accel_dev) || 357 + adf_dev_in_use(accel_dev)) { 358 + dev_err(&GET_DEV(accel_dev), "Device busy\n"); 359 + return -EBUSY; 360 + } 361 + 362 + if (adf_dev_stop(accel_dev)) { 363 + dev_err(&GET_DEV(accel_dev), 364 + "Failed to stop qat_dev%d\n", 365 + accel_dev->accel_id); 366 + return -EFAULT; 367 + } 368 + 369 + adf_dev_shutdown(accel_dev); 370 + } 371 + 372 + if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC)) 373 + return -EFAULT; 374 + val = 0; 375 + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, 376 + ADF_NUM_CY, (void *)&val, ADF_DEC)) 377 + return -EFAULT; 378 + 379 + set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); 380 + 381 + /* Allocate memory for VF info structs */ 382 + 
accel_dev->pf.vf_info = kcalloc(totalvfs, 383 + sizeof(struct adf_accel_vf_info), 384 + GFP_KERNEL); 385 + if (!accel_dev->pf.vf_info) 386 + return -ENOMEM; 387 + 388 + if (adf_dev_init(accel_dev)) { 389 + dev_err(&GET_DEV(accel_dev), "Failed to init qat_dev%d\n", 390 + accel_dev->accel_id); 391 + return -EFAULT; 392 + } 393 + 394 + if (adf_dev_start(accel_dev)) { 395 + dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n", 396 + accel_dev->accel_id); 397 + return -EFAULT; 398 + } 399 + 400 + ret = adf_enable_sriov(accel_dev); 401 + if (ret) 402 + return ret; 403 + 404 + return numvfs; 405 + } 406 + EXPORT_SYMBOL_GPL(adf_sriov_configure);
+3 -1
drivers/crypto/qat/qat_common/qat_crypto.c
··· 103 103 104 104 list_for_each(itr, adf_devmgr_get_head()) { 105 105 accel_dev = list_entry(itr, struct adf_accel_dev, list); 106 + 106 107 if ((node == dev_to_node(&GET_DEV(accel_dev)) || 107 108 dev_to_node(&GET_DEV(accel_dev)) < 0) && 108 - adf_dev_started(accel_dev)) 109 + adf_dev_started(accel_dev) && 110 + !list_empty(&accel_dev->crypto_list)) 109 111 break; 110 112 accel_dev = NULL; 111 113 }
+23 -1
drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
··· 45 45 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 46 46 */ 47 47 #include <adf_accel_devices.h> 48 + #include <adf_pf2vf_msg.h> 48 49 #include <adf_common_drv.h> 49 50 #include "adf_dh895xcc_hw_data.h" 50 51 #include "adf_drv.h" ··· 162 161 } 163 162 } 164 163 164 + static uint32_t get_pf2vf_offset(uint32_t i) 165 + { 166 + return ADF_DH895XCC_PF2VF_OFFSET(i); 167 + } 168 + 169 + static uint32_t get_vintmsk_offset(uint32_t i) 170 + { 171 + return ADF_DH895XCC_VINTMSK_OFFSET(i); 172 + } 173 + 165 174 static void adf_enable_error_correction(struct adf_accel_dev *accel_dev) 166 175 { 167 176 struct adf_hw_device_data *hw_device = accel_dev->hw_device; ··· 208 197 209 198 /* Enable bundle and misc interrupts */ 210 199 ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF0_MASK_OFFSET, 211 - ADF_DH895XCC_SMIA0_MASK); 200 + accel_dev->pf.vf_info ? 0 : 201 + GENMASK_ULL(GET_MAX_BANKS(accel_dev) - 1, 0)); 212 202 ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF1_MASK_OFFSET, 213 203 ADF_DH895XCC_SMIA1_MASK); 204 + } 205 + 206 + static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev) 207 + { 208 + return 0; 214 209 } 215 210 216 211 void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data) ··· 238 221 hw_data->get_num_aes = get_num_aes; 239 222 hw_data->get_etr_bar_id = get_etr_bar_id; 240 223 hw_data->get_misc_bar_id = get_misc_bar_id; 224 + hw_data->get_pf2vf_offset = get_pf2vf_offset; 225 + hw_data->get_vintmsk_offset = get_vintmsk_offset; 241 226 hw_data->get_sram_bar_id = get_sram_bar_id; 242 227 hw_data->get_sku = get_sku; 243 228 hw_data->fw_name = ADF_DH895XCC_FW; 244 229 hw_data->fw_mmp_name = ADF_DH895XCC_MMP; 245 230 hw_data->init_admin_comms = adf_init_admin_comms; 246 231 hw_data->exit_admin_comms = adf_exit_admin_comms; 232 + hw_data->disable_iov = adf_disable_sriov; 247 233 hw_data->send_admin_init = adf_send_admin_init; 248 234 hw_data->init_arb = adf_init_arb; 249 235 hw_data->exit_arb = adf_exit_arb; 250 236 hw_data->get_arb_mapping 
= adf_get_arbiter_mapping; 251 237 hw_data->enable_ints = adf_enable_ints; 238 + hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms; 239 + hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION; 252 240 } 253 241 254 242 void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
+4
drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
··· 80 80 #define ADF_DH895XCC_CERRSSMSH(i) (i * 0x4000 + 0x10) 81 81 #define ADF_DH895XCC_ERRSSMSH_EN BIT(3) 82 82 83 + #define ADF_DH895XCC_ERRSOU3 (0x3A000 + 0x00C) 84 + #define ADF_DH895XCC_ERRSOU5 (0x3A000 + 0x0D8) 85 + #define ADF_DH895XCC_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04)) 86 + #define ADF_DH895XCC_VINTMSK_OFFSET(i) (0x3A000 + 0x200 + ((i) * 0x04)) 83 87 /* FW names */ 84 88 #define ADF_DH895XCC_FW "qat_895xcc.bin" 85 89 #define ADF_DH895XCC_MMP "qat_mmp.bin"
+50 -30
drivers/crypto/qat/qat_dh895xcc/adf_drv.c
··· 82 82 .id_table = adf_pci_tbl, 83 83 .name = adf_driver_name, 84 84 .probe = adf_probe, 85 - .remove = adf_remove 85 + .remove = adf_remove, 86 + .sriov_configure = adf_sriov_configure, 86 87 }; 88 + 89 + static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev) 90 + { 91 + pci_release_regions(accel_dev->accel_pci_dev.pci_dev); 92 + pci_disable_device(accel_dev->accel_pci_dev.pci_dev); 93 + } 87 94 88 95 static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) 89 96 { 90 97 struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev; 91 98 int i; 92 - 93 - adf_dev_shutdown(accel_dev); 94 99 95 100 for (i = 0; i < ADF_PCI_MAX_BARS; i++) { 96 101 struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; ··· 113 108 break; 114 109 } 115 110 kfree(accel_dev->hw_device); 111 + accel_dev->hw_device = NULL; 116 112 } 117 113 adf_cfg_dev_remove(accel_dev); 118 114 debugfs_remove(accel_dev->debugfs_dir); 119 - adf_devmgr_rm_dev(accel_dev); 120 - pci_release_regions(accel_pci_dev->pci_dev); 121 - pci_disable_device(accel_pci_dev->pci_dev); 122 - kfree(accel_dev); 115 + adf_devmgr_rm_dev(accel_dev, NULL); 123 116 } 124 117 125 118 static int adf_dev_configure(struct adf_accel_dev *accel_dev) ··· 208 205 struct adf_hw_device_data *hw_data; 209 206 char name[ADF_DEVICE_NAME_LENGTH]; 210 207 unsigned int i, bar_nr; 211 - int ret; 208 + int ret, bar_mask; 212 209 213 210 switch (ent->device) { 214 211 case ADF_DH895XCC_PCI_DEVICE_ID: ··· 232 229 return -ENOMEM; 233 230 234 231 INIT_LIST_HEAD(&accel_dev->crypto_list); 232 + accel_pci_dev = &accel_dev->accel_pci_dev; 233 + accel_pci_dev->pci_dev = pdev; 235 234 236 235 /* Add accel device to accel table. 
237 236 * This should be called before adf_cleanup_accel is called */ 238 - if (adf_devmgr_add_dev(accel_dev)) { 237 + if (adf_devmgr_add_dev(accel_dev, NULL)) { 239 238 dev_err(&pdev->dev, "Failed to add new accelerator device.\n"); 240 239 kfree(accel_dev); 241 240 return -EFAULT; ··· 260 255 default: 261 256 return -ENODEV; 262 257 } 263 - accel_pci_dev = &accel_dev->accel_pci_dev; 264 258 pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); 265 259 pci_read_config_dword(pdev, ADF_DH895XCC_FUSECTL_OFFSET, 266 260 &hw_data->fuses); ··· 268 264 hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses); 269 265 hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses); 270 266 accel_pci_dev->sku = hw_data->get_sku(hw_data); 271 - accel_pci_dev->pci_dev = pdev; 272 267 /* If the device has no acceleration engines then ignore it. */ 273 268 if (!hw_data->accel_mask || !hw_data->ae_mask || 274 269 ((~hw_data->ae_mask) & 0x01)) { ··· 277 274 } 278 275 279 276 /* Create dev top level debugfs entry */ 280 - snprintf(name, sizeof(name), "%s%s_dev%d", ADF_DEVICE_NAME_PREFIX, 281 - hw_data->dev_class->name, hw_data->instance_id); 277 + snprintf(name, sizeof(name), "%s%s_%02x:%02d.%02d", 278 + ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name, 279 + pdev->bus->number, PCI_SLOT(pdev->devfn), 280 + PCI_FUNC(pdev->devfn)); 281 + 282 282 accel_dev->debugfs_dir = debugfs_create_dir(name, NULL); 283 283 if (!accel_dev->debugfs_dir) { 284 - dev_err(&pdev->dev, "Could not create debugfs dir\n"); 284 + dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name); 285 285 ret = -EINVAL; 286 286 goto out_err; 287 287 } ··· 307 301 if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { 308 302 dev_err(&pdev->dev, "No usable DMA configuration\n"); 309 303 ret = -EFAULT; 310 - goto out_err; 304 + goto out_err_disable; 311 305 } else { 312 306 pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 313 307 } ··· 318 312 319 313 if (pci_request_regions(pdev, adf_driver_name)) { 
320 314 ret = -EFAULT; 321 - goto out_err; 315 + goto out_err_disable; 322 316 } 323 317 324 318 /* Read accelerator capabilities mask */ ··· 326 320 &hw_data->accel_capabilities_mask); 327 321 328 322 /* Find and map all the device's BARS */ 329 - for (i = 0; i < ADF_PCI_MAX_BARS; i++) { 330 - struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; 323 + i = 0; 324 + bar_mask = pci_select_bars(pdev, IORESOURCE_MEM); 325 + for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask, 326 + ADF_PCI_MAX_BARS * 2) { 327 + struct adf_bar *bar = &accel_pci_dev->pci_bars[i++]; 331 328 332 - bar_nr = i * 2; 333 329 bar->base_addr = pci_resource_start(pdev, bar_nr); 334 330 if (!bar->base_addr) 335 331 break; 336 332 bar->size = pci_resource_len(pdev, bar_nr); 337 333 bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0); 338 334 if (!bar->virt_addr) { 339 - dev_err(&pdev->dev, "Failed to map BAR %d\n", i); 335 + dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr); 340 336 ret = -EFAULT; 341 - goto out_err; 337 + goto out_err_free_reg; 342 338 } 343 339 } 344 340 pci_set_master(pdev); ··· 348 340 if (adf_enable_aer(accel_dev, &adf_driver)) { 349 341 dev_err(&pdev->dev, "Failed to enable aer\n"); 350 342 ret = -EFAULT; 351 - goto out_err; 343 + goto out_err_free_reg; 352 344 } 353 345 354 346 if (pci_save_state(pdev)) { 355 347 dev_err(&pdev->dev, "Failed to save pci state\n"); 356 348 ret = -ENOMEM; 357 - goto out_err; 349 + goto out_err_free_reg; 358 350 } 359 351 360 352 ret = adf_dev_configure(accel_dev); 361 353 if (ret) 362 - goto out_err; 354 + goto out_err_free_reg; 363 355 364 356 ret = adf_dev_init(accel_dev); 365 357 if (ret) 366 - goto out_err; 358 + goto out_err_dev_shutdown; 367 359 368 360 ret = adf_dev_start(accel_dev); 369 - if (ret) { 370 - adf_dev_stop(accel_dev); 371 - goto out_err; 372 - } 361 + if (ret) 362 + goto out_err_dev_stop; 373 363 374 - return 0; 364 + return ret; 365 + 366 + out_err_dev_stop: 367 + adf_dev_stop(accel_dev); 368 + 
out_err_dev_shutdown: 369 + adf_dev_shutdown(accel_dev); 370 + out_err_free_reg: 371 + pci_release_regions(accel_pci_dev->pci_dev); 372 + out_err_disable: 373 + pci_disable_device(accel_pci_dev->pci_dev); 375 374 out_err: 376 375 adf_cleanup_accel(accel_dev); 376 + kfree(accel_dev); 377 377 return ret; 378 378 } 379 379 ··· 395 379 } 396 380 if (adf_dev_stop(accel_dev)) 397 381 dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n"); 382 + 383 + adf_dev_shutdown(accel_dev); 398 384 adf_disable_aer(accel_dev); 399 385 adf_cleanup_accel(accel_dev); 386 + adf_cleanup_pci_dev(accel_dev); 387 + kfree(accel_dev); 400 388 } 401 389 402 390 static int __init adfdrv_init(void)
+101 -34
drivers/crypto/qat/qat_dh895xcc/adf_isr.c
··· 59 59 #include <adf_transport_access_macros.h> 60 60 #include <adf_transport_internal.h> 61 61 #include "adf_drv.h" 62 + #include "adf_dh895xcc_hw_data.h" 62 63 63 64 static int adf_enable_msix(struct adf_accel_dev *accel_dev) 64 65 { 65 66 struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev; 66 67 struct adf_hw_device_data *hw_data = accel_dev->hw_device; 67 - uint32_t msix_num_entries = hw_data->num_banks + 1; 68 - int i; 68 + u32 msix_num_entries = 1; 69 69 70 - for (i = 0; i < msix_num_entries; i++) 71 - pci_dev_info->msix_entries.entries[i].entry = i; 70 + /* If SR-IOV is disabled, add entries for each bank */ 71 + if (!accel_dev->pf.vf_info) { 72 + int i; 73 + 74 + msix_num_entries += hw_data->num_banks; 75 + for (i = 0; i < msix_num_entries; i++) 76 + pci_dev_info->msix_entries.entries[i].entry = i; 77 + } else { 78 + pci_dev_info->msix_entries.entries[0].entry = 79 + hw_data->num_banks; 80 + } 72 81 73 82 if (pci_enable_msix_exact(pci_dev_info->pci_dev, 74 83 pci_dev_info->msix_entries.entries, 75 84 msix_num_entries)) { 76 - dev_err(&GET_DEV(accel_dev), "Failed to enable MSIX IRQ\n"); 85 + dev_err(&GET_DEV(accel_dev), "Failed to enable MSI-X IRQ(s)\n"); 77 86 return -EFAULT; 78 87 } 79 88 return 0; ··· 106 97 { 107 98 struct adf_accel_dev *accel_dev = dev_ptr; 108 99 109 - dev_info(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n", 110 - accel_dev->accel_id); 111 - return IRQ_HANDLED; 100 + #ifdef CONFIG_PCI_IOV 101 + /* If SR-IOV is enabled (vf_info is non-NULL), check for VF->PF ints */ 102 + if (accel_dev->pf.vf_info) { 103 + void __iomem *pmisc_bar_addr = 104 + (&GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR])->virt_addr; 105 + u32 vf_mask; 106 + 107 + /* Get the interrupt sources triggered by VFs */ 108 + vf_mask = ((ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCC_ERRSOU5) & 109 + 0x0000FFFF) << 16) | 110 + ((ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCC_ERRSOU3) & 111 + 0x01FFFE00) >> 9); 112 + 113 + if (vf_mask) { 114 + struct adf_accel_vf_info 
*vf_info; 115 + bool irq_handled = false; 116 + int i; 117 + 118 + /* Disable VF2PF interrupts for VFs with pending ints */ 119 + adf_disable_vf2pf_interrupts(accel_dev, vf_mask); 120 + 121 + /* 122 + * Schedule tasklets to handle VF2PF interrupt BHs 123 + * unless the VF is malicious and is attempting to 124 + * flood the host OS with VF2PF interrupts. 125 + */ 126 + for_each_set_bit(i, (const unsigned long *)&vf_mask, 127 + (sizeof(vf_mask) * BITS_PER_BYTE)) { 128 + vf_info = accel_dev->pf.vf_info + i; 129 + 130 + if (!__ratelimit(&vf_info->vf2pf_ratelimit)) { 131 + dev_info(&GET_DEV(accel_dev), 132 + "Too many ints from VF%d\n", 133 + vf_info->vf_nr + 1); 134 + continue; 135 + } 136 + 137 + /* Tasklet will re-enable ints from this VF */ 138 + tasklet_hi_schedule(&vf_info->vf2pf_bh_tasklet); 139 + irq_handled = true; 140 + } 141 + 142 + if (irq_handled) 143 + return IRQ_HANDLED; 144 + } 145 + } 146 + #endif /* CONFIG_PCI_IOV */ 147 + 148 + dev_dbg(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n", 149 + accel_dev->accel_id); 150 + 151 + return IRQ_NONE; 112 152 } 113 153 114 154 static int adf_request_irqs(struct adf_accel_dev *accel_dev) ··· 166 108 struct adf_hw_device_data *hw_data = accel_dev->hw_device; 167 109 struct msix_entry *msixe = pci_dev_info->msix_entries.entries; 168 110 struct adf_etr_data *etr_data = accel_dev->transport; 169 - int ret, i; 111 + int ret, i = 0; 170 112 char *name; 171 113 172 - /* Request msix irq for all banks */ 173 - for (i = 0; i < hw_data->num_banks; i++) { 174 - struct adf_etr_bank_data *bank = &etr_data->banks[i]; 175 - unsigned int cpu, cpus = num_online_cpus(); 114 + /* Request msix irq for all banks unless SR-IOV enabled */ 115 + if (!accel_dev->pf.vf_info) { 116 + for (i = 0; i < hw_data->num_banks; i++) { 117 + struct adf_etr_bank_data *bank = &etr_data->banks[i]; 118 + unsigned int cpu, cpus = num_online_cpus(); 176 119 177 - name = *(pci_dev_info->msix_entries.names + i); 178 - snprintf(name, 
ADF_MAX_MSIX_VECTOR_NAME, 179 - "qat%d-bundle%d", accel_dev->accel_id, i); 180 - ret = request_irq(msixe[i].vector, 181 - adf_msix_isr_bundle, 0, name, bank); 182 - if (ret) { 183 - dev_err(&GET_DEV(accel_dev), 184 - "failed to enable irq %d for %s\n", 185 - msixe[i].vector, name); 186 - return ret; 120 + name = *(pci_dev_info->msix_entries.names + i); 121 + snprintf(name, ADF_MAX_MSIX_VECTOR_NAME, 122 + "qat%d-bundle%d", accel_dev->accel_id, i); 123 + ret = request_irq(msixe[i].vector, 124 + adf_msix_isr_bundle, 0, name, bank); 125 + if (ret) { 126 + dev_err(&GET_DEV(accel_dev), 127 + "failed to enable irq %d for %s\n", 128 + msixe[i].vector, name); 129 + return ret; 130 + } 131 + 132 + cpu = ((accel_dev->accel_id * hw_data->num_banks) + 133 + i) % cpus; 134 + irq_set_affinity_hint(msixe[i].vector, 135 + get_cpu_mask(cpu)); 187 136 } 188 - 189 - cpu = ((accel_dev->accel_id * hw_data->num_banks) + i) % cpus; 190 - irq_set_affinity_hint(msixe[i].vector, get_cpu_mask(cpu)); 191 137 } 192 138 193 139 /* Request msix irq for AE */ ··· 214 152 struct adf_hw_device_data *hw_data = accel_dev->hw_device; 215 153 struct msix_entry *msixe = pci_dev_info->msix_entries.entries; 216 154 struct adf_etr_data *etr_data = accel_dev->transport; 217 - int i; 155 + int i = 0; 218 156 219 - for (i = 0; i < hw_data->num_banks; i++) { 220 - irq_set_affinity_hint(msixe[i].vector, NULL); 221 - free_irq(msixe[i].vector, &etr_data->banks[i]); 157 + if (pci_dev_info->msix_entries.num_entries > 1) { 158 + for (i = 0; i < hw_data->num_banks; i++) { 159 + irq_set_affinity_hint(msixe[i].vector, NULL); 160 + free_irq(msixe[i].vector, &etr_data->banks[i]); 161 + } 222 162 } 223 163 irq_set_affinity_hint(msixe[i].vector, NULL); 224 164 free_irq(msixe[i].vector, accel_dev); ··· 232 168 char **names; 233 169 struct msix_entry *entries; 234 170 struct adf_hw_device_data *hw_data = accel_dev->hw_device; 235 - uint32_t msix_num_entries = hw_data->num_banks + 1; 171 + u32 msix_num_entries = 1; 172 + 173 + 
/* If SR-IOV is disabled (vf_info is NULL), add entries for each bank */ 174 + if (!accel_dev->pf.vf_info) 175 + msix_num_entries += hw_data->num_banks; 236 176 237 177 entries = kzalloc_node(msix_num_entries * sizeof(*entries), 238 178 GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev))); ··· 253 185 if (!(*(names + i))) 254 186 goto err; 255 187 } 188 + accel_dev->accel_pci_dev.msix_entries.num_entries = msix_num_entries; 256 189 accel_dev->accel_pci_dev.msix_entries.entries = entries; 257 190 accel_dev->accel_pci_dev.msix_entries.names = names; 258 191 return 0; ··· 267 198 268 199 static void adf_isr_free_msix_entry_table(struct adf_accel_dev *accel_dev) 269 200 { 270 - struct adf_hw_device_data *hw_data = accel_dev->hw_device; 271 - uint32_t msix_num_entries = hw_data->num_banks + 1; 272 201 char **names = accel_dev->accel_pci_dev.msix_entries.names; 273 202 int i; 274 203 275 204 kfree(accel_dev->accel_pci_dev.msix_entries.entries); 276 - for (i = 0; i < msix_num_entries; i++) 205 + for (i = 0; i < accel_dev->accel_pci_dev.msix_entries.num_entries; i++) 277 206 kfree(*(names + i)); 278 207 kfree(names); 279 208 }