Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'fpga-for-5.20-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/fpga/linux-fpga into char-misc-next

Xu writes:

Here is the first set of FPGA changes for 5.20-rc1

FPGA static firmware loader

- Russ's change to add support for the Intel MAX10 BMC Secure
Update driver, which instantiates the new Firmware Upload
functionality (merged in the last cycle) of the Firmware
Loader.

DFL

- keliu's change to use ida_alloc()/ida_free() instead of the
deprecated ida_simple_get()/ida_simple_remove()

ALTERA

- Marco's change to fix a "comparison with less than zero"
warning

All patches have been reviewed on the mailing list, and have been in the
last linux-next releases (as part of our for-next branch).

Signed-off-by: Xu Yilun <yilun.xu@intel.com>

* tag 'fpga-for-5.20-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/fpga/linux-fpga:
fpga: altera-pr-ip: fix unsigned comparison with less than zero
fpga: Directly use ida_alloc()/free()
fpga: m10bmc-sec: add max10 secure update functions
fpga: m10bmc-sec: expose max10 canceled keys in sysfs
fpga: m10bmc-sec: expose max10 flash update count
fpga: m10bmc-sec: create max10 bmc secure update
mfd: intel-m10-bmc: Rename n3000bmc-secure driver

+721 -13
+61
Documentation/ABI/testing/sysfs-driver-intel-m10-bmc-sec-update
··· 1 + What: /sys/bus/platform/drivers/intel-m10bmc-sec-update/.../security/sr_root_entry_hash 2 + Date: Sep 2022 3 + KernelVersion: 5.20 4 + Contact: Russ Weight <russell.h.weight@intel.com> 5 + Description: Read only. Returns the root entry hash for the static 6 + region if one is programmed, else it returns the 7 + string: "hash not programmed". This file is only 8 + visible if the underlying device supports it. 9 + Format: string. 10 + 11 + What: /sys/bus/platform/drivers/intel-m10bmc-sec-update/.../security/pr_root_entry_hash 12 + Date: Sep 2022 13 + KernelVersion: 5.20 14 + Contact: Russ Weight <russell.h.weight@intel.com> 15 + Description: Read only. Returns the root entry hash for the partial 16 + reconfiguration region if one is programmed, else it 17 + returns the string: "hash not programmed". This file 18 + is only visible if the underlying device supports it. 19 + Format: string. 20 + 21 + What: /sys/bus/platform/drivers/intel-m10bmc-sec-update/.../security/bmc_root_entry_hash 22 + Date: Sep 2022 23 + KernelVersion: 5.20 24 + Contact: Russ Weight <russell.h.weight@intel.com> 25 + Description: Read only. Returns the root entry hash for the BMC image 26 + if one is programmed, else it returns the string: 27 + "hash not programmed". This file is only visible if the 28 + underlying device supports it. 29 + Format: string. 30 + 31 + What: /sys/bus/platform/drivers/intel-m10bmc-sec-update/.../security/sr_canceled_csks 32 + Date: Sep 2022 33 + KernelVersion: 5.20 34 + Contact: Russ Weight <russell.h.weight@intel.com> 35 + Description: Read only. Returns a list of indices for canceled code 36 + signing keys for the static region. The standard bitmap 37 + list format is used (e.g. "1,2-6,9"). 38 + 39 + What: /sys/bus/platform/drivers/intel-m10bmc-sec-update/.../security/pr_canceled_csks 40 + Date: Sep 2022 41 + KernelVersion: 5.20 42 + Contact: Russ Weight <russell.h.weight@intel.com> 43 + Description: Read only. 
Returns a list of indices for canceled code 44 + signing keys for the partial reconfiguration region. The 45 + standard bitmap list format is used (e.g. "1,2-6,9"). 46 + 47 + What: /sys/bus/platform/drivers/intel-m10bmc-sec-update/.../security/bmc_canceled_csks 48 + Date: Sep 2022 49 + KernelVersion: 5.20 50 + Contact: Russ Weight <russell.h.weight@intel.com> 51 + Description: Read only. Returns a list of indices for canceled code 52 + signing keys for the BMC. The standard bitmap list format 53 + is used (e.g. "1,2-6,9"). 54 + 55 + What: /sys/bus/platform/drivers/intel-m10bmc-sec-update/.../security/flash_count 56 + Date: Sep 2022 57 + KernelVersion: 5.20 58 + Contact: Russ Weight <russell.h.weight@intel.com> 59 + Description: Read only. Returns number of times the secure update 60 + staging area has been flashed. 61 + Format: "%u".
+7
MAINTAINERS
··· 7815 7815 F: drivers/fpga/ 7816 7816 F: include/linux/fpga/ 7817 7817 7818 + INTEL MAX10 BMC SECURE UPDATES 7819 + M: Russ Weight <russell.h.weight@intel.com> 7820 + L: linux-fpga@vger.kernel.org 7821 + S: Maintained 7822 + F: Documentation/ABI/testing/sysfs-driver-intel-m10-bmc-sec-update 7823 + F: drivers/fpga/intel-m10-bmc-sec-update.c 7824 + 7818 7825 FPU EMULATOR 7819 7826 M: Bill Metzenthen <billm@melbpc.org.au> 7820 7827 S: Maintained
+12
drivers/fpga/Kconfig
··· 243 243 configure the programmable logic(PL). 244 244 245 245 To compile this as a module, choose M here. 246 + 247 + config FPGA_M10_BMC_SEC_UPDATE 248 + tristate "Intel MAX10 BMC Secure Update driver" 249 + depends on MFD_INTEL_M10_BMC && FW_UPLOAD 250 + help 251 + Secure update support for the Intel MAX10 board management 252 + controller. 253 + 254 + This is a subdriver of the Intel MAX10 board management controller 255 + (BMC) and provides support for secure updates for the BMC image, 256 + the FPGA image, the Root Entry Hashes, etc. 257 + 246 258 endif # FPGA
+3
drivers/fpga/Makefile
··· 22 22 obj-$(CONFIG_ALTERA_PR_IP_CORE) += altera-pr-ip-core.o 23 23 obj-$(CONFIG_ALTERA_PR_IP_CORE_PLAT) += altera-pr-ip-core-plat.o 24 24 25 + # FPGA Secure Update Drivers 26 + obj-$(CONFIG_FPGA_M10_BMC_SEC_UPDATE) += intel-m10-bmc-sec-update.o 27 + 25 28 # FPGA Bridge Drivers 26 29 obj-$(CONFIG_FPGA_BRIDGE) += fpga-bridge.o 27 30 obj-$(CONFIG_SOCFPGA_FPGA_BRIDGE) += altera-hps2fpga.o altera-fpga2sdram.o
+1 -1
drivers/fpga/altera-pr-ip-core.c
··· 108 108 u32 *buffer_32 = (u32 *)buf; 109 109 size_t i = 0; 110 110 111 - if (count <= 0) 111 + if (!count) 112 112 return -EINVAL; 113 113 114 114 /* Write out the complete 32-bit chunks */
+2 -2
drivers/fpga/dfl.c
··· 342 342 if (ddev->mmio_res.parent) 343 343 release_resource(&ddev->mmio_res); 344 344 345 - ida_simple_remove(&dfl_device_ida, ddev->id); 345 + ida_free(&dfl_device_ida, ddev->id); 346 346 kfree(ddev->irqs); 347 347 kfree(ddev); 348 348 } ··· 360 360 if (!ddev) 361 361 return ERR_PTR(-ENOMEM); 362 362 363 - id = ida_simple_get(&dfl_device_ida, 0, 0, GFP_KERNEL); 363 + id = ida_alloc(&dfl_device_ida, GFP_KERNEL); 364 364 if (id < 0) { 365 365 dev_err(&pdev->dev, "unable to get id\n"); 366 366 kfree(ddev);
+3 -3
drivers/fpga/fpga-bridge.c
··· 342 342 if (!bridge) 343 343 return ERR_PTR(-ENOMEM); 344 344 345 - id = ida_simple_get(&fpga_bridge_ida, 0, 0, GFP_KERNEL); 345 + id = ida_alloc(&fpga_bridge_ida, GFP_KERNEL); 346 346 if (id < 0) { 347 347 ret = id; 348 348 goto error_kfree; ··· 375 375 return bridge; 376 376 377 377 error_device: 378 - ida_simple_remove(&fpga_bridge_ida, id); 378 + ida_free(&fpga_bridge_ida, id); 379 379 error_kfree: 380 380 kfree(bridge); 381 381 ··· 407 407 { 408 408 struct fpga_bridge *bridge = to_fpga_bridge(dev); 409 409 410 - ida_simple_remove(&fpga_bridge_ida, bridge->dev.id); 410 + ida_free(&fpga_bridge_ida, bridge->dev.id); 411 411 kfree(bridge); 412 412 } 413 413
+3 -3
drivers/fpga/fpga-mgr.c
··· 623 623 if (!mgr) 624 624 return ERR_PTR(-ENOMEM); 625 625 626 - id = ida_simple_get(&fpga_mgr_ida, 0, 0, GFP_KERNEL); 626 + id = ida_alloc(&fpga_mgr_ida, GFP_KERNEL); 627 627 if (id < 0) { 628 628 ret = id; 629 629 goto error_kfree; ··· 662 662 return mgr; 663 663 664 664 error_device: 665 - ida_simple_remove(&fpga_mgr_ida, id); 665 + ida_free(&fpga_mgr_ida, id); 666 666 error_kfree: 667 667 kfree(mgr); 668 668 ··· 790 790 { 791 791 struct fpga_manager *mgr = to_fpga_manager(dev); 792 792 793 - ida_simple_remove(&fpga_mgr_ida, mgr->dev.id); 793 + ida_free(&fpga_mgr_ida, mgr->dev.id); 794 794 kfree(mgr); 795 795 } 796 796
+3 -3
drivers/fpga/fpga-region.c
··· 202 202 if (!region) 203 203 return ERR_PTR(-ENOMEM); 204 204 205 - id = ida_simple_get(&fpga_region_ida, 0, 0, GFP_KERNEL); 205 + id = ida_alloc(&fpga_region_ida, GFP_KERNEL); 206 206 if (id < 0) { 207 207 ret = id; 208 208 goto err_free; ··· 234 234 return region; 235 235 236 236 err_remove: 237 - ida_simple_remove(&fpga_region_ida, id); 237 + ida_free(&fpga_region_ida, id); 238 238 err_free: 239 239 kfree(region); 240 240 ··· 283 283 { 284 284 struct fpga_region *region = to_fpga_region(dev); 285 285 286 - ida_simple_remove(&fpga_region_ida, region->dev.id); 286 + ida_free(&fpga_region_ida, region->dev.id); 287 287 kfree(region); 288 288 } 289 289
+625
drivers/fpga/intel-m10-bmc-sec-update.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Intel MAX10 Board Management Controller Secure Update Driver 4 + * 5 + * Copyright (C) 2019-2022 Intel Corporation. All rights reserved. 6 + * 7 + */ 8 + #include <linux/bitfield.h> 9 + #include <linux/device.h> 10 + #include <linux/firmware.h> 11 + #include <linux/mfd/intel-m10-bmc.h> 12 + #include <linux/mod_devicetable.h> 13 + #include <linux/module.h> 14 + #include <linux/platform_device.h> 15 + #include <linux/slab.h> 16 + 17 + struct m10bmc_sec { 18 + struct device *dev; 19 + struct intel_m10bmc *m10bmc; 20 + struct fw_upload *fwl; 21 + char *fw_name; 22 + u32 fw_name_id; 23 + bool cancel_request; 24 + }; 25 + 26 + static DEFINE_XARRAY_ALLOC(fw_upload_xa); 27 + 28 + /* Root Entry Hash (REH) support */ 29 + #define REH_SHA256_SIZE 32 30 + #define REH_SHA384_SIZE 48 31 + #define REH_MAGIC GENMASK(15, 0) 32 + #define REH_SHA_NUM_BYTES GENMASK(31, 16) 33 + 34 + static ssize_t 35 + show_root_entry_hash(struct device *dev, u32 exp_magic, 36 + u32 prog_addr, u32 reh_addr, char *buf) 37 + { 38 + struct m10bmc_sec *sec = dev_get_drvdata(dev); 39 + int sha_num_bytes, i, ret, cnt = 0; 40 + u8 hash[REH_SHA384_SIZE]; 41 + unsigned int stride; 42 + u32 magic; 43 + 44 + stride = regmap_get_reg_stride(sec->m10bmc->regmap); 45 + ret = m10bmc_raw_read(sec->m10bmc, prog_addr, &magic); 46 + if (ret) 47 + return ret; 48 + 49 + if (FIELD_GET(REH_MAGIC, magic) != exp_magic) 50 + return sysfs_emit(buf, "hash not programmed\n"); 51 + 52 + sha_num_bytes = FIELD_GET(REH_SHA_NUM_BYTES, magic) / 8; 53 + if ((sha_num_bytes % stride) || 54 + (sha_num_bytes != REH_SHA256_SIZE && 55 + sha_num_bytes != REH_SHA384_SIZE)) { 56 + dev_err(sec->dev, "%s bad sha num bytes %d\n", __func__, 57 + sha_num_bytes); 58 + return -EINVAL; 59 + } 60 + 61 + ret = regmap_bulk_read(sec->m10bmc->regmap, reh_addr, 62 + hash, sha_num_bytes / stride); 63 + if (ret) { 64 + dev_err(dev, "failed to read root entry hash: %x cnt %x: %d\n", 65 + reh_addr, 
sha_num_bytes / stride, ret); 66 + return ret; 67 + } 68 + 69 + for (i = 0; i < sha_num_bytes; i++) 70 + cnt += sprintf(buf + cnt, "%02x", hash[i]); 71 + cnt += sprintf(buf + cnt, "\n"); 72 + 73 + return cnt; 74 + } 75 + 76 + #define DEVICE_ATTR_SEC_REH_RO(_name, _magic, _prog_addr, _reh_addr) \ 77 + static ssize_t _name##_root_entry_hash_show(struct device *dev, \ 78 + struct device_attribute *attr, \ 79 + char *buf) \ 80 + { return show_root_entry_hash(dev, _magic, _prog_addr, _reh_addr, buf); } \ 81 + static DEVICE_ATTR_RO(_name##_root_entry_hash) 82 + 83 + DEVICE_ATTR_SEC_REH_RO(bmc, BMC_PROG_MAGIC, BMC_PROG_ADDR, BMC_REH_ADDR); 84 + DEVICE_ATTR_SEC_REH_RO(sr, SR_PROG_MAGIC, SR_PROG_ADDR, SR_REH_ADDR); 85 + DEVICE_ATTR_SEC_REH_RO(pr, PR_PROG_MAGIC, PR_PROG_ADDR, PR_REH_ADDR); 86 + 87 + #define CSK_BIT_LEN 128U 88 + #define CSK_32ARRAY_SIZE DIV_ROUND_UP(CSK_BIT_LEN, 32) 89 + 90 + static ssize_t 91 + show_canceled_csk(struct device *dev, u32 addr, char *buf) 92 + { 93 + unsigned int i, stride, size = CSK_32ARRAY_SIZE * sizeof(u32); 94 + struct m10bmc_sec *sec = dev_get_drvdata(dev); 95 + DECLARE_BITMAP(csk_map, CSK_BIT_LEN); 96 + __le32 csk_le32[CSK_32ARRAY_SIZE]; 97 + u32 csk32[CSK_32ARRAY_SIZE]; 98 + int ret; 99 + 100 + stride = regmap_get_reg_stride(sec->m10bmc->regmap); 101 + if (size % stride) { 102 + dev_err(sec->dev, 103 + "CSK vector size (0x%x) not aligned to stride (0x%x)\n", 104 + size, stride); 105 + WARN_ON_ONCE(1); 106 + return -EINVAL; 107 + } 108 + 109 + ret = regmap_bulk_read(sec->m10bmc->regmap, addr, csk_le32, 110 + size / stride); 111 + if (ret) { 112 + dev_err(sec->dev, "failed to read CSK vector: %x cnt %x: %d\n", 113 + addr, size / stride, ret); 114 + return ret; 115 + } 116 + 117 + for (i = 0; i < CSK_32ARRAY_SIZE; i++) 118 + csk32[i] = le32_to_cpu(((csk_le32[i]))); 119 + 120 + bitmap_from_arr32(csk_map, csk32, CSK_BIT_LEN); 121 + bitmap_complement(csk_map, csk_map, CSK_BIT_LEN); 122 + return bitmap_print_to_pagebuf(1, buf, csk_map, 
CSK_BIT_LEN); 123 + } 124 + 125 + #define DEVICE_ATTR_SEC_CSK_RO(_name, _addr) \ 126 + static ssize_t _name##_canceled_csks_show(struct device *dev, \ 127 + struct device_attribute *attr, \ 128 + char *buf) \ 129 + { return show_canceled_csk(dev, _addr, buf); } \ 130 + static DEVICE_ATTR_RO(_name##_canceled_csks) 131 + 132 + #define CSK_VEC_OFFSET 0x34 133 + 134 + DEVICE_ATTR_SEC_CSK_RO(bmc, BMC_PROG_ADDR + CSK_VEC_OFFSET); 135 + DEVICE_ATTR_SEC_CSK_RO(sr, SR_PROG_ADDR + CSK_VEC_OFFSET); 136 + DEVICE_ATTR_SEC_CSK_RO(pr, PR_PROG_ADDR + CSK_VEC_OFFSET); 137 + 138 + #define FLASH_COUNT_SIZE 4096 /* count stored as inverted bit vector */ 139 + 140 + static ssize_t flash_count_show(struct device *dev, 141 + struct device_attribute *attr, char *buf) 142 + { 143 + struct m10bmc_sec *sec = dev_get_drvdata(dev); 144 + unsigned int stride, num_bits; 145 + u8 *flash_buf; 146 + int cnt, ret; 147 + 148 + stride = regmap_get_reg_stride(sec->m10bmc->regmap); 149 + num_bits = FLASH_COUNT_SIZE * 8; 150 + 151 + flash_buf = kmalloc(FLASH_COUNT_SIZE, GFP_KERNEL); 152 + if (!flash_buf) 153 + return -ENOMEM; 154 + 155 + if (FLASH_COUNT_SIZE % stride) { 156 + dev_err(sec->dev, 157 + "FLASH_COUNT_SIZE (0x%x) not aligned to stride (0x%x)\n", 158 + FLASH_COUNT_SIZE, stride); 159 + WARN_ON_ONCE(1); 160 + return -EINVAL; 161 + } 162 + 163 + ret = regmap_bulk_read(sec->m10bmc->regmap, STAGING_FLASH_COUNT, 164 + flash_buf, FLASH_COUNT_SIZE / stride); 165 + if (ret) { 166 + dev_err(sec->dev, 167 + "failed to read flash count: %x cnt %x: %d\n", 168 + STAGING_FLASH_COUNT, FLASH_COUNT_SIZE / stride, ret); 169 + goto exit_free; 170 + } 171 + cnt = num_bits - bitmap_weight((unsigned long *)flash_buf, num_bits); 172 + 173 + exit_free: 174 + kfree(flash_buf); 175 + 176 + return ret ? 
: sysfs_emit(buf, "%u\n", cnt); 177 + } 178 + static DEVICE_ATTR_RO(flash_count); 179 + 180 + static struct attribute *m10bmc_security_attrs[] = { 181 + &dev_attr_flash_count.attr, 182 + &dev_attr_bmc_root_entry_hash.attr, 183 + &dev_attr_sr_root_entry_hash.attr, 184 + &dev_attr_pr_root_entry_hash.attr, 185 + &dev_attr_sr_canceled_csks.attr, 186 + &dev_attr_pr_canceled_csks.attr, 187 + &dev_attr_bmc_canceled_csks.attr, 188 + NULL, 189 + }; 190 + 191 + static struct attribute_group m10bmc_security_attr_group = { 192 + .name = "security", 193 + .attrs = m10bmc_security_attrs, 194 + }; 195 + 196 + static const struct attribute_group *m10bmc_sec_attr_groups[] = { 197 + &m10bmc_security_attr_group, 198 + NULL, 199 + }; 200 + 201 + static void log_error_regs(struct m10bmc_sec *sec, u32 doorbell) 202 + { 203 + u32 auth_result; 204 + 205 + dev_err(sec->dev, "RSU error status: 0x%08x\n", doorbell); 206 + 207 + if (!m10bmc_sys_read(sec->m10bmc, M10BMC_AUTH_RESULT, &auth_result)) 208 + dev_err(sec->dev, "RSU auth result: 0x%08x\n", auth_result); 209 + } 210 + 211 + static enum fw_upload_err rsu_check_idle(struct m10bmc_sec *sec) 212 + { 213 + u32 doorbell; 214 + int ret; 215 + 216 + ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell); 217 + if (ret) 218 + return FW_UPLOAD_ERR_RW_ERROR; 219 + 220 + if (rsu_prog(doorbell) != RSU_PROG_IDLE && 221 + rsu_prog(doorbell) != RSU_PROG_RSU_DONE) { 222 + log_error_regs(sec, doorbell); 223 + return FW_UPLOAD_ERR_BUSY; 224 + } 225 + 226 + return FW_UPLOAD_ERR_NONE; 227 + } 228 + 229 + static inline bool rsu_start_done(u32 doorbell) 230 + { 231 + u32 status, progress; 232 + 233 + if (doorbell & DRBL_RSU_REQUEST) 234 + return false; 235 + 236 + status = rsu_stat(doorbell); 237 + if (status == RSU_STAT_ERASE_FAIL || status == RSU_STAT_WEAROUT) 238 + return true; 239 + 240 + progress = rsu_prog(doorbell); 241 + if (progress != RSU_PROG_IDLE && progress != RSU_PROG_RSU_DONE) 242 + return true; 243 + 244 + return false; 245 + } 246 + 
247 + static enum fw_upload_err rsu_update_init(struct m10bmc_sec *sec) 248 + { 249 + u32 doorbell, status; 250 + int ret; 251 + 252 + ret = regmap_update_bits(sec->m10bmc->regmap, 253 + M10BMC_SYS_BASE + M10BMC_DOORBELL, 254 + DRBL_RSU_REQUEST | DRBL_HOST_STATUS, 255 + DRBL_RSU_REQUEST | 256 + FIELD_PREP(DRBL_HOST_STATUS, 257 + HOST_STATUS_IDLE)); 258 + if (ret) 259 + return FW_UPLOAD_ERR_RW_ERROR; 260 + 261 + ret = regmap_read_poll_timeout(sec->m10bmc->regmap, 262 + M10BMC_SYS_BASE + M10BMC_DOORBELL, 263 + doorbell, 264 + rsu_start_done(doorbell), 265 + NIOS_HANDSHAKE_INTERVAL_US, 266 + NIOS_HANDSHAKE_TIMEOUT_US); 267 + 268 + if (ret == -ETIMEDOUT) { 269 + log_error_regs(sec, doorbell); 270 + return FW_UPLOAD_ERR_TIMEOUT; 271 + } else if (ret) { 272 + return FW_UPLOAD_ERR_RW_ERROR; 273 + } 274 + 275 + status = rsu_stat(doorbell); 276 + if (status == RSU_STAT_WEAROUT) { 277 + dev_warn(sec->dev, "Excessive flash update count detected\n"); 278 + return FW_UPLOAD_ERR_WEAROUT; 279 + } else if (status == RSU_STAT_ERASE_FAIL) { 280 + log_error_regs(sec, doorbell); 281 + return FW_UPLOAD_ERR_HW_ERROR; 282 + } 283 + 284 + return FW_UPLOAD_ERR_NONE; 285 + } 286 + 287 + static enum fw_upload_err rsu_prog_ready(struct m10bmc_sec *sec) 288 + { 289 + unsigned long poll_timeout; 290 + u32 doorbell, progress; 291 + int ret; 292 + 293 + ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell); 294 + if (ret) 295 + return FW_UPLOAD_ERR_RW_ERROR; 296 + 297 + poll_timeout = jiffies + msecs_to_jiffies(RSU_PREP_TIMEOUT_MS); 298 + while (rsu_prog(doorbell) == RSU_PROG_PREPARE) { 299 + msleep(RSU_PREP_INTERVAL_MS); 300 + if (time_after(jiffies, poll_timeout)) 301 + break; 302 + 303 + ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell); 304 + if (ret) 305 + return FW_UPLOAD_ERR_RW_ERROR; 306 + } 307 + 308 + progress = rsu_prog(doorbell); 309 + if (progress == RSU_PROG_PREPARE) { 310 + log_error_regs(sec, doorbell); 311 + return FW_UPLOAD_ERR_TIMEOUT; 312 + } else if 
(progress != RSU_PROG_READY) { 313 + log_error_regs(sec, doorbell); 314 + return FW_UPLOAD_ERR_HW_ERROR; 315 + } 316 + 317 + return FW_UPLOAD_ERR_NONE; 318 + } 319 + 320 + static enum fw_upload_err rsu_send_data(struct m10bmc_sec *sec) 321 + { 322 + u32 doorbell; 323 + int ret; 324 + 325 + ret = regmap_update_bits(sec->m10bmc->regmap, 326 + M10BMC_SYS_BASE + M10BMC_DOORBELL, 327 + DRBL_HOST_STATUS, 328 + FIELD_PREP(DRBL_HOST_STATUS, 329 + HOST_STATUS_WRITE_DONE)); 330 + if (ret) 331 + return FW_UPLOAD_ERR_RW_ERROR; 332 + 333 + ret = regmap_read_poll_timeout(sec->m10bmc->regmap, 334 + M10BMC_SYS_BASE + M10BMC_DOORBELL, 335 + doorbell, 336 + rsu_prog(doorbell) != RSU_PROG_READY, 337 + NIOS_HANDSHAKE_INTERVAL_US, 338 + NIOS_HANDSHAKE_TIMEOUT_US); 339 + 340 + if (ret == -ETIMEDOUT) { 341 + log_error_regs(sec, doorbell); 342 + return FW_UPLOAD_ERR_TIMEOUT; 343 + } else if (ret) { 344 + return FW_UPLOAD_ERR_RW_ERROR; 345 + } 346 + 347 + switch (rsu_stat(doorbell)) { 348 + case RSU_STAT_NORMAL: 349 + case RSU_STAT_NIOS_OK: 350 + case RSU_STAT_USER_OK: 351 + case RSU_STAT_FACTORY_OK: 352 + break; 353 + default: 354 + log_error_regs(sec, doorbell); 355 + return FW_UPLOAD_ERR_HW_ERROR; 356 + } 357 + 358 + return FW_UPLOAD_ERR_NONE; 359 + } 360 + 361 + static int rsu_check_complete(struct m10bmc_sec *sec, u32 *doorbell) 362 + { 363 + if (m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, doorbell)) 364 + return -EIO; 365 + 366 + switch (rsu_stat(*doorbell)) { 367 + case RSU_STAT_NORMAL: 368 + case RSU_STAT_NIOS_OK: 369 + case RSU_STAT_USER_OK: 370 + case RSU_STAT_FACTORY_OK: 371 + break; 372 + default: 373 + return -EINVAL; 374 + } 375 + 376 + switch (rsu_prog(*doorbell)) { 377 + case RSU_PROG_IDLE: 378 + case RSU_PROG_RSU_DONE: 379 + return 0; 380 + case RSU_PROG_AUTHENTICATING: 381 + case RSU_PROG_COPYING: 382 + case RSU_PROG_UPDATE_CANCEL: 383 + case RSU_PROG_PROGRAM_KEY_HASH: 384 + return -EAGAIN; 385 + default: 386 + return -EINVAL; 387 + } 388 + } 389 + 390 + static enum 
fw_upload_err rsu_cancel(struct m10bmc_sec *sec) 391 + { 392 + u32 doorbell; 393 + int ret; 394 + 395 + ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell); 396 + if (ret) 397 + return FW_UPLOAD_ERR_RW_ERROR; 398 + 399 + if (rsu_prog(doorbell) != RSU_PROG_READY) 400 + return FW_UPLOAD_ERR_BUSY; 401 + 402 + ret = regmap_update_bits(sec->m10bmc->regmap, 403 + M10BMC_SYS_BASE + M10BMC_DOORBELL, 404 + DRBL_HOST_STATUS, 405 + FIELD_PREP(DRBL_HOST_STATUS, 406 + HOST_STATUS_ABORT_RSU)); 407 + if (ret) 408 + return FW_UPLOAD_ERR_RW_ERROR; 409 + 410 + return FW_UPLOAD_ERR_CANCELED; 411 + } 412 + 413 + static enum fw_upload_err m10bmc_sec_prepare(struct fw_upload *fwl, 414 + const u8 *data, u32 size) 415 + { 416 + struct m10bmc_sec *sec = fwl->dd_handle; 417 + u32 ret; 418 + 419 + sec->cancel_request = false; 420 + 421 + if (!size || size > M10BMC_STAGING_SIZE) 422 + return FW_UPLOAD_ERR_INVALID_SIZE; 423 + 424 + ret = rsu_check_idle(sec); 425 + if (ret != FW_UPLOAD_ERR_NONE) 426 + return ret; 427 + 428 + ret = rsu_update_init(sec); 429 + if (ret != FW_UPLOAD_ERR_NONE) 430 + return ret; 431 + 432 + ret = rsu_prog_ready(sec); 433 + if (ret != FW_UPLOAD_ERR_NONE) 434 + return ret; 435 + 436 + if (sec->cancel_request) 437 + return rsu_cancel(sec); 438 + 439 + return FW_UPLOAD_ERR_NONE; 440 + } 441 + 442 + #define WRITE_BLOCK_SIZE 0x4000 /* Default write-block size is 0x4000 bytes */ 443 + 444 + static enum fw_upload_err m10bmc_sec_write(struct fw_upload *fwl, const u8 *data, 445 + u32 offset, u32 size, u32 *written) 446 + { 447 + struct m10bmc_sec *sec = fwl->dd_handle; 448 + u32 blk_size, doorbell, extra_offset; 449 + unsigned int stride, extra = 0; 450 + int ret; 451 + 452 + stride = regmap_get_reg_stride(sec->m10bmc->regmap); 453 + if (sec->cancel_request) 454 + return rsu_cancel(sec); 455 + 456 + ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell); 457 + if (ret) { 458 + return FW_UPLOAD_ERR_RW_ERROR; 459 + } else if (rsu_prog(doorbell) != 
RSU_PROG_READY) { 460 + log_error_regs(sec, doorbell); 461 + return FW_UPLOAD_ERR_HW_ERROR; 462 + } 463 + 464 + WARN_ON_ONCE(WRITE_BLOCK_SIZE % stride); 465 + blk_size = min_t(u32, WRITE_BLOCK_SIZE, size); 466 + ret = regmap_bulk_write(sec->m10bmc->regmap, 467 + M10BMC_STAGING_BASE + offset, 468 + (void *)data + offset, 469 + blk_size / stride); 470 + if (ret) 471 + return FW_UPLOAD_ERR_RW_ERROR; 472 + 473 + /* 474 + * If blk_size is not aligned to stride, then handle the extra 475 + * bytes with regmap_write. 476 + */ 477 + if (blk_size % stride) { 478 + extra_offset = offset + ALIGN_DOWN(blk_size, stride); 479 + memcpy(&extra, (u8 *)(data + extra_offset), blk_size % stride); 480 + ret = regmap_write(sec->m10bmc->regmap, 481 + M10BMC_STAGING_BASE + extra_offset, extra); 482 + if (ret) 483 + return FW_UPLOAD_ERR_RW_ERROR; 484 + } 485 + 486 + *written = blk_size; 487 + return FW_UPLOAD_ERR_NONE; 488 + } 489 + 490 + static enum fw_upload_err m10bmc_sec_poll_complete(struct fw_upload *fwl) 491 + { 492 + struct m10bmc_sec *sec = fwl->dd_handle; 493 + unsigned long poll_timeout; 494 + u32 doorbell, result; 495 + int ret; 496 + 497 + if (sec->cancel_request) 498 + return rsu_cancel(sec); 499 + 500 + result = rsu_send_data(sec); 501 + if (result != FW_UPLOAD_ERR_NONE) 502 + return result; 503 + 504 + poll_timeout = jiffies + msecs_to_jiffies(RSU_COMPLETE_TIMEOUT_MS); 505 + do { 506 + msleep(RSU_COMPLETE_INTERVAL_MS); 507 + ret = rsu_check_complete(sec, &doorbell); 508 + } while (ret == -EAGAIN && !time_after(jiffies, poll_timeout)); 509 + 510 + if (ret == -EAGAIN) { 511 + log_error_regs(sec, doorbell); 512 + return FW_UPLOAD_ERR_TIMEOUT; 513 + } else if (ret == -EIO) { 514 + return FW_UPLOAD_ERR_RW_ERROR; 515 + } else if (ret) { 516 + log_error_regs(sec, doorbell); 517 + return FW_UPLOAD_ERR_HW_ERROR; 518 + } 519 + 520 + return FW_UPLOAD_ERR_NONE; 521 + } 522 + 523 + /* 524 + * m10bmc_sec_cancel() may be called asynchronously with an on-going update. 
525 + * All other functions are called sequentially in a single thread. To avoid 526 + * contention on register accesses, m10bmc_sec_cancel() must only update 527 + * the cancel_request flag. Other functions will check this flag and handle 528 + * the cancel request synchronously. 529 + */ 530 + static void m10bmc_sec_cancel(struct fw_upload *fwl) 531 + { 532 + struct m10bmc_sec *sec = fwl->dd_handle; 533 + 534 + sec->cancel_request = true; 535 + } 536 + 537 + static void m10bmc_sec_cleanup(struct fw_upload *fwl) 538 + { 539 + struct m10bmc_sec *sec = fwl->dd_handle; 540 + 541 + (void)rsu_cancel(sec); 542 + } 543 + 544 + static const struct fw_upload_ops m10bmc_ops = { 545 + .prepare = m10bmc_sec_prepare, 546 + .write = m10bmc_sec_write, 547 + .poll_complete = m10bmc_sec_poll_complete, 548 + .cancel = m10bmc_sec_cancel, 549 + .cleanup = m10bmc_sec_cleanup, 550 + }; 551 + 552 + #define SEC_UPDATE_LEN_MAX 32 553 + static int m10bmc_sec_probe(struct platform_device *pdev) 554 + { 555 + char buf[SEC_UPDATE_LEN_MAX]; 556 + struct m10bmc_sec *sec; 557 + struct fw_upload *fwl; 558 + unsigned int len; 559 + int ret; 560 + 561 + sec = devm_kzalloc(&pdev->dev, sizeof(*sec), GFP_KERNEL); 562 + if (!sec) 563 + return -ENOMEM; 564 + 565 + sec->dev = &pdev->dev; 566 + sec->m10bmc = dev_get_drvdata(pdev->dev.parent); 567 + dev_set_drvdata(&pdev->dev, sec); 568 + 569 + ret = xa_alloc(&fw_upload_xa, &sec->fw_name_id, sec, 570 + xa_limit_32b, GFP_KERNEL); 571 + if (ret) 572 + return ret; 573 + 574 + len = scnprintf(buf, SEC_UPDATE_LEN_MAX, "secure-update%d", 575 + sec->fw_name_id); 576 + sec->fw_name = kmemdup_nul(buf, len, GFP_KERNEL); 577 + if (!sec->fw_name) 578 + return -ENOMEM; 579 + 580 + fwl = firmware_upload_register(THIS_MODULE, sec->dev, sec->fw_name, 581 + &m10bmc_ops, sec); 582 + if (IS_ERR(fwl)) { 583 + dev_err(sec->dev, "Firmware Upload driver failed to start\n"); 584 + kfree(sec->fw_name); 585 + xa_erase(&fw_upload_xa, sec->fw_name_id); 586 + return PTR_ERR(fwl); 587 
+ } 588 + 589 + sec->fwl = fwl; 590 + return 0; 591 + } 592 + 593 + static int m10bmc_sec_remove(struct platform_device *pdev) 594 + { 595 + struct m10bmc_sec *sec = dev_get_drvdata(&pdev->dev); 596 + 597 + firmware_upload_unregister(sec->fwl); 598 + kfree(sec->fw_name); 599 + xa_erase(&fw_upload_xa, sec->fw_name_id); 600 + 601 + return 0; 602 + } 603 + 604 + static const struct platform_device_id intel_m10bmc_sec_ids[] = { 605 + { 606 + .name = "n3000bmc-sec-update", 607 + }, 608 + { } 609 + }; 610 + MODULE_DEVICE_TABLE(platform, intel_m10bmc_sec_ids); 611 + 612 + static struct platform_driver intel_m10bmc_sec_driver = { 613 + .probe = m10bmc_sec_probe, 614 + .remove = m10bmc_sec_remove, 615 + .driver = { 616 + .name = "intel-m10bmc-sec-update", 617 + .dev_groups = m10bmc_sec_attr_groups, 618 + }, 619 + .id_table = intel_m10bmc_sec_ids, 620 + }; 621 + module_platform_driver(intel_m10bmc_sec_driver); 622 + 623 + MODULE_AUTHOR("Intel Corporation"); 624 + MODULE_DESCRIPTION("Intel MAX10 BMC Secure Update"); 625 + MODULE_LICENSE("GPL");
+1 -1
drivers/mfd/intel-m10-bmc.c
··· 26 26 static struct mfd_cell m10bmc_pacn3000_subdevs[] = { 27 27 { .name = "n3000bmc-hwmon" }, 28 28 { .name = "n3000bmc-retimer" }, 29 - { .name = "n3000bmc-secure" }, 29 + { .name = "n3000bmc-sec-update" }, 30 30 }; 31 31 32 32 static struct mfd_cell m10bmc_n5010_subdevs[] = {