Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 's390-5.16-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull more s390 updates from Vasily Gorbik:

- Add PCI automatic error recovery.

- Fix tape driver timer initialization broken during timers API
cleanup.

- Fix bogus CPU measurement counter values when CPUs are taken offline.

- Check the validity of the subchannel before reading other fields in
the schib in cio code.

* tag 's390-5.16-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
s390/cio: check the subchannel validity for dev_busid
s390/cpumf: cpum_cf PMU displays invalid value after hotplug remove
s390/tape: fix timer initialization in tape_std_assign()
s390/pci: implement minimal PCI error recovery
PCI: Export pci_dev_lock()
s390/pci: implement reset_slot for hotplug slot
s390/pci: refresh function handle in iomap

+417 -19
+5 -1
arch/s390/include/asm/pci.h
··· 210 210 void zpci_device_reserved(struct zpci_dev *zdev); 211 211 bool zpci_is_device_configured(struct zpci_dev *zdev); 212 212 213 + int zpci_hot_reset_device(struct zpci_dev *zdev); 213 214 int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64); 214 215 int zpci_unregister_ioat(struct zpci_dev *, u8); 215 216 void zpci_remove_reserved_devices(void); 217 + void zpci_update_fh(struct zpci_dev *zdev, u32 fh); 216 218 217 219 /* CLP */ 218 220 int clp_setup_writeback_mio(void); ··· 296 294 void zpci_debug_init_device(struct zpci_dev *, const char *); 297 295 void zpci_debug_exit_device(struct zpci_dev *); 298 296 299 - /* Error reporting */ 297 + /* Error handling */ 300 298 int zpci_report_error(struct pci_dev *, struct zpci_report_error_header *); 299 + int zpci_clear_error_state(struct zpci_dev *zdev); 300 + int zpci_reset_load_store_blocked(struct zpci_dev *zdev); 301 301 302 302 #ifdef CONFIG_NUMA 303 303
+3 -1
arch/s390/kernel/perf_cpum_cf.c
··· 687 687 false); 688 688 if (cfdiag_diffctr(cpuhw, event->hw.config_base)) 689 689 cfdiag_push_sample(event, cpuhw); 690 - } else 690 + } else if (cpuhw->flags & PMU_F_RESERVED) { 691 + /* Only update when PMU not hotplugged off */ 691 692 hw_perf_event_update(event); 693 + } 692 694 hwc->state |= PERF_HES_UPTODATE; 693 695 } 694 696 }
+144 -4
arch/s390/pci/pci.c
··· 481 481 spin_unlock(&zpci_iomap_lock); 482 482 } 483 483 484 + static void zpci_do_update_iomap_fh(struct zpci_dev *zdev, u32 fh) 485 + { 486 + int bar, idx; 487 + 488 + spin_lock(&zpci_iomap_lock); 489 + for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) { 490 + if (!zdev->bars[bar].size) 491 + continue; 492 + idx = zdev->bars[bar].map_idx; 493 + if (!zpci_iomap_start[idx].count) 494 + continue; 495 + WRITE_ONCE(zpci_iomap_start[idx].fh, zdev->fh); 496 + } 497 + spin_unlock(&zpci_iomap_lock); 498 + } 499 + 500 + void zpci_update_fh(struct zpci_dev *zdev, u32 fh) 501 + { 502 + if (!fh || zdev->fh == fh) 503 + return; 504 + 505 + zdev->fh = fh; 506 + if (zpci_use_mio(zdev)) 507 + return; 508 + if (zdev->has_resources && zdev_enabled(zdev)) 509 + zpci_do_update_iomap_fh(zdev, fh); 510 + } 511 + 484 512 static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start, 485 513 unsigned long size, unsigned long flags) 486 514 { ··· 696 668 if (clp_enable_fh(zdev, &fh, ZPCI_NR_DMA_SPACES)) 697 669 rc = -EIO; 698 670 else 699 - zdev->fh = fh; 671 + zpci_update_fh(zdev, fh); 700 672 return rc; 701 673 } 702 674 ··· 707 679 708 680 cc = clp_disable_fh(zdev, &fh); 709 681 if (!cc) { 710 - zdev->fh = fh; 682 + zpci_update_fh(zdev, fh); 711 683 } else if (cc == CLP_RC_SETPCIFN_ALRDY) { 712 684 pr_info("Disabling PCI function %08x had no effect as it was already disabled\n", 713 685 zdev->fid); 714 686 /* Function is already disabled - update handle */ 715 687 rc = clp_refresh_fh(zdev->fid, &fh); 716 688 if (!rc) { 717 - zdev->fh = fh; 689 + zpci_update_fh(zdev, fh); 718 690 rc = -EINVAL; 719 691 } 720 692 } else { 721 693 rc = -EIO; 722 694 } 723 695 return rc; 696 + } 697 + 698 + /** 699 + * zpci_hot_reset_device - perform a reset of the given zPCI function 700 + * @zdev: the slot which should be reset 701 + * 702 + * Performs a low level reset of the zPCI function. 
The reset is low level in 703 + * the sense that the zPCI function can be reset without detaching it from the 704 + * common PCI subsystem. The reset may be performed while under control of 705 + * either DMA or IOMMU APIs in which case the existing DMA/IOMMU translation 706 + * table is reinstated at the end of the reset. 707 + * 708 + * After the reset the functions internal state is reset to an initial state 709 + * equivalent to its state during boot when first probing a driver. 710 + * Consequently after reset the PCI function requires re-initialization via the 711 + * common PCI code including re-enabling IRQs via pci_alloc_irq_vectors() 712 + * and enabling the function via e.g.pci_enablde_device_flags().The caller 713 + * must guard against concurrent reset attempts. 714 + * 715 + * In most cases this function should not be called directly but through 716 + * pci_reset_function() or pci_reset_bus() which handle the save/restore and 717 + * locking. 718 + * 719 + * Return: 0 on success and an error value otherwise 720 + */ 721 + int zpci_hot_reset_device(struct zpci_dev *zdev) 722 + { 723 + int rc; 724 + 725 + zpci_dbg(3, "rst fid:%x, fh:%x\n", zdev->fid, zdev->fh); 726 + if (zdev_enabled(zdev)) { 727 + /* Disables device access, DMAs and IRQs (reset state) */ 728 + rc = zpci_disable_device(zdev); 729 + /* 730 + * Due to a z/VM vs LPAR inconsistency in the error state the 731 + * FH may indicate an enabled device but disable says the 732 + * device is already disabled don't treat it as an error here. 
733 + */ 734 + if (rc == -EINVAL) 735 + rc = 0; 736 + if (rc) 737 + return rc; 738 + } 739 + 740 + rc = zpci_enable_device(zdev); 741 + if (rc) 742 + return rc; 743 + 744 + if (zdev->dma_table) 745 + rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma, 746 + (u64)zdev->dma_table); 747 + else 748 + rc = zpci_dma_init_device(zdev); 749 + if (rc) { 750 + zpci_disable_device(zdev); 751 + return rc; 752 + } 753 + 754 + return 0; 724 755 } 725 756 726 757 /** ··· 863 776 { 864 777 int rc; 865 778 866 - zdev->fh = fh; 779 + zpci_update_fh(zdev, fh); 867 780 /* the PCI function will be scanned once function 0 appears */ 868 781 if (!zdev->zbus->bus) 869 782 return 0; ··· 989 902 return sclp_pci_report(report, zdev->fh, zdev->fid); 990 903 } 991 904 EXPORT_SYMBOL(zpci_report_error); 905 + 906 + /** 907 + * zpci_clear_error_state() - Clears the zPCI error state of the device 908 + * @zdev: The zdev for which the zPCI error state should be reset 909 + * 910 + * Clear the zPCI error state of the device. If clearing the zPCI error state 911 + * fails the device is left in the error state. In this case it may make sense 912 + * to call zpci_io_perm_failure() on the associated pdev if it exists. 913 + * 914 + * Returns: 0 on success, -EIO otherwise 915 + */ 916 + int zpci_clear_error_state(struct zpci_dev *zdev) 917 + { 918 + u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_RESET_ERROR); 919 + struct zpci_fib fib = {0}; 920 + u8 status; 921 + int cc; 922 + 923 + cc = zpci_mod_fc(req, &fib, &status); 924 + if (cc) { 925 + zpci_dbg(3, "ces fid:%x, cc:%d, status:%x\n", zdev->fid, cc, status); 926 + return -EIO; 927 + } 928 + 929 + return 0; 930 + } 931 + 932 + /** 933 + * zpci_reset_load_store_blocked() - Re-enables L/S from error state 934 + * @zdev: The zdev for which to unblock load/store access 935 + * 936 + * Re-enables load/store access for a PCI function in the error state while 937 + * keeping DMA blocked. 
In this state drivers can poke MMIO space to determine 938 + * if error recovery is possible while catching any rogue DMA access from the 939 + * device. 940 + * 941 + * Returns: 0 on success, -EIO otherwise 942 + */ 943 + int zpci_reset_load_store_blocked(struct zpci_dev *zdev) 944 + { 945 + u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_RESET_BLOCK); 946 + struct zpci_fib fib = {0}; 947 + u8 status; 948 + int cc; 949 + 950 + cc = zpci_mod_fc(req, &fib, &status); 951 + if (cc) { 952 + zpci_dbg(3, "rls fid:%x, cc:%d, status:%x\n", zdev->fid, cc, status); 953 + return -EIO; 954 + } 955 + 956 + return 0; 957 + } 992 958 993 959 static int zpci_mem_init(void) 994 960 {
+224 -6
arch/s390/pci/pci_event.c
··· 47 47 u16 pec; /* PCI event code */ 48 48 } __packed; 49 49 50 + static inline bool ers_result_indicates_abort(pci_ers_result_t ers_res) 51 + { 52 + switch (ers_res) { 53 + case PCI_ERS_RESULT_CAN_RECOVER: 54 + case PCI_ERS_RESULT_RECOVERED: 55 + case PCI_ERS_RESULT_NEED_RESET: 56 + return false; 57 + default: 58 + return true; 59 + } 60 + } 61 + 62 + static bool is_passed_through(struct zpci_dev *zdev) 63 + { 64 + return zdev->s390_domain; 65 + } 66 + 67 + static bool is_driver_supported(struct pci_driver *driver) 68 + { 69 + if (!driver || !driver->err_handler) 70 + return false; 71 + if (!driver->err_handler->error_detected) 72 + return false; 73 + if (!driver->err_handler->slot_reset) 74 + return false; 75 + if (!driver->err_handler->resume) 76 + return false; 77 + return true; 78 + } 79 + 80 + static pci_ers_result_t zpci_event_notify_error_detected(struct pci_dev *pdev, 81 + struct pci_driver *driver) 82 + { 83 + pci_ers_result_t ers_res = PCI_ERS_RESULT_DISCONNECT; 84 + 85 + ers_res = driver->err_handler->error_detected(pdev, pdev->error_state); 86 + if (ers_result_indicates_abort(ers_res)) 87 + pr_info("%s: Automatic recovery failed after initial reporting\n", pci_name(pdev)); 88 + else if (ers_res == PCI_ERS_RESULT_NEED_RESET) 89 + pr_debug("%s: Driver needs reset to recover\n", pci_name(pdev)); 90 + 91 + return ers_res; 92 + } 93 + 94 + static pci_ers_result_t zpci_event_do_error_state_clear(struct pci_dev *pdev, 95 + struct pci_driver *driver) 96 + { 97 + pci_ers_result_t ers_res = PCI_ERS_RESULT_DISCONNECT; 98 + struct zpci_dev *zdev = to_zpci(pdev); 99 + int rc; 100 + 101 + pr_info("%s: Unblocking device access for examination\n", pci_name(pdev)); 102 + rc = zpci_reset_load_store_blocked(zdev); 103 + if (rc) { 104 + pr_err("%s: Unblocking device access failed\n", pci_name(pdev)); 105 + /* Let's try a full reset instead */ 106 + return PCI_ERS_RESULT_NEED_RESET; 107 + } 108 + 109 + if (driver->err_handler->mmio_enabled) { 110 + ers_res = 
driver->err_handler->mmio_enabled(pdev); 111 + if (ers_result_indicates_abort(ers_res)) { 112 + pr_info("%s: Automatic recovery failed after MMIO re-enable\n", 113 + pci_name(pdev)); 114 + return ers_res; 115 + } else if (ers_res == PCI_ERS_RESULT_NEED_RESET) { 116 + pr_debug("%s: Driver needs reset to recover\n", pci_name(pdev)); 117 + return ers_res; 118 + } 119 + } 120 + 121 + pr_debug("%s: Unblocking DMA\n", pci_name(pdev)); 122 + rc = zpci_clear_error_state(zdev); 123 + if (!rc) { 124 + pdev->error_state = pci_channel_io_normal; 125 + } else { 126 + pr_err("%s: Unblocking DMA failed\n", pci_name(pdev)); 127 + /* Let's try a full reset instead */ 128 + return PCI_ERS_RESULT_NEED_RESET; 129 + } 130 + 131 + return ers_res; 132 + } 133 + 134 + static pci_ers_result_t zpci_event_do_reset(struct pci_dev *pdev, 135 + struct pci_driver *driver) 136 + { 137 + pci_ers_result_t ers_res = PCI_ERS_RESULT_DISCONNECT; 138 + 139 + pr_info("%s: Initiating reset\n", pci_name(pdev)); 140 + if (zpci_hot_reset_device(to_zpci(pdev))) { 141 + pr_err("%s: The reset request failed\n", pci_name(pdev)); 142 + return ers_res; 143 + } 144 + pdev->error_state = pci_channel_io_normal; 145 + ers_res = driver->err_handler->slot_reset(pdev); 146 + if (ers_result_indicates_abort(ers_res)) { 147 + pr_info("%s: Automatic recovery failed after slot reset\n", pci_name(pdev)); 148 + return ers_res; 149 + } 150 + 151 + return ers_res; 152 + } 153 + 154 + /* zpci_event_attempt_error_recovery - Try to recover the given PCI function 155 + * @pdev: PCI function to recover currently in the error state 156 + * 157 + * We follow the scheme outlined in Documentation/PCI/pci-error-recovery.rst. 158 + * With the simplification that recovery always happens per function 159 + * and the platform determines which functions are affected for 160 + * multi-function devices. 
161 + */ 162 + static pci_ers_result_t zpci_event_attempt_error_recovery(struct pci_dev *pdev) 163 + { 164 + pci_ers_result_t ers_res = PCI_ERS_RESULT_DISCONNECT; 165 + struct pci_driver *driver; 166 + 167 + /* 168 + * Ensure that the PCI function is not removed concurrently, no driver 169 + * is unbound or probed and that userspace can't access its 170 + * configuration space while we perform recovery. 171 + */ 172 + pci_dev_lock(pdev); 173 + if (pdev->error_state == pci_channel_io_perm_failure) { 174 + ers_res = PCI_ERS_RESULT_DISCONNECT; 175 + goto out_unlock; 176 + } 177 + pdev->error_state = pci_channel_io_frozen; 178 + 179 + if (is_passed_through(to_zpci(pdev))) { 180 + pr_info("%s: Cannot be recovered in the host because it is a pass-through device\n", 181 + pci_name(pdev)); 182 + goto out_unlock; 183 + } 184 + 185 + driver = to_pci_driver(pdev->dev.driver); 186 + if (!is_driver_supported(driver)) { 187 + if (!driver) 188 + pr_info("%s: Cannot be recovered because no driver is bound to the device\n", 189 + pci_name(pdev)); 190 + else 191 + pr_info("%s: The %s driver bound to the device does not support error recovery\n", 192 + pci_name(pdev), 193 + driver->name); 194 + goto out_unlock; 195 + } 196 + 197 + ers_res = zpci_event_notify_error_detected(pdev, driver); 198 + if (ers_result_indicates_abort(ers_res)) 199 + goto out_unlock; 200 + 201 + if (ers_res == PCI_ERS_RESULT_CAN_RECOVER) { 202 + ers_res = zpci_event_do_error_state_clear(pdev, driver); 203 + if (ers_result_indicates_abort(ers_res)) 204 + goto out_unlock; 205 + } 206 + 207 + if (ers_res == PCI_ERS_RESULT_NEED_RESET) 208 + ers_res = zpci_event_do_reset(pdev, driver); 209 + 210 + if (ers_res != PCI_ERS_RESULT_RECOVERED) { 211 + pr_err("%s: Automatic recovery failed; operator intervention is required\n", 212 + pci_name(pdev)); 213 + goto out_unlock; 214 + } 215 + 216 + pr_info("%s: The device is ready to resume operations\n", pci_name(pdev)); 217 + if (driver->err_handler->resume) 218 + 
driver->err_handler->resume(pdev); 219 + out_unlock: 220 + pci_dev_unlock(pdev); 221 + 222 + return ers_res; 223 + } 224 + 225 + /* zpci_event_io_failure - Report PCI channel failure state to driver 226 + * @pdev: PCI function for which to report 227 + * @es: PCI channel failure state to report 228 + */ 229 + static void zpci_event_io_failure(struct pci_dev *pdev, pci_channel_state_t es) 230 + { 231 + struct pci_driver *driver; 232 + 233 + pci_dev_lock(pdev); 234 + pdev->error_state = es; 235 + /** 236 + * While vfio-pci's error_detected callback notifies user-space QEMU 237 + * reacts to this by freezing the guest. In an s390 environment PCI 238 + * errors are rarely fatal so this is overkill. Instead in the future 239 + * we will inject the error event and let the guest recover the device 240 + * itself. 241 + */ 242 + if (is_passed_through(to_zpci(pdev))) 243 + goto out; 244 + driver = to_pci_driver(pdev->dev.driver); 245 + if (driver && driver->err_handler && driver->err_handler->error_detected) 246 + driver->err_handler->error_detected(pdev, pdev->error_state); 247 + out: 248 + pci_dev_unlock(pdev); 249 + } 250 + 50 251 static void __zpci_event_error(struct zpci_ccdf_err *ccdf) 51 252 { 52 253 struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid); 53 254 struct pci_dev *pdev = NULL; 255 + pci_ers_result_t ers_res; 54 256 55 257 zpci_dbg(3, "err fid:%x, fh:%x, pec:%x\n", 56 258 ccdf->fid, ccdf->fh, ccdf->pec); 57 259 zpci_err("error CCDF:\n"); 58 260 zpci_err_hex(ccdf, sizeof(*ccdf)); 59 261 60 - if (zdev) 61 - pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn); 262 + if (zdev) { 263 + zpci_update_fh(zdev, ccdf->fh); 264 + if (zdev->zbus->bus) 265 + pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn); 266 + } 62 267 63 268 pr_err("%s: Event 0x%x reports an error for PCI function 0x%x\n", 64 269 pdev ? 
pci_name(pdev) : "n/a", ccdf->pec, ccdf->fid); ··· 271 66 if (!pdev) 272 67 return; 273 68 274 - pdev->error_state = pci_channel_io_perm_failure; 69 + switch (ccdf->pec) { 70 + case 0x003a: /* Service Action or Error Recovery Successful */ 71 + ers_res = zpci_event_attempt_error_recovery(pdev); 72 + if (ers_res != PCI_ERS_RESULT_RECOVERED) 73 + zpci_event_io_failure(pdev, pci_channel_io_perm_failure); 74 + break; 75 + default: 76 + /* 77 + * Mark as frozen not permanently failed because the device 78 + * could be subsequently recovered by the platform. 79 + */ 80 + zpci_event_io_failure(pdev, pci_channel_io_frozen); 81 + break; 82 + } 275 83 pci_dev_put(pdev); 276 84 } 277 85 ··· 296 78 297 79 static void zpci_event_hard_deconfigured(struct zpci_dev *zdev, u32 fh) 298 80 { 299 - zdev->fh = fh; 81 + zpci_update_fh(zdev, fh); 300 82 /* Give the driver a hint that the function is 301 83 * already unusable. 302 84 */ ··· 339 121 if (!zdev) 340 122 zpci_create_device(ccdf->fid, ccdf->fh, ZPCI_FN_STATE_STANDBY); 341 123 else 342 - zdev->fh = ccdf->fh; 124 + zpci_update_fh(zdev, ccdf->fh); 343 125 break; 344 126 case 0x0303: /* Deconfiguration requested */ 345 127 if (zdev) { ··· 348 130 */ 349 131 if (zdev->state != ZPCI_FN_STATE_CONFIGURED) 350 132 break; 351 - zdev->fh = ccdf->fh; 133 + zpci_update_fh(zdev, ccdf->fh); 352 134 zpci_deconfigure_device(zdev); 353 135 } 354 136 break;
+2 -2
arch/s390/pci/pci_insn.c
··· 163 163 unsigned long len) 164 164 { 165 165 struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)]; 166 - u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, len); 166 + u64 req = ZPCI_CREATE_REQ(READ_ONCE(entry->fh), entry->bar, len); 167 167 168 168 return __zpci_load(data, req, ZPCI_OFFSET(addr)); 169 169 } ··· 244 244 unsigned long len) 245 245 { 246 246 struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)]; 247 - u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, len); 247 + u64 req = ZPCI_CREATE_REQ(READ_ONCE(entry->fh), entry->bar, len); 248 248 249 249 return __zpci_store(data, req, ZPCI_OFFSET(addr)); 250 250 }
+9
arch/s390/pci/pci_irq.c
··· 387 387 airq_iv_free(zpci_ibv[0], zdev->msi_first_bit, zdev->msi_nr_irqs); 388 388 } 389 389 390 + void arch_restore_msi_irqs(struct pci_dev *pdev) 391 + { 392 + struct zpci_dev *zdev = to_zpci(pdev); 393 + 394 + if (!zdev->irqs_registered) 395 + zpci_set_irq(zdev); 396 + default_restore_msi_irqs(pdev); 397 + } 398 + 390 399 static struct airq_struct zpci_airq = { 391 400 .handler = zpci_floating_irq_handler, 392 401 .isc = PCI_ISC,
+24
drivers/pci/hotplug/s390_pci_hpc.c
··· 57 57 return zpci_deconfigure_device(zdev); 58 58 } 59 59 60 + static int reset_slot(struct hotplug_slot *hotplug_slot, bool probe) 61 + { 62 + struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev, 63 + hotplug_slot); 64 + 65 + if (zdev->state != ZPCI_FN_STATE_CONFIGURED) 66 + return -EIO; 67 + /* 68 + * We can't take the zdev->lock as reset_slot may be called during 69 + * probing and/or device removal which already happens under the 70 + * zdev->lock. Instead the user should use the higher level 71 + * pci_reset_function() or pci_bus_reset() which hold the PCI device 72 + * lock preventing concurrent removal. If not using these functions 73 + * holding the PCI device lock is required. 74 + */ 75 + 76 + /* As long as the function is configured we can reset */ 77 + if (probe) 78 + return 0; 79 + 80 + return zpci_hot_reset_device(zdev); 81 + } 82 + 60 83 static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value) 61 84 { 62 85 struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev, ··· 99 76 static const struct hotplug_slot_ops s390_hotplug_slot_ops = { 100 77 .enable_slot = enable_slot, 101 78 .disable_slot = disable_slot, 79 + .reset_slot = reset_slot, 102 80 .get_power_status = get_power_status, 103 81 .get_adapter_status = get_adapter_status, 104 82 };
+2 -1
drivers/pci/pci.c
··· 5106 5106 return pci_parent_bus_reset(dev, probe); 5107 5107 } 5108 5108 5109 - static void pci_dev_lock(struct pci_dev *dev) 5109 + void pci_dev_lock(struct pci_dev *dev) 5110 5110 { 5111 5111 pci_cfg_access_lock(dev); 5112 5112 /* block PM suspend, driver probe, etc. */ 5113 5113 device_lock(&dev->dev); 5114 5114 } 5115 + EXPORT_SYMBOL_GPL(pci_dev_lock); 5115 5116 5116 5117 /* Return 1 on successful lock, 0 on contention */ 5117 5118 int pci_dev_trylock(struct pci_dev *dev)
+1 -2
drivers/s390/char/tape_std.c
··· 53 53 tape_std_assign(struct tape_device *device) 54 54 { 55 55 int rc; 56 - struct timer_list timeout; 57 56 struct tape_request *request; 58 57 59 58 request = tape_alloc_request(2, 11); ··· 69 70 * So we set up a timeout for this call. 70 71 */ 71 72 timer_setup(&request->timer, tape_std_assign_timeout, 0); 72 - mod_timer(&timeout, jiffies + 2 * HZ); 73 + mod_timer(&request->timer, jiffies + msecs_to_jiffies(2000)); 73 74 74 75 rc = tape_do_io_interruptible(device, request); 75 76
+2 -2
drivers/s390/cio/css.c
··· 437 437 struct subchannel *sch = to_subchannel(dev); 438 438 struct pmcw *pmcw = &sch->schib.pmcw; 439 439 440 - if ((pmcw->st == SUBCHANNEL_TYPE_IO || 441 - pmcw->st == SUBCHANNEL_TYPE_MSG) && pmcw->dnv) 440 + if ((pmcw->st == SUBCHANNEL_TYPE_IO && pmcw->dnv) || 441 + (pmcw->st == SUBCHANNEL_TYPE_MSG && pmcw->w)) 442 442 return sysfs_emit(buf, "0.%x.%04x\n", sch->schid.ssid, 443 443 pmcw->dev); 444 444 else
+1
include/linux/pci.h
··· 1666 1666 bool pci_cfg_access_trylock(struct pci_dev *dev); 1667 1667 void pci_cfg_access_unlock(struct pci_dev *dev); 1668 1668 1669 + void pci_dev_lock(struct pci_dev *dev); 1669 1670 int pci_dev_trylock(struct pci_dev *dev); 1670 1671 void pci_dev_unlock(struct pci_dev *dev); 1671 1672