Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 's390-5.1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 fixes from Martin Schwidefsky:
"Improvements and bug fixes for 5.1-rc2:

- Fix early free of the channel program in vfio

- On AP device removal make sure that all messages are flushed with
the driver that queued the message still attached

- Limit brk randomization to 32MB to reduce the chance that the heap
of ld.so is placed after the main stack

- Add a rolling average for the steal time of a CPU; this will be
needed for KVM to decide when to do busy waiting

- Fix a warning in the CPU-MF code

- Add a notification handler for AP configuration change to react
faster to new AP devices"

* tag 's390-5.1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
s390/cpumf: Fix warning from check_processor_id
zcrypt: handle AP Info notification from CHSC SEI command
vfio: ccw: only free cp on final interrupt
s390/vtime: steal time exponential moving average
s390/zcrypt: revisit ap device remove procedure
s390: limit brk randomization to 32MB

+155 -69
+11
arch/s390/include/asm/ap.h
··· 360 360 return reg1; 361 361 } 362 362 363 + /* 364 + * Interface to tell the AP bus code that a configuration 365 + * change has happened. The bus code should at least do 366 + * an ap bus resource rescan. 367 + */ 368 + #if IS_ENABLED(CONFIG_ZCRYPT) 369 + void ap_bus_cfg_chg(void); 370 + #else 371 + static inline void ap_bus_cfg_chg(void){}; 372 + #endif 373 + 363 374 #endif /* _ASM_S390_AP_H_ */
+7 -4
arch/s390/include/asm/elf.h
··· 252 252 253 253 /* 254 254 * Cache aliasing on the latest machines calls for a mapping granularity 255 - * of 512KB. For 64-bit processes use a 512KB alignment and a randomization 256 - * of up to 1GB. For 31-bit processes the virtual address space is limited, 257 - * use no alignment and limit the randomization to 8MB. 255 + * of 512KB for the anonymous mapping base. For 64-bit processes use a 256 + * 512KB alignment and a randomization of up to 1GB. For 31-bit processes 257 + * the virtual address space is limited, use no alignment and limit the 258 + * randomization to 8MB. 259 + * For the additional randomization of the program break use 32MB for 260 + * 64-bit and 8MB for 31-bit. 258 261 */ 259 - #define BRK_RND_MASK (is_compat_task() ? 0x7ffUL : 0x3ffffUL) 262 + #define BRK_RND_MASK (is_compat_task() ? 0x7ffUL : 0x1fffUL) 260 263 #define MMAP_RND_MASK (is_compat_task() ? 0x7ffUL : 0x3ff80UL) 261 264 #define MMAP_ALIGN_MASK (is_compat_task() ? 0 : 0x7fUL) 262 265 #define STACK_RND_MASK MMAP_RND_MASK
+31 -30
arch/s390/include/asm/lowcore.h
··· 91 91 __u64 hardirq_timer; /* 0x02e8 */ 92 92 __u64 softirq_timer; /* 0x02f0 */ 93 93 __u64 steal_timer; /* 0x02f8 */ 94 - __u64 last_update_timer; /* 0x0300 */ 95 - __u64 last_update_clock; /* 0x0308 */ 96 - __u64 int_clock; /* 0x0310 */ 97 - __u64 mcck_clock; /* 0x0318 */ 98 - __u64 clock_comparator; /* 0x0320 */ 99 - __u64 boot_clock[2]; /* 0x0328 */ 94 + __u64 avg_steal_timer; /* 0x0300 */ 95 + __u64 last_update_timer; /* 0x0308 */ 96 + __u64 last_update_clock; /* 0x0310 */ 97 + __u64 int_clock; /* 0x0318*/ 98 + __u64 mcck_clock; /* 0x0320 */ 99 + __u64 clock_comparator; /* 0x0328 */ 100 + __u64 boot_clock[2]; /* 0x0330 */ 100 101 101 102 /* Current process. */ 102 - __u64 current_task; /* 0x0338 */ 103 - __u64 kernel_stack; /* 0x0340 */ 103 + __u64 current_task; /* 0x0340 */ 104 + __u64 kernel_stack; /* 0x0348 */ 104 105 105 106 /* Interrupt, DAT-off and restartstack. */ 106 - __u64 async_stack; /* 0x0348 */ 107 - __u64 nodat_stack; /* 0x0350 */ 108 - __u64 restart_stack; /* 0x0358 */ 107 + __u64 async_stack; /* 0x0350 */ 108 + __u64 nodat_stack; /* 0x0358 */ 109 + __u64 restart_stack; /* 0x0360 */ 109 110 110 111 /* Restart function and parameter. */ 111 - __u64 restart_fn; /* 0x0360 */ 112 - __u64 restart_data; /* 0x0368 */ 113 - __u64 restart_source; /* 0x0370 */ 112 + __u64 restart_fn; /* 0x0368 */ 113 + __u64 restart_data; /* 0x0370 */ 114 + __u64 restart_source; /* 0x0378 */ 114 115 115 116 /* Address space pointer. */ 116 - __u64 kernel_asce; /* 0x0378 */ 117 - __u64 user_asce; /* 0x0380 */ 118 - __u64 vdso_asce; /* 0x0388 */ 117 + __u64 kernel_asce; /* 0x0380 */ 118 + __u64 user_asce; /* 0x0388 */ 119 + __u64 vdso_asce; /* 0x0390 */ 119 120 120 121 /* 121 122 * The lpp and current_pid fields form a 122 123 * 64-bit value that is set as program 123 124 * parameter with the LPP instruction. 
124 125 */ 125 - __u32 lpp; /* 0x0390 */ 126 - __u32 current_pid; /* 0x0394 */ 126 + __u32 lpp; /* 0x0398 */ 127 + __u32 current_pid; /* 0x039c */ 127 128 128 129 /* SMP info area */ 129 - __u32 cpu_nr; /* 0x0398 */ 130 - __u32 softirq_pending; /* 0x039c */ 131 - __u32 preempt_count; /* 0x03a0 */ 132 - __u32 spinlock_lockval; /* 0x03a4 */ 133 - __u32 spinlock_index; /* 0x03a8 */ 134 - __u32 fpu_flags; /* 0x03ac */ 135 - __u64 percpu_offset; /* 0x03b0 */ 136 - __u64 vdso_per_cpu_data; /* 0x03b8 */ 137 - __u64 machine_flags; /* 0x03c0 */ 138 - __u64 gmap; /* 0x03c8 */ 139 - __u8 pad_0x03d0[0x0400-0x03d0]; /* 0x03d0 */ 130 + __u32 cpu_nr; /* 0x03a0 */ 131 + __u32 softirq_pending; /* 0x03a4 */ 132 + __u32 preempt_count; /* 0x03a8 */ 133 + __u32 spinlock_lockval; /* 0x03ac */ 134 + __u32 spinlock_index; /* 0x03b0 */ 135 + __u32 fpu_flags; /* 0x03b4 */ 136 + __u64 percpu_offset; /* 0x03b8 */ 137 + __u64 vdso_per_cpu_data; /* 0x03c0 */ 138 + __u64 machine_flags; /* 0x03c8 */ 139 + __u64 gmap; /* 0x03d0 */ 140 + __u8 pad_0x03d8[0x0400-0x03d8]; /* 0x03d8 */ 140 141 141 142 /* br %r1 trampoline */ 142 143 __u16 br_r1_trampoline; /* 0x0400 */
+13 -6
arch/s390/kernel/perf_cpum_cf_diag.c
··· 196 196 */ 197 197 static int __hw_perf_event_init(struct perf_event *event) 198 198 { 199 - struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); 200 199 struct perf_event_attr *attr = &event->attr; 200 + struct cpu_cf_events *cpuhw; 201 201 enum cpumf_ctr_set i; 202 202 int err = 0; 203 203 204 - debug_sprintf_event(cf_diag_dbg, 5, 205 - "%s event %p cpu %d authorized %#x\n", __func__, 206 - event, event->cpu, cpuhw->info.auth_ctl); 204 + debug_sprintf_event(cf_diag_dbg, 5, "%s event %p cpu %d\n", __func__, 205 + event, event->cpu); 207 206 208 207 event->hw.config = attr->config; 209 208 event->hw.config_base = 0; 210 - local64_set(&event->count, 0); 211 209 212 - /* Add all authorized counter sets to config_base */ 210 + /* Add all authorized counter sets to config_base. The 211 + * hardware init function is either called per-cpu or just once 212 + * for all CPUs (event->cpu == -1). This depends on whether 213 + * counting is started for all CPUs or on a per-workload basis where 214 + * the perf event moves from one CPU to another CPU. 215 + * Checking the authorization on any CPU is fine as the hardware 216 + * applies the same authorization settings to all CPUs. 217 + */ 218 + cpuhw = &get_cpu_var(cpu_cf_events); 213 219 for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) 214 220 if (cpuhw->info.auth_ctl & cpumf_ctr_ctl[i]) 215 221 event->hw.config_base |= cpumf_ctr_ctl[i]; 222 + put_cpu_var(cpu_cf_events); 216 223 217 224 /* No authorized counter sets, nothing to count/sample */ 218 225 if (!event->hw.config_base) {
+2 -1
arch/s390/kernel/smp.c
··· 266 266 lc->percpu_offset = __per_cpu_offset[cpu]; 267 267 lc->kernel_asce = S390_lowcore.kernel_asce; 268 268 lc->machine_flags = S390_lowcore.machine_flags; 269 - lc->user_timer = lc->system_timer = lc->steal_timer = 0; 269 + lc->user_timer = lc->system_timer = 270 + lc->steal_timer = lc->avg_steal_timer = 0; 270 271 __ctl_store(lc->cregs_save_area, 0, 15); 271 272 save_access_regs((unsigned int *) lc->access_regs_save_area); 272 273 memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
+12 -7
arch/s390/kernel/vtime.c
··· 124 124 */ 125 125 static int do_account_vtime(struct task_struct *tsk) 126 126 { 127 - u64 timer, clock, user, guest, system, hardirq, softirq, steal; 127 + u64 timer, clock, user, guest, system, hardirq, softirq; 128 128 129 129 timer = S390_lowcore.last_update_timer; 130 130 clock = S390_lowcore.last_update_clock; ··· 182 182 if (softirq) 183 183 account_system_index_scaled(tsk, softirq, CPUTIME_SOFTIRQ); 184 184 185 - steal = S390_lowcore.steal_timer; 186 - if ((s64) steal > 0) { 187 - S390_lowcore.steal_timer = 0; 188 - account_steal_time(cputime_to_nsecs(steal)); 189 - } 190 - 191 185 return virt_timer_forward(user + guest + system + hardirq + softirq); 192 186 } 193 187 ··· 207 213 */ 208 214 void vtime_flush(struct task_struct *tsk) 209 215 { 216 + u64 steal, avg_steal; 217 + 210 218 if (do_account_vtime(tsk)) 211 219 virt_timer_expire(); 220 + 221 + steal = S390_lowcore.steal_timer; 222 + avg_steal = S390_lowcore.avg_steal_timer / 2; 223 + if ((s64) steal > 0) { 224 + S390_lowcore.steal_timer = 0; 225 + account_steal_time(steal); 226 + avg_steal += steal; 227 + } 228 + S390_lowcore.avg_steal_timer = avg_steal; 212 229 } 213 230 214 231 /*
+13
drivers/s390/cio/chsc.c
··· 24 24 #include <asm/crw.h> 25 25 #include <asm/isc.h> 26 26 #include <asm/ebcdic.h> 27 + #include <asm/ap.h> 27 28 28 29 #include "css.h" 29 30 #include "cio.h" ··· 587 586 " failed (rc=%d).\n", ret); 588 587 } 589 588 589 + static void chsc_process_sei_ap_cfg_chg(struct chsc_sei_nt0_area *sei_area) 590 + { 591 + CIO_CRW_EVENT(3, "chsc: ap config changed\n"); 592 + if (sei_area->rs != 5) 593 + return; 594 + 595 + ap_bus_cfg_chg(); 596 + } 597 + 590 598 static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area) 591 599 { 592 600 switch (sei_area->cc) { ··· 621 611 break; 622 612 case 2: /* i/o resource accessibility */ 623 613 chsc_process_sei_res_acc(sei_area); 614 + break; 615 + case 3: /* ap config changed */ 616 + chsc_process_sei_ap_cfg_chg(sei_area); 624 617 break; 625 618 case 7: /* channel-path-availability information */ 626 619 chsc_process_sei_chp_avail(sei_area);
+6 -2
drivers/s390/cio/vfio_ccw_drv.c
··· 72 72 { 73 73 struct vfio_ccw_private *private; 74 74 struct irb *irb; 75 + bool is_final; 75 76 76 77 private = container_of(work, struct vfio_ccw_private, io_work); 77 78 irb = &private->irb; 78 79 80 + is_final = !(scsw_actl(&irb->scsw) & 81 + (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)); 79 82 if (scsw_is_solicited(&irb->scsw)) { 80 83 cp_update_scsw(&private->cp, &irb->scsw); 81 - cp_free(&private->cp); 84 + if (is_final) 85 + cp_free(&private->cp); 82 86 } 83 87 memcpy(private->io_region->irb_area, irb, sizeof(*irb)); 84 88 85 89 if (private->io_trigger) 86 90 eventfd_signal(private->io_trigger, 1); 87 91 88 - if (private->mdev) 92 + if (private->mdev && is_final) 89 93 private->state = VFIO_CCW_STATE_IDLE; 90 94 } 91 95
+18 -1
drivers/s390/crypto/ap_bus.c
··· 810 810 struct ap_device *ap_dev = to_ap_dev(dev); 811 811 struct ap_driver *ap_drv = ap_dev->drv; 812 812 813 + /* prepare ap queue device removal */ 813 814 if (is_queue_dev(dev)) 814 - ap_queue_remove(to_ap_queue(dev)); 815 + ap_queue_prepare_remove(to_ap_queue(dev)); 816 + 817 + /* driver's chance to clean up gracefully */ 815 818 if (ap_drv->remove) 816 819 ap_drv->remove(ap_dev); 820 + 821 + /* now do the ap queue device remove */ 822 + if (is_queue_dev(dev)) 823 + ap_queue_remove(to_ap_queue(dev)); 817 824 818 825 /* Remove queue/card from list of active queues/cards */ 819 826 spin_lock_bh(&ap_list_lock); ··· 866 859 flush_work(&ap_scan_work); 867 860 } 868 861 EXPORT_SYMBOL(ap_bus_force_rescan); 862 + 863 + /* 864 + * A config change has happened, force an ap bus rescan. 865 + */ 866 + void ap_bus_cfg_chg(void) 867 + { 868 + AP_DBF(DBF_INFO, "%s config change, forcing bus rescan\n", __func__); 869 + 870 + ap_bus_force_rescan(); 871 + } 869 872 870 873 /* 871 874 * hex2bitmap() - parse hex mask string and set bitmap.
+2
drivers/s390/crypto/ap_bus.h
··· 91 91 AP_STATE_WORKING, 92 92 AP_STATE_QUEUE_FULL, 93 93 AP_STATE_SUSPEND_WAIT, 94 + AP_STATE_REMOVE, /* about to be removed from driver */ 94 95 AP_STATE_UNBOUND, /* momentary not bound to a driver */ 95 96 AP_STATE_BORKED, /* broken */ 96 97 NR_AP_STATES ··· 253 252 254 253 void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg); 255 254 struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type); 255 + void ap_queue_prepare_remove(struct ap_queue *aq); 256 256 void ap_queue_remove(struct ap_queue *aq); 257 257 void ap_queue_suspend(struct ap_device *ap_dev); 258 258 void ap_queue_resume(struct ap_device *ap_dev);
+22 -6
drivers/s390/crypto/ap_queue.c
··· 420 420 [AP_EVENT_POLL] = ap_sm_suspend_read, 421 421 [AP_EVENT_TIMEOUT] = ap_sm_nop, 422 422 }, 423 + [AP_STATE_REMOVE] = { 424 + [AP_EVENT_POLL] = ap_sm_nop, 425 + [AP_EVENT_TIMEOUT] = ap_sm_nop, 426 + }, 423 427 [AP_STATE_UNBOUND] = { 424 428 [AP_EVENT_POLL] = ap_sm_nop, 425 429 [AP_EVENT_TIMEOUT] = ap_sm_nop, ··· 744 740 } 745 741 EXPORT_SYMBOL(ap_flush_queue); 746 742 743 + void ap_queue_prepare_remove(struct ap_queue *aq) 744 + { 745 + spin_lock_bh(&aq->lock); 746 + /* flush queue */ 747 + __ap_flush_queue(aq); 748 + /* set REMOVE state to prevent new messages are queued in */ 749 + aq->state = AP_STATE_REMOVE; 750 + del_timer_sync(&aq->timeout); 751 + spin_unlock_bh(&aq->lock); 752 + } 753 + 747 754 void ap_queue_remove(struct ap_queue *aq) 748 755 { 749 - ap_flush_queue(aq); 750 - del_timer_sync(&aq->timeout); 751 - 752 - /* reset with zero, also clears irq registration */ 756 + /* 757 + * all messages have been flushed and the state is 758 + * AP_STATE_REMOVE. Now reset with zero which also 759 + * clears the irq registration and move the state 760 + * to AP_STATE_UNBOUND to signal that this queue 761 + * is not used by any driver currently. 762 + */ 753 763 spin_lock_bh(&aq->lock); 754 764 ap_zapq(aq->qid); 755 765 aq->state = AP_STATE_UNBOUND; 756 766 spin_unlock_bh(&aq->lock); 757 767 } 758 - EXPORT_SYMBOL(ap_queue_remove); 759 768 760 769 void ap_queue_reinit_state(struct ap_queue *aq) 761 770 { ··· 777 760 ap_wait(ap_sm_event(aq, AP_EVENT_POLL)); 778 761 spin_unlock_bh(&aq->lock); 779 762 } 780 - EXPORT_SYMBOL(ap_queue_reinit_state);
+18 -12
drivers/s390/crypto/zcrypt_api.c
··· 586 586 587 587 static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc, 588 588 struct zcrypt_queue *zq, 589 + struct module **pmod, 589 590 unsigned int weight) 590 591 { 591 592 if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner)) ··· 596 595 atomic_add(weight, &zc->load); 597 596 atomic_add(weight, &zq->load); 598 597 zq->request_count++; 598 + *pmod = zq->queue->ap_dev.drv->driver.owner; 599 599 return zq; 600 600 } 601 601 602 602 static inline void zcrypt_drop_queue(struct zcrypt_card *zc, 603 603 struct zcrypt_queue *zq, 604 + struct module *mod, 604 605 unsigned int weight) 605 606 { 606 - struct module *mod = zq->queue->ap_dev.drv->driver.owner; 607 - 608 607 zq->request_count--; 609 608 atomic_sub(weight, &zc->load); 610 609 atomic_sub(weight, &zq->load); ··· 654 653 unsigned int weight, pref_weight; 655 654 unsigned int func_code; 656 655 int qid = 0, rc = -ENODEV; 656 + struct module *mod; 657 657 658 658 trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO); 659 659 ··· 708 706 pref_weight = weight; 709 707 } 710 708 } 711 - pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 709 + pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight); 712 710 spin_unlock(&zcrypt_list_lock); 713 711 714 712 if (!pref_zq) { ··· 720 718 rc = pref_zq->ops->rsa_modexpo(pref_zq, mex); 721 719 722 720 spin_lock(&zcrypt_list_lock); 723 - zcrypt_drop_queue(pref_zc, pref_zq, weight); 721 + zcrypt_drop_queue(pref_zc, pref_zq, mod, weight); 724 722 spin_unlock(&zcrypt_list_lock); 725 723 726 724 out: ··· 737 735 unsigned int weight, pref_weight; 738 736 unsigned int func_code; 739 737 int qid = 0, rc = -ENODEV; 738 + struct module *mod; 740 739 741 740 trace_s390_zcrypt_req(crt, TP_ICARSACRT); 742 741 ··· 791 788 pref_weight = weight; 792 789 } 793 790 } 794 - pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 791 + pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight); 795 792 spin_unlock(&zcrypt_list_lock); 796 793 797 794 if 
(!pref_zq) { ··· 803 800 rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt); 804 801 805 802 spin_lock(&zcrypt_list_lock); 806 - zcrypt_drop_queue(pref_zc, pref_zq, weight); 803 + zcrypt_drop_queue(pref_zc, pref_zq, mod, weight); 807 804 spin_unlock(&zcrypt_list_lock); 808 805 809 806 out: ··· 822 819 unsigned int func_code; 823 820 unsigned short *domain; 824 821 int qid = 0, rc = -ENODEV; 822 + struct module *mod; 825 823 826 824 trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB); 827 825 ··· 869 865 pref_weight = weight; 870 866 } 871 867 } 872 - pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 868 + pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight); 873 869 spin_unlock(&zcrypt_list_lock); 874 870 875 871 if (!pref_zq) { ··· 885 881 rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg); 886 882 887 883 spin_lock(&zcrypt_list_lock); 888 - zcrypt_drop_queue(pref_zc, pref_zq, weight); 884 + zcrypt_drop_queue(pref_zc, pref_zq, mod, weight); 889 885 spin_unlock(&zcrypt_list_lock); 890 886 891 887 out: ··· 936 932 unsigned int func_code; 937 933 struct ap_message ap_msg; 938 934 int qid = 0, rc = -ENODEV; 935 + struct module *mod; 939 936 940 937 trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB); 941 938 ··· 1005 1000 pref_weight = weight; 1006 1001 } 1007 1002 } 1008 - pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 1003 + pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight); 1009 1004 spin_unlock(&zcrypt_list_lock); 1010 1005 1011 1006 if (!pref_zq) { ··· 1017 1012 rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg); 1018 1013 1019 1014 spin_lock(&zcrypt_list_lock); 1020 - zcrypt_drop_queue(pref_zc, pref_zq, weight); 1015 + zcrypt_drop_queue(pref_zc, pref_zq, mod, weight); 1021 1016 spin_unlock(&zcrypt_list_lock); 1022 1017 1023 1018 out_free: ··· 1038 1033 struct ap_message ap_msg; 1039 1034 unsigned int domain; 1040 1035 int qid = 0, rc = -ENODEV; 1036 + struct module *mod; 1041 1037 1042 1038 trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB); 
1043 1039 ··· 1070 1064 pref_weight = weight; 1071 1065 } 1072 1066 } 1073 - pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 1067 + pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight); 1074 1068 spin_unlock(&zcrypt_list_lock); 1075 1069 1076 1070 if (!pref_zq) { ··· 1082 1076 rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg); 1083 1077 1084 1078 spin_lock(&zcrypt_list_lock); 1085 - zcrypt_drop_queue(pref_zc, pref_zq, weight); 1079 + zcrypt_drop_queue(pref_zc, pref_zq, mod, weight); 1086 1080 spin_unlock(&zcrypt_list_lock); 1087 1081 1088 1082 out: