Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 's390-5.2-3' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 fixes from Heiko Carstens:

- Farewell Martin Schwidefsky: add Martin to CREDITS and remove him
from MAINTAINERS

- Vasily Gorbik and Christian Borntraeger join as maintainers for s390

- Fix locking bug in ctr(aes) and ctr(des) s390 specific ciphers

- A rather large patch which fixes gcm-aes-s390 scatter-gather handling

- Fix zcrypt wrong dispatching for control domain CPRBs

- Fix assignment of bus resources in PCI code

- Fix structure definition for set PCI function

- Fix one compile error and one compile warning seen when
CONFIG_OPTIMIZE_INLINING is enabled

* tag 's390-5.2-3' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
MAINTAINERS: add Vasily Gorbik and Christian Borntraeger for s390
MAINTAINERS: Farewell Martin Schwidefsky
s390/crypto: fix possible sleep during spinlock acquired
s390/crypto: fix gcm-aes-s390 selftest failures
s390/zcrypt: Fix wrong dispatching for control domain CPRBs
s390/pci: fix assignment of bus resources
s390/pci: fix struct definition for set PCI function
s390: mark __cpacf_check_opcode() and cpacf_query_func() as __always_inline
s390: add unreachable() to dump_fault_info() to fix -Wmaybe-uninitialized

+195 -76
+8
CREDITS
··· 3364 3364 S: 31134 Hildesheim 3365 3365 S: Germany 3366 3366 3367 + N: Martin Schwidefsky 3368 + D: Martin was the most significant contributor to the initial s390 3369 + D: port of the Linux Kernel and later the maintainer of the s390 3370 + D: architecture backend for almost two decades. 3371 + D: He passed away in 2019, and will be greatly missed. 3372 + S: Germany 3373 + W: https://lwn.net/Articles/789028/ 3374 + 3367 3375 N: Marcel Selhorst 3368 3376 E: tpmdd@selhorst.net 3369 3377 D: TPM driver
+4 -2
MAINTAINERS
··· 3049 3049 F: arch/riscv/net/ 3050 3050 3051 3051 BPF JIT for S390 3052 - M: Martin Schwidefsky <schwidefsky@de.ibm.com> 3053 3052 M: Heiko Carstens <heiko.carstens@de.ibm.com> 3053 + M: Vasily Gorbik <gor@linux.ibm.com> 3054 + M: Christian Borntraeger <borntraeger@de.ibm.com> 3054 3055 L: netdev@vger.kernel.org 3055 3056 L: bpf@vger.kernel.org 3056 3057 S: Maintained ··· 13615 13614 F: drivers/video/fbdev/savage/ 13616 13615 13617 13616 S390 13618 - M: Martin Schwidefsky <schwidefsky@de.ibm.com> 13619 13617 M: Heiko Carstens <heiko.carstens@de.ibm.com> 13618 + M: Vasily Gorbik <gor@linux.ibm.com> 13619 + M: Christian Borntraeger <borntraeger@de.ibm.com> 13620 13620 L: linux-s390@vger.kernel.org 13621 13621 W: http://www.ibm.com/developerworks/linux/linux390/ 13622 13622 T: git git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux.git
+111 -45
arch/s390/crypto/aes_s390.c
··· 27 27 #include <linux/module.h> 28 28 #include <linux/cpufeature.h> 29 29 #include <linux/init.h> 30 - #include <linux/spinlock.h> 30 + #include <linux/mutex.h> 31 31 #include <linux/fips.h> 32 32 #include <linux/string.h> 33 33 #include <crypto/xts.h> 34 34 #include <asm/cpacf.h> 35 35 36 36 static u8 *ctrblk; 37 - static DEFINE_SPINLOCK(ctrblk_lock); 37 + static DEFINE_MUTEX(ctrblk_lock); 38 38 39 39 static cpacf_mask_t km_functions, kmc_functions, kmctr_functions, 40 40 kma_functions; ··· 698 698 unsigned int n, nbytes; 699 699 int ret, locked; 700 700 701 - locked = spin_trylock(&ctrblk_lock); 701 + locked = mutex_trylock(&ctrblk_lock); 702 702 703 703 ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE); 704 704 while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) { ··· 716 716 ret = blkcipher_walk_done(desc, walk, nbytes - n); 717 717 } 718 718 if (locked) 719 - spin_unlock(&ctrblk_lock); 719 + mutex_unlock(&ctrblk_lock); 720 720 /* 721 721 * final block may be < AES_BLOCK_SIZE, copy only nbytes 722 722 */ ··· 826 826 return 0; 827 827 } 828 828 829 - static void gcm_sg_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg, 830 - unsigned int len) 829 + static void gcm_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg, 830 + unsigned int len) 831 831 { 832 832 memset(gw, 0, sizeof(*gw)); 833 833 gw->walk_bytes_remain = len; 834 834 scatterwalk_start(&gw->walk, sg); 835 835 } 836 836 837 - static int gcm_sg_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded) 837 + static inline unsigned int _gcm_sg_clamp_and_map(struct gcm_sg_walk *gw) 838 + { 839 + struct scatterlist *nextsg; 840 + 841 + gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain); 842 + while (!gw->walk_bytes) { 843 + nextsg = sg_next(gw->walk.sg); 844 + if (!nextsg) 845 + return 0; 846 + scatterwalk_start(&gw->walk, nextsg); 847 + gw->walk_bytes = scatterwalk_clamp(&gw->walk, 848 + gw->walk_bytes_remain); 849 + } 850 + gw->walk_ptr = 
scatterwalk_map(&gw->walk); 851 + return gw->walk_bytes; 852 + } 853 + 854 + static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw, 855 + unsigned int nbytes) 856 + { 857 + gw->walk_bytes_remain -= nbytes; 858 + scatterwalk_unmap(&gw->walk); 859 + scatterwalk_advance(&gw->walk, nbytes); 860 + scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain); 861 + gw->walk_ptr = NULL; 862 + } 863 + 864 + static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded) 838 865 { 839 866 int n; 840 867 841 - /* minbytesneeded <= AES_BLOCK_SIZE */ 842 868 if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) { 843 869 gw->ptr = gw->buf; 844 870 gw->nbytes = gw->buf_bytes; ··· 877 851 goto out; 878 852 } 879 853 880 - gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain); 881 - if (!gw->walk_bytes) { 882 - scatterwalk_start(&gw->walk, sg_next(gw->walk.sg)); 883 - gw->walk_bytes = scatterwalk_clamp(&gw->walk, 884 - gw->walk_bytes_remain); 854 + if (!_gcm_sg_clamp_and_map(gw)) { 855 + gw->ptr = NULL; 856 + gw->nbytes = 0; 857 + goto out; 885 858 } 886 - gw->walk_ptr = scatterwalk_map(&gw->walk); 887 859 888 860 if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) { 889 861 gw->ptr = gw->walk_ptr; ··· 893 869 n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes); 894 870 memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n); 895 871 gw->buf_bytes += n; 896 - gw->walk_bytes_remain -= n; 897 - scatterwalk_unmap(&gw->walk); 898 - scatterwalk_advance(&gw->walk, n); 899 - scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain); 900 - 872 + _gcm_sg_unmap_and_advance(gw, n); 901 873 if (gw->buf_bytes >= minbytesneeded) { 902 874 gw->ptr = gw->buf; 903 875 gw->nbytes = gw->buf_bytes; 904 876 goto out; 905 877 } 906 - 907 - gw->walk_bytes = scatterwalk_clamp(&gw->walk, 908 - gw->walk_bytes_remain); 909 - if (!gw->walk_bytes) { 910 - scatterwalk_start(&gw->walk, sg_next(gw->walk.sg)); 911 - gw->walk_bytes = scatterwalk_clamp(&gw->walk, 912 - 
gw->walk_bytes_remain); 878 + if (!_gcm_sg_clamp_and_map(gw)) { 879 + gw->ptr = NULL; 880 + gw->nbytes = 0; 881 + goto out; 913 882 } 914 - gw->walk_ptr = scatterwalk_map(&gw->walk); 915 883 } 916 884 917 885 out: 918 886 return gw->nbytes; 919 887 } 920 888 921 - static void gcm_sg_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone) 889 + static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded) 922 890 { 923 - int n; 891 + if (gw->walk_bytes_remain == 0) { 892 + gw->ptr = NULL; 893 + gw->nbytes = 0; 894 + goto out; 895 + } 924 896 897 + if (!_gcm_sg_clamp_and_map(gw)) { 898 + gw->ptr = NULL; 899 + gw->nbytes = 0; 900 + goto out; 901 + } 902 + 903 + if (gw->walk_bytes >= minbytesneeded) { 904 + gw->ptr = gw->walk_ptr; 905 + gw->nbytes = gw->walk_bytes; 906 + goto out; 907 + } 908 + 909 + scatterwalk_unmap(&gw->walk); 910 + gw->walk_ptr = NULL; 911 + 912 + gw->ptr = gw->buf; 913 + gw->nbytes = sizeof(gw->buf); 914 + 915 + out: 916 + return gw->nbytes; 917 + } 918 + 919 + static int gcm_in_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone) 920 + { 925 921 if (gw->ptr == NULL) 926 - return; 922 + return 0; 927 923 928 924 if (gw->ptr == gw->buf) { 929 - n = gw->buf_bytes - bytesdone; 925 + int n = gw->buf_bytes - bytesdone; 930 926 if (n > 0) { 931 927 memmove(gw->buf, gw->buf + bytesdone, n); 932 - gw->buf_bytes -= n; 928 + gw->buf_bytes = n; 933 929 } else 934 930 gw->buf_bytes = 0; 935 - } else { 936 - gw->walk_bytes_remain -= bytesdone; 937 - scatterwalk_unmap(&gw->walk); 938 - scatterwalk_advance(&gw->walk, bytesdone); 939 - scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain); 940 - } 931 + } else 932 + _gcm_sg_unmap_and_advance(gw, bytesdone); 933 + 934 + return bytesdone; 935 + } 936 + 937 + static int gcm_out_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone) 938 + { 939 + int i, n; 940 + 941 + if (gw->ptr == NULL) 942 + return 0; 943 + 944 + if (gw->ptr == gw->buf) { 945 + for (i = 0; i < bytesdone; i += n) { 946 + 
if (!_gcm_sg_clamp_and_map(gw)) 947 + return i; 948 + n = min(gw->walk_bytes, bytesdone - i); 949 + memcpy(gw->walk_ptr, gw->buf + i, n); 950 + _gcm_sg_unmap_and_advance(gw, n); 951 + } 952 + } else 953 + _gcm_sg_unmap_and_advance(gw, bytesdone); 954 + 955 + return bytesdone; 941 956 } 942 957 943 958 static int gcm_aes_crypt(struct aead_request *req, unsigned int flags) ··· 989 926 unsigned int pclen = req->cryptlen; 990 927 int ret = 0; 991 928 992 - unsigned int len, in_bytes, out_bytes, 929 + unsigned int n, len, in_bytes, out_bytes, 993 930 min_bytes, bytes, aad_bytes, pc_bytes; 994 931 struct gcm_sg_walk gw_in, gw_out; 995 932 u8 tag[GHASH_DIGEST_SIZE]; ··· 1026 963 *(u32 *)(param.j0 + ivsize) = 1; 1027 964 memcpy(param.k, ctx->key, ctx->key_len); 1028 965 1029 - gcm_sg_walk_start(&gw_in, req->src, len); 1030 - gcm_sg_walk_start(&gw_out, req->dst, len); 966 + gcm_walk_start(&gw_in, req->src, len); 967 + gcm_walk_start(&gw_out, req->dst, len); 1031 968 1032 969 do { 1033 970 min_bytes = min_t(unsigned int, 1034 971 aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE); 1035 - in_bytes = gcm_sg_walk_go(&gw_in, min_bytes); 1036 - out_bytes = gcm_sg_walk_go(&gw_out, min_bytes); 972 + in_bytes = gcm_in_walk_go(&gw_in, min_bytes); 973 + out_bytes = gcm_out_walk_go(&gw_out, min_bytes); 1037 974 bytes = min(in_bytes, out_bytes); 1038 975 1039 976 if (aadlen + pclen <= bytes) { ··· 1060 997 gw_in.ptr + aad_bytes, pc_bytes, 1061 998 gw_in.ptr, aad_bytes); 1062 999 1063 - gcm_sg_walk_done(&gw_in, aad_bytes + pc_bytes); 1064 - gcm_sg_walk_done(&gw_out, aad_bytes + pc_bytes); 1000 + n = aad_bytes + pc_bytes; 1001 + if (gcm_in_walk_done(&gw_in, n) != n) 1002 + return -ENOMEM; 1003 + if (gcm_out_walk_done(&gw_out, n) != n) 1004 + return -ENOMEM; 1065 1005 aadlen -= aad_bytes; 1066 1006 pclen -= pc_bytes; 1067 1007 } while (aadlen + pclen > 0);
+4 -3
arch/s390/crypto/des_s390.c
··· 14 14 #include <linux/cpufeature.h> 15 15 #include <linux/crypto.h> 16 16 #include <linux/fips.h> 17 + #include <linux/mutex.h> 17 18 #include <crypto/algapi.h> 18 19 #include <crypto/des.h> 19 20 #include <asm/cpacf.h> ··· 22 21 #define DES3_KEY_SIZE (3 * DES_KEY_SIZE) 23 22 24 23 static u8 *ctrblk; 25 - static DEFINE_SPINLOCK(ctrblk_lock); 24 + static DEFINE_MUTEX(ctrblk_lock); 26 25 27 26 static cpacf_mask_t km_functions, kmc_functions, kmctr_functions; 28 27 ··· 375 374 unsigned int n, nbytes; 376 375 int ret, locked; 377 376 378 - locked = spin_trylock(&ctrblk_lock); 377 + locked = mutex_trylock(&ctrblk_lock); 379 378 380 379 ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE); 381 380 while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) { ··· 392 391 ret = blkcipher_walk_done(desc, walk, nbytes - n); 393 392 } 394 393 if (locked) 395 - spin_unlock(&ctrblk_lock); 394 + mutex_unlock(&ctrblk_lock); 396 395 /* final block may be < DES_BLOCK_SIZE, copy only nbytes */ 397 396 if (nbytes) { 398 397 cpacf_kmctr(fc, ctx->key, buf, walk->src.virt.addr,
+2 -2
arch/s390/include/asm/ap.h
··· 160 160 unsigned char Nd; /* max # of Domains - 1 */ 161 161 unsigned char _reserved3[10]; 162 162 unsigned int apm[8]; /* AP ID mask */ 163 - unsigned int aqm[8]; /* AP queue mask */ 164 - unsigned int adm[8]; /* AP domain mask */ 163 + unsigned int aqm[8]; /* AP (usage) queue mask */ 164 + unsigned int adm[8]; /* AP (control) domain mask */ 165 165 unsigned char _reserved4[16]; 166 166 } __aligned(8); 167 167
+2 -2
arch/s390/include/asm/cpacf.h
··· 178 178 : "cc"); 179 179 } 180 180 181 - static inline int __cpacf_check_opcode(unsigned int opcode) 181 + static __always_inline int __cpacf_check_opcode(unsigned int opcode) 182 182 { 183 183 switch (opcode) { 184 184 case CPACF_KMAC: ··· 218 218 return (mask->bytes[func >> 3] & (0x80 >> (func & 7))) != 0; 219 219 } 220 220 221 - static inline int cpacf_query_func(unsigned int opcode, unsigned int func) 221 + static __always_inline int cpacf_query_func(unsigned int opcode, unsigned int func) 222 222 { 223 223 cpacf_mask_t mask; 224 224
+15 -10
arch/s390/include/asm/pci_clp.h
··· 70 70 struct clp_fh_list_entry fh_list[CLP_FH_LIST_NR_ENTRIES]; 71 71 } __packed; 72 72 73 + struct mio_info { 74 + u32 valid : 6; 75 + u32 : 26; 76 + u32 : 32; 77 + struct { 78 + u64 wb; 79 + u64 wt; 80 + } addr[PCI_BAR_COUNT]; 81 + u32 reserved[6]; 82 + } __packed; 83 + 73 84 /* Query PCI function request */ 74 85 struct clp_req_query_pci { 75 86 struct clp_req_hdr hdr; ··· 111 100 u32 uid; /* user defined id */ 112 101 u8 util_str[CLP_UTIL_STR_LEN]; /* utility string */ 113 102 u32 reserved2[16]; 114 - u32 mio_valid : 6; 115 - u32 : 26; 116 - u32 : 32; 117 - struct { 118 - u64 wb; 119 - u64 wt; 120 - } addr[PCI_BAR_COUNT]; 121 - u32 reserved3[6]; 103 + struct mio_info mio; 122 104 } __packed; 123 105 124 106 /* Query PCI function group request */ ··· 159 155 struct clp_rsp_set_pci { 160 156 struct clp_rsp_hdr hdr; 161 157 u32 fh; /* function handle */ 162 - u32 reserved3; 163 - u64 reserved4; 158 + u32 reserved1; 159 + u64 reserved2; 160 + struct mio_info mio; 164 161 } __packed; 165 162 166 163 /* Combined request/response block structures used by clp insn */
+3 -1
arch/s390/mm/fault.c
··· 85 85 * Find out which address space caused the exception. 86 86 * Access register mode is impossible, ignore space == 3. 87 87 */ 88 - static inline enum fault_type get_fault_type(struct pt_regs *regs) 88 + static enum fault_type get_fault_type(struct pt_regs *regs) 89 89 { 90 90 unsigned long trans_exc_code; 91 91 ··· 211 211 asce = S390_lowcore.kernel_asce; 212 212 pr_cont("kernel "); 213 213 break; 214 + default: 215 + unreachable(); 214 216 } 215 217 pr_cont("ASCE.\n"); 216 218 dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
+4 -1
arch/s390/pci/pci.c
··· 528 528 if (zdev->bars[i].val & 4) 529 529 flags |= IORESOURCE_MEM_64; 530 530 531 - addr = ZPCI_ADDR(entry); 531 + if (static_branch_likely(&have_mio)) 532 + addr = (unsigned long) zdev->bars[i].mio_wb; 533 + else 534 + addr = ZPCI_ADDR(entry); 532 535 size = 1UL << zdev->bars[i].size; 533 536 534 537 res = __alloc_res(zdev, addr, size, flags);
+3 -3
arch/s390/pci/pci_clp.c
··· 165 165 } 166 166 zdev->mio_capable = response->mio_addr_avail; 167 167 for (i = 0; i < PCI_BAR_COUNT; i++) { 168 - if (!(response->mio_valid & (1 << (PCI_BAR_COUNT - i - 1)))) 168 + if (!(response->mio.valid & (1 << (PCI_BAR_COUNT - i - 1)))) 169 169 continue; 170 170 171 - zdev->bars[i].mio_wb = (void __iomem *) response->addr[i].wb; 172 - zdev->bars[i].mio_wt = (void __iomem *) response->addr[i].wt; 171 + zdev->bars[i].mio_wb = (void __iomem *) response->mio.addr[i].wb; 172 + zdev->bars[i].mio_wt = (void __iomem *) response->mio.addr[i].wt; 173 173 } 174 174 return 0; 175 175 }
+22 -4
drivers/s390/crypto/ap_bus.c
··· 254 254 } 255 255 256 256 /* 257 - * ap_test_config_domain(): Test, whether an AP usage domain is configured. 257 + * ap_test_config_usage_domain(): Test, whether an AP usage domain 258 + * is configured. 258 259 * @domain AP usage domain ID 259 260 * 260 261 * Returns 0 if the usage domain is not configured 261 262 * 1 if the usage domain is configured or 262 263 * if the configuration information is not available 263 264 */ 264 - static inline int ap_test_config_domain(unsigned int domain) 265 + int ap_test_config_usage_domain(unsigned int domain) 265 266 { 266 267 if (!ap_configuration) /* QCI not supported */ 267 268 return domain < 16; 268 269 return ap_test_config(ap_configuration->aqm, domain); 269 270 } 271 + EXPORT_SYMBOL(ap_test_config_usage_domain); 272 + 273 + /* 274 + * ap_test_config_ctrl_domain(): Test, whether an AP control domain 275 + * is configured. 276 + * @domain AP control domain ID 277 + * 278 + * Returns 1 if the control domain is configured 279 + * 0 in all other cases 280 + */ 281 + int ap_test_config_ctrl_domain(unsigned int domain) 282 + { 283 + if (!ap_configuration) /* QCI not supported */ 284 + return 0; 285 + return ap_test_config(ap_configuration->adm, domain); 286 + } 287 + EXPORT_SYMBOL(ap_test_config_ctrl_domain); 270 288 271 289 /** 272 290 * ap_query_queue(): Check if an AP queue is available. ··· 1285 1267 best_domain = -1; 1286 1268 max_count = 0; 1287 1269 for (i = 0; i < AP_DOMAINS; i++) { 1288 - if (!ap_test_config_domain(i) || 1270 + if (!ap_test_config_usage_domain(i) || 1289 1271 !test_bit_inv(i, ap_perms.aqm)) 1290 1272 continue; 1291 1273 count = 0; ··· 1460 1442 (void *)(long) qid, 1461 1443 __match_queue_device_with_qid); 1462 1444 aq = dev ? to_ap_queue(dev) : NULL; 1463 - if (!ap_test_config_domain(dom)) { 1445 + if (!ap_test_config_usage_domain(dom)) { 1464 1446 if (dev) { 1465 1447 /* Queue device exists but has been 1466 1448 * removed from configuration.
+3
drivers/s390/crypto/ap_bus.h
··· 251 251 void ap_request_timeout(struct timer_list *t); 252 252 void ap_bus_force_rescan(void); 253 253 254 + int ap_test_config_usage_domain(unsigned int domain); 255 + int ap_test_config_ctrl_domain(unsigned int domain); 256 + 254 257 void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg); 255 258 struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type); 256 259 void ap_queue_prepare_remove(struct ap_queue *aq);
+14 -3
drivers/s390/crypto/zcrypt_api.c
··· 822 822 struct ap_message ap_msg; 823 823 unsigned int weight, pref_weight; 824 824 unsigned int func_code; 825 - unsigned short *domain; 825 + unsigned short *domain, tdom; 826 826 int qid = 0, rc = -ENODEV; 827 827 struct module *mod; 828 828 ··· 833 833 rc = get_cprb_fc(xcRB, &ap_msg, &func_code, &domain); 834 834 if (rc) 835 835 goto out; 836 + 837 + /* 838 + * If a valid target domain is set and this domain is NOT a usage 839 + * domain but a control only domain, use the default domain as target. 840 + */ 841 + tdom = *domain; 842 + if (tdom >= 0 && tdom < AP_DOMAINS && 843 + !ap_test_config_usage_domain(tdom) && 844 + ap_test_config_ctrl_domain(tdom) && 845 + ap_domain_index >= 0) 846 + tdom = ap_domain_index; 836 847 837 848 pref_zc = NULL; 838 849 pref_zq = NULL; ··· 867 856 /* check if device is online and eligible */ 868 857 if (!zq->online || 869 858 !zq->ops->send_cprb || 870 - ((*domain != (unsigned short) AUTOSELECT) && 871 - (*domain != AP_QID_QUEUE(zq->queue->qid)))) 859 + (tdom != (unsigned short) AUTOSELECT && 860 + tdom != AP_QID_QUEUE(zq->queue->qid))) 872 861 continue; 873 862 /* check if device node has admission for this queue */ 874 863 if (!zcrypt_check_queue(perms,