Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 's390-5.4-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull more s390 updates from Vasily Gorbik:

- Fix three kasan findings

- Add PERF_EVENT_IOC_PERIOD ioctl support

- Add Crypto Express7S support and extend sysfs attributes for pkey

- Minor common I/O layer documentation corrections

* tag 's390-5.4-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
s390/cio: exclude subchannels with no parent from pseudo check
s390/cio: avoid calling strlen on null pointer
s390/topology: avoid firing events before kobjs are created
s390/cpumf: Remove mixed white space
s390/cpum_sf: Support ioctl PERF_EVENT_IOC_PERIOD
s390/zcrypt: CEX7S exploitation support
s390/cio: fix intparm documentation
s390/pkey: Add sysfs attributes to emit AES CIPHER key blobs

+334 -82
+5 -5
arch/s390/include/asm/cpu_mf.h
··· 70 70 unsigned long tear; /* 24-31: TEAR contents */ 71 71 unsigned long dear; /* 32-39: DEAR contents */ 72 72 unsigned int rsvrd0; /* 40-43: reserved */ 73 - unsigned int cpu_speed; /* 44-47: CPU speed */ 73 + unsigned int cpu_speed; /* 44-47: CPU speed */ 74 74 unsigned long long rsvrd1; /* 48-55: reserved */ 75 75 unsigned long long rsvrd2; /* 56-63: reserved */ 76 76 } __packed; ··· 89 89 unsigned long tear; /* 16-23: TEAR contents */ 90 90 unsigned long dear; /* 24-31: DEAR contents */ 91 91 /* 32-63: */ 92 - unsigned long rsvrd1; /* reserved */ 93 - unsigned long rsvrd2; /* reserved */ 94 - unsigned long rsvrd3; /* reserved */ 95 - unsigned long rsvrd4; /* reserved */ 92 + unsigned long rsvrd1; /* reserved */ 93 + unsigned long rsvrd2; /* reserved */ 94 + unsigned long rsvrd3; /* reserved */ 95 + unsigned long rsvrd4; /* reserved */ 96 96 } __packed; 97 97 98 98 struct hws_basic_entry {
+2
arch/s390/include/asm/perf_event.h
··· 60 60 #define PERF_CPUM_SF_MODE_MASK (PERF_CPUM_SF_BASIC_MODE| \ 61 61 PERF_CPUM_SF_DIAG_MODE) 62 62 #define PERF_CPUM_SF_FULL_BLOCKS 0x0004 /* Process full SDBs only */ 63 + #define PERF_CPUM_SF_FREQ_MODE 0x0008 /* Sampling with frequency */ 63 64 64 65 #define REG_NONE 0 65 66 #define REG_OVERFLOW 1 ··· 71 70 #define SAMPL_FLAGS(hwc) ((hwc)->config_base) 72 71 #define SAMPL_DIAG_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_DIAG_MODE) 73 72 #define SDB_FULL_BLOCKS(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FULL_BLOCKS) 73 + #define SAMPLE_FREQ_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FREQ_MODE) 74 74 75 75 #endif /* _ASM_S390_PERF_EVENT_H */
+2 -2
arch/s390/include/uapi/asm/zcrypt.h
··· 4 4 * 5 5 * zcrypt 2.2.1 (user-visible header) 6 6 * 7 - * Copyright IBM Corp. 2001, 2018 7 + * Copyright IBM Corp. 2001, 2019 8 8 * Author(s): Robert Burroughs 9 9 * Eric Rossman (edrossma@us.ibm.com) 10 10 * ··· 286 286 * 0x08: CEX3A 287 287 * 0x0a: CEX4 288 288 * 0x0b: CEX5 289 - * 0x0c: CEX6 289 + * 0x0c: CEX6 and CEX7 290 290 * 0x0d: device is disabled 291 291 * 292 292 * ZCRYPT_QDEPTH_MASK
+125 -40
arch/s390/kernel/perf_cpum_sf.c
··· 673 673 rcu_read_unlock(); 674 674 } 675 675 676 + static unsigned long getrate(bool freq, unsigned long sample, 677 + struct hws_qsi_info_block *si) 678 + { 679 + unsigned long rate; 680 + 681 + if (freq) { 682 + rate = freq_to_sample_rate(si, sample); 683 + rate = hw_limit_rate(si, rate); 684 + } else { 685 + /* The min/max sampling rates specifies the valid range 686 + * of sample periods. If the specified sample period is 687 + * out of range, limit the period to the range boundary. 688 + */ 689 + rate = hw_limit_rate(si, sample); 690 + 691 + /* The perf core maintains a maximum sample rate that is 692 + * configurable through the sysctl interface. Ensure the 693 + * sampling rate does not exceed this value. This also helps 694 + * to avoid throttling when pushing samples with 695 + * perf_event_overflow(). 696 + */ 697 + if (sample_rate_to_freq(si, rate) > 698 + sysctl_perf_event_sample_rate) { 699 + debug_sprintf_event(sfdbg, 1, 700 + "Sampling rate exceeds maximum " 701 + "perf sample rate\n"); 702 + rate = 0; 703 + } 704 + } 705 + return rate; 706 + } 707 + 708 + /* The sampling information (si) contains information about the 709 + * min/max sampling intervals and the CPU speed. So calculate the 710 + * correct sampling interval and avoid the whole period adjust 711 + * feedback loop. 712 + * 713 + * Since the CPU Measurement sampling facility can not handle frequency 714 + * calculate the sampling interval when frequency is specified using 715 + * this formula: 716 + * interval := cpu_speed * 1000000 / sample_freq 717 + * 718 + * Returns errno on bad input and zero on success with parameter interval 719 + * set to the correct sampling rate. 720 + * 721 + * Note: This function turns off freq bit to avoid calling function 722 + * perf_adjust_period(). This causes frequency adjustment in the common 723 + * code part which causes tremendous variations in the counter values. 
724 + */ 725 + static int __hw_perf_event_init_rate(struct perf_event *event, 726 + struct hws_qsi_info_block *si) 727 + { 728 + struct perf_event_attr *attr = &event->attr; 729 + struct hw_perf_event *hwc = &event->hw; 730 + unsigned long rate; 731 + 732 + if (attr->freq) { 733 + if (!attr->sample_freq) 734 + return -EINVAL; 735 + rate = getrate(attr->freq, attr->sample_freq, si); 736 + attr->freq = 0; /* Don't call perf_adjust_period() */ 737 + SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_FREQ_MODE; 738 + } else { 739 + rate = getrate(attr->freq, attr->sample_period, si); 740 + if (!rate) 741 + return -EINVAL; 742 + } 743 + attr->sample_period = rate; 744 + SAMPL_RATE(hwc) = rate; 745 + hw_init_period(hwc, SAMPL_RATE(hwc)); 746 + debug_sprintf_event(sfdbg, 4, "__hw_perf_event_init_rate:" 747 + "cpu:%d period:%llx freq:%d,%#lx\n", event->cpu, 748 + event->attr.sample_period, event->attr.freq, 749 + SAMPLE_FREQ_MODE(hwc)); 750 + return 0; 751 + } 752 + 676 753 static int __hw_perf_event_init(struct perf_event *event) 677 754 { 678 755 struct cpu_hw_sf *cpuhw; 679 756 struct hws_qsi_info_block si; 680 757 struct perf_event_attr *attr = &event->attr; 681 758 struct hw_perf_event *hwc = &event->hw; 682 - unsigned long rate; 683 759 int cpu, err; 684 760 685 761 /* Reserve CPU-measurement sampling facility */ ··· 821 745 if (attr->config1 & PERF_CPUM_SF_FULL_BLOCKS) 822 746 SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_FULL_BLOCKS; 823 747 824 - /* The sampling information (si) contains information about the 825 - * min/max sampling intervals and the CPU speed. So calculate the 826 - * correct sampling interval and avoid the whole period adjust 827 - * feedback loop. 
828 - */ 829 - rate = 0; 830 - if (attr->freq) { 831 - if (!attr->sample_freq) { 832 - err = -EINVAL; 833 - goto out; 834 - } 835 - rate = freq_to_sample_rate(&si, attr->sample_freq); 836 - rate = hw_limit_rate(&si, rate); 837 - attr->freq = 0; 838 - attr->sample_period = rate; 839 - } else { 840 - /* The min/max sampling rates specifies the valid range 841 - * of sample periods. If the specified sample period is 842 - * out of range, limit the period to the range boundary. 843 - */ 844 - rate = hw_limit_rate(&si, hwc->sample_period); 845 - 846 - /* The perf core maintains a maximum sample rate that is 847 - * configurable through the sysctl interface. Ensure the 848 - * sampling rate does not exceed this value. This also helps 849 - * to avoid throttling when pushing samples with 850 - * perf_event_overflow(). 851 - */ 852 - if (sample_rate_to_freq(&si, rate) > 853 - sysctl_perf_event_sample_rate) { 854 - err = -EINVAL; 855 - debug_sprintf_event(sfdbg, 1, "Sampling rate exceeds maximum perf sample rate\n"); 856 - goto out; 857 - } 858 - } 859 - SAMPL_RATE(hwc) = rate; 860 - hw_init_period(hwc, SAMPL_RATE(hwc)); 748 + err = __hw_perf_event_init_rate(event, &si); 749 + if (err) 750 + goto out; 861 751 862 752 /* Initialize sample data overflow accounting */ 863 753 hwc->extra_reg.reg = REG_OVERFLOW; ··· 946 904 if (sfb_has_pending_allocs(&cpuhw->sfb, hwc)) 947 905 extend_sampling_buffer(&cpuhw->sfb, hwc); 948 906 } 907 + /* Rate may be adjusted with ioctl() */ 908 + cpuhw->lsctl.interval = SAMPL_RATE(&cpuhw->event->hw); 949 909 } 950 910 951 911 /* (Re)enable the PMU and sampling facility */ ··· 966 922 lpp(&S390_lowcore.lpp); 967 923 968 924 debug_sprintf_event(sfdbg, 6, "pmu_enable: es=%i cs=%i ed=%i cd=%i " 969 - "tear=%p dear=%p\n", cpuhw->lsctl.es, 970 - cpuhw->lsctl.cs, cpuhw->lsctl.ed, cpuhw->lsctl.cd, 925 + "interval:%lx tear=%p dear=%p\n", 926 + cpuhw->lsctl.es, cpuhw->lsctl.cs, cpuhw->lsctl.ed, 927 + cpuhw->lsctl.cd, cpuhw->lsctl.interval, 971 928 (void *) 
cpuhw->lsctl.tear, 972 929 (void *) cpuhw->lsctl.dear); 973 930 } ··· 1762 1717 /* Nothing to do ... updates are interrupt-driven */ 1763 1718 } 1764 1719 1720 + /* Check if the new sampling period/frequency is appropriate. 1721 + * 1722 + * Return non-zero on error and zero on passed checks. 1723 + */ 1724 + static int cpumsf_pmu_check_period(struct perf_event *event, u64 value) 1725 + { 1726 + struct hws_qsi_info_block si; 1727 + unsigned long rate; 1728 + bool do_freq; 1729 + 1730 + memset(&si, 0, sizeof(si)); 1731 + if (event->cpu == -1) { 1732 + if (qsi(&si)) 1733 + return -ENODEV; 1734 + } else { 1735 + /* Event is pinned to a particular CPU, retrieve the per-CPU 1736 + * sampling structure for accessing the CPU-specific QSI. 1737 + */ 1738 + struct cpu_hw_sf *cpuhw = &per_cpu(cpu_hw_sf, event->cpu); 1739 + 1740 + si = cpuhw->qsi; 1741 + } 1742 + 1743 + do_freq = !!SAMPLE_FREQ_MODE(&event->hw); 1744 + rate = getrate(do_freq, value, &si); 1745 + if (!rate) 1746 + return -EINVAL; 1747 + 1748 + event->attr.sample_period = rate; 1749 + SAMPL_RATE(&event->hw) = rate; 1750 + hw_init_period(&event->hw, SAMPL_RATE(&event->hw)); 1751 + debug_sprintf_event(sfdbg, 4, "cpumsf_pmu_check_period:" 1752 + "cpu:%d value:%llx period:%llx freq:%d\n", 1753 + event->cpu, value, 1754 + event->attr.sample_period, do_freq); 1755 + return 0; 1756 + } 1757 + 1765 1758 /* Activate sampling control. 1766 1759 * Next call of pmu_enable() starts sampling. 1767 1760 */ ··· 1991 1908 1992 1909 .setup_aux = aux_buffer_setup, 1993 1910 .free_aux = aux_buffer_free, 1911 + 1912 + .check_period = cpumsf_pmu_check_period, 1994 1913 }; 1995 1914 1996 1915 static void cpumf_measurement_alert(struct ext_code ext_code,
+2 -1
arch/s390/kernel/topology.c
··· 311 311 on_each_cpu(__arch_update_dedicated_flag, NULL, 0); 312 312 for_each_online_cpu(cpu) { 313 313 dev = get_cpu_device(cpu); 314 - kobject_uevent(&dev->kobj, KOBJ_CHANGE); 314 + if (dev) 315 + kobject_uevent(&dev->kobj, KOBJ_CHANGE); 315 316 } 316 317 return rc; 317 318 }
+1 -1
drivers/s390/cio/ccwgroup.c
··· 372 372 goto error; 373 373 } 374 374 /* Check for trailing stuff. */ 375 - if (i == num_devices && strlen(buf) > 0) { 375 + if (i == num_devices && buf && strlen(buf) > 0) { 376 376 rc = -EINVAL; 377 377 goto error; 378 378 }
+2
drivers/s390/cio/css.c
··· 1388 1388 1389 1389 int sch_is_pseudo_sch(struct subchannel *sch) 1390 1390 { 1391 + if (!sch->dev.parent) 1392 + return 0; 1391 1393 return sch == to_css(sch->dev.parent)->pseudo_subchannel; 1392 1394 } 1393 1395
+17 -6
drivers/s390/cio/device_ops.c
··· 124 124 /** 125 125 * ccw_device_clear() - terminate I/O request processing 126 126 * @cdev: target ccw device 127 - * @intparm: interruption parameter; value is only used if no I/O is 128 - * outstanding, otherwise the intparm associated with the I/O request 129 - * is returned 127 + * @intparm: interruption parameter to be returned upon conclusion of csch 130 128 * 131 129 * ccw_device_clear() calls csch on @cdev's subchannel. 132 130 * Returns: ··· 177 179 * completed during the time specified by @expires. If a timeout occurs, the 178 180 * channel program is terminated via xsch, hsch or csch, and the device's 179 181 * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT). 182 + * The interruption handler will echo back the @intparm specified here, unless 183 + * another interruption parameter is specified by a subsequent invocation of 184 + * ccw_device_halt() or ccw_device_clear(). 180 185 * Returns: 181 186 * %0, if the operation was successful; 182 187 * -%EBUSY, if the device is busy, or status pending; ··· 257 256 * Start a S/390 channel program. When the interrupt arrives, the 258 257 * IRQ handler is called, either immediately, delayed (dev-end missing, 259 258 * or sense required) or never (no IRQ handler registered). 259 + * The interruption handler will echo back the @intparm specified here, unless 260 + * another interruption parameter is specified by a subsequent invocation of 261 + * ccw_device_halt() or ccw_device_clear(). 260 262 * Returns: 261 263 * %0, if the operation was successful; 262 264 * -%EBUSY, if the device is busy, or status pending; ··· 291 287 * Start a S/390 channel program. When the interrupt arrives, the 292 288 * IRQ handler is called, either immediately, delayed (dev-end missing, 293 289 * or sense required) or never (no IRQ handler registered). 
290 + * The interruption handler will echo back the @intparm specified here, unless 291 + * another interruption parameter is specified by a subsequent invocation of 292 + * ccw_device_halt() or ccw_device_clear(). 294 293 * Returns: 295 294 * %0, if the operation was successful; 296 295 * -%EBUSY, if the device is busy, or status pending; ··· 329 322 * completed during the time specified by @expires. If a timeout occurs, the 330 323 * channel program is terminated via xsch, hsch or csch, and the device's 331 324 * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT). 325 + * The interruption handler will echo back the @intparm specified here, unless 326 + * another interruption parameter is specified by a subsequent invocation of 327 + * ccw_device_halt() or ccw_device_clear(). 332 328 * Returns: 333 329 * %0, if the operation was successful; 334 330 * -%EBUSY, if the device is busy, or status pending; ··· 353 343 /** 354 344 * ccw_device_halt() - halt I/O request processing 355 345 * @cdev: target ccw device 356 - * @intparm: interruption parameter; value is only used if no I/O is 357 - * outstanding, otherwise the intparm associated with the I/O request 358 - * is returned 346 + * @intparm: interruption parameter to be returned upon conclusion of hsch 359 347 * 360 348 * ccw_device_halt() calls hsch on @cdev's subchannel. 349 + * The interruption handler will echo back the @intparm specified here, unless 350 + * another interruption parameter is specified by a subsequent invocation of 351 + * ccw_device_clear(). 361 352 * Returns: 362 353 * %0 on success, 363 354 * -%ENODEV on device not operational,
+6 -6
drivers/s390/crypto/ap_bus.c
··· 1322 1322 /* < CEX2A is not supported */ 1323 1323 if (rawtype < AP_DEVICE_TYPE_CEX2A) 1324 1324 return 0; 1325 - /* up to CEX6 known and fully supported */ 1326 - if (rawtype <= AP_DEVICE_TYPE_CEX6) 1325 + /* up to CEX7 known and fully supported */ 1326 + if (rawtype <= AP_DEVICE_TYPE_CEX7) 1327 1327 return rawtype; 1328 1328 /* 1329 - * unknown new type > CEX6, check for compatibility 1329 + * unknown new type > CEX7, check for compatibility 1330 1330 * to the highest known and supported type which is 1331 - * currently CEX6 with the help of the QACT function. 1331 + * currently CEX7 with the help of the QACT function. 1332 1332 */ 1333 1333 if (ap_qact_available()) { 1334 1334 struct ap_queue_status status; 1335 1335 union ap_qact_ap_info apinfo = {0}; 1336 1336 1337 1337 apinfo.mode = (func >> 26) & 0x07; 1338 - apinfo.cat = AP_DEVICE_TYPE_CEX6; 1338 + apinfo.cat = AP_DEVICE_TYPE_CEX7; 1339 1339 status = ap_qact(qid, 0, &apinfo); 1340 1340 if (status.response_code == AP_RESPONSE_NORMAL 1341 1341 && apinfo.cat >= AP_DEVICE_TYPE_CEX2A 1342 - && apinfo.cat <= AP_DEVICE_TYPE_CEX6) 1342 + && apinfo.cat <= AP_DEVICE_TYPE_CEX7) 1343 1343 comp_type = apinfo.cat; 1344 1344 } 1345 1345 if (!comp_type)
+2 -1
drivers/s390/crypto/ap_bus.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0+ */ 2 2 /* 3 - * Copyright IBM Corp. 2006, 2012 3 + * Copyright IBM Corp. 2006, 2019 4 4 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> 5 5 * Martin Schwidefsky <schwidefsky@de.ibm.com> 6 6 * Ralph Wuerthner <rwuerthn@de.ibm.com> ··· 63 63 #define AP_DEVICE_TYPE_CEX4 10 64 64 #define AP_DEVICE_TYPE_CEX5 11 65 65 #define AP_DEVICE_TYPE_CEX6 12 66 + #define AP_DEVICE_TYPE_CEX7 13 66 67 67 68 /* 68 69 * Known function facilities
+113
drivers/s390/crypto/pkey_api.c
··· 1363 1363 .bin_attrs = ccadata_attrs, 1364 1364 }; 1365 1365 1366 + #define CCACIPHERTOKENSIZE (sizeof(struct cipherkeytoken) + 80) 1367 + 1368 + /* 1369 + * Sysfs attribute read function for all secure key ccacipher binary attributes. 1370 + * The implementation can not deal with partial reads, because a new random 1371 + * secure key blob is generated with each read. In case of partial reads 1372 + * (i.e. off != 0 or count < key blob size) -EINVAL is returned. 1373 + */ 1374 + static ssize_t pkey_ccacipher_aes_attr_read(enum pkey_key_size keybits, 1375 + bool is_xts, char *buf, loff_t off, 1376 + size_t count) 1377 + { 1378 + size_t keysize; 1379 + int rc; 1380 + 1381 + if (off != 0 || count < CCACIPHERTOKENSIZE) 1382 + return -EINVAL; 1383 + if (is_xts) 1384 + if (count < 2 * CCACIPHERTOKENSIZE) 1385 + return -EINVAL; 1386 + 1387 + keysize = CCACIPHERTOKENSIZE; 1388 + rc = cca_gencipherkey(-1, -1, keybits, 0, buf, &keysize); 1389 + if (rc) 1390 + return rc; 1391 + memset(buf + keysize, 0, CCACIPHERTOKENSIZE - keysize); 1392 + 1393 + if (is_xts) { 1394 + keysize = CCACIPHERTOKENSIZE; 1395 + rc = cca_gencipherkey(-1, -1, keybits, 0, 1396 + buf + CCACIPHERTOKENSIZE, &keysize); 1397 + if (rc) 1398 + return rc; 1399 + memset(buf + CCACIPHERTOKENSIZE + keysize, 0, 1400 + CCACIPHERTOKENSIZE - keysize); 1401 + 1402 + return 2 * CCACIPHERTOKENSIZE; 1403 + } 1404 + 1405 + return CCACIPHERTOKENSIZE; 1406 + } 1407 + 1408 + static ssize_t ccacipher_aes_128_read(struct file *filp, 1409 + struct kobject *kobj, 1410 + struct bin_attribute *attr, 1411 + char *buf, loff_t off, 1412 + size_t count) 1413 + { 1414 + return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_128, false, buf, 1415 + off, count); 1416 + } 1417 + 1418 + static ssize_t ccacipher_aes_192_read(struct file *filp, 1419 + struct kobject *kobj, 1420 + struct bin_attribute *attr, 1421 + char *buf, loff_t off, 1422 + size_t count) 1423 + { 1424 + return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_192, false, buf, 
1425 + off, count); 1426 + } 1427 + 1428 + static ssize_t ccacipher_aes_256_read(struct file *filp, 1429 + struct kobject *kobj, 1430 + struct bin_attribute *attr, 1431 + char *buf, loff_t off, 1432 + size_t count) 1433 + { 1434 + return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_256, false, buf, 1435 + off, count); 1436 + } 1437 + 1438 + static ssize_t ccacipher_aes_128_xts_read(struct file *filp, 1439 + struct kobject *kobj, 1440 + struct bin_attribute *attr, 1441 + char *buf, loff_t off, 1442 + size_t count) 1443 + { 1444 + return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_128, true, buf, 1445 + off, count); 1446 + } 1447 + 1448 + static ssize_t ccacipher_aes_256_xts_read(struct file *filp, 1449 + struct kobject *kobj, 1450 + struct bin_attribute *attr, 1451 + char *buf, loff_t off, 1452 + size_t count) 1453 + { 1454 + return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_256, true, buf, 1455 + off, count); 1456 + } 1457 + 1458 + static BIN_ATTR_RO(ccacipher_aes_128, CCACIPHERTOKENSIZE); 1459 + static BIN_ATTR_RO(ccacipher_aes_192, CCACIPHERTOKENSIZE); 1460 + static BIN_ATTR_RO(ccacipher_aes_256, CCACIPHERTOKENSIZE); 1461 + static BIN_ATTR_RO(ccacipher_aes_128_xts, 2 * CCACIPHERTOKENSIZE); 1462 + static BIN_ATTR_RO(ccacipher_aes_256_xts, 2 * CCACIPHERTOKENSIZE); 1463 + 1464 + static struct bin_attribute *ccacipher_attrs[] = { 1465 + &bin_attr_ccacipher_aes_128, 1466 + &bin_attr_ccacipher_aes_192, 1467 + &bin_attr_ccacipher_aes_256, 1468 + &bin_attr_ccacipher_aes_128_xts, 1469 + &bin_attr_ccacipher_aes_256_xts, 1470 + NULL 1471 + }; 1472 + 1473 + static struct attribute_group ccacipher_attr_group = { 1474 + .name = "ccacipher", 1475 + .bin_attrs = ccacipher_attrs, 1476 + }; 1477 + 1366 1478 static const struct attribute_group *pkey_attr_groups[] = { 1367 1479 &protkey_attr_group, 1368 1480 &ccadata_attr_group, 1481 + &ccacipher_attr_group, 1369 1482 NULL, 1370 1483 }; 1371 1484
+2
drivers/s390/crypto/vfio_ap_drv.c
··· 36 36 .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, 37 37 { .dev_type = AP_DEVICE_TYPE_CEX6, 38 38 .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, 39 + { .dev_type = AP_DEVICE_TYPE_CEX7, 40 + .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, 39 41 { /* end of sibling */ }, 40 42 }; 41 43
+2 -1
drivers/s390/crypto/zcrypt_api.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0+ */ 2 2 /* 3 - * Copyright IBM Corp. 2001, 2018 3 + * Copyright IBM Corp. 2001, 2019 4 4 * Author(s): Robert Burroughs 5 5 * Eric Rossman (edrossma@us.ibm.com) 6 6 * Cornelia Huck <cornelia.huck@de.ibm.com> ··· 29 29 #define ZCRYPT_CEX4 10 30 30 #define ZCRYPT_CEX5 11 31 31 #define ZCRYPT_CEX6 12 32 + #define ZCRYPT_CEX7 13 32 33 33 34 /** 34 35 * Large random numbers are pulled in 4096 byte chunks from the crypto cards
+53 -19
drivers/s390/crypto/zcrypt_cex4.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /* 3 - * Copyright IBM Corp. 2012 3 + * Copyright IBM Corp. 2012, 2019 4 4 * Author(s): Holger Dengler <hd@linux.vnet.ibm.com> 5 5 */ 6 6 ··· 38 38 #define CEX4_CLEANUP_TIME (900*HZ) 39 39 40 40 MODULE_AUTHOR("IBM Corporation"); 41 - MODULE_DESCRIPTION("CEX4/CEX5/CEX6 Cryptographic Card device driver, " \ 42 - "Copyright IBM Corp. 2018"); 41 + MODULE_DESCRIPTION("CEX4/CEX5/CEX6/CEX7 Cryptographic Card device driver, " \ 42 + "Copyright IBM Corp. 2019"); 43 43 MODULE_LICENSE("GPL"); 44 44 45 45 static struct ap_device_id zcrypt_cex4_card_ids[] = { ··· 48 48 { .dev_type = AP_DEVICE_TYPE_CEX5, 49 49 .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE }, 50 50 { .dev_type = AP_DEVICE_TYPE_CEX6, 51 + .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE }, 52 + { .dev_type = AP_DEVICE_TYPE_CEX7, 51 53 .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE }, 52 54 { /* end of list */ }, 53 55 }; ··· 62 60 { .dev_type = AP_DEVICE_TYPE_CEX5, 63 61 .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, 64 62 { .dev_type = AP_DEVICE_TYPE_CEX6, 63 + .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, 64 + { .dev_type = AP_DEVICE_TYPE_CEX7, 65 65 .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, 66 66 { /* end of list */ }, 67 67 }; ··· 150 146 }; 151 147 152 148 /** 153 - * Probe function for CEX4/CEX5/CEX6 card device. It always 149 + * Probe function for CEX4/CEX5/CEX6/CEX7 card device. It always 154 150 * accepts the AP device since the bus_match already checked 155 151 * the hardware type. 156 152 * @ap_dev: pointer to the AP device. 
··· 162 158 * MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY 163 159 */ 164 160 static const int CEX4A_SPEED_IDX[] = { 165 - 14, 19, 249, 42, 228, 1458, 0, 0}; 161 + 14, 19, 249, 42, 228, 1458, 0, 0}; 166 162 static const int CEX5A_SPEED_IDX[] = { 167 - 8, 9, 20, 18, 66, 458, 0, 0}; 163 + 8, 9, 20, 18, 66, 458, 0, 0}; 168 164 static const int CEX6A_SPEED_IDX[] = { 169 - 6, 9, 20, 17, 65, 438, 0, 0}; 165 + 6, 9, 20, 17, 65, 438, 0, 0}; 166 + static const int CEX7A_SPEED_IDX[] = { 167 + 6, 8, 17, 15, 54, 362, 0, 0}; 170 168 171 169 static const int CEX4C_SPEED_IDX[] = { 172 170 59, 69, 308, 83, 278, 2204, 209, 40}; 173 171 static const int CEX5C_SPEED_IDX[] = { 174 - 24, 31, 50, 37, 90, 479, 27, 10}; 172 + 24, 31, 50, 37, 90, 479, 27, 10}; 175 173 static const int CEX6C_SPEED_IDX[] = { 176 - 16, 20, 32, 27, 77, 455, 23, 9}; 174 + 16, 20, 32, 27, 77, 455, 24, 9}; 175 + static const int CEX7C_SPEED_IDX[] = { 176 + 14, 16, 26, 23, 64, 376, 23, 8}; 177 177 178 178 static const int CEX4P_SPEED_IDX[] = { 179 - 224, 313, 3560, 359, 605, 2827, 0, 50}; 179 + 0, 0, 0, 0, 0, 0, 0, 50}; 180 180 static const int CEX5P_SPEED_IDX[] = { 181 - 63, 84, 156, 83, 142, 533, 0, 10}; 181 + 0, 0, 0, 0, 0, 0, 0, 10}; 182 182 static const int CEX6P_SPEED_IDX[] = { 183 - 55, 70, 121, 73, 129, 522, 0, 9}; 183 + 0, 0, 0, 0, 0, 0, 0, 9}; 184 + static const int CEX7P_SPEED_IDX[] = { 185 + 0, 0, 0, 0, 0, 0, 0, 8}; 184 186 185 187 struct ap_card *ac = to_ap_card(&ap_dev->device); 186 188 struct zcrypt_card *zc; ··· 208 198 zc->user_space_type = ZCRYPT_CEX5; 209 199 memcpy(zc->speed_rating, CEX5A_SPEED_IDX, 210 200 sizeof(CEX5A_SPEED_IDX)); 211 - } else { 201 + } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX6) { 212 202 zc->type_string = "CEX6A"; 213 203 zc->user_space_type = ZCRYPT_CEX6; 214 204 memcpy(zc->speed_rating, CEX6A_SPEED_IDX, 215 205 sizeof(CEX6A_SPEED_IDX)); 206 + } else { 207 + zc->type_string = "CEX7A"; 208 + /* wrong user space type, just for compatibility 209 + * 
with the ZCRYPT_STATUS_MASK ioctl. 210 + */ 211 + zc->user_space_type = ZCRYPT_CEX6; 212 + memcpy(zc->speed_rating, CEX7A_SPEED_IDX, 213 + sizeof(CEX7A_SPEED_IDX)); 216 214 } 217 215 zc->min_mod_size = CEX4A_MIN_MOD_SIZE; 218 216 if (ap_test_bit(&ac->functions, AP_FUNC_MEX4K) && ··· 250 232 zc->user_space_type = ZCRYPT_CEX3C; 251 233 memcpy(zc->speed_rating, CEX5C_SPEED_IDX, 252 234 sizeof(CEX5C_SPEED_IDX)); 253 - } else { 235 + } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX6) { 254 236 zc->type_string = "CEX6C"; 255 237 /* wrong user space type, must be CEX6 256 238 * just keep it for cca compatibility ··· 258 240 zc->user_space_type = ZCRYPT_CEX3C; 259 241 memcpy(zc->speed_rating, CEX6C_SPEED_IDX, 260 242 sizeof(CEX6C_SPEED_IDX)); 243 + } else { 244 + zc->type_string = "CEX7C"; 245 + /* wrong user space type, must be CEX7 246 + * just keep it for cca compatibility 247 + */ 248 + zc->user_space_type = ZCRYPT_CEX3C; 249 + memcpy(zc->speed_rating, CEX7C_SPEED_IDX, 250 + sizeof(CEX7C_SPEED_IDX)); 261 251 } 262 252 zc->min_mod_size = CEX4C_MIN_MOD_SIZE; 263 253 zc->max_mod_size = CEX4C_MAX_MOD_SIZE; ··· 281 255 zc->user_space_type = ZCRYPT_CEX5; 282 256 memcpy(zc->speed_rating, CEX5P_SPEED_IDX, 283 257 sizeof(CEX5P_SPEED_IDX)); 284 - } else { 258 + } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX6) { 285 259 zc->type_string = "CEX6P"; 286 260 zc->user_space_type = ZCRYPT_CEX6; 287 261 memcpy(zc->speed_rating, CEX6P_SPEED_IDX, 288 262 sizeof(CEX6P_SPEED_IDX)); 263 + } else { 264 + zc->type_string = "CEX7P"; 265 + /* wrong user space type, just for compatibility 266 + * with the ZCRYPT_STATUS_MASK ioctl. 
267 + */ 268 + zc->user_space_type = ZCRYPT_CEX6; 269 + memcpy(zc->speed_rating, CEX7P_SPEED_IDX, 270 + sizeof(CEX7P_SPEED_IDX)); 289 271 } 290 272 zc->min_mod_size = CEX4C_MIN_MOD_SIZE; 291 273 zc->max_mod_size = CEX4C_MAX_MOD_SIZE; ··· 323 289 } 324 290 325 291 /** 326 - * This is called to remove the CEX4/CEX5/CEX6 card driver information 327 - * if an AP card device is removed. 292 + * This is called to remove the CEX4/CEX5/CEX6/CEX7 card driver 293 + * information if an AP card device is removed. 328 294 */ 329 295 static void zcrypt_cex4_card_remove(struct ap_device *ap_dev) 330 296 { ··· 345 311 }; 346 312 347 313 /** 348 - * Probe function for CEX4/CEX5/CEX6 queue device. It always 314 + * Probe function for CEX4/CEX5/CEX6/CEX7 queue device. It always 349 315 * accepts the AP device since the bus_match already checked 350 316 * the hardware type. 351 317 * @ap_dev: pointer to the AP device. ··· 403 369 } 404 370 405 371 /** 406 - * This is called to remove the CEX4/CEX5/CEX6 queue driver 372 + * This is called to remove the CEX4/CEX5/CEX6/CEX7 queue driver 407 373 * information if an AP queue device is removed. 408 374 */ 409 375 static void zcrypt_cex4_queue_remove(struct ap_device *ap_dev)