Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 's390-5.3-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull more s390 updates from Vasily Gorbik:

- Fix integer overflow during stack frame unwind with invalid
backchain.

- Clean up unused symbol export in zcrypt code.

- Fix MIO addressing control activation in PCI code and expose its
usage via sysfs.

- Fix kernel image signature verification report presence detection.

- Fix irq registration in vfio-ap code.

- Add CPU measurement counters for newer machines.

- Add base DASD thin provisioning support and code cleanups.

* tag 's390-5.3-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (21 commits)
s390/unwind: avoid int overflow in outside_of_stack
s390/zcrypt: remove the exporting of ap_query_configuration
s390/pci: add mio_enabled attribute
s390: fix setting of mio addressing control
s390/ipl: Fix detection of has_secure attribute
s390: vfio-ap: fix irq registration
s390/cpumf: Add extended counter set definitions for model 8561 and 8562
s390/dasd: Handle out-of-space constraint
s390/dasd: Add discard support for ESE volumes
s390/dasd: Use ALIGN_DOWN macro
s390/dasd: Make dasd_setup_queue() a discipline function
s390/dasd: Add new ioctl to release space
s390/dasd: Add dasd_sleep_on_queue_interruptible()
s390/dasd: Add missing intensity definition
s390/dasd: Fix whitespace
s390/dasd: Add dynamic formatting support for ESE volumes
s390/dasd: Recognise data for ESE volumes
s390/dasd: Put sub-order definitions in a separate section
s390/dasd: Make layout analysis ESE compatible
s390/dasd: Remove old defines and function
...

+1549 -258
-10
arch/s390/include/asm/pci_insn.h
··· 143 143 return __zpci_set_irq_ctrl(ctl, isc, &iib); 144 144 } 145 145 146 - #ifdef CONFIG_PCI 147 - static inline void enable_mio_ctl(void) 148 - { 149 - if (static_branch_likely(&have_mio)) 150 - __ctl_set_bit(2, 5); 151 - } 152 - #else /* CONFIG_PCI */ 153 - static inline void enable_mio_ctl(void) {} 154 - #endif /* CONFIG_PCI */ 155 - 156 146 #endif
-1
arch/s390/include/asm/sclp.h
··· 80 80 unsigned char has_gisaf : 1; 81 81 unsigned char has_diag318 : 1; 82 82 unsigned char has_sipl : 1; 83 - unsigned char has_sipl_g2 : 1; 84 83 unsigned char has_dirq : 1; 85 84 unsigned int ibc; 86 85 unsigned int mtid;
+79 -75
arch/s390/include/uapi/asm/dasd.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 2 - /* 2 + /* 3 3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> 4 4 * Bugreports.to..: <Linux390@de.ibm.com> 5 5 * Copyright IBM Corp. 1999, 2000 ··· 21 21 22 22 #define DASD_API_VERSION 6 23 23 24 - /* 24 + /* 25 25 * struct dasd_information2_t 26 26 * represents any data about the device, which is visible to userspace. 27 27 * including foramt and featueres. 28 28 */ 29 29 typedef struct dasd_information2_t { 30 - unsigned int devno; /* S/390 devno */ 31 - unsigned int real_devno; /* for aliases */ 32 - unsigned int schid; /* S/390 subchannel identifier */ 33 - unsigned int cu_type : 16; /* from SenseID */ 34 - unsigned int cu_model : 8; /* from SenseID */ 35 - unsigned int dev_type : 16; /* from SenseID */ 36 - unsigned int dev_model : 8; /* from SenseID */ 37 - unsigned int open_count; 38 - unsigned int req_queue_len; 39 - unsigned int chanq_len; /* length of chanq */ 40 - char type[4]; /* from discipline.name, 'none' for unknown */ 41 - unsigned int status; /* current device level */ 42 - unsigned int label_block; /* where to find the VOLSER */ 43 - unsigned int FBA_layout; /* fixed block size (like AIXVOL) */ 44 - unsigned int characteristics_size; 45 - unsigned int confdata_size; 46 - char characteristics[64]; /* from read_device_characteristics */ 47 - char configuration_data[256]; /* from read_configuration_data */ 48 - unsigned int format; /* format info like formatted/cdl/ldl/... */ 49 - unsigned int features; /* dasd features like 'ro',... */ 50 - unsigned int reserved0; /* reserved for further use ,... */ 51 - unsigned int reserved1; /* reserved for further use ,... */ 52 - unsigned int reserved2; /* reserved for further use ,... */ 53 - unsigned int reserved3; /* reserved for further use ,... */ 54 - unsigned int reserved4; /* reserved for further use ,... */ 55 - unsigned int reserved5; /* reserved for further use ,... 
*/ 56 - unsigned int reserved6; /* reserved for further use ,... */ 57 - unsigned int reserved7; /* reserved for further use ,... */ 30 + unsigned int devno; /* S/390 devno */ 31 + unsigned int real_devno; /* for aliases */ 32 + unsigned int schid; /* S/390 subchannel identifier */ 33 + unsigned int cu_type : 16; /* from SenseID */ 34 + unsigned int cu_model : 8; /* from SenseID */ 35 + unsigned int dev_type : 16; /* from SenseID */ 36 + unsigned int dev_model : 8; /* from SenseID */ 37 + unsigned int open_count; 38 + unsigned int req_queue_len; 39 + unsigned int chanq_len; /* length of chanq */ 40 + char type[4]; /* from discipline.name, 'none' for unknown */ 41 + unsigned int status; /* current device level */ 42 + unsigned int label_block; /* where to find the VOLSER */ 43 + unsigned int FBA_layout; /* fixed block size (like AIXVOL) */ 44 + unsigned int characteristics_size; 45 + unsigned int confdata_size; 46 + char characteristics[64]; /* from read_device_characteristics */ 47 + char configuration_data[256]; /* from read_configuration_data */ 48 + unsigned int format; /* format info like formatted/cdl/ldl/... */ 49 + unsigned int features; /* dasd features like 'ro',... */ 50 + unsigned int reserved0; /* reserved for further use ,... */ 51 + unsigned int reserved1; /* reserved for further use ,... */ 52 + unsigned int reserved2; /* reserved for further use ,... */ 53 + unsigned int reserved3; /* reserved for further use ,... */ 54 + unsigned int reserved4; /* reserved for further use ,... */ 55 + unsigned int reserved5; /* reserved for further use ,... */ 56 + unsigned int reserved6; /* reserved for further use ,... */ 57 + unsigned int reserved7; /* reserved for further use ,... 
*/ 58 58 } dasd_information2_t; 59 59 60 60 /* ··· 92 92 93 93 #define DASD_PARTN_BITS 2 94 94 95 - /* 95 + /* 96 96 * struct dasd_information_t 97 97 * represents any data about the data, which is visible to userspace 98 98 */ 99 99 typedef struct dasd_information_t { 100 - unsigned int devno; /* S/390 devno */ 101 - unsigned int real_devno; /* for aliases */ 102 - unsigned int schid; /* S/390 subchannel identifier */ 103 - unsigned int cu_type : 16; /* from SenseID */ 104 - unsigned int cu_model : 8; /* from SenseID */ 105 - unsigned int dev_type : 16; /* from SenseID */ 106 - unsigned int dev_model : 8; /* from SenseID */ 107 - unsigned int open_count; 108 - unsigned int req_queue_len; 109 - unsigned int chanq_len; /* length of chanq */ 110 - char type[4]; /* from discipline.name, 'none' for unknown */ 111 - unsigned int status; /* current device level */ 112 - unsigned int label_block; /* where to find the VOLSER */ 113 - unsigned int FBA_layout; /* fixed block size (like AIXVOL) */ 114 - unsigned int characteristics_size; 115 - unsigned int confdata_size; 116 - char characteristics[64]; /* from read_device_characteristics */ 117 - char configuration_data[256]; /* from read_configuration_data */ 100 + unsigned int devno; /* S/390 devno */ 101 + unsigned int real_devno; /* for aliases */ 102 + unsigned int schid; /* S/390 subchannel identifier */ 103 + unsigned int cu_type : 16; /* from SenseID */ 104 + unsigned int cu_model : 8; /* from SenseID */ 105 + unsigned int dev_type : 16; /* from SenseID */ 106 + unsigned int dev_model : 8; /* from SenseID */ 107 + unsigned int open_count; 108 + unsigned int req_queue_len; 109 + unsigned int chanq_len; /* length of chanq */ 110 + char type[4]; /* from discipline.name, 'none' for unknown */ 111 + unsigned int status; /* current device level */ 112 + unsigned int label_block; /* where to find the VOLSER */ 113 + unsigned int FBA_layout; /* fixed block size (like AIXVOL) */ 114 + unsigned int characteristics_size; 115 + 
unsigned int confdata_size; 116 + char characteristics[64]; /* from read_device_characteristics */ 117 + char configuration_data[256]; /* from read_configuration_data */ 118 118 } dasd_information_t; 119 119 120 120 /* 121 121 * Read Subsystem Data - Performance Statistics 122 - */ 122 + */ 123 123 typedef struct dasd_rssd_perf_stats_t { 124 124 unsigned char invalid:1; 125 125 unsigned char format:3; ··· 154 154 unsigned char reseved2[96]; 155 155 } __attribute__((packed)) dasd_rssd_perf_stats_t; 156 156 157 - /* 157 + /* 158 158 * struct profile_info_t 159 - * holds the profinling information 159 + * holds the profinling information 160 160 */ 161 161 typedef struct dasd_profile_info_t { 162 - unsigned int dasd_io_reqs; /* number of requests processed at all */ 163 - unsigned int dasd_io_sects; /* number of sectors processed at all */ 164 - unsigned int dasd_io_secs[32]; /* histogram of request's sizes */ 165 - unsigned int dasd_io_times[32]; /* histogram of requests's times */ 166 - unsigned int dasd_io_timps[32]; /* histogram of requests's times per sector */ 167 - unsigned int dasd_io_time1[32]; /* histogram of time from build to start */ 168 - unsigned int dasd_io_time2[32]; /* histogram of time from start to irq */ 169 - unsigned int dasd_io_time2ps[32]; /* histogram of time from start to irq */ 170 - unsigned int dasd_io_time3[32]; /* histogram of time from irq to end */ 171 - unsigned int dasd_io_nr_req[32]; /* histogram of # of requests in chanq */ 162 + unsigned int dasd_io_reqs; /* number of requests processed at all */ 163 + unsigned int dasd_io_sects; /* number of sectors processed at all */ 164 + unsigned int dasd_io_secs[32]; /* histogram of request's sizes */ 165 + unsigned int dasd_io_times[32]; /* histogram of requests's times */ 166 + unsigned int dasd_io_timps[32]; /* histogram of requests's times per sector */ 167 + unsigned int dasd_io_time1[32]; /* histogram of time from build to start */ 168 + unsigned int dasd_io_time2[32]; /* histogram of 
time from start to irq */ 169 + unsigned int dasd_io_time2ps[32]; /* histogram of time from start to irq */ 170 + unsigned int dasd_io_time3[32]; /* histogram of time from irq to end */ 171 + unsigned int dasd_io_nr_req[32]; /* histogram of # of requests in chanq */ 172 172 } dasd_profile_info_t; 173 173 174 174 /* ··· 189 189 * 3/11: also write home address 190 190 * 4/12: invalidate track 191 191 */ 192 - #define DASD_FMT_INT_FMT_R0 1 /* write record zero */ 193 - #define DASD_FMT_INT_FMT_HA 2 /* write home address, also set FMT_R0 ! */ 194 - #define DASD_FMT_INT_INVAL 4 /* invalidate tracks */ 195 - #define DASD_FMT_INT_COMPAT 8 /* use OS/390 compatible disk layout */ 192 + #define DASD_FMT_INT_FMT_R0 1 /* write record zero */ 193 + #define DASD_FMT_INT_FMT_HA 2 /* write home address, also set FMT_R0 ! */ 194 + #define DASD_FMT_INT_INVAL 4 /* invalidate tracks */ 195 + #define DASD_FMT_INT_COMPAT 8 /* use OS/390 compatible disk layout */ 196 + #define DASD_FMT_INT_FMT_NOR0 16 /* remove permission to write record zero */ 197 + #define DASD_FMT_INT_ESE_FULL 32 /* release space for entire volume */ 196 198 197 199 /* 198 200 * struct format_check_t ··· 227 225 /* If key-length was != 0 */ 228 226 #define DASD_FMT_ERR_KEY_LENGTH 5 229 227 230 - /* 228 + /* 231 229 * struct attrib_data_t 232 230 * represents the operation (cache) bits for the device. 233 231 * Used in DE to influence caching of the DASD. 
··· 283 281 * Here ist how the ioctl-nr should be used: 284 282 * 0 - 31 DASD driver itself 285 283 * 32 - 239 still open 286 - * 240 - 255 reserved for EMC 284 + * 240 - 255 reserved for EMC 287 285 *******************************************************************************/ 288 286 289 287 /* Disable the volume (for Linux) */ 290 - #define BIODASDDISABLE _IO(DASD_IOCTL_LETTER,0) 288 + #define BIODASDDISABLE _IO(DASD_IOCTL_LETTER,0) 291 289 /* Enable the volume (for Linux) */ 292 - #define BIODASDENABLE _IO(DASD_IOCTL_LETTER,1) 290 + #define BIODASDENABLE _IO(DASD_IOCTL_LETTER,1) 293 291 /* Issue a reserve/release command, rsp. */ 294 292 #define BIODASDRSRV _IO(DASD_IOCTL_LETTER,2) /* reserve */ 295 293 #define BIODASDRLSE _IO(DASD_IOCTL_LETTER,3) /* release */ ··· 297 295 /* reset profiling information of a device */ 298 296 #define BIODASDPRRST _IO(DASD_IOCTL_LETTER,5) 299 297 /* Quiesce IO on device */ 300 - #define BIODASDQUIESCE _IO(DASD_IOCTL_LETTER,6) 298 + #define BIODASDQUIESCE _IO(DASD_IOCTL_LETTER,6) 301 299 /* Resume IO on device */ 302 - #define BIODASDRESUME _IO(DASD_IOCTL_LETTER,7) 300 + #define BIODASDRESUME _IO(DASD_IOCTL_LETTER,7) 303 301 /* Abort all I/O on a device */ 304 302 #define BIODASDABORTIO _IO(DASD_IOCTL_LETTER, 240) 305 303 /* Allow I/O on a device */ ··· 317 315 /* Performance Statistics Read */ 318 316 #define BIODASDPSRD _IOR(DASD_IOCTL_LETTER,4,dasd_rssd_perf_stats_t) 319 317 /* Get Attributes (cache operations) */ 320 - #define BIODASDGATTR _IOR(DASD_IOCTL_LETTER,5,attrib_data_t) 318 + #define BIODASDGATTR _IOR(DASD_IOCTL_LETTER,5,attrib_data_t) 321 319 322 320 323 321 /* #define BIODASDFORMAT _IOW(IOCTL_LETTER,0,format_data_t) , deprecated */ 324 - #define BIODASDFMT _IOW(DASD_IOCTL_LETTER,1,format_data_t) 322 + #define BIODASDFMT _IOW(DASD_IOCTL_LETTER,1,format_data_t) 325 323 /* Set Attributes (cache operations) */ 326 - #define BIODASDSATTR _IOW(DASD_IOCTL_LETTER,2,attrib_data_t) 324 + #define BIODASDSATTR 
_IOW(DASD_IOCTL_LETTER,2,attrib_data_t) 325 + /* Release Allocated Space */ 326 + #define BIODASDRAS _IOW(DASD_IOCTL_LETTER, 3, format_data_t) 327 327 328 328 /* Get Sense Path Group ID (SNID) data */ 329 329 #define BIODASDSNID _IOWR(DASD_IOCTL_LETTER, 1, struct dasd_snid_ioctl_data)
-2
arch/s390/kernel/early.c
··· 30 30 #include <asm/sclp.h> 31 31 #include <asm/facility.h> 32 32 #include <asm/boot_data.h> 33 - #include <asm/pci_insn.h> 34 33 #include "entry.h" 35 34 36 35 /* ··· 235 236 clock_comparator_max = -1ULL >> 1; 236 237 __ctl_set_bit(0, 53); 237 238 } 238 - enable_mio_ctl(); 239 239 } 240 240 241 241 static inline void save_vector_registers(void)
+1 -6
arch/s390/kernel/ipl.c
··· 286 286 static ssize_t ipl_has_secure_show(struct kobject *kobj, 287 287 struct kobj_attribute *attr, char *page) 288 288 { 289 - if (MACHINE_IS_LPAR) 290 - return sprintf(page, "%i\n", !!sclp.has_sipl); 291 - else if (MACHINE_IS_VM) 292 - return sprintf(page, "%i\n", !!sclp.has_sipl_g2); 293 - else 294 - return sprintf(page, "%i\n", 0); 289 + return sprintf(page, "%i\n", !!sclp.has_sipl); 295 290 } 296 291 297 292 static struct kobj_attribute sys_ipl_has_secure_attr =
+2
arch/s390/kernel/perf_cpum_cf_events.c
··· 624 624 break; 625 625 case 0x3906: 626 626 case 0x3907: 627 + case 0x8561: 628 + case 0x8562: 627 629 model = cpumcf_z14_pmu_event_attr; 628 630 break; 629 631 default:
+1 -1
arch/s390/kernel/unwind_bc.c
··· 20 20 static bool outside_of_stack(struct unwind_state *state, unsigned long sp) 21 21 { 22 22 return (sp <= state->sp) || 23 - (sp + sizeof(struct stack_frame) > state->stack_info.end); 23 + (sp > state->stack_info.end - sizeof(struct stack_frame)); 24 24 } 25 25 26 26 static bool update_stack_info(struct unwind_state *state, unsigned long sp)
+3 -1
arch/s390/pci/pci.c
··· 890 890 if (!test_facility(69) || !test_facility(71)) 891 891 return 0; 892 892 893 - if (test_facility(153) && !s390_pci_no_mio) 893 + if (test_facility(153) && !s390_pci_no_mio) { 894 894 static_branch_enable(&have_mio); 895 + ctl_set_bit(2, 5); 896 + } 895 897 896 898 rc = zpci_debug_init(); 897 899 if (rc)
+10
arch/s390/pci/pci_sysfs.c
··· 37 37 zpci_attr(segment2, "0x%02x\n", pfip[2]); 38 38 zpci_attr(segment3, "0x%02x\n", pfip[3]); 39 39 40 + static ssize_t mio_enabled_show(struct device *dev, 41 + struct device_attribute *attr, char *buf) 42 + { 43 + struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); 44 + 45 + return sprintf(buf, zpci_use_mio(zdev) ? "1\n" : "0\n"); 46 + } 47 + static DEVICE_ATTR_RO(mio_enabled); 48 + 40 49 static ssize_t recover_store(struct device *dev, struct device_attribute *attr, 41 50 const char *buf, size_t count) 42 51 { ··· 124 115 &dev_attr_vfn.attr, 125 116 &dev_attr_uid.attr, 126 117 &dev_attr_recover.attr, 118 + &dev_attr_mio_enabled.attr, 127 119 NULL, 128 120 }; 129 121 static struct attribute_group zpci_attr_group = {
+180 -53
drivers/s390/block/dasd.c
··· 70 70 * SECTION: prototypes for static functions of dasd.c 71 71 */ 72 72 static int dasd_alloc_queue(struct dasd_block *); 73 - static void dasd_setup_queue(struct dasd_block *); 74 73 static void dasd_free_queue(struct dasd_block *); 75 74 static int dasd_flush_block_queue(struct dasd_block *); 76 75 static void dasd_device_tasklet(unsigned long); ··· 119 120 kfree(device); 120 121 return ERR_PTR(-ENOMEM); 121 122 } 123 + /* Get two pages for ese format. */ 124 + device->ese_mem = (void *)__get_free_pages(GFP_ATOMIC | GFP_DMA, 1); 125 + if (!device->ese_mem) { 126 + free_page((unsigned long) device->erp_mem); 127 + free_pages((unsigned long) device->ccw_mem, 1); 128 + kfree(device); 129 + return ERR_PTR(-ENOMEM); 130 + } 122 131 123 132 dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2); 124 133 dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE); 134 + dasd_init_chunklist(&device->ese_chunks, device->ese_mem, PAGE_SIZE * 2); 125 135 spin_lock_init(&device->mem_lock); 126 136 atomic_set(&device->tasklet_scheduled, 0); 127 137 tasklet_init(&device->tasklet, dasd_device_tasklet, ··· 154 146 void dasd_free_device(struct dasd_device *device) 155 147 { 156 148 kfree(device->private); 149 + free_pages((unsigned long) device->ese_mem, 1); 157 150 free_page((unsigned long) device->erp_mem); 158 151 free_pages((unsigned long) device->ccw_mem, 1); 159 152 kfree(device); ··· 357 348 } 358 349 return rc; 359 350 } 360 - dasd_setup_queue(block); 351 + if (device->discipline->setup_blk_queue) 352 + device->discipline->setup_blk_queue(block); 361 353 set_capacity(block->gdp, 362 354 block->blocks << block->s2b_shift); 363 355 device->state = DASD_STATE_READY; ··· 1268 1258 } 1269 1259 EXPORT_SYMBOL(dasd_smalloc_request); 1270 1260 1261 + struct dasd_ccw_req *dasd_fmalloc_request(int magic, int cplength, 1262 + int datasize, 1263 + struct dasd_device *device) 1264 + { 1265 + struct dasd_ccw_req *cqr; 1266 + unsigned long flags; 1267 + int 
size, cqr_size; 1268 + char *data; 1269 + 1270 + cqr_size = (sizeof(*cqr) + 7L) & -8L; 1271 + size = cqr_size; 1272 + if (cplength > 0) 1273 + size += cplength * sizeof(struct ccw1); 1274 + if (datasize > 0) 1275 + size += datasize; 1276 + 1277 + spin_lock_irqsave(&device->mem_lock, flags); 1278 + cqr = dasd_alloc_chunk(&device->ese_chunks, size); 1279 + spin_unlock_irqrestore(&device->mem_lock, flags); 1280 + if (!cqr) 1281 + return ERR_PTR(-ENOMEM); 1282 + memset(cqr, 0, sizeof(*cqr)); 1283 + data = (char *)cqr + cqr_size; 1284 + cqr->cpaddr = NULL; 1285 + if (cplength > 0) { 1286 + cqr->cpaddr = data; 1287 + data += cplength * sizeof(struct ccw1); 1288 + memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1)); 1289 + } 1290 + cqr->data = NULL; 1291 + if (datasize > 0) { 1292 + cqr->data = data; 1293 + memset(cqr->data, 0, datasize); 1294 + } 1295 + 1296 + cqr->magic = magic; 1297 + set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 1298 + dasd_get_device(device); 1299 + 1300 + return cqr; 1301 + } 1302 + EXPORT_SYMBOL(dasd_fmalloc_request); 1303 + 1271 1304 void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) 1272 1305 { 1273 1306 unsigned long flags; ··· 1321 1268 dasd_put_device(device); 1322 1269 } 1323 1270 EXPORT_SYMBOL(dasd_sfree_request); 1271 + 1272 + void dasd_ffree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) 1273 + { 1274 + unsigned long flags; 1275 + 1276 + spin_lock_irqsave(&device->mem_lock, flags); 1277 + dasd_free_chunk(&device->ese_chunks, cqr); 1278 + spin_unlock_irqrestore(&device->mem_lock, flags); 1279 + dasd_put_device(device); 1280 + } 1281 + EXPORT_SYMBOL(dasd_ffree_request); 1324 1282 1325 1283 /* 1326 1284 * Check discipline magic in cqr. 
··· 1637 1573 irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX)); 1638 1574 } 1639 1575 1576 + static int dasd_ese_needs_format(struct dasd_block *block, struct irb *irb) 1577 + { 1578 + struct dasd_device *device = NULL; 1579 + u8 *sense = NULL; 1580 + 1581 + if (!block) 1582 + return 0; 1583 + device = block->base; 1584 + if (!device || !device->discipline->is_ese) 1585 + return 0; 1586 + if (!device->discipline->is_ese(device)) 1587 + return 0; 1588 + 1589 + sense = dasd_get_sense(irb); 1590 + if (!sense) 1591 + return 0; 1592 + 1593 + return !!(sense[1] & SNS1_NO_REC_FOUND) || 1594 + !!(sense[1] & SNS1_FILE_PROTECTED) || 1595 + scsw_cstat(&irb->scsw) == SCHN_STAT_INCORR_LEN; 1596 + } 1597 + 1598 + static int dasd_ese_oos_cond(u8 *sense) 1599 + { 1600 + return sense[0] & SNS0_EQUIPMENT_CHECK && 1601 + sense[1] & SNS1_PERM_ERR && 1602 + sense[1] & SNS1_WRITE_INHIBITED && 1603 + sense[25] == 0x01; 1604 + } 1605 + 1640 1606 /* 1641 1607 * Interrupt handler for "normal" ssch-io based dasd devices. 1642 1608 */ 1643 1609 void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, 1644 1610 struct irb *irb) 1645 1611 { 1646 - struct dasd_ccw_req *cqr, *next; 1612 + struct dasd_ccw_req *cqr, *next, *fcqr; 1647 1613 struct dasd_device *device; 1648 1614 unsigned long now; 1649 1615 int nrf_suppressed = 0; ··· 1735 1641 test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags); 1736 1642 nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) && 1737 1643 test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags); 1644 + 1645 + /* 1646 + * Extent pool probably out-of-space. 1647 + * Stop device and check exhaust level. 
1648 + */ 1649 + if (dasd_ese_oos_cond(sense)) { 1650 + dasd_generic_space_exhaust(device, cqr); 1651 + device->discipline->ext_pool_exhaust(device, cqr); 1652 + dasd_put_device(device); 1653 + return; 1654 + } 1738 1655 } 1739 1656 if (!(fp_suppressed || nrf_suppressed)) 1740 1657 device->discipline->dump_sense_dbf(device, irb, "int"); ··· 1775 1670 DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s", 1776 1671 "invalid device in request"); 1777 1672 return; 1673 + } 1674 + 1675 + if (dasd_ese_needs_format(cqr->block, irb)) { 1676 + if (rq_data_dir((struct request *)cqr->callback_data) == READ) { 1677 + device->discipline->ese_read(cqr); 1678 + cqr->status = DASD_CQR_SUCCESS; 1679 + cqr->stopclk = now; 1680 + dasd_device_clear_timer(device); 1681 + dasd_schedule_device_bh(device); 1682 + return; 1683 + } 1684 + fcqr = device->discipline->ese_format(device, cqr); 1685 + if (IS_ERR(fcqr)) { 1686 + /* 1687 + * If we can't format now, let the request go 1688 + * one extra round. Maybe we can format later. 1689 + */ 1690 + cqr->status = DASD_CQR_QUEUED; 1691 + } else { 1692 + fcqr->status = DASD_CQR_QUEUED; 1693 + cqr->status = DASD_CQR_QUEUED; 1694 + list_add(&fcqr->devlist, &device->ccw_queue); 1695 + dasd_schedule_device_bh(device); 1696 + return; 1697 + } 1778 1698 } 1779 1699 1780 1700 /* Check for clear pending */ ··· 2040 1910 static int __dasd_device_is_unusable(struct dasd_device *device, 2041 1911 struct dasd_ccw_req *cqr) 2042 1912 { 2043 - int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM); 1913 + int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM | DASD_STOPPED_NOSPC); 2044 1914 2045 1915 if (test_bit(DASD_FLAG_OFFLINE, &device->flags) && 2046 1916 !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { ··· 2540 2410 return _dasd_sleep_on_queue(ccw_queue, 0); 2541 2411 } 2542 2412 EXPORT_SYMBOL(dasd_sleep_on_queue); 2413 + 2414 + /* 2415 + * Start requests from a ccw_queue and wait interruptible for their completion. 
2416 + */ 2417 + int dasd_sleep_on_queue_interruptible(struct list_head *ccw_queue) 2418 + { 2419 + return _dasd_sleep_on_queue(ccw_queue, 1); 2420 + } 2421 + EXPORT_SYMBOL(dasd_sleep_on_queue_interruptible); 2543 2422 2544 2423 /* 2545 2424 * Queue a request to the tail of the device ccw_queue and wait ··· 3269 3130 } 3270 3131 3271 3132 /* 3272 - * Allocate and initialize request queue. 3273 - */ 3274 - static void dasd_setup_queue(struct dasd_block *block) 3275 - { 3276 - unsigned int logical_block_size = block->bp_block; 3277 - struct request_queue *q = block->request_queue; 3278 - unsigned int max_bytes, max_discard_sectors; 3279 - int max; 3280 - 3281 - if (block->base->features & DASD_FEATURE_USERAW) { 3282 - /* 3283 - * the max_blocks value for raw_track access is 256 3284 - * it is higher than the native ECKD value because we 3285 - * only need one ccw per track 3286 - * so the max_hw_sectors are 3287 - * 2048 x 512B = 1024kB = 16 tracks 3288 - */ 3289 - max = 2048; 3290 - } else { 3291 - max = block->base->discipline->max_blocks << block->s2b_shift; 3292 - } 3293 - blk_queue_flag_set(QUEUE_FLAG_NONROT, q); 3294 - q->limits.max_dev_sectors = max; 3295 - blk_queue_logical_block_size(q, logical_block_size); 3296 - blk_queue_max_hw_sectors(q, max); 3297 - blk_queue_max_segments(q, USHRT_MAX); 3298 - /* with page sized segments we can translate each segement into 3299 - * one idaw/tidaw 3300 - */ 3301 - blk_queue_max_segment_size(q, PAGE_SIZE); 3302 - blk_queue_segment_boundary(q, PAGE_SIZE - 1); 3303 - 3304 - /* Only activate blocklayer discard support for devices that support it */ 3305 - if (block->base->features & DASD_FEATURE_DISCARD) { 3306 - q->limits.discard_granularity = logical_block_size; 3307 - q->limits.discard_alignment = PAGE_SIZE; 3308 - 3309 - /* Calculate max_discard_sectors and make it PAGE aligned */ 3310 - max_bytes = USHRT_MAX * logical_block_size; 3311 - max_bytes = ALIGN(max_bytes, PAGE_SIZE) - PAGE_SIZE; 3312 - max_discard_sectors = 
max_bytes / logical_block_size; 3313 - 3314 - blk_queue_max_discard_sectors(q, max_discard_sectors); 3315 - blk_queue_max_write_zeroes_sectors(q, max_discard_sectors); 3316 - blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); 3317 - } 3318 - } 3319 - 3320 - /* 3321 3133 * Deactivate and free request queue. 3322 3134 */ 3323 3135 static void dasd_free_queue(struct dasd_block *block) ··· 3895 3805 return 0; 3896 3806 } 3897 3807 EXPORT_SYMBOL_GPL(dasd_generic_verify_path); 3808 + 3809 + void dasd_generic_space_exhaust(struct dasd_device *device, 3810 + struct dasd_ccw_req *cqr) 3811 + { 3812 + dasd_eer_write(device, NULL, DASD_EER_NOSPC); 3813 + 3814 + if (device->state < DASD_STATE_BASIC) 3815 + return; 3816 + 3817 + if (cqr->status == DASD_CQR_IN_IO || 3818 + cqr->status == DASD_CQR_CLEAR_PENDING) { 3819 + cqr->status = DASD_CQR_QUEUED; 3820 + cqr->retries++; 3821 + } 3822 + dasd_device_set_stop_bits(device, DASD_STOPPED_NOSPC); 3823 + dasd_device_clear_timer(device); 3824 + dasd_schedule_device_bh(device); 3825 + } 3826 + EXPORT_SYMBOL_GPL(dasd_generic_space_exhaust); 3827 + 3828 + void dasd_generic_space_avail(struct dasd_device *device) 3829 + { 3830 + dev_info(&device->cdev->dev, "Extent pool space is available\n"); 3831 + DBF_DEV_EVENT(DBF_WARNING, device, "%s", "space available"); 3832 + 3833 + dasd_device_remove_stop_bits(device, DASD_STOPPED_NOSPC); 3834 + dasd_schedule_device_bh(device); 3835 + 3836 + if (device->block) { 3837 + dasd_schedule_block_bh(device->block); 3838 + if (device->block->request_queue) 3839 + blk_mq_run_hw_queues(device->block->request_queue, true); 3840 + } 3841 + if (!device->stopped) 3842 + wake_up(&generic_waitq); 3843 + } 3844 + EXPORT_SYMBOL_GPL(dasd_generic_space_avail); 3898 3845 3899 3846 /* 3900 3847 * clear active requests and requeue them to block layer if possible
+66 -4
drivers/s390/block/dasd_devmap.c
··· 1642 1642 dasd_path_interval_store); 1643 1643 1644 1644 1645 + #define DASD_DEFINE_ATTR(_name, _func) \ 1646 + static ssize_t dasd_##_name##_show(struct device *dev, \ 1647 + struct device_attribute *attr, \ 1648 + char *buf) \ 1649 + { \ 1650 + struct ccw_device *cdev = to_ccwdev(dev); \ 1651 + struct dasd_device *device = dasd_device_from_cdev(cdev); \ 1652 + int val = 0; \ 1653 + \ 1654 + if (IS_ERR(device)) \ 1655 + return -ENODEV; \ 1656 + if (device->discipline && _func) \ 1657 + val = _func(device); \ 1658 + dasd_put_device(device); \ 1659 + \ 1660 + return snprintf(buf, PAGE_SIZE, "%d\n", val); \ 1661 + } \ 1662 + static DEVICE_ATTR(_name, 0444, dasd_##_name##_show, NULL); \ 1663 + 1664 + DASD_DEFINE_ATTR(ese, device->discipline->is_ese); 1665 + DASD_DEFINE_ATTR(extent_size, device->discipline->ext_size); 1666 + DASD_DEFINE_ATTR(pool_id, device->discipline->ext_pool_id); 1667 + DASD_DEFINE_ATTR(space_configured, device->discipline->space_configured); 1668 + DASD_DEFINE_ATTR(space_allocated, device->discipline->space_allocated); 1669 + DASD_DEFINE_ATTR(logical_capacity, device->discipline->logical_capacity); 1670 + DASD_DEFINE_ATTR(warn_threshold, device->discipline->ext_pool_warn_thrshld); 1671 + DASD_DEFINE_ATTR(cap_at_warnlevel, device->discipline->ext_pool_cap_at_warnlevel); 1672 + DASD_DEFINE_ATTR(pool_oos, device->discipline->ext_pool_oos); 1673 + 1645 1674 static struct attribute * dasd_attrs[] = { 1646 1675 &dev_attr_readonly.attr, 1647 1676 &dev_attr_discipline.attr, ··· 1696 1667 &dev_attr_path_interval.attr, 1697 1668 &dev_attr_path_reset.attr, 1698 1669 &dev_attr_hpf.attr, 1670 + &dev_attr_ese.attr, 1699 1671 NULL, 1700 1672 }; 1701 1673 1702 1674 static const struct attribute_group dasd_attr_group = { 1703 1675 .attrs = dasd_attrs, 1676 + }; 1677 + 1678 + static struct attribute *capacity_attrs[] = { 1679 + &dev_attr_space_configured.attr, 1680 + &dev_attr_space_allocated.attr, 1681 + &dev_attr_logical_capacity.attr, 1682 + NULL, 1683 + }; 
1684 + 1685 + static const struct attribute_group capacity_attr_group = { 1686 + .name = "capacity", 1687 + .attrs = capacity_attrs, 1688 + }; 1689 + 1690 + static struct attribute *ext_pool_attrs[] = { 1691 + &dev_attr_pool_id.attr, 1692 + &dev_attr_extent_size.attr, 1693 + &dev_attr_warn_threshold.attr, 1694 + &dev_attr_cap_at_warnlevel.attr, 1695 + &dev_attr_pool_oos.attr, 1696 + NULL, 1697 + }; 1698 + 1699 + static const struct attribute_group ext_pool_attr_group = { 1700 + .name = "extent_pool", 1701 + .attrs = ext_pool_attrs, 1702 + }; 1703 + 1704 + static const struct attribute_group *dasd_attr_groups[] = { 1705 + &dasd_attr_group, 1706 + &capacity_attr_group, 1707 + &ext_pool_attr_group, 1708 + NULL, 1704 1709 }; 1705 1710 1706 1711 /* ··· 1778 1715 EXPORT_SYMBOL(dasd_set_feature); 1779 1716 1780 1717 1781 - int 1782 - dasd_add_sysfs_files(struct ccw_device *cdev) 1718 + int dasd_add_sysfs_files(struct ccw_device *cdev) 1783 1719 { 1784 - return sysfs_create_group(&cdev->dev.kobj, &dasd_attr_group); 1720 + return sysfs_create_groups(&cdev->dev.kobj, dasd_attr_groups); 1785 1721 } 1786 1722 1787 1723 void 1788 1724 dasd_remove_sysfs_files(struct ccw_device *cdev) 1789 1725 { 1790 - sysfs_remove_group(&cdev->dev.kobj, &dasd_attr_group); 1726 + sysfs_remove_groups(&cdev->dev.kobj, dasd_attr_groups); 1791 1727 } 1792 1728 1793 1729
+21 -1
drivers/s390/block/dasd_diag.c
··· 615 615 "dump sense not available for DIAG data"); 616 616 } 617 617 618 + /* 619 + * Initialize block layer request queue. 620 + */ 621 + static void dasd_diag_setup_blk_queue(struct dasd_block *block) 622 + { 623 + unsigned int logical_block_size = block->bp_block; 624 + struct request_queue *q = block->request_queue; 625 + int max; 626 + 627 + max = DIAG_MAX_BLOCKS << block->s2b_shift; 628 + blk_queue_flag_set(QUEUE_FLAG_NONROT, q); 629 + q->limits.max_dev_sectors = max; 630 + blk_queue_logical_block_size(q, logical_block_size); 631 + blk_queue_max_hw_sectors(q, max); 632 + blk_queue_max_segments(q, USHRT_MAX); 633 + /* With page sized segments each segment can be translated into one idaw/tidaw */ 634 + blk_queue_max_segment_size(q, PAGE_SIZE); 635 + blk_queue_segment_boundary(q, PAGE_SIZE - 1); 636 + } 637 + 618 638 static struct dasd_discipline dasd_diag_discipline = { 619 639 .owner = THIS_MODULE, 620 640 .name = "DIAG", 621 641 .ebcname = "DIAG", 622 - .max_blocks = DIAG_MAX_BLOCKS, 623 642 .check_device = dasd_diag_check_device, 624 643 .verify_path = dasd_generic_verify_path, 625 644 .fill_geometry = dasd_diag_fill_geometry, 645 + .setup_blk_queue = dasd_diag_setup_blk_queue, 626 646 .start_IO = dasd_start_diag, 627 647 .term_IO = dasd_diag_term_IO, 628 648 .handle_terminated_request = dasd_diag_handle_terminated_request,
+909 -57
drivers/s390/block/dasd_eckd.c
··· 42 42 #endif /* PRINTK_HEADER */ 43 43 #define PRINTK_HEADER "dasd(eckd):" 44 44 45 - #define ECKD_C0(i) (i->home_bytes) 46 - #define ECKD_F(i) (i->formula) 47 - #define ECKD_F1(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f1):\ 48 - (i->factors.f_0x02.f1)) 49 - #define ECKD_F2(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f2):\ 50 - (i->factors.f_0x02.f2)) 51 - #define ECKD_F3(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f3):\ 52 - (i->factors.f_0x02.f3)) 53 - #define ECKD_F4(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f4):0) 54 - #define ECKD_F5(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f5):0) 55 - #define ECKD_F6(i) (i->factor6) 56 - #define ECKD_F7(i) (i->factor7) 57 - #define ECKD_F8(i) (i->factor8) 58 - 59 45 /* 60 46 * raw track access always map to 64k in memory 61 47 * so it maps to 16 blocks of 4k per track ··· 89 103 } *dasd_reserve_req; 90 104 static DEFINE_MUTEX(dasd_reserve_mutex); 91 105 106 + static struct { 107 + struct dasd_ccw_req cqr; 108 + struct ccw1 ccw[2]; 109 + char data[40]; 110 + } *dasd_vol_info_req; 111 + static DEFINE_MUTEX(dasd_vol_info_mutex); 112 + 113 + struct ext_pool_exhaust_work_data { 114 + struct work_struct worker; 115 + struct dasd_device *device; 116 + struct dasd_device *base; 117 + }; 118 + 92 119 /* definitions for the path verification worker */ 93 120 struct path_verification_work_data { 94 121 struct work_struct worker; ··· 121 122 __u8 lpum; 122 123 }; 123 124 125 + static int dasd_eckd_ext_pool_id(struct dasd_device *); 124 126 static int prepare_itcw(struct itcw *, unsigned int, unsigned int, int, 125 127 struct dasd_device *, struct dasd_device *, 126 128 unsigned int, int, unsigned int, unsigned int, ··· 157 157 #define LABEL_SIZE 140 158 158 159 159 /* head and record addresses of count_area read in analysis ccw */ 160 - static const int count_area_head[] = { 0, 0, 0, 0, 2 }; 160 + static const int count_area_head[] = { 0, 0, 0, 0, 1 }; 161 161 static const int count_area_rec[] = { 1, 2, 3, 4, 1 }; 162 - 163 - static inline unsigned 
int 164 - round_up_multiple(unsigned int no, unsigned int mult) 165 - { 166 - int rem = no % mult; 167 - return (rem ? no - rem + mult : no); 168 - } 169 162 170 163 static inline unsigned int 171 164 ceil_quot(unsigned int d1, unsigned int d2) ··· 1484 1491 return rc; 1485 1492 } 1486 1493 1494 + /* Read Volume Information - Volume Storage Query */ 1495 + static int dasd_eckd_read_vol_info(struct dasd_device *device) 1496 + { 1497 + struct dasd_eckd_private *private = device->private; 1498 + struct dasd_psf_prssd_data *prssdp; 1499 + struct dasd_rssd_vsq *vsq; 1500 + struct dasd_ccw_req *cqr; 1501 + struct ccw1 *ccw; 1502 + int useglobal; 1503 + int rc; 1504 + 1505 + /* This command cannot be executed on an alias device */ 1506 + if (private->uid.type == UA_BASE_PAV_ALIAS || 1507 + private->uid.type == UA_HYPER_PAV_ALIAS) 1508 + return 0; 1509 + 1510 + useglobal = 0; 1511 + cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */, 1512 + sizeof(*prssdp) + sizeof(*vsq), device, NULL); 1513 + if (IS_ERR(cqr)) { 1514 + DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", 1515 + "Could not allocate initialization request"); 1516 + mutex_lock(&dasd_vol_info_mutex); 1517 + useglobal = 1; 1518 + cqr = &dasd_vol_info_req->cqr; 1519 + memset(cqr, 0, sizeof(*cqr)); 1520 + memset(dasd_vol_info_req, 0, sizeof(*dasd_vol_info_req)); 1521 + cqr->cpaddr = &dasd_vol_info_req->ccw; 1522 + cqr->data = &dasd_vol_info_req->data; 1523 + cqr->magic = DASD_ECKD_MAGIC; 1524 + } 1525 + 1526 + /* Prepare for Read Subsystem Data */ 1527 + prssdp = cqr->data; 1528 + prssdp->order = PSF_ORDER_PRSSD; 1529 + prssdp->suborder = PSF_SUBORDER_VSQ; /* Volume Storage Query */ 1530 + prssdp->lss = private->ned->ID; 1531 + prssdp->volume = private->ned->unit_addr; 1532 + 1533 + ccw = cqr->cpaddr; 1534 + ccw->cmd_code = DASD_ECKD_CCW_PSF; 1535 + ccw->count = sizeof(*prssdp); 1536 + ccw->flags |= CCW_FLAG_CC; 1537 + ccw->cda = (__u32)(addr_t)prssdp; 1538 + 1539 + /* Read Subsystem Data - Volume Storage 
Query */ 1540 + vsq = (struct dasd_rssd_vsq *)(prssdp + 1); 1541 + memset(vsq, 0, sizeof(*vsq)); 1542 + 1543 + ccw++; 1544 + ccw->cmd_code = DASD_ECKD_CCW_RSSD; 1545 + ccw->count = sizeof(*vsq); 1546 + ccw->flags |= CCW_FLAG_SLI; 1547 + ccw->cda = (__u32)(addr_t)vsq; 1548 + 1549 + cqr->buildclk = get_tod_clock(); 1550 + cqr->status = DASD_CQR_FILLED; 1551 + cqr->startdev = device; 1552 + cqr->memdev = device; 1553 + cqr->block = NULL; 1554 + cqr->retries = 256; 1555 + cqr->expires = device->default_expires * HZ; 1556 + /* The command might not be supported. Suppress the error output */ 1557 + __set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags); 1558 + 1559 + rc = dasd_sleep_on_interruptible(cqr); 1560 + if (rc == 0) { 1561 + memcpy(&private->vsq, vsq, sizeof(*vsq)); 1562 + } else { 1563 + dev_warn(&device->cdev->dev, 1564 + "Reading the volume storage information failed with rc=%d\n", rc); 1565 + } 1566 + 1567 + if (useglobal) 1568 + mutex_unlock(&dasd_vol_info_mutex); 1569 + else 1570 + dasd_sfree_request(cqr, cqr->memdev); 1571 + 1572 + return rc; 1573 + } 1574 + 1575 + static int dasd_eckd_is_ese(struct dasd_device *device) 1576 + { 1577 + struct dasd_eckd_private *private = device->private; 1578 + 1579 + return private->vsq.vol_info.ese; 1580 + } 1581 + 1582 + static int dasd_eckd_ext_pool_id(struct dasd_device *device) 1583 + { 1584 + struct dasd_eckd_private *private = device->private; 1585 + 1586 + return private->vsq.extent_pool_id; 1587 + } 1588 + 1589 + /* 1590 + * This value represents the total amount of available space. As more space is 1591 + * allocated by ESE volumes, this value will decrease. 1592 + * The data for this value is therefore updated on any call. 1593 + */ 1594 + static int dasd_eckd_space_configured(struct dasd_device *device) 1595 + { 1596 + struct dasd_eckd_private *private = device->private; 1597 + int rc; 1598 + 1599 + rc = dasd_eckd_read_vol_info(device); 1600 + 1601 + return rc ? 
: private->vsq.space_configured; 1602 + } 1603 + 1604 + /* 1605 + * The value of space allocated by an ESE volume may have changed and is 1606 + * therefore updated on any call. 1607 + */ 1608 + static int dasd_eckd_space_allocated(struct dasd_device *device) 1609 + { 1610 + struct dasd_eckd_private *private = device->private; 1611 + int rc; 1612 + 1613 + rc = dasd_eckd_read_vol_info(device); 1614 + 1615 + return rc ? : private->vsq.space_allocated; 1616 + } 1617 + 1618 + static int dasd_eckd_logical_capacity(struct dasd_device *device) 1619 + { 1620 + struct dasd_eckd_private *private = device->private; 1621 + 1622 + return private->vsq.logical_capacity; 1623 + } 1624 + 1625 + static void dasd_eckd_ext_pool_exhaust_work(struct work_struct *work) 1626 + { 1627 + struct ext_pool_exhaust_work_data *data; 1628 + struct dasd_device *device; 1629 + struct dasd_device *base; 1630 + 1631 + data = container_of(work, struct ext_pool_exhaust_work_data, worker); 1632 + device = data->device; 1633 + base = data->base; 1634 + 1635 + if (!base) 1636 + base = device; 1637 + if (dasd_eckd_space_configured(base) != 0) { 1638 + dasd_generic_space_avail(device); 1639 + } else { 1640 + dev_warn(&device->cdev->dev, "No space left in the extent pool\n"); 1641 + DBF_DEV_EVENT(DBF_WARNING, device, "%s", "out of space"); 1642 + } 1643 + 1644 + dasd_put_device(device); 1645 + kfree(data); 1646 + } 1647 + 1648 + static int dasd_eckd_ext_pool_exhaust(struct dasd_device *device, 1649 + struct dasd_ccw_req *cqr) 1650 + { 1651 + struct ext_pool_exhaust_work_data *data; 1652 + 1653 + data = kzalloc(sizeof(*data), GFP_ATOMIC); 1654 + if (!data) 1655 + return -ENOMEM; 1656 + INIT_WORK(&data->worker, dasd_eckd_ext_pool_exhaust_work); 1657 + dasd_get_device(device); 1658 + data->device = device; 1659 + 1660 + if (cqr->block) 1661 + data->base = cqr->block->base; 1662 + else if (cqr->basedev) 1663 + data->base = cqr->basedev; 1664 + else 1665 + data->base = NULL; 1666 + 1667 + 
schedule_work(&data->worker); 1668 + 1669 + return 0; 1670 + } 1671 + 1672 + static void dasd_eckd_cpy_ext_pool_data(struct dasd_device *device, 1673 + struct dasd_rssd_lcq *lcq) 1674 + { 1675 + struct dasd_eckd_private *private = device->private; 1676 + int pool_id = dasd_eckd_ext_pool_id(device); 1677 + struct dasd_ext_pool_sum eps; 1678 + int i; 1679 + 1680 + for (i = 0; i < lcq->pool_count; i++) { 1681 + eps = lcq->ext_pool_sum[i]; 1682 + if (eps.pool_id == pool_id) { 1683 + memcpy(&private->eps, &eps, 1684 + sizeof(struct dasd_ext_pool_sum)); 1685 + } 1686 + } 1687 + } 1688 + 1689 + /* Read Extent Pool Information - Logical Configuration Query */ 1690 + static int dasd_eckd_read_ext_pool_info(struct dasd_device *device) 1691 + { 1692 + struct dasd_eckd_private *private = device->private; 1693 + struct dasd_psf_prssd_data *prssdp; 1694 + struct dasd_rssd_lcq *lcq; 1695 + struct dasd_ccw_req *cqr; 1696 + struct ccw1 *ccw; 1697 + int rc; 1698 + 1699 + /* This command cannot be executed on an alias device */ 1700 + if (private->uid.type == UA_BASE_PAV_ALIAS || 1701 + private->uid.type == UA_HYPER_PAV_ALIAS) 1702 + return 0; 1703 + 1704 + cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */, 1705 + sizeof(*prssdp) + sizeof(*lcq), device, NULL); 1706 + if (IS_ERR(cqr)) { 1707 + DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", 1708 + "Could not allocate initialization request"); 1709 + return PTR_ERR(cqr); 1710 + } 1711 + 1712 + /* Prepare for Read Subsystem Data */ 1713 + prssdp = cqr->data; 1714 + memset(prssdp, 0, sizeof(*prssdp)); 1715 + prssdp->order = PSF_ORDER_PRSSD; 1716 + prssdp->suborder = PSF_SUBORDER_LCQ; /* Logical Configuration Query */ 1717 + 1718 + ccw = cqr->cpaddr; 1719 + ccw->cmd_code = DASD_ECKD_CCW_PSF; 1720 + ccw->count = sizeof(*prssdp); 1721 + ccw->flags |= CCW_FLAG_CC; 1722 + ccw->cda = (__u32)(addr_t)prssdp; 1723 + 1724 + lcq = (struct dasd_rssd_lcq *)(prssdp + 1); 1725 + memset(lcq, 0, sizeof(*lcq)); 1726 + 1727 + ccw++; 1728 + 
ccw->cmd_code = DASD_ECKD_CCW_RSSD; 1729 + ccw->count = sizeof(*lcq); 1730 + ccw->flags |= CCW_FLAG_SLI; 1731 + ccw->cda = (__u32)(addr_t)lcq; 1732 + 1733 + cqr->buildclk = get_tod_clock(); 1734 + cqr->status = DASD_CQR_FILLED; 1735 + cqr->startdev = device; 1736 + cqr->memdev = device; 1737 + cqr->block = NULL; 1738 + cqr->retries = 256; 1739 + cqr->expires = device->default_expires * HZ; 1740 + /* The command might not be supported. Suppress the error output */ 1741 + __set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags); 1742 + 1743 + rc = dasd_sleep_on_interruptible(cqr); 1744 + if (rc == 0) { 1745 + dasd_eckd_cpy_ext_pool_data(device, lcq); 1746 + } else { 1747 + dev_warn(&device->cdev->dev, 1748 + "Reading the logical configuration failed with rc=%d\n", rc); 1749 + } 1750 + 1751 + dasd_sfree_request(cqr, cqr->memdev); 1752 + 1753 + return rc; 1754 + } 1755 + 1756 + /* 1757 + * Depending on the device type, the extent size is specified either as 1758 + * cylinders per extent (CKD) or size per extent (FBA) 1759 + * A 1GB size corresponds to 1113cyl, and 16MB to 21cyl. 
1760 + */ 1761 + static int dasd_eckd_ext_size(struct dasd_device *device) 1762 + { 1763 + struct dasd_eckd_private *private = device->private; 1764 + struct dasd_ext_pool_sum eps = private->eps; 1765 + 1766 + if (!eps.flags.extent_size_valid) 1767 + return 0; 1768 + if (eps.extent_size.size_1G) 1769 + return 1113; 1770 + if (eps.extent_size.size_16M) 1771 + return 21; 1772 + 1773 + return 0; 1774 + } 1775 + 1776 + static int dasd_eckd_ext_pool_warn_thrshld(struct dasd_device *device) 1777 + { 1778 + struct dasd_eckd_private *private = device->private; 1779 + 1780 + return private->eps.warn_thrshld; 1781 + } 1782 + 1783 + static int dasd_eckd_ext_pool_cap_at_warnlevel(struct dasd_device *device) 1784 + { 1785 + struct dasd_eckd_private *private = device->private; 1786 + 1787 + return private->eps.flags.capacity_at_warnlevel; 1788 + } 1789 + 1790 + /* 1791 + * Extent Pool out of space 1792 + */ 1793 + static int dasd_eckd_ext_pool_oos(struct dasd_device *device) 1794 + { 1795 + struct dasd_eckd_private *private = device->private; 1796 + 1797 + return private->eps.flags.pool_oos; 1798 + } 1487 1799 1488 1800 /* 1489 1801 * Build CP for Perform Subsystem Function - SSC. 
··· 2019 1721 /* Read Feature Codes */ 2020 1722 dasd_eckd_read_features(device); 2021 1723 1724 + /* Read Volume Information */ 1725 + rc = dasd_eckd_read_vol_info(device); 1726 + if (rc) 1727 + goto out_err3; 1728 + 1729 + /* Read Extent Pool Information */ 1730 + rc = dasd_eckd_read_ext_pool_info(device); 1731 + if (rc) 1732 + goto out_err3; 1733 + 2022 1734 /* Read Device Characteristics */ 2023 1735 rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC, 2024 1736 &private->rdc_data, 64); ··· 2058 1750 readonly = dasd_device_is_ro(device); 2059 1751 if (readonly) 2060 1752 set_bit(DASD_FLAG_DEVICE_RO, &device->flags); 1753 + 1754 + if (dasd_eckd_is_ese(device)) 1755 + dasd_set_feature(device->cdev, DASD_FEATURE_DISCARD, 1); 2061 1756 2062 1757 dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) " 2063 1758 "with %d cylinders, %d heads, %d sectors%s\n", ··· 2134 1823 if (IS_ERR(cqr)) 2135 1824 return cqr; 2136 1825 ccw = cqr->cpaddr; 2137 - /* Define extent for the first 3 tracks. */ 2138 - define_extent(ccw++, cqr->data, 0, 2, 1826 + /* Define extent for the first 2 tracks. */ 1827 + define_extent(ccw++, cqr->data, 0, 1, 2139 1828 DASD_ECKD_CCW_READ_COUNT, device, 0); 2140 1829 LO_data = cqr->data + sizeof(struct DE_eckd_data); 2141 1830 /* Locate record for the first 4 records on track 0. */ ··· 2154 1843 count_data++; 2155 1844 } 2156 1845 2157 - /* Locate record for the first record on track 2. */ 1846 + /* Locate record for the first record on track 1. */ 2158 1847 ccw[-1].flags |= CCW_FLAG_CC; 2159 - locate_record(ccw++, LO_data++, 2, 0, 1, 1848 + locate_record(ccw++, LO_data++, 1, 0, 1, 2160 1849 DASD_ECKD_CCW_READ_COUNT, device, 0); 2161 1850 /* Read count ccw. 
*/ 2162 1851 ccw[-1].flags |= CCW_FLAG_CC; ··· 2171 1860 cqr->retries = 255; 2172 1861 cqr->buildclk = get_tod_clock(); 2173 1862 cqr->status = DASD_CQR_FILLED; 1863 + /* Set flags to suppress output for expected errors */ 1864 + set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags); 1865 + 2174 1866 return cqr; 2175 1867 } 2176 1868 ··· 2281 1967 } 2282 1968 } 2283 1969 if (i == 3) 2284 - count_area = &private->count_area[4]; 1970 + count_area = &private->count_area[3]; 2285 1971 2286 1972 if (private->uses_cdl == 0) { 2287 1973 for (i = 0; i < 5; i++) { ··· 2413 2099 */ 2414 2100 itcw_size = itcw_calc_size(0, count, 0); 2415 2101 2416 - cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev, 2417 - NULL); 2102 + cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev); 2418 2103 if (IS_ERR(cqr)) 2419 2104 return cqr; 2420 2105 ··· 2506 2193 } 2507 2194 cplength += count; 2508 2195 2509 - cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, 2510 - startdev, NULL); 2196 + cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev); 2511 2197 if (IS_ERR(cqr)) 2512 2198 return cqr; 2513 2199 ··· 2553 2241 } 2554 2242 2555 2243 static struct dasd_ccw_req * 2556 - dasd_eckd_build_format(struct dasd_device *base, 2557 - struct format_data_t *fdata, 2558 - int enable_pav) 2244 + dasd_eckd_build_format(struct dasd_device *base, struct dasd_device *startdev, 2245 + struct format_data_t *fdata, int enable_pav) 2559 2246 { 2560 2247 struct dasd_eckd_private *base_priv; 2561 2248 struct dasd_eckd_private *start_priv; 2562 - struct dasd_device *startdev = NULL; 2563 2249 struct dasd_ccw_req *fcp; 2564 2250 struct eckd_count *ect; 2565 2251 struct ch_t address; ··· 2648 2338 fdata->intensity); 2649 2339 return ERR_PTR(-EINVAL); 2650 2340 } 2651 - /* Allocate the format ccw request. 
*/ 2652 - fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, 2653 - datasize, startdev, NULL); 2341 + 2342 + fcp = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev); 2654 2343 if (IS_ERR(fcp)) 2655 2344 return fcp; 2656 2345 ··· 2822 2513 struct dasd_ccw_req *ccw_req; 2823 2514 2824 2515 if (!fmt_buffer) { 2825 - ccw_req = dasd_eckd_build_format(base, fdata, enable_pav); 2516 + ccw_req = dasd_eckd_build_format(base, NULL, fdata, enable_pav); 2826 2517 } else { 2827 2518 if (tpm) 2828 2519 ccw_req = dasd_eckd_build_check_tcw(base, fdata, ··· 2968 2659 rc = -EIO; 2969 2660 } 2970 2661 list_del_init(&cqr->blocklist); 2971 - dasd_sfree_request(cqr, device); 2662 + dasd_ffree_request(cqr, device); 2972 2663 private->count--; 2973 2664 } 2974 2665 ··· 3005 2696 { 3006 2697 return dasd_eckd_format_process_data(base, fdata, enable_pav, 0, NULL, 3007 2698 0, NULL); 2699 + } 2700 + 2701 + /* 2702 + * Callback function to free ESE format requests. 2703 + */ 2704 + static void dasd_eckd_ese_format_cb(struct dasd_ccw_req *cqr, void *data) 2705 + { 2706 + struct dasd_device *device = cqr->startdev; 2707 + struct dasd_eckd_private *private = device->private; 2708 + 2709 + private->count--; 2710 + dasd_ffree_request(cqr, device); 2711 + } 2712 + 2713 + static struct dasd_ccw_req * 2714 + dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr) 2715 + { 2716 + struct dasd_eckd_private *private; 2717 + struct format_data_t fdata; 2718 + unsigned int recs_per_trk; 2719 + struct dasd_ccw_req *fcqr; 2720 + struct dasd_device *base; 2721 + struct dasd_block *block; 2722 + unsigned int blksize; 2723 + struct request *req; 2724 + sector_t first_trk; 2725 + sector_t last_trk; 2726 + int rc; 2727 + 2728 + req = cqr->callback_data; 2729 + base = cqr->block->base; 2730 + private = base->private; 2731 + block = base->block; 2732 + blksize = block->bp_block; 2733 + recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize); 2734 + 2735 + first_trk = 
blk_rq_pos(req) >> block->s2b_shift; 2736 + sector_div(first_trk, recs_per_trk); 2737 + last_trk = 2738 + (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift; 2739 + sector_div(last_trk, recs_per_trk); 2740 + 2741 + fdata.start_unit = first_trk; 2742 + fdata.stop_unit = last_trk; 2743 + fdata.blksize = blksize; 2744 + fdata.intensity = private->uses_cdl ? DASD_FMT_INT_COMPAT : 0; 2745 + 2746 + rc = dasd_eckd_format_sanity_checks(base, &fdata); 2747 + if (rc) 2748 + return ERR_PTR(-EINVAL); 2749 + 2750 + /* 2751 + * We're building the request with PAV disabled as we're reusing 2752 + * the former startdev. 2753 + */ 2754 + fcqr = dasd_eckd_build_format(base, startdev, &fdata, 0); 2755 + if (IS_ERR(fcqr)) 2756 + return fcqr; 2757 + 2758 + fcqr->callback = dasd_eckd_ese_format_cb; 2759 + 2760 + return fcqr; 2761 + } 2762 + 2763 + /* 2764 + * When data is read from an unformatted area of an ESE volume, this function 2765 + * returns zeroed data and thereby mimics a read of zero data. 
2766 + */ 2767 + static void dasd_eckd_ese_read(struct dasd_ccw_req *cqr) 2768 + { 2769 + unsigned int blksize, off; 2770 + struct dasd_device *base; 2771 + struct req_iterator iter; 2772 + struct request *req; 2773 + struct bio_vec bv; 2774 + char *dst; 2775 + 2776 + req = (struct request *) cqr->callback_data; 2777 + base = cqr->block->base; 2778 + blksize = base->block->bp_block; 2779 + 2780 + rq_for_each_segment(bv, req, iter) { 2781 + dst = page_address(bv.bv_page) + bv.bv_offset; 2782 + for (off = 0; off < bv.bv_len; off += blksize) { 2783 + if (dst && rq_data_dir(req) == READ) { 2784 + dst += off; 2785 + memset(dst, 0, blksize); 2786 + } 2787 + } 2788 + } 3008 2789 } 3009 2790 3010 2791 /* ··· 3432 3033 } 3433 3034 } 3434 3035 3036 + static int dasd_eckd_ras_sanity_checks(struct dasd_device *device, 3037 + unsigned int first_trk, 3038 + unsigned int last_trk) 3039 + { 3040 + struct dasd_eckd_private *private = device->private; 3041 + unsigned int trks_per_vol; 3042 + int rc = 0; 3043 + 3044 + trks_per_vol = private->real_cyl * private->rdc_data.trk_per_cyl; 3045 + 3046 + if (first_trk >= trks_per_vol) { 3047 + dev_warn(&device->cdev->dev, 3048 + "Start track number %u used in the space release command is too big\n", 3049 + first_trk); 3050 + rc = -EINVAL; 3051 + } else if (last_trk >= trks_per_vol) { 3052 + dev_warn(&device->cdev->dev, 3053 + "Stop track number %u used in the space release command is too big\n", 3054 + last_trk); 3055 + rc = -EINVAL; 3056 + } else if (first_trk > last_trk) { 3057 + dev_warn(&device->cdev->dev, 3058 + "Start track %u used in the space release command exceeds the end track\n", 3059 + first_trk); 3060 + rc = -EINVAL; 3061 + } 3062 + return rc; 3063 + } 3064 + 3065 + /* 3066 + * Helper function to count the amount of involved extents within a given range 3067 + * with extent alignment in mind. 
3068 + */ 3069 + static int count_exts(unsigned int from, unsigned int to, int trks_per_ext) 3070 + { 3071 + int cur_pos = 0; 3072 + int count = 0; 3073 + int tmp; 3074 + 3075 + if (from == to) 3076 + return 1; 3077 + 3078 + /* Count first partial extent */ 3079 + if (from % trks_per_ext != 0) { 3080 + tmp = from + trks_per_ext - (from % trks_per_ext) - 1; 3081 + if (tmp > to) 3082 + tmp = to; 3083 + cur_pos = tmp - from + 1; 3084 + count++; 3085 + } 3086 + /* Count full extents */ 3087 + if (to - (from + cur_pos) + 1 >= trks_per_ext) { 3088 + tmp = to - ((to - trks_per_ext + 1) % trks_per_ext); 3089 + count += (tmp - (from + cur_pos) + 1) / trks_per_ext; 3090 + cur_pos = tmp; 3091 + } 3092 + /* Count last partial extent */ 3093 + if (cur_pos < to) 3094 + count++; 3095 + 3096 + return count; 3097 + } 3098 + 3099 + /* 3100 + * Release allocated space for a given range or an entire volume. 3101 + */ 3102 + static struct dasd_ccw_req * 3103 + dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block, 3104 + struct request *req, unsigned int first_trk, 3105 + unsigned int last_trk, int by_extent) 3106 + { 3107 + struct dasd_eckd_private *private = device->private; 3108 + struct dasd_dso_ras_ext_range *ras_range; 3109 + struct dasd_rssd_features *features; 3110 + struct dasd_dso_ras_data *ras_data; 3111 + u16 heads, beg_head, end_head; 3112 + int cur_to_trk, cur_from_trk; 3113 + struct dasd_ccw_req *cqr; 3114 + u32 beg_cyl, end_cyl; 3115 + struct ccw1 *ccw; 3116 + int trks_per_ext; 3117 + size_t ras_size; 3118 + size_t size; 3119 + int nr_exts; 3120 + void *rq; 3121 + int i; 3122 + 3123 + if (dasd_eckd_ras_sanity_checks(device, first_trk, last_trk)) 3124 + return ERR_PTR(-EINVAL); 3125 + 3126 + rq = req ? 
blk_mq_rq_to_pdu(req) : NULL; 3127 + 3128 + features = &private->features; 3129 + 3130 + trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl; 3131 + nr_exts = 0; 3132 + if (by_extent) 3133 + nr_exts = count_exts(first_trk, last_trk, trks_per_ext); 3134 + ras_size = sizeof(*ras_data); 3135 + size = ras_size + (nr_exts * sizeof(*ras_range)); 3136 + 3137 + cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, size, device, rq); 3138 + if (IS_ERR(cqr)) { 3139 + DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", 3140 + "Could not allocate RAS request"); 3141 + return cqr; 3142 + } 3143 + 3144 + ras_data = cqr->data; 3145 + memset(ras_data, 0, size); 3146 + 3147 + ras_data->order = DSO_ORDER_RAS; 3148 + ras_data->flags.vol_type = 0; /* CKD volume */ 3149 + /* Release specified extents or entire volume */ 3150 + ras_data->op_flags.by_extent = by_extent; 3151 + /* 3152 + * This bit guarantees initialisation of tracks within an extent that is 3153 + * not fully specified, but is only supported with a certain feature 3154 + * subset. 
3155 + */ 3156 + ras_data->op_flags.guarantee_init = !!(features->feature[56] & 0x01); 3157 + ras_data->lss = private->ned->ID; 3158 + ras_data->dev_addr = private->ned->unit_addr; 3159 + ras_data->nr_exts = nr_exts; 3160 + 3161 + if (by_extent) { 3162 + heads = private->rdc_data.trk_per_cyl; 3163 + cur_from_trk = first_trk; 3164 + cur_to_trk = first_trk + trks_per_ext - 3165 + (first_trk % trks_per_ext) - 1; 3166 + if (cur_to_trk > last_trk) 3167 + cur_to_trk = last_trk; 3168 + ras_range = (struct dasd_dso_ras_ext_range *)(cqr->data + ras_size); 3169 + 3170 + for (i = 0; i < nr_exts; i++) { 3171 + beg_cyl = cur_from_trk / heads; 3172 + beg_head = cur_from_trk % heads; 3173 + end_cyl = cur_to_trk / heads; 3174 + end_head = cur_to_trk % heads; 3175 + 3176 + set_ch_t(&ras_range->beg_ext, beg_cyl, beg_head); 3177 + set_ch_t(&ras_range->end_ext, end_cyl, end_head); 3178 + 3179 + cur_from_trk = cur_to_trk + 1; 3180 + cur_to_trk = cur_from_trk + trks_per_ext - 1; 3181 + if (cur_to_trk > last_trk) 3182 + cur_to_trk = last_trk; 3183 + ras_range++; 3184 + } 3185 + } 3186 + 3187 + ccw = cqr->cpaddr; 3188 + ccw->cda = (__u32)(addr_t)cqr->data; 3189 + ccw->cmd_code = DASD_ECKD_CCW_DSO; 3190 + ccw->count = size; 3191 + 3192 + cqr->startdev = device; 3193 + cqr->memdev = device; 3194 + cqr->block = block; 3195 + cqr->retries = 256; 3196 + cqr->expires = device->default_expires * HZ; 3197 + cqr->buildclk = get_tod_clock(); 3198 + cqr->status = DASD_CQR_FILLED; 3199 + 3200 + return cqr; 3201 + } 3202 + 3203 + static int dasd_eckd_release_space_full(struct dasd_device *device) 3204 + { 3205 + struct dasd_ccw_req *cqr; 3206 + int rc; 3207 + 3208 + cqr = dasd_eckd_dso_ras(device, NULL, NULL, 0, 0, 0); 3209 + if (IS_ERR(cqr)) 3210 + return PTR_ERR(cqr); 3211 + 3212 + rc = dasd_sleep_on_interruptible(cqr); 3213 + 3214 + dasd_sfree_request(cqr, cqr->memdev); 3215 + 3216 + return rc; 3217 + } 3218 + 3219 + static int dasd_eckd_release_space_trks(struct dasd_device *device, 3220 + 
unsigned int from, unsigned int to) 3221 + { 3222 + struct dasd_eckd_private *private = device->private; 3223 + struct dasd_block *block = device->block; 3224 + struct dasd_ccw_req *cqr, *n; 3225 + struct list_head ras_queue; 3226 + unsigned int device_exts; 3227 + int trks_per_ext; 3228 + int stop, step; 3229 + int cur_pos; 3230 + int rc = 0; 3231 + int retry; 3232 + 3233 + INIT_LIST_HEAD(&ras_queue); 3234 + 3235 + device_exts = private->real_cyl / dasd_eckd_ext_size(device); 3236 + trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl; 3237 + 3238 + /* Make sure device limits are not exceeded */ 3239 + step = trks_per_ext * min(device_exts, DASD_ECKD_RAS_EXTS_MAX); 3240 + cur_pos = from; 3241 + 3242 + do { 3243 + retry = 0; 3244 + while (cur_pos < to) { 3245 + stop = cur_pos + step - 3246 + ((cur_pos + step) % trks_per_ext) - 1; 3247 + if (stop > to) 3248 + stop = to; 3249 + 3250 + cqr = dasd_eckd_dso_ras(device, NULL, NULL, cur_pos, stop, 1); 3251 + if (IS_ERR(cqr)) { 3252 + rc = PTR_ERR(cqr); 3253 + if (rc == -ENOMEM) { 3254 + if (list_empty(&ras_queue)) 3255 + goto out; 3256 + retry = 1; 3257 + break; 3258 + } 3259 + goto err_out; 3260 + } 3261 + 3262 + spin_lock_irq(&block->queue_lock); 3263 + list_add_tail(&cqr->blocklist, &ras_queue); 3264 + spin_unlock_irq(&block->queue_lock); 3265 + cur_pos = stop + 1; 3266 + } 3267 + 3268 + rc = dasd_sleep_on_queue_interruptible(&ras_queue); 3269 + 3270 + err_out: 3271 + list_for_each_entry_safe(cqr, n, &ras_queue, blocklist) { 3272 + device = cqr->startdev; 3273 + private = device->private; 3274 + 3275 + spin_lock_irq(&block->queue_lock); 3276 + list_del_init(&cqr->blocklist); 3277 + spin_unlock_irq(&block->queue_lock); 3278 + dasd_sfree_request(cqr, device); 3279 + private->count--; 3280 + } 3281 + } while (retry); 3282 + 3283 + out: 3284 + return rc; 3285 + } 3286 + 3287 + static int dasd_eckd_release_space(struct dasd_device *device, 3288 + struct format_data_t *rdata) 3289 + { 3290 + if 
(rdata->intensity & DASD_FMT_INT_ESE_FULL) 3291 + return dasd_eckd_release_space_full(device); 3292 + else if (rdata->intensity == 0) 3293 + return dasd_eckd_release_space_trks(device, rdata->start_unit, 3294 + rdata->stop_unit); 3295 + else 3296 + return -EINVAL; 3297 + } 3298 + 3299 + static struct dasd_ccw_req * 3300 + dasd_eckd_build_cp_discard(struct dasd_device *device, struct dasd_block *block, 3301 + struct request *req, sector_t first_trk, 3302 + sector_t last_trk) 3303 + { 3304 + return dasd_eckd_dso_ras(device, block, req, first_trk, last_trk, 1); 3305 + } 3306 + 3435 3307 static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( 3436 3308 struct dasd_device *startdev, 3437 3309 struct dasd_block *block, ··· 3884 3214 cqr->retries = startdev->default_retries; 3885 3215 cqr->buildclk = get_tod_clock(); 3886 3216 cqr->status = DASD_CQR_FILLED; 3217 + 3218 + /* Set flags to suppress output for expected errors */ 3219 + if (dasd_eckd_is_ese(basedev)) { 3220 + set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags); 3221 + set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags); 3222 + set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags); 3223 + } 3224 + 3887 3225 return cqr; 3888 3226 } 3889 3227 ··· 4063 3385 cqr->retries = startdev->default_retries; 4064 3386 cqr->buildclk = get_tod_clock(); 4065 3387 cqr->status = DASD_CQR_FILLED; 3388 + 3389 + /* Set flags to suppress output for expected errors */ 3390 + if (dasd_eckd_is_ese(basedev)) 3391 + set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags); 3392 + 4066 3393 return cqr; 4067 3394 } 4068 3395 ··· 4387 3704 cqr->retries = startdev->default_retries; 4388 3705 cqr->buildclk = get_tod_clock(); 4389 3706 cqr->status = DASD_CQR_FILLED; 3707 + 3708 + /* Set flags to suppress output for expected errors */ 3709 + if (dasd_eckd_is_ese(basedev)) { 3710 + set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags); 3711 + set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags); 3712 + set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags); 3713 + } 3714 + 4390 3715 return cqr; 4391 3716 
out_error: 4392 3717 dasd_sfree_request(cqr, startdev); ··· 4446 3755 cmdrtd = private->features.feature[9] & 0x20; 4447 3756 cmdwtd = private->features.feature[12] & 0x40; 4448 3757 use_prefix = private->features.feature[8] & 0x01; 3758 + 3759 + if (req_op(req) == REQ_OP_DISCARD) 3760 + return dasd_eckd_build_cp_discard(startdev, block, req, 3761 + first_trk, last_trk); 4449 3762 4450 3763 cqr = NULL; 4451 3764 if (cdlspecial || dasd_page_cache) { ··· 4729 4034 struct dasd_block *block, 4730 4035 struct request *req) 4731 4036 { 4037 + struct dasd_device *startdev = NULL; 4732 4038 struct dasd_eckd_private *private; 4733 - struct dasd_device *startdev; 4734 - unsigned long flags; 4735 4039 struct dasd_ccw_req *cqr; 4040 + unsigned long flags; 4736 4041 4737 - startdev = dasd_alias_get_start_dev(base); 4042 + /* Discard requests can only be processed on base devices */ 4043 + if (req_op(req) != REQ_OP_DISCARD) 4044 + startdev = dasd_alias_get_start_dev(base); 4738 4045 if (!startdev) 4739 4046 startdev = base; 4740 4047 private = startdev->private; ··· 5662 4965 /* Read Feature Codes */ 5663 4966 dasd_eckd_read_features(device); 5664 4967 4968 + /* Read Volume Information */ 4969 + rc = dasd_eckd_read_vol_info(device); 4970 + if (rc) 4971 + goto out_err2; 4972 + 4973 + /* Read Extent Pool Information */ 4974 + rc = dasd_eckd_read_ext_pool_info(device); 4975 + if (rc) 4976 + goto out_err2; 4977 + 5665 4978 /* Read Device Characteristics */ 5666 4979 rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC, 5667 4980 &temp_rdc_data, 64); ··· 6342 5635 device->discipline->check_attention(device, lpum); 6343 5636 } 6344 5637 5638 + static void dasd_eckd_oos_resume(struct dasd_device *device) 5639 + { 5640 + struct dasd_eckd_private *private = device->private; 5641 + struct alias_pav_group *pavgroup, *tempgroup; 5642 + struct dasd_device *dev, *n; 5643 + unsigned long flags; 5644 + 5645 + spin_lock_irqsave(&private->lcu->lock, flags); 5646 + 
list_for_each_entry_safe(dev, n, &private->lcu->active_devices, 5647 + alias_list) { 5648 + if (dev->stopped & DASD_STOPPED_NOSPC) 5649 + dasd_generic_space_avail(dev); 5650 + } 5651 + list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices, 5652 + alias_list) { 5653 + if (dev->stopped & DASD_STOPPED_NOSPC) 5654 + dasd_generic_space_avail(dev); 5655 + } 5656 + /* devices in PAV groups */ 5657 + list_for_each_entry_safe(pavgroup, tempgroup, 5658 + &private->lcu->grouplist, 5659 + group) { 5660 + list_for_each_entry_safe(dev, n, &pavgroup->baselist, 5661 + alias_list) { 5662 + if (dev->stopped & DASD_STOPPED_NOSPC) 5663 + dasd_generic_space_avail(dev); 5664 + } 5665 + list_for_each_entry_safe(dev, n, &pavgroup->aliaslist, 5666 + alias_list) { 5667 + if (dev->stopped & DASD_STOPPED_NOSPC) 5668 + dasd_generic_space_avail(dev); 5669 + } 5670 + } 5671 + spin_unlock_irqrestore(&private->lcu->lock, flags); 5672 + } 5673 + 5674 + static void dasd_eckd_handle_oos(struct dasd_device *device, void *messages, 5675 + __u8 lpum) 5676 + { 5677 + struct dasd_oos_message *oos = messages; 5678 + 5679 + switch (oos->code) { 5680 + case REPO_WARN: 5681 + case POOL_WARN: 5682 + dev_warn(&device->cdev->dev, 5683 + "Extent pool usage has reached a critical value\n"); 5684 + dasd_eckd_oos_resume(device); 5685 + break; 5686 + case REPO_EXHAUST: 5687 + case POOL_EXHAUST: 5688 + dev_warn(&device->cdev->dev, 5689 + "Extent pool is exhausted\n"); 5690 + break; 5691 + case REPO_RELIEVE: 5692 + case POOL_RELIEVE: 5693 + dev_info(&device->cdev->dev, 5694 + "Extent pool physical space constraint has been relieved\n"); 5695 + break; 5696 + } 5697 + 5698 + /* In any case, update related data */ 5699 + dasd_eckd_read_ext_pool_info(device); 5700 + 5701 + /* to make sure there is no attention left schedule work again */ 5702 + device->discipline->check_attention(device, lpum); 5703 + } 5704 + 6345 5705 static void dasd_eckd_check_attention_work(struct work_struct *work) 6346 5706 { 6347 5707 
struct check_attention_work_data *data; ··· 6427 5653 rc = dasd_eckd_read_message_buffer(device, messages, data->lpum); 6428 5654 if (rc) 6429 5655 goto out; 5656 + 6430 5657 if (messages->length == ATTENTION_LENGTH_CUIR && 6431 5658 messages->format == ATTENTION_FORMAT_CUIR) 6432 5659 dasd_eckd_handle_cuir(device, messages, data->lpum); 5660 + if (messages->length == ATTENTION_LENGTH_OOS && 5661 + messages->format == ATTENTION_FORMAT_OOS) 5662 + dasd_eckd_handle_oos(device, messages, data->lpum); 5663 + 6433 5664 out: 6434 5665 dasd_put_device(device); 6435 5666 kfree(messages); ··· 6513 5734 dasd_schedule_requeue(device); 6514 5735 } 6515 5736 5737 + /* 5738 + * Initialize block layer request queue. 5739 + */ 5740 + static void dasd_eckd_setup_blk_queue(struct dasd_block *block) 5741 + { 5742 + unsigned int logical_block_size = block->bp_block; 5743 + struct request_queue *q = block->request_queue; 5744 + struct dasd_device *device = block->base; 5745 + struct dasd_eckd_private *private; 5746 + unsigned int max_discard_sectors; 5747 + unsigned int max_bytes; 5748 + unsigned int ext_bytes; /* Extent Size in Bytes */ 5749 + int recs_per_trk; 5750 + int trks_per_cyl; 5751 + int ext_limit; 5752 + int ext_size; /* Extent Size in Cylinders */ 5753 + int max; 5754 + 5755 + private = device->private; 5756 + trks_per_cyl = private->rdc_data.trk_per_cyl; 5757 + recs_per_trk = recs_per_track(&private->rdc_data, 0, logical_block_size); 5758 + 5759 + if (device->features & DASD_FEATURE_USERAW) { 5760 + /* 5761 + * the max_blocks value for raw_track access is 256 5762 + * it is higher than the native ECKD value because we 5763 + * only need one ccw per track 5764 + * so the max_hw_sectors are 5765 + * 2048 x 512B = 1024kB = 16 tracks 5766 + */ 5767 + max = DASD_ECKD_MAX_BLOCKS_RAW << block->s2b_shift; 5768 + } else { 5769 + max = DASD_ECKD_MAX_BLOCKS << block->s2b_shift; 5770 + } 5771 + blk_queue_flag_set(QUEUE_FLAG_NONROT, q); 5772 + q->limits.max_dev_sectors = max; 5773 + 
blk_queue_logical_block_size(q, logical_block_size); 5774 + blk_queue_max_hw_sectors(q, max); 5775 + blk_queue_max_segments(q, USHRT_MAX); 5776 + /* With page sized segments each segment can be translated into one idaw/tidaw */ 5777 + blk_queue_max_segment_size(q, PAGE_SIZE); 5778 + blk_queue_segment_boundary(q, PAGE_SIZE - 1); 5779 + 5780 + if (dasd_eckd_is_ese(device)) { 5781 + /* 5782 + * Depending on the extent size, up to UINT_MAX bytes can be 5783 + * accepted. However, neither DASD_ECKD_RAS_EXTS_MAX nor the 5784 + * device limits should be exceeded. 5785 + */ 5786 + ext_size = dasd_eckd_ext_size(device); 5787 + ext_limit = min(private->real_cyl / ext_size, DASD_ECKD_RAS_EXTS_MAX); 5788 + ext_bytes = ext_size * trks_per_cyl * recs_per_trk * 5789 + logical_block_size; 5790 + max_bytes = UINT_MAX - (UINT_MAX % ext_bytes); 5791 + if (max_bytes / ext_bytes > ext_limit) 5792 + max_bytes = ext_bytes * ext_limit; 5793 + 5794 + max_discard_sectors = max_bytes / 512; 5795 + 5796 + blk_queue_max_discard_sectors(q, max_discard_sectors); 5797 + blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); 5798 + q->limits.discard_granularity = ext_bytes; 5799 + q->limits.discard_alignment = ext_bytes; 5800 + } 5801 + } 5802 + 6516 5803 static struct ccw_driver dasd_eckd_driver = { 6517 5804 .driver = { 6518 5805 .name = "dasd-eckd", ··· 6599 5754 .int_class = IRQIO_DAS, 6600 5755 }; 6601 5756 6602 - /* 6603 - * max_blocks is dependent on the amount of storage that is available 6604 - * in the static io buffer for each device. Currently each device has 6605 - * 8192 bytes (=2 pages). For 64 bit one dasd_mchunkt_t structure has 6606 - * 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use 6607 - * up to 16 bytes (8 for the ccw and 8 for the idal pointer). In 6608 - * addition we have one define extent ccw + 16 bytes of data and one 6609 - * locate record ccw + 16 bytes of data. That makes: 6610 - * (8192 - 24 - 136 - 8 - 16 - 8 - 16) / 16 = 499 blocks at maximum. 
6611 - * We want to fit two into the available memory so that we can immediately 6612 - * start the next request if one finishes off. That makes 249.5 blocks 6613 - * for one request. Give a little safety and the result is 240. 6614 - */ 6615 5757 static struct dasd_discipline dasd_eckd_discipline = { 6616 5758 .owner = THIS_MODULE, 6617 5759 .name = "ECKD", 6618 5760 .ebcname = "ECKD", 6619 - .max_blocks = 190, 6620 5761 .check_device = dasd_eckd_check_characteristics, 6621 5762 .uncheck_device = dasd_eckd_uncheck_device, 6622 5763 .do_analysis = dasd_eckd_do_analysis, ··· 6610 5779 .basic_to_ready = dasd_eckd_basic_to_ready, 6611 5780 .online_to_ready = dasd_eckd_online_to_ready, 6612 5781 .basic_to_known = dasd_eckd_basic_to_known, 5782 + .setup_blk_queue = dasd_eckd_setup_blk_queue, 6613 5783 .fill_geometry = dasd_eckd_fill_geometry, 6614 5784 .start_IO = dasd_start_IO, 6615 5785 .term_IO = dasd_term_IO, ··· 6638 5806 .disable_hpf = dasd_eckd_disable_hpf_device, 6639 5807 .hpf_enabled = dasd_eckd_hpf_enabled, 6640 5808 .reset_path = dasd_eckd_reset_path, 5809 + .is_ese = dasd_eckd_is_ese, 5810 + .space_allocated = dasd_eckd_space_allocated, 5811 + .space_configured = dasd_eckd_space_configured, 5812 + .logical_capacity = dasd_eckd_logical_capacity, 5813 + .release_space = dasd_eckd_release_space, 5814 + .ext_pool_id = dasd_eckd_ext_pool_id, 5815 + .ext_size = dasd_eckd_ext_size, 5816 + .ext_pool_cap_at_warnlevel = dasd_eckd_ext_pool_cap_at_warnlevel, 5817 + .ext_pool_warn_thrshld = dasd_eckd_ext_pool_warn_thrshld, 5818 + .ext_pool_oos = dasd_eckd_ext_pool_oos, 5819 + .ext_pool_exhaust = dasd_eckd_ext_pool_exhaust, 5820 + .ese_format = dasd_eckd_ese_format, 5821 + .ese_read = dasd_eckd_ese_read, 6641 5822 }; 6642 5823 6643 5824 static int __init ··· 6663 5818 GFP_KERNEL | GFP_DMA); 6664 5819 if (!dasd_reserve_req) 6665 5820 return -ENOMEM; 5821 + dasd_vol_info_req = kmalloc(sizeof(*dasd_vol_info_req), 5822 + GFP_KERNEL | GFP_DMA); 5823 + if (!dasd_vol_info_req) 
5824 + return -ENOMEM; 6666 5825 path_verification_worker = kmalloc(sizeof(*path_verification_worker), 6667 5826 GFP_KERNEL | GFP_DMA); 6668 5827 if (!path_verification_worker) { 6669 5828 kfree(dasd_reserve_req); 5829 + kfree(dasd_vol_info_req); 6670 5830 return -ENOMEM; 6671 5831 } 6672 5832 rawpadpage = (void *)__get_free_page(GFP_KERNEL); 6673 5833 if (!rawpadpage) { 6674 5834 kfree(path_verification_worker); 6675 5835 kfree(dasd_reserve_req); 5836 + kfree(dasd_vol_info_req); 6676 5837 return -ENOMEM; 6677 5838 } 6678 5839 ret = ccw_driver_register(&dasd_eckd_driver); ··· 6687 5836 else { 6688 5837 kfree(path_verification_worker); 6689 5838 kfree(dasd_reserve_req); 5839 + kfree(dasd_vol_info_req); 6690 5840 free_page((unsigned long)rawpadpage); 6691 5841 } 6692 5842 return ret;
+150 -29
drivers/s390/block/dasd_eckd.h
··· 50 50 #define DASD_ECKD_CCW_PFX_READ 0xEA 51 51 #define DASD_ECKD_CCW_RSCK 0xF9 52 52 #define DASD_ECKD_CCW_RCD 0xFA 53 + #define DASD_ECKD_CCW_DSO 0xF7 54 + 55 + /* Define Subssystem Function / Orders */ 56 + #define DSO_ORDER_RAS 0x81 57 + 58 + /* 59 + * Perform Subsystem Function / Orders 60 + */ 61 + #define PSF_ORDER_PRSSD 0x18 62 + #define PSF_ORDER_CUIR_RESPONSE 0x1A 63 + #define PSF_ORDER_SSC 0x1D 53 64 54 65 /* 55 66 * Perform Subsystem Function / Sub-Orders 56 67 */ 57 - #define PSF_ORDER_PRSSD 0x18 58 - #define PSF_ORDER_CUIR_RESPONSE 0x1A 59 - #define PSF_SUBORDER_QHA 0x1C 60 - #define PSF_ORDER_SSC 0x1D 68 + #define PSF_SUBORDER_QHA 0x1C /* Query Host Access */ 69 + #define PSF_SUBORDER_VSQ 0x52 /* Volume Storage Query */ 70 + #define PSF_SUBORDER_LCQ 0x53 /* Logical Configuration Query */ 61 71 62 72 /* 63 73 * CUIR response condition codes ··· 90 80 #define CUIR_RESUME 0x02 91 81 92 82 /* 83 + * Out-of-space (OOS) Codes 84 + */ 85 + #define REPO_WARN 0x01 86 + #define REPO_EXHAUST 0x02 87 + #define POOL_WARN 0x03 88 + #define POOL_EXHAUST 0x04 89 + #define REPO_RELIEVE 0x05 90 + #define POOL_RELIEVE 0x06 91 + 92 + /* 93 93 * attention message definitions 94 94 */ 95 95 #define ATTENTION_LENGTH_CUIR 0x0e 96 96 #define ATTENTION_FORMAT_CUIR 0x01 97 + #define ATTENTION_LENGTH_OOS 0x10 98 + #define ATTENTION_FORMAT_OOS 0x06 97 99 98 100 #define DASD_ECKD_PG_GROUPED 0x10 99 101 ··· 120 98 121 99 #define DASD_ECKD_PATH_THRHLD 256 122 100 #define DASD_ECKD_PATH_INTERVAL 300 101 + 102 + /* 103 + * Maximum number of blocks to be chained 104 + */ 105 + #define DASD_ECKD_MAX_BLOCKS 190 106 + #define DASD_ECKD_MAX_BLOCKS_RAW 256 123 107 124 108 /***************************************************************************** 125 109 * SECTION: Type Definitions ··· 144 116 __u16 head; 145 117 } __attribute__ ((packed)); 146 118 147 - struct chs_t { 148 - __u16 cyl; 149 - __u16 head; 150 - __u32 sector; 151 - } __attribute__ ((packed)); 152 - 153 119 struct chr_t 
{ 154 120 __u16 cyl; 155 121 __u16 head; 156 122 __u8 record; 157 - } __attribute__ ((packed)); 158 - 159 - struct geom_t { 160 - __u16 cyl; 161 - __u16 head; 162 - __u32 sector; 163 - } __attribute__ ((packed)); 164 - 165 - struct eckd_home { 166 - __u8 skip_control[14]; 167 - __u16 cell_number; 168 - __u8 physical_addr[3]; 169 - __u8 flag; 170 - struct ch_t track_addr; 171 - __u8 reserved; 172 - __u8 key_length; 173 - __u8 reserved2[2]; 174 123 } __attribute__ ((packed)); 175 124 176 125 struct DE_eckd_data { ··· 392 387 char messages[4087]; 393 388 } __packed; 394 389 390 + /* 391 + * Read Subsystem Data - Volume Storage Query 392 + */ 393 + struct dasd_rssd_vsq { 394 + struct { 395 + __u8 tse:1; 396 + __u8 space_not_available:1; 397 + __u8 ese:1; 398 + __u8 unused:5; 399 + } __packed vol_info; 400 + __u8 unused1; 401 + __u16 extent_pool_id; 402 + __u8 warn_cap_limit; 403 + __u8 warn_cap_guaranteed; 404 + __u16 unused2; 405 + __u32 limit_capacity; 406 + __u32 guaranteed_capacity; 407 + __u32 space_allocated; 408 + __u32 space_configured; 409 + __u32 logical_capacity; 410 + } __packed; 411 + 412 + /* 413 + * Extent Pool Summary 414 + */ 415 + struct dasd_ext_pool_sum { 416 + __u16 pool_id; 417 + __u8 repo_warn_thrshld; 418 + __u8 warn_thrshld; 419 + struct { 420 + __u8 type:1; /* 0 - CKD / 1 - FB */ 421 + __u8 track_space_efficient:1; 422 + __u8 extent_space_efficient:1; 423 + __u8 standard_volume:1; 424 + __u8 extent_size_valid:1; 425 + __u8 capacity_at_warnlevel:1; 426 + __u8 pool_oos:1; 427 + __u8 unused0:1; 428 + __u8 unused1; 429 + } __packed flags; 430 + struct { 431 + __u8 reserved0:1; 432 + __u8 size_1G:1; 433 + __u8 reserved1:5; 434 + __u8 size_16M:1; 435 + } __packed extent_size; 436 + __u8 unused; 437 + } __packed; 438 + 439 + /* 440 + * Read Subsystem Data-Response - Logical Configuration Query - Header 441 + */ 442 + struct dasd_rssd_lcq { 443 + __u16 data_length; /* Length of data returned */ 444 + __u16 pool_count; /* Count of extent pools returned 
- Max: 448 */ 445 + struct { 446 + __u8 pool_info_valid:1; /* Detailed Information valid */ 447 + __u8 pool_id_volume:1; 448 + __u8 pool_id_cec:1; 449 + __u8 unused0:5; 450 + __u8 unused1; 451 + } __packed header_flags; 452 + char sfi_type[6]; /* Storage Facility Image Type (EBCDIC) */ 453 + char sfi_model[3]; /* Storage Facility Image Model (EBCDIC) */ 454 + __u8 sfi_seq_num[10]; /* Storage Facility Image Sequence Number */ 455 + __u8 reserved[7]; 456 + struct dasd_ext_pool_sum ext_pool_sum[448]; 457 + } __packed; 458 + 459 + struct dasd_oos_message { 460 + __u16 length; 461 + __u8 format; 462 + __u8 code; 463 + __u8 percentage_empty; 464 + __u8 reserved; 465 + __u16 ext_pool_id; 466 + __u16 token; 467 + __u8 unused[6]; 468 + } __packed; 469 + 395 470 struct dasd_cuir_message { 396 471 __u16 length; 397 472 __u8 format; ··· 545 460 unsigned char suborder; 546 461 unsigned char reserved[59]; 547 462 } __attribute__((packed)); 463 + 464 + /* Maximum number of extents for a single Release Allocated Space command */ 465 + #define DASD_ECKD_RAS_EXTS_MAX 110U 466 + 467 + struct dasd_dso_ras_ext_range { 468 + struct ch_t beg_ext; 469 + struct ch_t end_ext; 470 + } __packed; 471 + 472 + /* 473 + * Define Subsytem Operation - Release Allocated Space 474 + */ 475 + struct dasd_dso_ras_data { 476 + __u8 order; 477 + struct { 478 + __u8 message:1; /* Must be zero */ 479 + __u8 reserved1:2; 480 + __u8 vol_type:1; /* 0 - CKD/FBA, 1 - FB */ 481 + __u8 reserved2:4; 482 + } __packed flags; 483 + /* Operation Flags to specify scope */ 484 + struct { 485 + __u8 reserved1:2; 486 + /* Release Space by Extent */ 487 + __u8 by_extent:1; /* 0 - entire volume, 1 - specified extents */ 488 + __u8 guarantee_init:1; 489 + __u8 force_release:1; /* Internal - will be ignored */ 490 + __u16 reserved2:11; 491 + } __packed op_flags; 492 + __u8 lss; 493 + __u8 dev_addr; 494 + __u32 reserved1; 495 + __u8 reserved2[10]; 496 + __u16 nr_exts; /* Defines number of ext_scope - max 110 */ 497 + __u16 
reserved3; 498 + } __packed; 548 499 549 500 550 501 /* ··· 672 551 int uses_cdl; 673 552 struct attrib_data_t attrib; /* e.g. cache operations */ 674 553 struct dasd_rssd_features features; 554 + struct dasd_rssd_vsq vsq; 555 + struct dasd_ext_pool_sum eps; 675 556 u32 real_cyl; 676 557 677 558 /* alias managemnet */ ··· 695 572 struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *); 696 573 void dasd_alias_handle_summary_unit_check(struct work_struct *); 697 574 void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *); 698 - void dasd_alias_lcu_setup_complete(struct dasd_device *); 699 - void dasd_alias_wait_for_lcu_setup(struct dasd_device *); 700 575 int dasd_alias_update_add_device(struct dasd_device *); 701 576 #endif /* DASD_ECKD_H */
+1
drivers/s390/block/dasd_eer.c
··· 386 386 dasd_eer_write_standard_trigger(device, cqr, id); 387 387 break; 388 388 case DASD_EER_NOPATH: 389 + case DASD_EER_NOSPC: 389 390 dasd_eer_write_standard_trigger(device, NULL, id); 390 391 break; 391 392 case DASD_EER_STATECHANGE:
+32 -13
drivers/s390/block/dasd_fba.c
··· 770 770 } 771 771 772 772 /* 773 - * max_blocks is dependent on the amount of storage that is available 774 - * in the static io buffer for each device. Currently each device has 775 - * 8192 bytes (=2 pages). For 64 bit one dasd_mchunkt_t structure has 776 - * 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use 777 - * up to 16 bytes (8 for the ccw and 8 for the idal pointer). In 778 - * addition we have one define extent ccw + 16 bytes of data and a 779 - * locate record ccw for each block (stupid devices!) + 16 bytes of data. 780 - * That makes: 781 - * (8192 - 24 - 136 - 8 - 16) / 40 = 200.2 blocks at maximum. 782 - * We want to fit two into the available memory so that we can immediately 783 - * start the next request if one finishes off. That makes 100.1 blocks 784 - * for one request. Give a little safety and the result is 96. 773 + * Initialize block layer request queue. 785 774 */ 775 + static void dasd_fba_setup_blk_queue(struct dasd_block *block) 776 + { 777 + unsigned int logical_block_size = block->bp_block; 778 + struct request_queue *q = block->request_queue; 779 + unsigned int max_bytes, max_discard_sectors; 780 + int max; 781 + 782 + max = DASD_FBA_MAX_BLOCKS << block->s2b_shift; 783 + blk_queue_flag_set(QUEUE_FLAG_NONROT, q); 784 + q->limits.max_dev_sectors = max; 785 + blk_queue_logical_block_size(q, logical_block_size); 786 + blk_queue_max_hw_sectors(q, max); 787 + blk_queue_max_segments(q, USHRT_MAX); 788 + /* With page sized segments each segment can be translated into one idaw/tidaw */ 789 + blk_queue_max_segment_size(q, PAGE_SIZE); 790 + blk_queue_segment_boundary(q, PAGE_SIZE - 1); 791 + 792 + q->limits.discard_granularity = logical_block_size; 793 + q->limits.discard_alignment = PAGE_SIZE; 794 + 795 + /* Calculate max_discard_sectors and make it PAGE aligned */ 796 + max_bytes = USHRT_MAX * logical_block_size; 797 + max_bytes = ALIGN_DOWN(max_bytes, PAGE_SIZE); 798 + max_discard_sectors = max_bytes / 
logical_block_size; 799 + 800 + blk_queue_max_discard_sectors(q, max_discard_sectors); 801 + blk_queue_max_write_zeroes_sectors(q, max_discard_sectors); 802 + blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); 803 + } 804 + 786 805 static struct dasd_discipline dasd_fba_discipline = { 787 806 .owner = THIS_MODULE, 788 807 .name = "FBA ", 789 808 .ebcname = "FBA ", 790 - .max_blocks = 96, 791 809 .check_device = dasd_fba_check_characteristics, 792 810 .do_analysis = dasd_fba_do_analysis, 793 811 .verify_path = dasd_generic_verify_path, 812 + .setup_blk_queue = dasd_fba_setup_blk_queue, 794 813 .fill_geometry = dasd_fba_fill_geometry, 795 814 .start_IO = dasd_start_IO, 796 815 .term_IO = dasd_term_IO,
+5
drivers/s390/block/dasd_fba.h
··· 9 9 #ifndef DASD_FBA_H 10 10 #define DASD_FBA_H 11 11 12 + /* 13 + * Maximum number of blocks to be chained 14 + */ 15 + #define DASD_FBA_MAX_BLOCKS 96 16 + 12 17 struct DE_fba_data { 13 18 struct { 14 19 unsigned char perm:2; /* Permissions on this extent */
+32 -1
drivers/s390/block/dasd_int.h
··· 268 268 struct module *owner; 269 269 char ebcname[8]; /* a name used for tagging and printks */ 270 270 char name[8]; /* a name used for tagging and printks */ 271 - int max_blocks; /* maximum number of blocks to be chained */ 272 271 273 272 struct list_head list; /* used for list of disciplines */ 274 273 ··· 306 307 int (*online_to_ready) (struct dasd_device *); 307 308 int (*basic_to_known)(struct dasd_device *); 308 309 310 + /* 311 + * Initialize block layer request queue. 312 + */ 313 + void (*setup_blk_queue)(struct dasd_block *); 309 314 /* (struct dasd_device *); 310 315 * Device operation functions. build_cp creates a ccw chain for 311 316 * a block device request, start_io starts the request and ··· 370 367 void (*disable_hpf)(struct dasd_device *); 371 368 int (*hpf_enabled)(struct dasd_device *); 372 369 void (*reset_path)(struct dasd_device *, __u8); 370 + 371 + /* 372 + * Extent Space Efficient (ESE) relevant functions 373 + */ 374 + int (*is_ese)(struct dasd_device *); 375 + /* Capacity */ 376 + int (*space_allocated)(struct dasd_device *); 377 + int (*space_configured)(struct dasd_device *); 378 + int (*logical_capacity)(struct dasd_device *); 379 + int (*release_space)(struct dasd_device *, struct format_data_t *); 380 + /* Extent Pool */ 381 + int (*ext_pool_id)(struct dasd_device *); 382 + int (*ext_size)(struct dasd_device *); 383 + int (*ext_pool_cap_at_warnlevel)(struct dasd_device *); 384 + int (*ext_pool_warn_thrshld)(struct dasd_device *); 385 + int (*ext_pool_oos)(struct dasd_device *); 386 + int (*ext_pool_exhaust)(struct dasd_device *, struct dasd_ccw_req *); 387 + struct dasd_ccw_req *(*ese_format)(struct dasd_device *, struct dasd_ccw_req *); 388 + void (*ese_read)(struct dasd_ccw_req *); 373 389 }; 374 390 375 391 extern struct dasd_discipline *dasd_diag_discipline_pointer; ··· 408 386 #define DASD_EER_NOPATH 2 409 387 #define DASD_EER_STATECHANGE 3 410 388 #define DASD_EER_PPRCSUSPEND 4 389 + #define DASD_EER_NOSPC 5 411 390 
412 391 /* DASD path handling */ 413 392 ··· 505 482 spinlock_t mem_lock; 506 483 void *ccw_mem; 507 484 void *erp_mem; 485 + void *ese_mem; 508 486 struct list_head ccw_chunks; 509 487 struct list_head erp_chunks; 488 + struct list_head ese_chunks; 510 489 511 490 atomic_t tasklet_scheduled; 512 491 struct tasklet_struct tasklet; ··· 583 558 #define DASD_STOPPED_SU 16 /* summary unit check handling */ 584 559 #define DASD_STOPPED_PM 32 /* pm state transition */ 585 560 #define DASD_UNRESUMED_PM 64 /* pm resume failed state */ 561 + #define DASD_STOPPED_NOSPC 128 /* no space left */ 586 562 587 563 /* per device flags */ 588 564 #define DASD_FLAG_OFFLINE 3 /* device is in offline processing */ ··· 726 700 727 701 struct dasd_ccw_req * 728 702 dasd_smalloc_request(int, int, int, struct dasd_device *, struct dasd_ccw_req *); 703 + struct dasd_ccw_req *dasd_fmalloc_request(int, int, int, struct dasd_device *); 729 704 void dasd_sfree_request(struct dasd_ccw_req *, struct dasd_device *); 705 + void dasd_ffree_request(struct dasd_ccw_req *, struct dasd_device *); 730 706 void dasd_wakeup_cb(struct dasd_ccw_req *, void *); 731 707 732 708 struct dasd_device *dasd_alloc_device(void); ··· 755 727 int dasd_sleep_on(struct dasd_ccw_req *); 756 728 int dasd_sleep_on_queue(struct list_head *); 757 729 int dasd_sleep_on_immediatly(struct dasd_ccw_req *); 730 + int dasd_sleep_on_queue_interruptible(struct list_head *); 758 731 int dasd_sleep_on_interruptible(struct dasd_ccw_req *); 759 732 void dasd_device_set_timer(struct dasd_device *, int); 760 733 void dasd_device_clear_timer(struct dasd_device *); ··· 779 750 enum uc_todo dasd_generic_uc_handler(struct ccw_device *, struct irb *); 780 751 void dasd_generic_path_event(struct ccw_device *, int *); 781 752 int dasd_generic_verify_path(struct dasd_device *, __u8); 753 + void dasd_generic_space_exhaust(struct dasd_device *, struct dasd_ccw_req *); 754 + void dasd_generic_space_avail(struct dasd_device *); 782 755 783 756 int 
dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int); 784 757 char *dasd_get_sense(struct irb *);
+56
drivers/s390/block/dasd_ioctl.c
··· 333 333 return rc; 334 334 } 335 335 336 + static int dasd_release_space(struct dasd_device *device, 337 + struct format_data_t *rdata) 338 + { 339 + if (!device->discipline->is_ese && !device->discipline->is_ese(device)) 340 + return -ENOTSUPP; 341 + if (!device->discipline->release_space) 342 + return -ENOTSUPP; 343 + 344 + return device->discipline->release_space(device, rdata); 345 + } 346 + 347 + /* 348 + * Release allocated space 349 + */ 350 + static int dasd_ioctl_release_space(struct block_device *bdev, void __user *argp) 351 + { 352 + struct format_data_t rdata; 353 + struct dasd_device *base; 354 + int rc = 0; 355 + 356 + if (!capable(CAP_SYS_ADMIN)) 357 + return -EACCES; 358 + if (!argp) 359 + return -EINVAL; 360 + 361 + base = dasd_device_from_gendisk(bdev->bd_disk); 362 + if (!base) 363 + return -ENODEV; 364 + if (base->features & DASD_FEATURE_READONLY || 365 + test_bit(DASD_FLAG_DEVICE_RO, &base->flags)) { 366 + rc = -EROFS; 367 + goto out_err; 368 + } 369 + if (bdev != bdev->bd_contains) { 370 + pr_warn("%s: The specified DASD is a partition and tracks cannot be released\n", 371 + dev_name(&base->cdev->dev)); 372 + rc = -EINVAL; 373 + goto out_err; 374 + } 375 + 376 + if (copy_from_user(&rdata, argp, sizeof(rdata))) { 377 + rc = -EFAULT; 378 + goto out_err; 379 + } 380 + 381 + rc = dasd_release_space(base, &rdata); 382 + 383 + out_err: 384 + dasd_put_device(base); 385 + 386 + return rc; 387 + } 388 + 336 389 #ifdef CONFIG_DASD_PROFILE 337 390 /* 338 391 * Reset device profile information ··· 647 594 break; 648 595 case BIODASDREADALLCMB: 649 596 rc = dasd_ioctl_readall_cmb(block, cmd, argp); 597 + break; 598 + case BIODASDRAS: 599 + rc = dasd_ioctl_release_space(bdev, argp); 650 600 break; 651 601 default: 652 602 /* if the discipline has an ioctl method try it. */
-1
drivers/s390/char/sclp_early.c
··· 41 41 sclp.has_hvs = !!(sccb->fac119 & 0x80); 42 42 sclp.has_kss = !!(sccb->fac98 & 0x01); 43 43 sclp.has_sipl = !!(sccb->cbl & 0x02); 44 - sclp.has_sipl_g2 = !!(sccb->cbl & 0x04); 45 44 if (sccb->fac85 & 0x02) 46 45 S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP; 47 46 if (sccb->fac91 & 0x40)
-1
drivers/s390/crypto/ap_bus.c
··· 208 208 return -EINVAL; 209 209 return ap_qci(info); 210 210 } 211 - EXPORT_SYMBOL(ap_query_configuration); 212 211 213 212 /** 214 213 * ap_init_configuration(): Allocate and query configuration array.
+1 -2
drivers/s390/crypto/vfio_ap_ops.c
··· 115 115 * Unregisters the ISC in the GIB when the saved ISC not invalid. 116 116 * Unpin the guest's page holding the NIB when it exist. 117 117 * Reset the saved_pfn and saved_isc to invalid values. 118 - * Clear the pointer to the matrix mediated device. 119 118 * 120 119 */ 121 120 static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q) ··· 126 127 &q->saved_pfn, 1); 127 128 q->saved_pfn = 0; 128 129 q->saved_isc = VFIO_AP_ISC_INVALID; 129 - q->matrix_mdev = NULL; 130 130 } 131 131 132 132 /** ··· 177 179 status.response_code); 178 180 end_free: 179 181 vfio_ap_free_aqic_resources(q); 182 + q->matrix_mdev = NULL; 180 183 return status; 181 184 } 182 185