Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 fixes from Martin Schwidefsky:
"Several last minute bug fixes.

Two of them are on the larger side for rc7: the dasd format patch for
older storage devices, and the store-clock-fast patch where we have
been too optimistic with an optimization"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
s390/time: correct use of store clock fast
s390/vmlogrdr: fix array access in vmlogrdr_open()
s390/compat,signal: fix return value of copy_siginfo_(to|from)_user32()
s390/dasd: check for availability of prefix command during format
s390/mm,kvm: fix software dirty bits vs. kvm for old machines

Changed files (+111 -65):

arch/s390/include/asm/pgtable.h (+3 -1)
···
 static inline void pgste_set_pte(pte_t *ptep, pte_t entry)
 {
-        if (!MACHINE_HAS_ESOP && (pte_val(entry) & _PAGE_WRITE)) {
+        if (!MACHINE_HAS_ESOP &&
+            (pte_val(entry) & _PAGE_PRESENT) &&
+            (pte_val(entry) & _PAGE_WRITE)) {
                 /*
                  * Without enhanced suppression-on-protection force
                  * the dirty bit on for all writable ptes.
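
The fix above only changes the predicate: without ESOP the software dirty bit is now forced only for ptes that are both present and writable, instead of for any pte value that happens to have the write bit set. A standalone sketch of the before/after check, using hypothetical flag values rather than the real s390 _PAGE_* definitions:

    /*
     * Hypothetical flag values for illustration only; the real _PAGE_PRESENT
     * and _PAGE_WRITE definitions live in arch/s390/include/asm/pgtable.h.
     */
    #include <stdio.h>

    #define FAKE_PAGE_PRESENT 0x001UL
    #define FAKE_PAGE_WRITE   0x002UL

    static int old_check(unsigned long pte)
    {
            return (pte & FAKE_PAGE_WRITE) != 0;    /* pre-fix condition */
    }

    static int new_check(unsigned long pte)
    {
            return (pte & FAKE_PAGE_PRESENT) &&     /* post-fix condition */
                   (pte & FAKE_PAGE_WRITE);
    }

    int main(void)
    {
            /* a non-present entry whose write bit happens to be set */
            unsigned long pte = FAKE_PAGE_WRITE;

            printf("old: %d  new: %d\n", old_check(pte), new_check(pte));
            return 0;
    }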

arch/s390/include/asm/timex.h (+14 -14)
···
 typedef unsigned long long cycles_t;

-static inline unsigned long long get_tod_clock(void)
-{
-        unsigned long long clk;
-
-#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
-        asm volatile(".insn s,0xb27c0000,%0" : "=Q" (clk) : : "cc");
-#else
-        asm volatile("stck %0" : "=Q" (clk) : : "cc");
-#endif
-        return clk;
-}
-
 static inline void get_tod_clock_ext(char *clk)
 {
         asm volatile("stcke %0" : "=Q" (*clk) : : "cc");
 }

-static inline unsigned long long get_tod_clock_xt(void)
+static inline unsigned long long get_tod_clock(void)
 {
         unsigned char clk[16];
         get_tod_clock_ext(clk);
         return *((unsigned long long *)&clk[1]);
+}
+
+static inline unsigned long long get_tod_clock_fast(void)
+{
+#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
+        unsigned long long clk;
+
+        asm volatile("stckf %0" : "=Q" (clk) : : "cc");
+        return clk;
+#else
+        return get_tod_clock();
+#endif
 }

 static inline cycles_t get_cycles(void)
···
  */
 static inline unsigned long long get_tod_clock_monotonic(void)
 {
-        return get_tod_clock_xt() - sched_clock_base_cc;
+        return get_tod_clock() - sched_clock_base_cc;
 }

 /**
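
Background on the reshuffle above, as far as the instruction formats go: STCKE stores a 16-byte value whose first byte carries the epoch index, with the classic 8-byte TOD clock value (what STCK or STCKF would return) following at byte offset 1; that is why the new get_tod_clock() reads an unsigned long long starting at &clk[1] (a plain cast is fine in the kernel because s390 is big-endian). A portable sketch of the same extraction with a made-up buffer and explicit shifts:

    /*
     * Portable illustration of pulling the 64-bit TOD value out of a 16-byte
     * STCKE-style buffer at byte offset 1. The buffer contents are made up;
     * on s390 the kernel can simply cast &clk[1], as in the patch.
     */
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t tod_from_stcke(const unsigned char clk[16])
    {
            uint64_t tod = 0;
            int i;

            /* bytes 1..8 hold the same bits that STCK would have stored */
            for (i = 1; i <= 8; i++)
                    tod = (tod << 8) | clk[i];
            return tod;
    }

    int main(void)
    {
            unsigned char clk[16] = {
                    0x00,                                   /* epoch index byte */
                    0x11, 0x22, 0x33, 0x44,
                    0x55, 0x66, 0x77, 0x88,                 /* TOD bits 0-63 */
                    /* rest: low-order clock bits / programmable field */
            };

            printf("tod = 0x%016llx\n",
                   (unsigned long long)tod_from_stcke(clk));
            return 0;
    }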

arch/s390/kernel/compat_signal.c (+2 -2)
···
                         break;
                 }
         }
-        return err;
+        return err ? -EFAULT : 0;
 }

 int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
···
                         break;
                 }
         }
-        return err;
+        return err ? -EFAULT : 0;
 }

 static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
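
The visible change is only in the return statements: whatever the individual user-access helpers accumulated in err is collapsed to 0 or -EFAULT, since callers expect a plain errno rather than, say, a positive leftover-byte count. A toy model of that normalization (the helper below is hypothetical and only mimics the copy_to_user()-style convention of returning the number of bytes not copied):

    #include <errno.h>
    #include <stdio.h>

    /* hypothetical stand-in: reports bytes NOT copied, like copy_to_user() */
    static unsigned long fake_copy(int fail)
    {
            return fail ? 4 : 0;
    }

    static int put_siginfo(int fail)
    {
            int err = 0;

            err |= fake_copy(fail);         /* may be a positive byte count */
            err |= fake_copy(0);
            return err ? -EFAULT : 0;       /* callers expect 0 or -EFAULT */
    }

    int main(void)
    {
            printf("ok: %d, fault: %d\n", put_siginfo(0), put_siginfo(1));
            return 0;
    }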

arch/s390/kernel/debug.c (+1 -1)
···
 debug_finish_entry(debug_info_t * id, debug_entry_t* active, int level,
                    int exception)
 {
-        active->id.stck = get_tod_clock();
+        active->id.stck = get_tod_clock_fast();
         active->id.fields.cpuid = smp_processor_id();
         active->caller = __builtin_return_address(0);
         active->id.fields.exception = exception;

arch/s390/kvm/interrupt.c (+3 -3)
···
         }

         if ((!rc) && (vcpu->arch.sie_block->ckc <
-                get_tod_clock() + vcpu->arch.sie_block->epoch)) {
+                get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) {
                 if ((!psw_extint_disabled(vcpu)) &&
                     (vcpu->arch.sie_block->gcr[0] & 0x800ul))
                         rc = 1;
···
                 goto no_timer;
         }

-        now = get_tod_clock() + vcpu->arch.sie_block->epoch;
+        now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
         if (vcpu->arch.sie_block->ckc < now) {
                 __unset_cpu_idle(vcpu);
                 return 0;
···
         }

         if ((vcpu->arch.sie_block->ckc <
-                get_tod_clock() + vcpu->arch.sie_block->epoch))
+                get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
                 __try_deliver_ckc_interrupt(vcpu);

         if (atomic_read(&fi->active)) {

arch/s390/lib/delay.c (+7 -7)
···
         do {
                 set_clock_comparator(end);
                 vtime_stop_cpu();
-        } while (get_tod_clock() < end);
+        } while (get_tod_clock_fast() < end);
         lockdep_on();
         __ctl_load(cr0, 0, 0);
         __ctl_load(cr6, 6, 6);
···
 {
         u64 clock_saved, end;

-        end = get_tod_clock() + (usecs << 12);
+        end = get_tod_clock_fast() + (usecs << 12);
         do {
                 clock_saved = 0;
                 if (end < S390_lowcore.clock_comparator) {
···
                 vtime_stop_cpu();
                 if (clock_saved)
                         local_tick_enable(clock_saved);
-        } while (get_tod_clock() < end);
+        } while (get_tod_clock_fast() < end);
 }

 /*
···
 {
         u64 end;

-        end = get_tod_clock() + (usecs << 12);
-        while (get_tod_clock() < end)
+        end = get_tod_clock_fast() + (usecs << 12);
+        while (get_tod_clock_fast() < end)
                 cpu_relax();
 }

···
         nsecs <<= 9;
         do_div(nsecs, 125);
-        end = get_tod_clock() + nsecs;
+        end = get_tod_clock_fast() + nsecs;
         if (nsecs & ~0xfffUL)
                 __udelay(nsecs >> 12);
-        while (get_tod_clock() < end)
+        while (get_tod_clock_fast() < end)
                 barrier();
 }
 EXPORT_SYMBOL(__ndelay);
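
The usecs << 12 and nsecs << 9; do_div(nsecs, 125) arithmetic above converts to TOD clock units: bit 51 of the s390 TOD clock ticks once per microsecond, so one microsecond corresponds to 2^12 = 4096 units, and nanoseconds scale by 4096/1000 = 512/125. A quick host-side check of that arithmetic:

    /* Verify the microsecond/nanosecond to TOD-clock-unit conversions used above. */
    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned long long usecs = 150;
            unsigned long long nsecs = 2500;
            unsigned long long tod_per_usec = 1ULL << 12;   /* 4096 units/us */

            /* __udelay: usecs << 12 == usecs * 4096 */
            assert((usecs << 12) == usecs * tod_per_usec);

            /* __ndelay: (nsecs << 9) / 125 == nsecs * 4096 / 1000 */
            assert(((nsecs << 9) / 125) == nsecs * tod_per_usec / 1000);

            printf("150 us = %llu TOD units, 2500 ns = %llu TOD units\n",
                   usecs << 12, (nsecs << 9) / 125);
            return 0;
    }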

drivers/s390/block/dasd_eckd.c (+71 -27)
···
         int intensity = 0;
         int r0_perm;
         int nr_tracks;
+        int use_prefix;

         startdev = dasd_alias_get_start_dev(base);
         if (!startdev)
···
                 intensity = fdata->intensity;
         }

+        use_prefix = base_priv->features.feature[8] & 0x01;
+
         switch (intensity) {
         case 0x00:        /* Normal format */
         case 0x08:        /* Normal format, use cdl. */
                 cplength = 2 + (rpt*nr_tracks);
-                datasize = sizeof(struct PFX_eckd_data) +
-                        sizeof(struct LO_eckd_data) +
-                        rpt * nr_tracks * sizeof(struct eckd_count);
+                if (use_prefix)
+                        datasize = sizeof(struct PFX_eckd_data) +
+                                sizeof(struct LO_eckd_data) +
+                                rpt * nr_tracks * sizeof(struct eckd_count);
+                else
+                        datasize = sizeof(struct DE_eckd_data) +
+                                sizeof(struct LO_eckd_data) +
+                                rpt * nr_tracks * sizeof(struct eckd_count);
                 break;
         case 0x01:        /* Write record zero and format track. */
         case 0x09:        /* Write record zero and format track, use cdl. */
                 cplength = 2 + rpt * nr_tracks;
-                datasize = sizeof(struct PFX_eckd_data) +
-                        sizeof(struct LO_eckd_data) +
-                        sizeof(struct eckd_count) +
-                        rpt * nr_tracks * sizeof(struct eckd_count);
+                if (use_prefix)
+                        datasize = sizeof(struct PFX_eckd_data) +
+                                sizeof(struct LO_eckd_data) +
+                                sizeof(struct eckd_count) +
+                                rpt * nr_tracks * sizeof(struct eckd_count);
+                else
+                        datasize = sizeof(struct DE_eckd_data) +
+                                sizeof(struct LO_eckd_data) +
+                                sizeof(struct eckd_count) +
+                                rpt * nr_tracks * sizeof(struct eckd_count);
                 break;
         case 0x04:        /* Invalidate track. */
         case 0x0c:        /* Invalidate track, use cdl. */
                 cplength = 3;
-                datasize = sizeof(struct PFX_eckd_data) +
-                        sizeof(struct LO_eckd_data) +
-                        sizeof(struct eckd_count);
+                if (use_prefix)
+                        datasize = sizeof(struct PFX_eckd_data) +
+                                sizeof(struct LO_eckd_data) +
+                                sizeof(struct eckd_count);
+                else
+                        datasize = sizeof(struct DE_eckd_data) +
+                                sizeof(struct LO_eckd_data) +
+                                sizeof(struct eckd_count);
                 break;
         default:
                 dev_warn(&startdev->cdev->dev,
···
         switch (intensity & ~0x08) {
         case 0x00: /* Normal format. */
-                prefix(ccw++, (struct PFX_eckd_data *) data,
-                       fdata->start_unit, fdata->stop_unit,
-                       DASD_ECKD_CCW_WRITE_CKD, base, startdev);
-                /* grant subsystem permission to format R0 */
-                if (r0_perm)
-                        ((struct PFX_eckd_data *)data)
-                                ->define_extent.ga_extended |= 0x04;
-                data += sizeof(struct PFX_eckd_data);
+                if (use_prefix) {
+                        prefix(ccw++, (struct PFX_eckd_data *) data,
+                               fdata->start_unit, fdata->stop_unit,
+                               DASD_ECKD_CCW_WRITE_CKD, base, startdev);
+                        /* grant subsystem permission to format R0 */
+                        if (r0_perm)
+                                ((struct PFX_eckd_data *)data)
+                                        ->define_extent.ga_extended |= 0x04;
+                        data += sizeof(struct PFX_eckd_data);
+                } else {
+                        define_extent(ccw++, (struct DE_eckd_data *) data,
+                                      fdata->start_unit, fdata->stop_unit,
+                                      DASD_ECKD_CCW_WRITE_CKD, startdev);
+                        /* grant subsystem permission to format R0 */
+                        if (r0_perm)
+                                ((struct DE_eckd_data *) data)
+                                        ->ga_extended |= 0x04;
+                        data += sizeof(struct DE_eckd_data);
+                }
                 ccw[-1].flags |= CCW_FLAG_CC;
                 locate_record(ccw++, (struct LO_eckd_data *) data,
                               fdata->start_unit, 0, rpt*nr_tracks,
···
                 data += sizeof(struct LO_eckd_data);
                 break;
         case 0x01: /* Write record zero + format track. */
-                prefix(ccw++, (struct PFX_eckd_data *) data,
-                       fdata->start_unit, fdata->stop_unit,
-                       DASD_ECKD_CCW_WRITE_RECORD_ZERO,
-                       base, startdev);
-                data += sizeof(struct PFX_eckd_data);
+                if (use_prefix) {
+                        prefix(ccw++, (struct PFX_eckd_data *) data,
+                               fdata->start_unit, fdata->stop_unit,
+                               DASD_ECKD_CCW_WRITE_RECORD_ZERO,
+                               base, startdev);
+                        data += sizeof(struct PFX_eckd_data);
+                } else {
+                        define_extent(ccw++, (struct DE_eckd_data *) data,
+                                      fdata->start_unit, fdata->stop_unit,
+                                      DASD_ECKD_CCW_WRITE_RECORD_ZERO, startdev);
+                        data += sizeof(struct DE_eckd_data);
+                }
                 ccw[-1].flags |= CCW_FLAG_CC;
                 locate_record(ccw++, (struct LO_eckd_data *) data,
                               fdata->start_unit, 0, rpt * nr_tracks + 1,
···
                 data += sizeof(struct LO_eckd_data);
                 break;
         case 0x04: /* Invalidate track. */
-                prefix(ccw++, (struct PFX_eckd_data *) data,
-                       fdata->start_unit, fdata->stop_unit,
-                       DASD_ECKD_CCW_WRITE_CKD, base, startdev);
-                data += sizeof(struct PFX_eckd_data);
+                if (use_prefix) {
+                        prefix(ccw++, (struct PFX_eckd_data *) data,
+                               fdata->start_unit, fdata->stop_unit,
+                               DASD_ECKD_CCW_WRITE_CKD, base, startdev);
+                        data += sizeof(struct PFX_eckd_data);
+                } else {
+                        define_extent(ccw++, (struct DE_eckd_data *) data,
+                                      fdata->start_unit, fdata->stop_unit,
+                                      DASD_ECKD_CCW_WRITE_CKD, startdev);
+                        data += sizeof(struct DE_eckd_data);
+                }
                 ccw[-1].flags |= CCW_FLAG_CC;
                 locate_record(ccw++, (struct LO_eckd_data *) data,
                               fdata->start_unit, 0, 1,
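
The pattern repeated through all three format variants above is the same: when the storage server does not advertise the prefix command (feature byte 8, bit 0x01), the channel program falls back to the older Define Extent chain instead of failing on such devices. A stripped-down sketch of the selection, with hypothetical stubs standing in for the real prefix()/define_extent() CCW builders:

    #include <stdio.h>

    /* hypothetical stand-in for the feature data read from the storage server */
    struct fake_features { unsigned char feature[16]; };

    static void build_prefix_chain(void)        { puts("PFX + Locate Record + count data"); }
    static void build_define_extent_chain(void) { puts("DE + Locate Record + count data"); }

    int main(void)
    {
            struct fake_features f = { .feature = { 0 } }; /* older device: bit not set */
            int use_prefix = f.feature[8] & 0x01;          /* same bit test as the patch */

            if (use_prefix)
                    build_prefix_chain();
            else
                    build_define_extent_chain();
            return 0;
    }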

drivers/s390/char/sclp.c (+2 -2)
···
         timeout = 0;
         if (timer_pending(&sclp_request_timer)) {
                 /* Get timeout TOD value */
-                timeout = get_tod_clock() +
+                timeout = get_tod_clock_fast() +
                           sclp_tod_from_jiffies(sclp_request_timer.expires -
                                                 jiffies);
         }
···
         while (sclp_running_state != sclp_running_state_idle) {
                 /* Check for expired request timer */
                 if (timer_pending(&sclp_request_timer) &&
-                    get_tod_clock() > timeout &&
+                    get_tod_clock_fast() > timeout &&
                     del_timer(&sclp_request_timer))
                         sclp_request_timer.function(sclp_request_timer.data);
                 cpu_relax();

drivers/s390/char/vmlogrdr.c (+1 -1)
···
         int ret;

         dev_num = iminor(inode);
-        if (dev_num > MAXMINOR)
+        if (dev_num >= MAXMINOR)
                 return -ENODEV;
         logptr = &sys_ser[dev_num];

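
The one-character change closes an off-by-one: with the sys_ser[] array sized by MAXMINOR, valid indices run from 0 to MAXMINOR - 1, so a minor number equal to MAXMINOR has to be rejected as well. A tiny demonstration of the two bounds checks (the value below is a hypothetical stand-in; the real MAXMINOR is defined in vmlogrdr.c):

    #include <stdio.h>

    #define FAKE_MAXMINOR 3  /* hypothetical stand-in for the driver's MAXMINOR */

    int main(void)
    {
            int dev_num;

            for (dev_num = 0; dev_num <= FAKE_MAXMINOR; dev_num++)
                    printf("dev_num=%d  '>' rejects: %-3s  '>=' rejects: %s\n",
                           dev_num,
                           dev_num > FAKE_MAXMINOR ? "yes" : "no",
                           dev_num >= FAKE_MAXMINOR ? "yes" : "no");
            return 0;
    }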

drivers/s390/cio/cio.c (+2 -2)
···
                 atomic_inc(&chpid_reset_count);
         }
         /* Wait for machine check for all channel paths. */
-        timeout = get_tod_clock() + (RCHP_TIMEOUT << 12);
+        timeout = get_tod_clock_fast() + (RCHP_TIMEOUT << 12);
         while (atomic_read(&chpid_reset_count) != 0) {
-                if (get_tod_clock() > timeout)
+                if (get_tod_clock_fast() > timeout)
                         break;
                 cpu_relax();
         }

drivers/s390/cio/qdio_main.c (+5 -5)
···
                 retries++;

                 if (!start_time) {
-                        start_time = get_tod_clock();
+                        start_time = get_tod_clock_fast();
                         goto again;
                 }
-                if ((get_tod_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
+                if (get_tod_clock_fast() - start_time < QDIO_BUSY_BIT_PATIENCE)
                         goto again;
         }
         if (retries) {
···
         int count, stop;
         unsigned char state = 0;

-        q->timestamp = get_tod_clock();
+        q->timestamp = get_tod_clock_fast();

         /*
          * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
···
          * At this point we know, that inbound first_to_check
          * has (probably) not moved (see qdio_inbound_processing).
          */
-        if (get_tod_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
+        if (get_tod_clock_fast() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
                 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
                               q->first_to_check);
                 return 1;
···
         int count, stop;
         unsigned char state = 0;

-        q->timestamp = get_tod_clock();
+        q->timestamp = get_tod_clock_fast();

         if (need_siga_sync(q))
                 if (((queue_type(q) != QDIO_IQDIO_QFMT) &&