Merge branch 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6

* 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6:
[S390] Use strim instead of strstrip to avoid false warnings.
[S390] qdio: add counter for input queue full condition
[S390] qdio: remove superfluous log entries and WARN_ONs.
[S390] ptrace: don't abuse PT_PTRACED
[S390] cio: fix channel path vary
[S390] drivers: Correct size given to memset
[S390] tape: Add pr_fmt() macro to all tape source files
[S390] rename NT_PRXSTATUS to NT_S390_HIGH_GPRS
[S390] tty: PTR_ERR return of wrong pointer in fs3270_open()
[S390] s390: PTR_ERR return of wrong pointer in fallback_init_cip()
[S390] dasd: PTR_ERR return of wrong pointer in dasd_alias.c
[S390] dasd: move dasd-diag kmsg to dasd
[S390] cio: fix drvdata usage for the console subchannel
[S390] wire up sys_recvmmsg

+95 -62
+1 -1
arch/s390/crypto/aes_s390.c
··· 174 if (IS_ERR(sctx->fallback.cip)) { 175 pr_err("Allocating AES fallback algorithm %s failed\n", 176 name); 177 - return PTR_ERR(sctx->fallback.blk); 178 } 179 180 return 0;
··· 174 if (IS_ERR(sctx->fallback.cip)) { 175 pr_err("Allocating AES fallback algorithm %s failed\n", 176 name); 177 + return PTR_ERR(sctx->fallback.cip); 178 } 179 180 return 0;
+2 -2
arch/s390/hypfs/hypfs_diag.c
··· 164 LPAR_NAME_LEN); 165 EBCASC(name, LPAR_NAME_LEN); 166 name[LPAR_NAME_LEN] = 0; 167 - strstrip(name); 168 } 169 170 struct cpu_info { ··· 523 memcpy(name, diag224_cpu_names + ((index + 1) * CPU_NAME_LEN), 524 CPU_NAME_LEN); 525 name[CPU_NAME_LEN] = 0; 526 - strstrip(name); 527 return 0; 528 } 529
··· 164 LPAR_NAME_LEN); 165 EBCASC(name, LPAR_NAME_LEN); 166 name[LPAR_NAME_LEN] = 0; 167 + strim(name); 168 } 169 170 struct cpu_info { ··· 523 memcpy(name, diag224_cpu_names + ((index + 1) * CPU_NAME_LEN), 524 CPU_NAME_LEN); 525 name[CPU_NAME_LEN] = 0; 526 + strim(name); 527 return 0; 528 } 529
+1 -1
arch/s390/hypfs/hypfs_vm.c
··· 124 /* guest dir */ 125 memcpy(guest_name, data->guest_name, NAME_LEN); 126 EBCASC(guest_name, NAME_LEN); 127 - strstrip(guest_name); 128 guest_dir = hypfs_mkdir(sb, systems_dir, guest_name); 129 if (IS_ERR(guest_dir)) 130 return PTR_ERR(guest_dir);
··· 124 /* guest dir */ 125 memcpy(guest_name, data->guest_name, NAME_LEN); 126 EBCASC(guest_name, NAME_LEN); 127 + strim(guest_name); 128 guest_dir = hypfs_mkdir(sb, systems_dir, guest_name); 129 if (IS_ERR(guest_dir)) 130 return PTR_ERR(guest_dir);
+2 -1
arch/s390/include/asm/unistd.h
··· 269 #define __NR_pwritev 329 270 #define __NR_rt_tgsigqueueinfo 330 271 #define __NR_perf_event_open 331 272 - #define NR_syscalls 332 273 274 /* 275 * There are some system calls that are not present on 64 bit, some
··· 269 #define __NR_pwritev 329 270 #define __NR_rt_tgsigqueueinfo 330 271 #define __NR_perf_event_open 331 272 + #define __NR_recvmmsg 332 273 + #define NR_syscalls 333 274 275 /* 276 * There are some system calls that are not present on 64 bit, some
+9
arch/s390/kernel/compat_wrapper.S
··· 1853 llgtr %r3,%r3 # compat_uptr_t * 1854 llgtr %r4,%r4 # compat_uptr_t * 1855 jg sys32_execve # branch to system call
··· 1853 llgtr %r3,%r3 # compat_uptr_t * 1854 llgtr %r4,%r4 # compat_uptr_t * 1855 jg sys32_execve # branch to system call 1856 + 1857 + .globl compat_sys_recvmmsg_wrapper 1858 + compat_sys_recvmmsg_wrapper: 1859 + lgfr %r2,%r2 # int 1860 + llgtr %r3,%r3 # struct compat_mmsghdr * 1861 + llgfr %r4,%r4 # unsigned int 1862 + llgfr %r5,%r5 # unsigned int 1863 + llgtr %r6,%r6 # struct compat_timespec * 1864 + jg compat_sys_recvmmsg
+3 -3
arch/s390/kernel/ipl.c
··· 221 const char *buf, size_t len) \ 222 { \ 223 strncpy(_value, buf, sizeof(_value) - 1); \ 224 - strstrip(_value); \ 225 return len; \ 226 } \ 227 static struct kobj_attribute sys_##_prefix##_##_name##_attr = \ ··· 472 return sprintf(page, "#unknown#\n"); 473 memcpy(loadparm, &sclp_ipl_info.loadparm, LOADPARM_LEN); 474 EBCASC(loadparm, LOADPARM_LEN); 475 - strstrip(loadparm); 476 return sprintf(page, "%s\n", loadparm); 477 } 478 ··· 776 memcpy(loadparm, ibp->ipl_info.ccw.load_parm, LOADPARM_LEN); 777 EBCASC(loadparm, LOADPARM_LEN); 778 loadparm[LOADPARM_LEN] = 0; 779 - strstrip(loadparm); 780 } 781 782 static ssize_t reipl_generic_loadparm_show(struct ipl_parameter_block *ipb,
··· 221 const char *buf, size_t len) \ 222 { \ 223 strncpy(_value, buf, sizeof(_value) - 1); \ 224 + strim(_value); \ 225 return len; \ 226 } \ 227 static struct kobj_attribute sys_##_prefix##_##_name##_attr = \ ··· 472 return sprintf(page, "#unknown#\n"); 473 memcpy(loadparm, &sclp_ipl_info.loadparm, LOADPARM_LEN); 474 EBCASC(loadparm, LOADPARM_LEN); 475 + strim(loadparm); 476 return sprintf(page, "%s\n", loadparm); 477 } 478 ··· 776 memcpy(loadparm, ibp->ipl_info.ccw.load_parm, LOADPARM_LEN); 777 EBCASC(loadparm, LOADPARM_LEN); 778 loadparm[LOADPARM_LEN] = 0; 779 + strim(loadparm); 780 } 781 782 static ssize_t reipl_generic_loadparm_show(struct ipl_parameter_block *ipb,
+1 -1
arch/s390/kernel/ptrace.c
··· 959 .set = s390_fpregs_set, 960 }, 961 [REGSET_GENERAL_EXTENDED] = { 962 - .core_note_type = NT_PRXSTATUS, 963 .n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t), 964 .size = sizeof(compat_long_t), 965 .align = sizeof(compat_long_t),
··· 959 .set = s390_fpregs_set, 960 }, 961 [REGSET_GENERAL_EXTENDED] = { 962 + .core_note_type = NT_S390_HIGH_GPRS, 963 .n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t), 964 .size = sizeof(compat_long_t), 965 .align = sizeof(compat_long_t),
+1
arch/s390/kernel/syscalls.S
··· 340 SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev_wrapper) 341 SYSCALL(sys_rt_tgsigqueueinfo,sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo_wrapper) /* 330 */ 342 SYSCALL(sys_perf_event_open,sys_perf_event_open,sys_perf_event_open_wrapper)
··· 340 SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev_wrapper) 341 SYSCALL(sys_rt_tgsigqueueinfo,sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo_wrapper) /* 330 */ 342 SYSCALL(sys_perf_event_open,sys_perf_event_open,sys_perf_event_open_wrapper) 343 + SYSCALL(sys_recvmmsg,sys_recvmmsg,compat_sys_recvmmsg_wrapper)
+3 -3
arch/s390/kernel/traps.c
··· 18 #include <linux/kernel.h> 19 #include <linux/string.h> 20 #include <linux/errno.h> 21 - #include <linux/ptrace.h> 22 #include <linux/timer.h> 23 #include <linux/mm.h> 24 #include <linux/smp.h> ··· 382 SIGTRAP) == NOTIFY_STOP){ 383 return; 384 } 385 - if ((current->ptrace & PT_PTRACED) != 0) 386 force_sig(SIGTRAP, current); 387 } 388 ··· 483 if (get_user(*((__u16 *) opcode), (__u16 __user *) location)) 484 return; 485 if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) { 486 - if (current->ptrace & PT_PTRACED) 487 force_sig(SIGTRAP, current); 488 else 489 signal = SIGILL;
··· 18 #include <linux/kernel.h> 19 #include <linux/string.h> 20 #include <linux/errno.h> 21 + #include <linux/tracehook.h> 22 #include <linux/timer.h> 23 #include <linux/mm.h> 24 #include <linux/smp.h> ··· 382 SIGTRAP) == NOTIFY_STOP){ 383 return; 384 } 385 + if (tracehook_consider_fatal_signal(current, SIGTRAP)) 386 force_sig(SIGTRAP, current); 387 } 388 ··· 483 if (get_user(*((__u16 *) opcode), (__u16 __user *) location)) 484 return; 485 if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) { 486 + if (tracehook_consider_fatal_signal(current, SIGTRAP)) 487 force_sig(SIGTRAP, current); 488 else 489 signal = SIGILL;
+1 -1
drivers/s390/block/dasd_alias.c
··· 218 spin_unlock_irqrestore(&aliastree.lock, flags); 219 newlcu = _allocate_lcu(uid); 220 if (IS_ERR(newlcu)) 221 - return PTR_ERR(lcu); 222 spin_lock_irqsave(&aliastree.lock, flags); 223 lcu = _find_lcu(server, uid); 224 if (!lcu) {
··· 218 spin_unlock_irqrestore(&aliastree.lock, flags); 219 newlcu = _allocate_lcu(uid); 220 if (IS_ERR(newlcu)) 221 + return PTR_ERR(newlcu); 222 spin_lock_irqsave(&aliastree.lock, flags); 223 lcu = _find_lcu(server, uid); 224 if (!lcu) {
+22 -20
drivers/s390/block/dasd_diag.c
··· 8 * 9 */ 10 11 - #define KMSG_COMPONENT "dasd-diag" 12 13 #include <linux/stddef.h> 14 #include <linux/kernel.h> ··· 146 rc = mdsk_init_io(device, device->block->bp_block, 0, NULL); 147 if (rc == 4) { 148 if (!(device->features & DASD_FEATURE_READONLY)) { 149 - dev_warn(&device->cdev->dev, 150 - "The access mode of a DIAG device changed" 151 - " to read-only"); 152 device->features |= DASD_FEATURE_READONLY; 153 } 154 rc = 0; 155 } 156 if (rc) 157 - dev_warn(&device->cdev->dev, "DIAG ERP failed with " 158 - "rc=%d\n", rc); 159 } 160 161 /* Start a given request at the device. Return zero on success, non-zero ··· 371 private->pt_block = 2; 372 break; 373 default: 374 - dev_warn(&device->cdev->dev, "Device type %d is not supported " 375 - "in DIAG mode\n", private->rdc_data.vdev_class); 376 rc = -EOPNOTSUPP; 377 goto out; 378 } ··· 414 private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT; 415 rc = dia250(&private->iob, RW_BIO); 416 if (rc == 3) { 417 - dev_warn(&device->cdev->dev, 418 - "A 64-bit DIAG call failed\n"); 419 rc = -EOPNOTSUPP; 420 goto out_label; 421 } ··· 424 break; 425 } 426 if (bsize > PAGE_SIZE) { 427 - dev_warn(&device->cdev->dev, "Accessing the DASD failed because" 428 - " of an incorrect format (rc=%d)\n", rc); 429 rc = -EIO; 430 goto out_label; 431 } ··· 444 block->s2b_shift++; 445 rc = mdsk_init_io(device, block->bp_block, 0, NULL); 446 if (rc && (rc != 4)) { 447 - dev_warn(&device->cdev->dev, "DIAG initialization " 448 - "failed with rc=%d\n", rc); 449 rc = -EIO; 450 } else { 451 if (rc == 4) 452 device->features |= DASD_FEATURE_READONLY; 453 - dev_info(&device->cdev->dev, 454 - "New DASD with %ld byte/block, total size %ld KB%s\n", 455 - (unsigned long) block->bp_block, 456 - (unsigned long) (block->blocks << 457 - block->s2b_shift) >> 1, 458 - (rc == 4) ? ", read-only device" : ""); 459 rc = 0; 460 } 461 out_label:
··· 8 * 9 */ 10 11 + #define KMSG_COMPONENT "dasd" 12 13 #include <linux/stddef.h> 14 #include <linux/kernel.h> ··· 146 rc = mdsk_init_io(device, device->block->bp_block, 0, NULL); 147 if (rc == 4) { 148 if (!(device->features & DASD_FEATURE_READONLY)) { 149 + pr_warning("%s: The access mode of a DIAG device " 150 + "changed to read-only\n", 151 + dev_name(&device->cdev->dev)); 152 device->features |= DASD_FEATURE_READONLY; 153 } 154 rc = 0; 155 } 156 if (rc) 157 + pr_warning("%s: DIAG ERP failed with " 158 + "rc=%d\n", dev_name(&device->cdev->dev), rc); 159 } 160 161 /* Start a given request at the device. Return zero on success, non-zero ··· 371 private->pt_block = 2; 372 break; 373 default: 374 + pr_warning("%s: Device type %d is not supported " 375 + "in DIAG mode\n", dev_name(&device->cdev->dev), 376 + private->rdc_data.vdev_class); 377 rc = -EOPNOTSUPP; 378 goto out; 379 } ··· 413 private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT; 414 rc = dia250(&private->iob, RW_BIO); 415 if (rc == 3) { 416 + pr_warning("%s: A 64-bit DIAG call failed\n", 417 + dev_name(&device->cdev->dev)); 418 rc = -EOPNOTSUPP; 419 goto out_label; 420 } ··· 423 break; 424 } 425 if (bsize > PAGE_SIZE) { 426 + pr_warning("%s: Accessing the DASD failed because of an " 427 + "incorrect format (rc=%d)\n", 428 + dev_name(&device->cdev->dev), rc); 429 rc = -EIO; 430 goto out_label; 431 } ··· 442 block->s2b_shift++; 443 rc = mdsk_init_io(device, block->bp_block, 0, NULL); 444 if (rc && (rc != 4)) { 445 + pr_warning("%s: DIAG initialization failed with rc=%d\n", 446 + dev_name(&device->cdev->dev), rc); 447 rc = -EIO; 448 } else { 449 if (rc == 4) 450 device->features |= DASD_FEATURE_READONLY; 451 + pr_info("%s: New DASD with %ld byte/block, total size %ld " 452 + "KB%s\n", dev_name(&device->cdev->dev), 453 + (unsigned long) block->bp_block, 454 + (unsigned long) (block->blocks << 455 + block->s2b_shift) >> 1, 456 + (rc == 4) ? ", read-only device" : ""); 457 rc = 0; 458 } 459 out_label:
+1 -1
drivers/s390/char/fs3270.c
··· 467 if (IS_ERR(ib)) { 468 raw3270_put_view(&fp->view); 469 raw3270_del_view(&fp->view); 470 - rc = PTR_ERR(fp); 471 goto out; 472 } 473 fp->rdbuf = ib;
··· 467 if (IS_ERR(ib)) { 468 raw3270_put_view(&fp->view); 469 raw3270_del_view(&fp->view); 470 + rc = PTR_ERR(ib); 471 goto out; 472 } 473 fp->rdbuf = ib;
+1
drivers/s390/char/tape_34xx.c
··· 9 */ 10 11 #define KMSG_COMPONENT "tape_34xx" 12 13 #include <linux/module.h> 14 #include <linux/init.h>
··· 9 */ 10 11 #define KMSG_COMPONENT "tape_34xx" 12 + #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 13 14 #include <linux/module.h> 15 #include <linux/init.h>
+2 -1
drivers/s390/char/tape_3590.c
··· 9 */ 10 11 #define KMSG_COMPONENT "tape_3590" 12 13 #include <linux/module.h> 14 #include <linux/init.h> ··· 137 out->type_on_tape = TAPE390_KEKL_TYPE_LABEL; 138 memcpy(out->label, in->label, sizeof(in->label)); 139 EBCASC(out->label, sizeof(in->label)); 140 - strstrip(out->label); 141 } 142 143 static void int_to_ext_kekl_pair(struct tape3592_kekl_pair *in,
··· 9 */ 10 11 #define KMSG_COMPONENT "tape_3590" 12 + #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 13 14 #include <linux/module.h> 15 #include <linux/init.h> ··· 136 out->type_on_tape = TAPE390_KEKL_TYPE_LABEL; 137 memcpy(out->label, in->label, sizeof(in->label)); 138 EBCASC(out->label, sizeof(in->label)); 139 + strim(out->label); 140 } 141 142 static void int_to_ext_kekl_pair(struct tape3592_kekl_pair *in,
+1
drivers/s390/char/tape_block.c
··· 11 */ 12 13 #define KMSG_COMPONENT "tape" 14 15 #include <linux/fs.h> 16 #include <linux/module.h>
··· 11 */ 12 13 #define KMSG_COMPONENT "tape" 14 + #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 15 16 #include <linux/fs.h> 17 #include <linux/module.h>
+3
drivers/s390/char/tape_char.c
··· 10 * Martin Schwidefsky <schwidefsky@de.ibm.com> 11 */ 12 13 #include <linux/module.h> 14 #include <linux/types.h> 15 #include <linux/proc_fs.h>
··· 10 * Martin Schwidefsky <schwidefsky@de.ibm.com> 11 */ 12 13 + #define KMSG_COMPONENT "tape" 14 + #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 15 + 16 #include <linux/module.h> 17 #include <linux/types.h> 18 #include <linux/proc_fs.h>
+4
drivers/s390/char/tape_class.c
··· 7 * Author: Stefan Bader <shbader@de.ibm.com> 8 * Based on simple class device code by Greg K-H 9 */ 10 #include "tape_class.h" 11 12 MODULE_AUTHOR("Stefan Bader <shbader@de.ibm.com>");
··· 7 * Author: Stefan Bader <shbader@de.ibm.com> 8 * Based on simple class device code by Greg K-H 9 */ 10 + 11 + #define KMSG_COMPONENT "tape" 12 + #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 13 + 14 #include "tape_class.h" 15 16 MODULE_AUTHOR("Stefan Bader <shbader@de.ibm.com>");
+2
drivers/s390/char/tape_core.c
··· 12 */ 13 14 #define KMSG_COMPONENT "tape" 15 #include <linux/module.h> 16 #include <linux/init.h> // for kernel parameters 17 #include <linux/kmod.h> // for requesting modules
··· 12 */ 13 14 #define KMSG_COMPONENT "tape" 15 + #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 16 + 17 #include <linux/module.h> 18 #include <linux/init.h> // for kernel parameters 19 #include <linux/kmod.h> // for requesting modules
+3
drivers/s390/char/tape_proc.c
··· 11 * PROCFS Functions 12 */ 13 14 #include <linux/module.h> 15 #include <linux/vmalloc.h> 16 #include <linux/seq_file.h>
··· 11 * PROCFS Functions 12 */ 13 14 + #define KMSG_COMPONENT "tape" 15 + #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 16 + 17 #include <linux/module.h> 18 #include <linux/vmalloc.h> 19 #include <linux/seq_file.h>
+3
drivers/s390/char/tape_std.c
··· 11 * Stefan Bader <shbader@de.ibm.com> 12 */ 13 14 #include <linux/stddef.h> 15 #include <linux/kernel.h> 16 #include <linux/bio.h>
··· 11 * Stefan Bader <shbader@de.ibm.com> 12 */ 13 14 + #define KMSG_COMPONENT "tape" 15 + #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 16 + 17 #include <linux/stddef.h> 18 #include <linux/kernel.h> 19 #include <linux/bio.h>
-3
drivers/s390/cio/ccwreq.c
··· 49 */ 50 static void ccwreq_stop(struct ccw_device *cdev, int rc) 51 { 52 - struct subchannel *sch = to_subchannel(cdev->dev.parent); 53 struct ccw_request *req = &cdev->private->req; 54 55 if (req->done) ··· 56 req->done = 1; 57 ccw_device_set_timeout(cdev, 0); 58 memset(&cdev->private->irb, 0, sizeof(struct irb)); 59 - sch->lpm = sch->schib.pmcw.pam; 60 if (rc && rc != -ENODEV && req->drc) 61 rc = req->drc; 62 req->callback(cdev, req->data, rc); ··· 78 continue; 79 } 80 /* Perform start function. */ 81 - sch->lpm = 0xff; 82 memset(&cdev->private->irb, 0, sizeof(struct irb)); 83 rc = cio_start(sch, cp, (u8) req->mask); 84 if (rc == 0) {
··· 49 */ 50 static void ccwreq_stop(struct ccw_device *cdev, int rc) 51 { 52 struct ccw_request *req = &cdev->private->req; 53 54 if (req->done) ··· 57 req->done = 1; 58 ccw_device_set_timeout(cdev, 0); 59 memset(&cdev->private->irb, 0, sizeof(struct irb)); 60 if (rc && rc != -ENODEV && req->drc) 61 rc = req->drc; 62 req->callback(cdev, req->data, rc); ··· 80 continue; 81 } 82 /* Perform start function. */ 83 memset(&cdev->private->irb, 0, sizeof(struct irb)); 84 rc = cio_start(sch, cp, (u8) req->mask); 85 if (rc == 0) {
+1
drivers/s390/cio/device.c
··· 1519 sch->driver = &io_subchannel_driver; 1520 /* Initialize the ccw_device structure. */ 1521 cdev->dev.parent= &sch->dev; 1522 io_subchannel_recog(cdev, sch); 1523 /* Now wait for the async. recognition to come to an end. */ 1524 spin_lock_irq(cdev->ccwlock);
··· 1519 sch->driver = &io_subchannel_driver; 1520 /* Initialize the ccw_device structure. */ 1521 cdev->dev.parent= &sch->dev; 1522 + sch_set_cdev(sch, cdev); 1523 io_subchannel_recog(cdev, sch); 1524 /* Now wait for the async. recognition to come to an end. */ 1525 spin_lock_irq(cdev->ccwlock);
+18 -11
drivers/s390/cio/device_pgid.c
··· 142 u8 fn; 143 144 /* Use next available path that is not already in correct state. */ 145 - req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & ~sch->vpm); 146 if (!req->lpm) 147 goto out_nopath; 148 /* Channel program setup. */ ··· 254 *p = first; 255 } 256 257 - static u8 pgid_to_vpm(struct ccw_device *cdev) 258 { 259 struct subchannel *sch = to_subchannel(cdev->dev.parent); 260 struct pgid *pgid; 261 int i; 262 int lpm; 263 - u8 vpm = 0; 264 265 - /* Set VPM bits for paths which are already in the target state. */ 266 for (i = 0; i < 8; i++) { 267 lpm = 0x80 >> i; 268 if ((cdev->private->pgid_valid_mask & lpm) == 0) ··· 282 if (pgid->inf.ps.state3 != SNID_STATE3_SINGLE_PATH) 283 continue; 284 } 285 - vpm |= lpm; 286 } 287 288 - return vpm; 289 } 290 291 static void pgid_fill(struct ccw_device *cdev, struct pgid *pgid) ··· 307 int mismatch = 0; 308 int reserved = 0; 309 int reset = 0; 310 311 if (rc) 312 goto out; ··· 317 else if (mismatch) 318 rc = -EOPNOTSUPP; 319 else { 320 - sch->vpm = pgid_to_vpm(cdev); 321 pgid_fill(cdev, pgid); 322 } 323 out: 324 CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x " 325 - "mism=%d rsvd=%d reset=%d\n", id->ssid, id->devno, rc, 326 - cdev->private->pgid_valid_mask, sch->vpm, mismatch, 327 - reserved, reset); 328 switch (rc) { 329 case 0: 330 /* Anything left to do? */ 331 - if (sch->vpm == sch->schib.pmcw.pam) { 332 verify_done(cdev, sch->vpm == 0 ? -EACCES : 0); 333 return; 334 } ··· 414 struct ccw_dev_id *devid = &cdev->private->dev_id; 415 416 sch->vpm = 0; 417 /* Initialize request data. */ 418 memset(req, 0, sizeof(*req)); 419 req->timeout = PGID_TIMEOUT; ··· 446 */ 447 void ccw_device_verify_start(struct ccw_device *cdev) 448 { 449 CIO_TRACE_EVENT(4, "vrfy"); 450 CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id)); 451 /* Initialize PGID data. */ 452 memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid)); 453 cdev->private->pgid_valid_mask = 0; 454 /* 455 * Initialize pathgroup and multipath state with target values. 456 * They may change in the course of path verification.
··· 142 u8 fn; 143 144 /* Use next available path that is not already in correct state. */ 145 + req->lpm = lpm_adjust(req->lpm, cdev->private->pgid_todo_mask); 146 if (!req->lpm) 147 goto out_nopath; 148 /* Channel program setup. */ ··· 254 *p = first; 255 } 256 257 + static u8 pgid_to_donepm(struct ccw_device *cdev) 258 { 259 struct subchannel *sch = to_subchannel(cdev->dev.parent); 260 struct pgid *pgid; 261 int i; 262 int lpm; 263 + u8 donepm = 0; 264 265 + /* Set bits for paths which are already in the target state. */ 266 for (i = 0; i < 8; i++) { 267 lpm = 0x80 >> i; 268 if ((cdev->private->pgid_valid_mask & lpm) == 0) ··· 282 if (pgid->inf.ps.state3 != SNID_STATE3_SINGLE_PATH) 283 continue; 284 } 285 + donepm |= lpm; 286 } 287 288 + return donepm; 289 } 290 291 static void pgid_fill(struct ccw_device *cdev, struct pgid *pgid) ··· 307 int mismatch = 0; 308 int reserved = 0; 309 int reset = 0; 310 + u8 donepm; 311 312 if (rc) 313 goto out; ··· 316 else if (mismatch) 317 rc = -EOPNOTSUPP; 318 else { 319 + donepm = pgid_to_donepm(cdev); 320 + sch->vpm = donepm & sch->opm; 321 + cdev->private->pgid_todo_mask &= ~donepm; 322 pgid_fill(cdev, pgid); 323 } 324 out: 325 CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x " 326 + "todo=%02x mism=%d rsvd=%d reset=%d\n", id->ssid, 327 + id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm, 328 + cdev->private->pgid_todo_mask, mismatch, reserved, reset); 329 switch (rc) { 330 case 0: 331 /* Anything left to do? */ 332 + if (cdev->private->pgid_todo_mask == 0) { 333 verify_done(cdev, sch->vpm == 0 ? -EACCES : 0); 334 return; 335 } ··· 411 struct ccw_dev_id *devid = &cdev->private->dev_id; 412 413 sch->vpm = 0; 414 + sch->lpm = sch->schib.pmcw.pam; 415 /* Initialize request data. */ 416 memset(req, 0, sizeof(*req)); 417 req->timeout = PGID_TIMEOUT; ··· 442 */ 443 void ccw_device_verify_start(struct ccw_device *cdev) 444 { 445 + struct subchannel *sch = to_subchannel(cdev->dev.parent); 446 + 447 CIO_TRACE_EVENT(4, "vrfy"); 448 CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id)); 449 /* Initialize PGID data. */ 450 memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid)); 451 cdev->private->pgid_valid_mask = 0; 452 + cdev->private->pgid_todo_mask = sch->schib.pmcw.pam; 453 /* 454 * Initialize pathgroup and multipath state with target values. 455 * They may change in the course of path verification.
+2 -2
drivers/s390/cio/fcx.c
··· 163 /* Add tcat to tccb. */ 164 tccb = tcw_get_tccb(tcw); 165 tcat = (struct tccb_tcat *) &tccb->tca[tca_size(tccb)]; 166 - memset(tcat, 0, sizeof(tcat)); 167 /* Calculate tcw input/output count and tcat transport count. */ 168 count = calc_dcw_count(tccb); 169 if (tcw->w && (tcw->flags & TCW_FLAGS_OUTPUT_TIDA)) ··· 269 */ 270 void tsb_init(struct tsb *tsb) 271 { 272 - memset(tsb, 0, sizeof(tsb)); 273 } 274 EXPORT_SYMBOL(tsb_init); 275
··· 163 /* Add tcat to tccb. */ 164 tccb = tcw_get_tccb(tcw); 165 tcat = (struct tccb_tcat *) &tccb->tca[tca_size(tccb)]; 166 + memset(tcat, 0, sizeof(*tcat)); 167 /* Calculate tcw input/output count and tcat transport count. */ 168 count = calc_dcw_count(tccb); 169 if (tcw->w && (tcw->flags & TCW_FLAGS_OUTPUT_TIDA)) ··· 269 */ 270 void tsb_init(struct tsb *tsb) 271 { 272 + memset(tsb, 0, sizeof(*tsb)); 273 } 274 EXPORT_SYMBOL(tsb_init); 275
+1
drivers/s390/cio/io_sch.h
··· 150 struct ccw_request req; /* internal I/O request */ 151 int iretry; 152 u8 pgid_valid_mask; /* mask of valid PGIDs */ 153 struct { 154 unsigned int fast:1; /* post with "channel end" */ 155 unsigned int repall:1; /* report every interrupt status */
··· 150 struct ccw_request req; /* internal I/O request */ 151 int iretry; 152 u8 pgid_valid_mask; /* mask of valid PGIDs */ 153 + u8 pgid_todo_mask; /* mask of PGIDs to be adjusted */ 154 struct { 155 unsigned int fast:1; /* post with "channel end" */ 156 unsigned int repall:1; /* report every interrupt status */
+2 -1
drivers/s390/cio/qdio_main.c
··· 486 case SLSB_P_INPUT_PRIMED: 487 inbound_primed(q, count); 488 q->first_to_check = add_buf(q->first_to_check, count); 489 - atomic_sub(count, &q->nr_buf_used); 490 break; 491 case SLSB_P_INPUT_ERROR: 492 announce_buffer_error(q, count);
··· 486 case SLSB_P_INPUT_PRIMED: 487 inbound_primed(q, count); 488 q->first_to_check = add_buf(q->first_to_check, count); 489 + if (atomic_sub(count, &q->nr_buf_used) == 0) 490 + qdio_perf_stat_inc(&perf_stats.inbound_queue_full); 491 break; 492 case SLSB_P_INPUT_ERROR: 493 announce_buffer_error(q, count);
+2
drivers/s390/cio/qdio_perf.c
··· 64 (long)atomic_long_read(&perf_stats.fast_requeue)); 65 seq_printf(m, "Number of outbound target full condition\t: %li\n", 66 (long)atomic_long_read(&perf_stats.outbound_target_full)); 67 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n", 68 (long)atomic_long_read(&perf_stats.debug_tl_out_timer)); 69 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
··· 64 (long)atomic_long_read(&perf_stats.fast_requeue)); 65 seq_printf(m, "Number of outbound target full condition\t: %li\n", 66 (long)atomic_long_read(&perf_stats.outbound_target_full)); 67 + seq_printf(m, "Number of inbound queue full condition\t\t: %li\n", 68 + (long)atomic_long_read(&perf_stats.inbound_queue_full)); 69 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n", 70 (long)atomic_long_read(&perf_stats.debug_tl_out_timer)); 71 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
+1
drivers/s390/cio/qdio_perf.h
··· 36 atomic_long_t outbound_handler; 37 atomic_long_t fast_requeue; 38 atomic_long_t outbound_target_full; 39 40 /* for debugging */ 41 atomic_long_t debug_tl_out_timer;
··· 36 atomic_long_t outbound_handler; 37 atomic_long_t fast_requeue; 38 atomic_long_t outbound_target_full; 39 + atomic_long_t inbound_queue_full; 40 41 /* for debugging */ 42 atomic_long_t debug_tl_out_timer;
+1 -9
drivers/s390/cio/qdio_setup.c
··· 48 if (!irq_ptr) 49 return; 50 51 - WARN_ON((unsigned long)&irq_ptr->qib & 0xff); 52 irq_ptr->qib.pfmt = qib_param_field_format; 53 if (qib_param_field) 54 memcpy(irq_ptr->qib.parm, qib_param_field, ··· 81 q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL); 82 if (!q) 83 return -ENOMEM; 84 - WARN_ON((unsigned long)q & 0xff); 85 86 q->slib = (struct slib *) __get_free_page(GFP_KERNEL); 87 if (!q->slib) { 88 kmem_cache_free(qdio_q_cache, q); 89 return -ENOMEM; 90 } 91 - WARN_ON((unsigned long)q->slib & 0x7ff); 92 irq_ptr_qs[i] = q; 93 } 94 return 0; ··· 128 /* fill in sbal */ 129 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) { 130 q->sbal[j] = *sbals_array++; 131 - WARN_ON((unsigned long)q->sbal[j] & 0xff); 132 } 133 134 /* fill in slib */ ··· 144 /* fill in sl */ 145 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) 146 q->sl->element[j].sbal = (unsigned long)q->sbal[j]; 147 - 148 - DBF_EVENT("sl-slsb-sbal"); 149 - DBF_HEX(q->sl, sizeof(void *)); 150 - DBF_HEX(&q->slsb, sizeof(void *)); 151 - DBF_HEX(q->sbal, sizeof(void *)); 152 } 153 154 static void setup_queues(struct qdio_irq *irq_ptr,
··· 48 if (!irq_ptr) 49 return; 50 51 irq_ptr->qib.pfmt = qib_param_field_format; 52 if (qib_param_field) 53 memcpy(irq_ptr->qib.parm, qib_param_field, ··· 82 q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL); 83 if (!q) 84 return -ENOMEM; 85 86 q->slib = (struct slib *) __get_free_page(GFP_KERNEL); 87 if (!q->slib) { 88 kmem_cache_free(qdio_q_cache, q); 89 return -ENOMEM; 90 } 91 irq_ptr_qs[i] = q; 92 } 93 return 0; ··· 131 /* fill in sbal */ 132 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) { 133 q->sbal[j] = *sbals_array++; 134 + BUG_ON((unsigned long)q->sbal[j] & 0xff); 135 } 136 137 /* fill in slib */ ··· 147 /* fill in sl */ 148 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) 149 q->sl->element[j].sbal = (unsigned long)q->sbal[j]; 150 } 151 152 static void setup_queues(struct qdio_irq *irq_ptr,
+1 -1
include/linux/elf.h
··· 361 #define NT_PPC_VSX 0x102 /* PowerPC VSX registers */ 362 #define NT_386_TLS 0x200 /* i386 TLS slots (struct user_desc) */ 363 #define NT_386_IOPERM 0x201 /* x86 io permission bitmap (1=deny) */ 364 - #define NT_PRXSTATUS 0x300 /* s390 upper register halves */ 365 366 367 /* Note header in a PT_NOTE section */
··· 361 #define NT_PPC_VSX 0x102 /* PowerPC VSX registers */ 362 #define NT_386_TLS 0x200 /* i386 TLS slots (struct user_desc) */ 363 #define NT_386_IOPERM 0x201 /* x86 io permission bitmap (1=deny) */ 364 + #define NT_S390_HIGH_GPRS 0x300 /* s390 upper register halves */ 365 366 367 /* Note header in a PT_NOTE section */