Merge branch 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6

* 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6:
[S390] monwriter: Serialization bug for multithreaded applications.
[S390] vmur: diag14 only works with buffers below 2GB
[S390] vmur: add "top of queue" sanity check for reader open
[S390] vmur: reject open on z/VM reader files with status HOLD
[S390] vmur: use DECLARE_COMPLETION_ONSTACK to keep lockdep happy
[S390] vmur: allocate single record buffers instead of one big data buffer
[S390] remove DEFAULT_MIGRATION_COST
[S390] qdio: make sure data structures are correctly aligned.
[S390] hypfs: implement show_options
[S390] cio: avoid memory leak on error in css_alloc_subchannel().

 7 files changed, 178 insertions(+), 118 deletions(-)
arch/s390/Kconfig | -4
···
           can be controlled through /sys/devices/system/cpu/cpu#.
           Say N if you want to disable CPU hotplug.
 
-config DEFAULT_MIGRATION_COST
-        int
-        default "1000000"
-
 config MATHEMU
         bool "IEEE FPU emulation"
         depends on MARCH_G5
arch/s390/hypfs/inode.c | +12
···
 #include <linux/parser.h>
 #include <linux/sysfs.h>
 #include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/mount.h>
 #include <asm/ebcdic.h>
 #include "hypfs.h"
···
         return 0;
 }
 
+static int hypfs_show_options(struct seq_file *s, struct vfsmount *mnt)
+{
+        struct hypfs_sb_info *hypfs_info = mnt->mnt_sb->s_fs_info;
+
+        seq_printf(s, ",uid=%u", hypfs_info->uid);
+        seq_printf(s, ",gid=%u", hypfs_info->gid);
+        return 0;
+}
+
 static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
 {
         struct inode *root_inode;
···
 static struct super_operations hypfs_s_ops = {
         .statfs = simple_statfs,
         .drop_inode = hypfs_drop_inode,
+        .show_options = hypfs_show_options,
 };
 
 static decl_subsys(s390, NULL, NULL);
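For context: with .show_options wired up, a hypfs entry in /proc/mounts reports the uid and gid it was mounted with. Illustrative output only; the mount point and option values depend on the system:

# mount -t s390_hypfs none /sys/hypervisor -o uid=100,gid=100
# grep s390_hypfs /proc/mounts
none /sys/hypervisor s390_hypfs rw,uid=100,gid=100 0 0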
drivers/s390/char/monwriter.c | +6
···
 #include <linux/miscdevice.h>
 #include <linux/ctype.h>
 #include <linux/poll.h>
+#include <linux/mutex.h>
 #include <asm/uaccess.h>
 #include <asm/ebcdic.h>
 #include <asm/io.h>
···
         size_t hdr_to_read;
         size_t data_to_read;
         struct mon_buf *current_buf;
+        struct mutex thread_mutex;
 };
 
 /*
···
                 return -ENOMEM;
         INIT_LIST_HEAD(&monpriv->list);
         monpriv->hdr_to_read = sizeof(monpriv->hdr);
+        mutex_init(&monpriv->thread_mutex);
         filp->private_data = monpriv;
         return nonseekable_open(inode, filp);
 }
···
         void *to;
         int rc;
 
+        mutex_lock(&monpriv->thread_mutex);
         for (written = 0; written < count; ) {
                 if (monpriv->hdr_to_read) {
                         len = min(count - written, monpriv->hdr_to_read);
···
                 }
                 monpriv->hdr_to_read = sizeof(monpriv->hdr);
         }
+        mutex_unlock(&monpriv->thread_mutex);
         return written;
 
 out_error:
         monpriv->data_to_read = 0;
         monpriv->hdr_to_read = sizeof(struct monwrite_hdr);
+        mutex_unlock(&monpriv->thread_mutex);
         return rc;
 }
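The bug this fixes is easiest to see outside the kernel: monwrite_write() keeps parser state (hdr_to_read/data_to_read) across calls, so two threads writing through one open file descriptor can interleave header and payload bytes. Below is a minimal userspace analogy of the fix, not the kernel code; all names and lengths are illustrative. Build with cc -pthread.

/*
 * Userspace analogy of the monwriter race: a write path whose parser
 * state survives across calls must be serialized per open descriptor,
 * which is what the per-open mutex above does.
 */
#include <pthread.h>
#include <stdio.h>

#define HDR_LEN  4      /* stand-in for sizeof(struct monwrite_hdr) */
#define DATA_LEN 4      /* payload length announced by the header */

struct mon_private {
        size_t hdr_to_read;             /* header bytes still expected */
        size_t data_to_read;            /* payload bytes still expected */
        pthread_mutex_t thread_mutex;   /* the fix */
};

static struct mon_private priv = {
        .hdr_to_read = HDR_LEN,
        .thread_mutex = PTHREAD_MUTEX_INITIALIZER,
};

/* Mirrors the monwrite_write() loop: consume header, then payload. */
static void mon_write(const char *buf, size_t count)
{
        pthread_mutex_lock(&priv.thread_mutex);
        while (count) {
                size_t len;

                if (priv.hdr_to_read) {
                        len = count < priv.hdr_to_read ? count : priv.hdr_to_read;
                        priv.hdr_to_read -= len;
                        if (!priv.hdr_to_read)
                                priv.data_to_read = DATA_LEN;
                } else {
                        len = count < priv.data_to_read ? count : priv.data_to_read;
                        priv.data_to_read -= len;
                        if (!priv.data_to_read)
                                priv.hdr_to_read = HDR_LEN;
                }
                buf += len;
                count -= len;
        }
        pthread_mutex_unlock(&priv.thread_mutex);
}

static void *writer(void *arg)
{
        int i;

        (void) arg;
        for (i = 0; i < 100000; i++)
                mon_write("HDR.data", HDR_LEN + DATA_LEN);
        return NULL;
}

int main(void)
{
        pthread_t t1, t2;

        pthread_create(&t1, NULL, writer, NULL);
        pthread_create(&t2, NULL, writer, NULL);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        /* With the mutex this always ends on a message boundary. */
        printf("hdr_to_read=%zu data_to_read=%zu\n",
               priv.hdr_to_read, priv.data_to_read);
        return 0;
}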
drivers/s390/char/vmur.c | +108 -68
···
 /*
  * Low-level functions to do I/O to a ur device.
  *     alloc_chan_prog
+ *     free_chan_prog
  *     do_ur_io
  *     ur_int_handler
  *
  * alloc_chan_prog allocates and builds the channel program
+ * free_chan_prog frees memory of the channel program
  *
  * do_ur_io issues the channel program to the device and blocks waiting
  *   on a completion event it publishes at urd->io_done. The function
···
  *   address pointer that alloc_chan_prog returned.
  */
 
+static void free_chan_prog(struct ccw1 *cpa)
+{
+        struct ccw1 *ptr = cpa;
+
+        while (ptr->cda) {
+                kfree((void *)(addr_t) ptr->cda);
+                ptr++;
+        }
+        kfree(cpa);
+}
 
 /*
  * alloc_chan_prog
···
  * with a final NOP CCW command-chained on (which ensures that CE and DE
  * are presented together in a single interrupt instead of as separate
  * interrupts unless an incorrect length indication kicks in first). The
- * data length in each CCW is reclen. The caller must ensure that count
- * is an integral multiple of reclen.
- * The channel program pointer returned by this function must be freed
- * with kfree. The caller is responsible for checking that
- * count/reclen is not ridiculously large.
+ * data length in each CCW is reclen.
  */
-static struct ccw1 *alloc_chan_prog(char *buf, size_t count, size_t reclen)
+static struct ccw1 *alloc_chan_prog(const char __user *ubuf, int rec_count,
+                                    int reclen)
 {
-        size_t num_ccws;
         struct ccw1 *cpa;
+        void *kbuf;
         int i;
 
-        TRACE("alloc_chan_prog(%p, %zu, %zu)\n", buf, count, reclen);
+        TRACE("alloc_chan_prog(%p, %i, %i)\n", ubuf, rec_count, reclen);
 
         /*
          * We chain a NOP onto the writes to force CE+DE together.
          * That means we allocate room for CCWs to cover count/reclen
          * records plus a NOP.
          */
-        num_ccws = count / reclen + 1;
-        cpa = kmalloc(num_ccws * sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
+        cpa = kzalloc((rec_count + 1) * sizeof(struct ccw1),
+                      GFP_KERNEL | GFP_DMA);
         if (!cpa)
-                return NULL;
+                return ERR_PTR(-ENOMEM);
 
-        for (i = 0; count; i++) {
+        for (i = 0; i < rec_count; i++) {
                 cpa[i].cmd_code = WRITE_CCW_CMD;
                 cpa[i].flags = CCW_FLAG_CC | CCW_FLAG_SLI;
                 cpa[i].count = reclen;
-                cpa[i].cda = __pa(buf);
-                buf += reclen;
-                count -= reclen;
+                kbuf = kmalloc(reclen, GFP_KERNEL | GFP_DMA);
+                if (!kbuf) {
+                        free_chan_prog(cpa);
+                        return ERR_PTR(-ENOMEM);
+                }
+                cpa[i].cda = (u32)(addr_t) kbuf;
+                if (copy_from_user(kbuf, ubuf, reclen)) {
+                        free_chan_prog(cpa);
+                        return ERR_PTR(-EFAULT);
+                }
+                ubuf += reclen;
         }
         /* The following NOP CCW forces CE+DE to be presented together */
         cpa[i].cmd_code = CCW_CMD_NOOP;
-        cpa[i].flags = 0;
-        cpa[i].count = 0;
-        cpa[i].cda = 0;
-
         return cpa;
 }
···
 {
         int rc;
         struct ccw_device *cdev = urd->cdev;
-        DECLARE_COMPLETION(event);
+        DECLARE_COMPLETION_ONSTACK(event);
 
         TRACE("do_ur_io: cpa=%p\n", cpa);
 
···
                      size_t count, size_t reclen, loff_t *ppos)
 {
         struct ccw1 *cpa;
-        char *buf;
         int rc;
 
-        /* Data buffer must be under 2GB line for fmt1 CCWs: hence GFP_DMA */
-        buf = kmalloc(count, GFP_KERNEL | GFP_DMA);
-        if (!buf)
-                return -ENOMEM;
-
-        if (copy_from_user(buf, udata, count)) {
-                rc = -EFAULT;
-                goto fail_kfree_buf;
-        }
-
-        cpa = alloc_chan_prog(buf, count, reclen);
-        if (!cpa) {
-                rc = -ENOMEM;
-                goto fail_kfree_buf;
-        }
+        cpa = alloc_chan_prog(udata, count / reclen, reclen);
+        if (IS_ERR(cpa))
+                return PTR_ERR(cpa);
 
         rc = do_ur_io(urd, cpa);
         if (rc)
···
         }
         *ppos += count;
         rc = count;
+
 fail_kfree_cpa:
-        kfree(cpa);
-fail_kfree_buf:
-        kfree(buf);
+        free_chan_prog(cpa);
         return rc;
 }
···
                 return rc;
 
         len = min((size_t) PAGE_SIZE, count);
-        buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+        buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
         if (!buf)
                 return -ENOMEM;
 
···
         *offs += copied;
         rc = copied;
 fail:
-        kfree(buf);
+        free_page((unsigned long) buf);
         return rc;
 }
···
         }
 }
 
-static int verify_device(struct urdev *urd)
+static int verify_uri_device(struct urdev *urd)
 {
-        struct file_control_block fcb;
+        struct file_control_block *fcb;
         char *buf;
         int rc;
 
+        fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
+        if (!fcb)
+                return -ENOMEM;
+
+        /* check for empty reader device (beginning of chain) */
+        rc = diag_read_next_file_info(fcb, 0);
+        if (rc)
+                goto fail_free_fcb;
+
+        /* if file is in hold status, we do not read it */
+        if (fcb->file_stat & (FLG_SYSTEM_HOLD | FLG_USER_HOLD)) {
+                rc = -EPERM;
+                goto fail_free_fcb;
+        }
+
+        /* open file on virtual reader */
+        buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
+        if (!buf) {
+                rc = -ENOMEM;
+                goto fail_free_fcb;
+        }
+        rc = diag_read_file(urd->dev_id.devno, buf);
+        if ((rc != 0) && (rc != -ENODATA)) /* EOF does not hurt */
+                goto fail_free_buf;
+
+        /* check if the file on top of the queue is open now */
+        rc = diag_read_next_file_info(fcb, 0);
+        if (rc)
+                goto fail_free_buf;
+        if (!(fcb->file_stat & FLG_IN_USE)) {
+                rc = -EMFILE;
+                goto fail_free_buf;
+        }
+        rc = 0;
+
+fail_free_buf:
+        free_page((unsigned long) buf);
+fail_free_fcb:
+        kfree(fcb);
+        return rc;
+}
+
+static int verify_device(struct urdev *urd)
+{
         switch (urd->class) {
         case DEV_CLASS_UR_O:
                 return 0; /* no check needed here */
         case DEV_CLASS_UR_I:
-                /* check for empty reader device (beginning of chain) */
-                rc = diag_read_next_file_info(&fcb, 0);
-                if (rc)
-                        return rc;
-
-                /* open file on virtual reader */
-                buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
-                if (!buf)
-                        return -ENOMEM;
-                rc = diag_read_file(urd->dev_id.devno, buf);
-                kfree(buf);
-
-                if ((rc != 0) && (rc != -ENODATA)) /* EOF does not hurt */
-                        return rc;
-                return 0;
+                return verify_uri_device(urd);
         default:
                 return -ENOTSUPP;
         }
 }
 
-static int get_file_reclen(struct urdev *urd)
+static int get_uri_file_reclen(struct urdev *urd)
 {
-        struct file_control_block fcb;
+        struct file_control_block *fcb;
         int rc;
 
+        fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
+        if (!fcb)
+                return -ENOMEM;
+        rc = diag_read_next_file_info(fcb, 0);
+        if (rc)
+                goto fail_free;
+        if (fcb->file_stat & FLG_CP_DUMP)
+                rc = 0;
+        else
+                rc = fcb->rec_len;
+
+fail_free:
+        kfree(fcb);
+        return rc;
+}
+
+static int get_file_reclen(struct urdev *urd)
+{
         switch (urd->class) {
         case DEV_CLASS_UR_O:
                 return 0;
         case DEV_CLASS_UR_I:
-                rc = diag_read_next_file_info(&fcb, 0);
-                if (rc)
-                        return rc;
-                break;
+                return get_uri_file_reclen(urd);
         default:
                 return -ENOTSUPP;
         }
-        if (fcb.file_stat & FLG_CP_DUMP)
-                return 0;
-
-        return fcb.rec_len;
 }
 
 static int ur_open(struct inode *inode, struct file *file)
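A note on the error-handling idiom the new alloc_chan_prog() adopts: ERR_PTR()/IS_ERR()/PTR_ERR() encode a small negative errno inside an invalid pointer value, letting a pointer-returning function report which error occurred (-ENOMEM vs. -EFAULT here) without an extra output parameter. A simplified, freestanding rendition of the idiom from include/linux/err.h; kernel code uses the real helpers directly:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)
{
        return (void *) error;          /* e.g. -ENOMEM -> 0xff...f4 */
}

static inline long PTR_ERR(const void *ptr)
{
        return (long) ptr;
}

static inline int IS_ERR(const void *ptr)
{
        /* the top MAX_ERRNO addresses are never valid pointers */
        return (unsigned long) ptr >= (unsigned long) -MAX_ERRNO;
}

/* A pointer-returning allocator that distinguishes its failures,
 * the way the rewritten alloc_chan_prog() does. */
static void *alloc_buffer(size_t len)
{
        void *p = malloc(len);

        if (!p)
                return ERR_PTR(-ENOMEM);
        return p;
}

int main(void)
{
        void *buf = alloc_buffer(64);

        if (IS_ERR(buf)) {
                fprintf(stderr, "alloc failed: %ld\n", PTR_ERR(buf));
                return 1;
        }
        free(buf);
        return 0;
}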
drivers/s390/char/vmur.h | +4 -1
···
         char rest[200];
 } __attribute__ ((packed));
 
-#define FLG_CP_DUMP 0x10
+#define FLG_SYSTEM_HOLD 0x04
+#define FLG_CP_DUMP     0x10
+#define FLG_USER_HOLD   0x20
+#define FLG_IN_USE      0x80
 
 /*
  * A struct urdev is created for each ur device that is made available
drivers/s390/cio/css.c | +1
···
         sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
         ret = cio_modify(sch);
         if (ret) {
+                kfree(sch->lock);
                 kfree(sch);
                 return ERR_PTR(ret);
         }
drivers/s390/cio/qdio.c | +47 -45
···
 static atomic_t spare_indicator_usecount;
 #define QDIO_MEMPOOL_SCSSC_ELEMENTS 2
 static mempool_t *qdio_mempool_scssc;
+static struct kmem_cache *qdio_q_cache;
 
 static debug_info_t *qdio_dbf_setup;
 static debug_info_t *qdio_dbf_sbal;
···
 qdio_release_irq_memory(struct qdio_irq *irq_ptr)
 {
         int i;
+        struct qdio_q *q;
 
-        for (i=0;i<QDIO_MAX_QUEUES_PER_IRQ;i++) {
-                if (!irq_ptr->input_qs[i])
-                        goto next;
-
-                kfree(irq_ptr->input_qs[i]->slib);
-                kfree(irq_ptr->input_qs[i]);
-
-        next:
-                if (!irq_ptr->output_qs[i])
-                        continue;
-
-                kfree(irq_ptr->output_qs[i]->slib);
-                kfree(irq_ptr->output_qs[i]);
-
+        for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
+                q = irq_ptr->input_qs[i];
+                if (q) {
+                        free_page((unsigned long) q->slib);
+                        kmem_cache_free(qdio_q_cache, q);
+                }
+                q = irq_ptr->output_qs[i];
+                if (q) {
+                        free_page((unsigned long) q->slib);
+                        kmem_cache_free(qdio_q_cache, q);
+                }
         }
-        kfree(irq_ptr->qdr);
+        free_page((unsigned long) irq_ptr->qdr);
         free_page((unsigned long) irq_ptr);
 }
···
 {
         int i;
         struct qdio_q *q;
-        int result=-ENOMEM;
 
-        for (i=0;i<no_input_qs;i++) {
-                q = kzalloc(sizeof(struct qdio_q), GFP_KERNEL);
-
-                if (!q) {
-                        QDIO_PRINT_ERR("kmalloc of q failed!\n");
-                        goto out;
-                }
+        for (i = 0; i < no_input_qs; i++) {
+                q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
+                if (!q)
+                        return -ENOMEM;
+                memset(q, 0, sizeof(*q));
 
-                q->slib = kmalloc(PAGE_SIZE, GFP_KERNEL);
+                q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
                 if (!q->slib) {
-                        QDIO_PRINT_ERR("kmalloc of slib failed!\n");
-                        goto out;
+                        kmem_cache_free(qdio_q_cache, q);
+                        return -ENOMEM;
                 }
-
                 irq_ptr->input_qs[i]=q;
         }
 
-        for (i=0;i<no_output_qs;i++) {
-                q = kzalloc(sizeof(struct qdio_q), GFP_KERNEL);
-
-                if (!q) {
-                        goto out;
-                }
+        for (i = 0; i < no_output_qs; i++) {
+                q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
+                if (!q)
+                        return -ENOMEM;
+                memset(q, 0, sizeof(*q));
 
-                q->slib=kmalloc(PAGE_SIZE,GFP_KERNEL);
+                q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
                 if (!q->slib) {
-                        QDIO_PRINT_ERR("kmalloc of slib failed!\n");
-                        goto out;
+                        kmem_cache_free(qdio_q_cache, q);
+                        return -ENOMEM;
                 }
-
                 irq_ptr->output_qs[i]=q;
         }
-
-        result=0;
-out:
-        return result;
+        return 0;
 }
 
 static void
···
         QDIO_DBF_HEX0(0,setup,&irq_ptr,sizeof(void*));
 
         if (!irq_ptr) {
-                QDIO_PRINT_ERR("kmalloc of irq_ptr failed!\n");
+                QDIO_PRINT_ERR("allocation of irq_ptr failed!\n");
                 return -ENOMEM;
         }
 
         init_MUTEX(&irq_ptr->setting_up_sema);
 
         /* QDR must be in DMA area since CCW data address is only 32 bit */
-        irq_ptr->qdr=kmalloc(sizeof(struct qdr), GFP_KERNEL | GFP_DMA);
+        irq_ptr->qdr = (struct qdr *) __get_free_page(GFP_KERNEL | GFP_DMA);
         if (!(irq_ptr->qdr)) {
                 free_page((unsigned long) irq_ptr);
-                QDIO_PRINT_ERR("kmalloc of irq_ptr->qdr failed!\n");
+                QDIO_PRINT_ERR("allocation of irq_ptr->qdr failed!\n");
                 return -ENOMEM;
         }
         QDIO_DBF_TEXT0(0,setup,"qdr:");
···
         if (qdio_alloc_qs(irq_ptr,
                           init_data->no_input_qs,
                           init_data->no_output_qs)) {
+                QDIO_PRINT_ERR("queue allocation failed!\n");
                 qdio_release_irq_memory(irq_ptr);
                 return -ENOMEM;
         }
···
         if (res)
                 return res;
 
+        qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q),
+                                         256, 0, NULL);
+        if (!qdio_q_cache) {
+                qdio_release_qdio_memory();
+                return -ENOMEM;
+        }
+
         res = qdio_register_dbf_views();
-        if (res)
+        if (res) {
+                kmem_cache_destroy(qdio_q_cache);
+                qdio_release_qdio_memory();
                 return res;
+        }
 
         QDIO_DBF_TEXT0(0,setup,"initQDIO");
         res = bus_create_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
···
         qdio_release_qdio_memory();
         qdio_unregister_dbf_views();
         mempool_destroy(qdio_mempool_scssc);
+        kmem_cache_destroy(qdio_q_cache);
         bus_remove_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
         printk("qdio: %s: module removed\n",version);
 }