Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

i2o: Fix 32/64bit DMA locking

The I2O ioctls assume 32bits. In itself that is fine as they are old
cards and nobody uses 64bit. However on LKML it was noted this
assumption is also made for allocated memory and is unsafe on 64bit
systems.

Fixing this is a mess. It turns out there is tons of crap buried in a
header file that does racy 32/64bit filtering on the masks.

So we:
- Verify all callers of the racy code can sleep (i2o_dma_[re]alloc)
- Move the code into a new i2o/memory.c file
- Remove the gfp_mask argument so nobody can try and misuse the function
- Wrap a mutex around the problem area (a single mutex is easy to do and
none of this is performance relevant)
- Switch the remaining problem kmalloc holdout to use i2o_dma_alloc

Cc: Markus Lidel <Markus.Lidel@shadowconnect.com>
Cc: Vasily Averin <vvs@sw.ru>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Alan Cox; committed by Linus Torvalds.
9d793b0b 673c0c00

+351 -311
+1 -1
drivers/message/i2o/Makefile
··· 5 5 # In the future, some of these should be built conditionally. 6 6 # 7 7 8 - i2o_core-y += iop.o driver.o device.o debug.o pci.o exec-osm.o 8 + i2o_core-y += iop.o driver.o device.o debug.o pci.o exec-osm.o memory.o 9 9 i2o_bus-y += bus-osm.o 10 10 i2o_config-y += config-osm.o 11 11 obj-$(CONFIG_I2O) += i2o_core.o
+1 -1
drivers/message/i2o/device.c
··· 467 467 468 468 res.virt = NULL; 469 469 470 - if (i2o_dma_alloc(dev, &res, reslen, GFP_KERNEL)) 470 + if (i2o_dma_alloc(dev, &res, reslen)) 471 471 return -ENOMEM; 472 472 473 473 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+2 -2
drivers/message/i2o/exec-osm.c
··· 388 388 389 389 dev = &c->pdev->dev; 390 390 391 - if (i2o_dma_realloc 392 - (dev, &c->dlct, le32_to_cpu(sb->expected_lct_size), GFP_KERNEL)) 391 + if (i2o_dma_realloc(dev, &c->dlct, 392 + le32_to_cpu(sb->expected_lct_size))) 393 393 return -ENOMEM; 394 394 395 395 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+14 -17
drivers/message/i2o/i2o_config.c
··· 260 260 if (IS_ERR(msg)) 261 261 return PTR_ERR(msg); 262 262 263 - if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) { 263 + if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize)) { 264 264 i2o_msg_nop(c, msg); 265 265 return -ENOMEM; 266 266 } ··· 339 339 if (IS_ERR(msg)) 340 340 return PTR_ERR(msg); 341 341 342 - if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) { 342 + if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize)) { 343 343 i2o_msg_nop(c, msg); 344 344 return -ENOMEM; 345 345 } ··· 634 634 sg_size = sg[i].flag_count & 0xffffff; 635 635 p = &(sg_list[sg_index]); 636 636 /* Allocate memory for the transfer */ 637 - if (i2o_dma_alloc 638 - (&c->pdev->dev, p, sg_size, 639 - PCI_DMA_BIDIRECTIONAL)) { 637 + if (i2o_dma_alloc(&c->pdev->dev, p, sg_size)) { 640 638 printk(KERN_DEBUG 641 639 "%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n", 642 640 c->name, sg_size, i, sg_count); ··· 778 780 u32 size = 0; 779 781 u32 reply_size = 0; 780 782 u32 rcode = 0; 781 - void *sg_list[SG_TABLESIZE]; 783 + struct i2o_dma sg_list[SG_TABLESIZE]; 782 784 u32 sg_offset = 0; 783 785 u32 sg_count = 0; 784 786 int sg_index = 0; 785 787 u32 i = 0; 786 - void *p = NULL; 787 788 i2o_status_block *sb; 788 789 struct i2o_message *msg; 789 790 unsigned int iop; ··· 839 842 memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE); 840 843 if (sg_offset) { 841 844 struct sg_simple_element *sg; 845 + struct i2o_dma *p; 842 846 843 847 if (sg_offset * 4 >= size) { 844 848 rcode = -EFAULT; ··· 869 871 goto sg_list_cleanup; 870 872 } 871 873 sg_size = sg[i].flag_count & 0xffffff; 874 + p = &(sg_list[sg_index]); 875 + if (i2o_dma_alloc(&c->pdev->dev, p, sg_size)) { 872 876 /* Allocate memory for the transfer */ 873 - p = kmalloc(sg_size, GFP_KERNEL); 874 - if (!p) { 875 877 printk(KERN_DEBUG 876 878 "%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n", 877 879 c->name, sg_size, i, sg_count); 878 880 rcode = -ENOMEM; 879 881 
goto sg_list_cleanup; 880 882 } 881 - sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame. 883 + sg_index++; 882 884 /* Copy in the user's SG buffer if necessary */ 883 885 if (sg[i]. 884 886 flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR */ ) { 885 887 // TODO 64bit fix 886 888 if (copy_from_user 887 - (p, (void __user *)sg[i].addr_bus, 889 + (p->virt, (void __user *)sg[i].addr_bus, 888 890 sg_size)) { 889 891 printk(KERN_DEBUG 890 892 "%s: Could not copy SG buf %d FROM user\n", ··· 893 895 goto sg_list_cleanup; 894 896 } 895 897 } 896 - //TODO 64bit fix 897 - sg[i].addr_bus = virt_to_bus(p); 898 + sg[i].addr_bus = p->phys; 898 899 } 899 900 } 900 901 ··· 905 908 } 906 909 907 910 if (sg_offset) { 908 - u32 rmsg[128]; 911 + u32 rmsg[I2O_OUTBOUND_MSG_FRAME_SIZE]; 909 912 /* Copy back the Scatter Gather buffers back to user space */ 910 913 u32 j; 911 914 // TODO 64bit fix ··· 939 942 sg_size = sg[j].flag_count & 0xffffff; 940 943 // TODO 64bit fix 941 944 if (copy_to_user 942 - ((void __user *)sg[j].addr_bus, sg_list[j], 945 + ((void __user *)sg[j].addr_bus, sg_list[j].virt, 943 946 sg_size)) { 944 947 printk(KERN_WARNING 945 948 "%s: Could not copy %p TO user %x\n", 946 - c->name, sg_list[j], 949 + c->name, sg_list[j].virt, 947 950 sg[j].addr_bus); 948 951 rcode = -EFAULT; 949 952 goto sg_list_cleanup; ··· 970 973 } 971 974 972 975 for (i = 0; i < sg_index; i++) 973 - kfree(sg_list[i]); 976 + i2o_dma_free(&c->pdev->dev, &sg_list[i]); 974 977 975 978 cleanup: 976 979 kfree(reply);
+1 -1
drivers/message/i2o/iop.c
··· 1004 1004 1005 1005 size = hrt->num_entries * hrt->entry_len << 2; 1006 1006 if (size > c->hrt.len) { 1007 - if (i2o_dma_realloc(dev, &c->hrt, size, GFP_KERNEL)) 1007 + if (i2o_dma_realloc(dev, &c->hrt, size)) 1008 1008 return -ENOMEM; 1009 1009 else 1010 1010 hrt = c->hrt.virt;
+313
drivers/message/i2o/memory.c
··· 1 + /* 2 + * Functions to handle I2O memory 3 + * 4 + * Pulled from the inlines in i2o headers and uninlined 5 + * 6 + * 7 + * This program is free software; you can redistribute it and/or modify it 8 + * under the terms of the GNU General Public License as published by the 9 + * Free Software Foundation; either version 2 of the License, or (at your 10 + * option) any later version. 11 + */ 12 + 13 + #include <linux/module.h> 14 + #include <linux/i2o.h> 15 + #include <linux/delay.h> 16 + #include <linux/string.h> 17 + #include <linux/slab.h> 18 + #include "core.h" 19 + 20 + /* Protects our 32/64bit mask switching */ 21 + static DEFINE_MUTEX(mem_lock); 22 + 23 + /** 24 + * i2o_sg_tablesize - Calculate the maximum number of elements in a SGL 25 + * @c: I2O controller for which the calculation should be done 26 + * @body_size: maximum body size used for message in 32-bit words. 27 + * 28 + * Return the maximum number of SG elements in a SG list. 29 + */ 30 + u16 i2o_sg_tablesize(struct i2o_controller *c, u16 body_size) 31 + { 32 + i2o_status_block *sb = c->status_block.virt; 33 + u16 sg_count = 34 + (sb->inbound_frame_size - sizeof(struct i2o_message) / 4) - 35 + body_size; 36 + 37 + if (c->pae_support) { 38 + /* 39 + * for 64-bit a SG attribute element must be added and each 40 + * SG element needs 12 bytes instead of 8. 41 + */ 42 + sg_count -= 2; 43 + sg_count /= 3; 44 + } else 45 + sg_count /= 2; 46 + 47 + if (c->short_req && (sg_count > 8)) 48 + sg_count = 8; 49 + 50 + return sg_count; 51 + } 52 + EXPORT_SYMBOL_GPL(i2o_sg_tablesize); 53 + 54 + 55 + /** 56 + * i2o_dma_map_single - Map pointer to controller and fill in I2O message. 
57 + * @c: I2O controller 58 + * @ptr: pointer to the data which should be mapped 59 + * @size: size of data in bytes 60 + * @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE 61 + * @sg_ptr: pointer to the SG list inside the I2O message 62 + * 63 + * This function does all necessary DMA handling and also writes the I2O 64 + * SGL elements into the I2O message. For details on DMA handling see also 65 + * dma_map_single(). The pointer sg_ptr will only be set to the end of the 66 + * SG list if the allocation was successful. 67 + * 68 + * Returns DMA address which must be checked for failures using 69 + * dma_mapping_error(). 70 + */ 71 + dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr, 72 + size_t size, 73 + enum dma_data_direction direction, 74 + u32 ** sg_ptr) 75 + { 76 + u32 sg_flags; 77 + u32 *mptr = *sg_ptr; 78 + dma_addr_t dma_addr; 79 + 80 + switch (direction) { 81 + case DMA_TO_DEVICE: 82 + sg_flags = 0xd4000000; 83 + break; 84 + case DMA_FROM_DEVICE: 85 + sg_flags = 0xd0000000; 86 + break; 87 + default: 88 + return 0; 89 + } 90 + 91 + dma_addr = dma_map_single(&c->pdev->dev, ptr, size, direction); 92 + if (!dma_mapping_error(&c->pdev->dev, dma_addr)) { 93 + #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 94 + if ((sizeof(dma_addr_t) > 4) && c->pae_support) { 95 + *mptr++ = cpu_to_le32(0x7C020002); 96 + *mptr++ = cpu_to_le32(PAGE_SIZE); 97 + } 98 + #endif 99 + 100 + *mptr++ = cpu_to_le32(sg_flags | size); 101 + *mptr++ = cpu_to_le32(i2o_dma_low(dma_addr)); 102 + #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 103 + if ((sizeof(dma_addr_t) > 4) && c->pae_support) 104 + *mptr++ = cpu_to_le32(i2o_dma_high(dma_addr)); 105 + #endif 106 + *sg_ptr = mptr; 107 + } 108 + return dma_addr; 109 + } 110 + EXPORT_SYMBOL_GPL(i2o_dma_map_single); 111 + 112 + /** 113 + * i2o_dma_map_sg - Map a SG List to controller and fill in I2O message. 
114 + * @c: I2O controller 115 + * @sg: SG list to be mapped 116 + * @sg_count: number of elements in the SG list 117 + * @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE 118 + * @sg_ptr: pointer to the SG list inside the I2O message 119 + * 120 + * This function does all necessary DMA handling and also writes the I2O 121 + * SGL elements into the I2O message. For details on DMA handling see also 122 + * dma_map_sg(). The pointer sg_ptr will only be set to the end of the SG 123 + * list if the allocation was successful. 124 + * 125 + * Returns 0 on failure or 1 on success. 126 + */ 127 + int i2o_dma_map_sg(struct i2o_controller *c, struct scatterlist *sg, 128 + int sg_count, enum dma_data_direction direction, u32 ** sg_ptr) 129 + { 130 + u32 sg_flags; 131 + u32 *mptr = *sg_ptr; 132 + 133 + switch (direction) { 134 + case DMA_TO_DEVICE: 135 + sg_flags = 0x14000000; 136 + break; 137 + case DMA_FROM_DEVICE: 138 + sg_flags = 0x10000000; 139 + break; 140 + default: 141 + return 0; 142 + } 143 + 144 + sg_count = dma_map_sg(&c->pdev->dev, sg, sg_count, direction); 145 + if (!sg_count) 146 + return 0; 147 + 148 + #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 149 + if ((sizeof(dma_addr_t) > 4) && c->pae_support) { 150 + *mptr++ = cpu_to_le32(0x7C020002); 151 + *mptr++ = cpu_to_le32(PAGE_SIZE); 152 + } 153 + #endif 154 + 155 + while (sg_count-- > 0) { 156 + if (!sg_count) 157 + sg_flags |= 0xC0000000; 158 + *mptr++ = cpu_to_le32(sg_flags | sg_dma_len(sg)); 159 + *mptr++ = cpu_to_le32(i2o_dma_low(sg_dma_address(sg))); 160 + #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 161 + if ((sizeof(dma_addr_t) > 4) && c->pae_support) 162 + *mptr++ = cpu_to_le32(i2o_dma_high(sg_dma_address(sg))); 163 + #endif 164 + sg = sg_next(sg); 165 + } 166 + *sg_ptr = mptr; 167 + 168 + return 1; 169 + } 170 + EXPORT_SYMBOL_GPL(i2o_dma_map_sg); 171 + 172 + /** 173 + * i2o_dma_alloc - Allocate DMA memory 174 + * @dev: struct device pointer to the PCI device of the I2O controller 175 + * @addr: i2o_dma struct which should get 
the DMA buffer 176 + * @len: length of the new DMA memory 177 + * 178 + * Allocate a coherent DMA memory and write the pointers into addr. 179 + * 180 + * Returns 0 on success or -ENOMEM on failure. 181 + */ 182 + int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr, size_t len) 183 + { 184 + struct pci_dev *pdev = to_pci_dev(dev); 185 + int dma_64 = 0; 186 + 187 + mutex_lock(&mem_lock); 188 + if ((sizeof(dma_addr_t) > 4) && (pdev->dma_mask == DMA_64BIT_MASK)) { 189 + dma_64 = 1; 190 + if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) { 191 + mutex_unlock(&mem_lock); 192 + return -ENOMEM; 193 + } 194 + } 195 + 196 + addr->virt = dma_alloc_coherent(dev, len, &addr->phys, GFP_KERNEL); 197 + 198 + if ((sizeof(dma_addr_t) > 4) && dma_64) 199 + if (pci_set_dma_mask(pdev, DMA_64BIT_MASK)) 200 + printk(KERN_WARNING "i2o: unable to set 64-bit DMA"); 201 + mutex_unlock(&mem_lock); 202 + 203 + if (!addr->virt) 204 + return -ENOMEM; 205 + 206 + memset(addr->virt, 0, len); 207 + addr->len = len; 208 + 209 + return 0; 210 + } 211 + EXPORT_SYMBOL_GPL(i2o_dma_alloc); 212 + 213 + 214 + /** 215 + * i2o_dma_free - Free DMA memory 216 + * @dev: struct device pointer to the PCI device of the I2O controller 217 + * @addr: i2o_dma struct which contains the DMA buffer 218 + * 219 + * Free a coherent DMA memory and set virtual address of addr to NULL. 220 + */ 221 + void i2o_dma_free(struct device *dev, struct i2o_dma *addr) 222 + { 223 + if (addr->virt) { 224 + if (addr->phys) 225 + dma_free_coherent(dev, addr->len, addr->virt, 226 + addr->phys); 227 + else 228 + kfree(addr->virt); 229 + addr->virt = NULL; 230 + } 231 + } 232 + EXPORT_SYMBOL_GPL(i2o_dma_free); 233 + 234 + 235 + /** 236 + * i2o_dma_realloc - Realloc DMA memory 237 + * @dev: struct device pointer to the PCI device of the I2O controller 238 + * @addr: pointer to a i2o_dma struct DMA buffer 239 + * @len: new length of memory 240 + * 241 + * If there was something allocated in the addr, free it first. 
If len > 0 242 + * than try to allocate it and write the addresses back to the addr 243 + * structure. If len == 0 set the virtual address to NULL. 244 + * 245 + * Returns the 0 on success or negative error code on failure. 246 + */ 247 + int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr, size_t len) 248 + { 249 + i2o_dma_free(dev, addr); 250 + 251 + if (len) 252 + return i2o_dma_alloc(dev, addr, len); 253 + 254 + return 0; 255 + } 256 + EXPORT_SYMBOL_GPL(i2o_dma_realloc); 257 + 258 + /* 259 + * i2o_pool_alloc - Allocate an slab cache and mempool 260 + * @mempool: pointer to struct i2o_pool to write data into. 261 + * @name: name which is used to identify cache 262 + * @size: size of each object 263 + * @min_nr: minimum number of objects 264 + * 265 + * First allocates a slab cache with name and size. Then allocates a 266 + * mempool which uses the slab cache for allocation and freeing. 267 + * 268 + * Returns 0 on success or negative error code on failure. 269 + */ 270 + int i2o_pool_alloc(struct i2o_pool *pool, const char *name, 271 + size_t size, int min_nr) 272 + { 273 + pool->name = kmalloc(strlen(name) + 1, GFP_KERNEL); 274 + if (!pool->name) 275 + goto exit; 276 + strcpy(pool->name, name); 277 + 278 + pool->slab = 279 + kmem_cache_create(pool->name, size, 0, SLAB_HWCACHE_ALIGN, NULL); 280 + if (!pool->slab) 281 + goto free_name; 282 + 283 + pool->mempool = mempool_create_slab_pool(min_nr, pool->slab); 284 + if (!pool->mempool) 285 + goto free_slab; 286 + 287 + return 0; 288 + 289 + free_slab: 290 + kmem_cache_destroy(pool->slab); 291 + 292 + free_name: 293 + kfree(pool->name); 294 + 295 + exit: 296 + return -ENOMEM; 297 + } 298 + EXPORT_SYMBOL_GPL(i2o_pool_alloc); 299 + 300 + /* 301 + * i2o_pool_free - Free slab cache and mempool again 302 + * @mempool: pointer to struct i2o_pool which should be freed 303 + * 304 + * Note that you have to return all objects to the mempool again before 305 + * calling i2o_pool_free(). 
306 + */ 307 + void i2o_pool_free(struct i2o_pool *pool) 308 + { 309 + mempool_destroy(pool->mempool); 310 + kmem_cache_destroy(pool->slab); 311 + kfree(pool->name); 312 + }; 313 + EXPORT_SYMBOL_GPL(i2o_pool_free);
+7 -9
drivers/message/i2o/pci.c
··· 186 186 } 187 187 } 188 188 189 - if (i2o_dma_alloc(dev, &c->status, 8, GFP_KERNEL)) { 189 + if (i2o_dma_alloc(dev, &c->status, 8)) { 190 190 i2o_pci_free(c); 191 191 return -ENOMEM; 192 192 } 193 193 194 - if (i2o_dma_alloc(dev, &c->hrt, sizeof(i2o_hrt), GFP_KERNEL)) { 194 + if (i2o_dma_alloc(dev, &c->hrt, sizeof(i2o_hrt))) { 195 195 i2o_pci_free(c); 196 196 return -ENOMEM; 197 197 } 198 198 199 - if (i2o_dma_alloc(dev, &c->dlct, 8192, GFP_KERNEL)) { 199 + if (i2o_dma_alloc(dev, &c->dlct, 8192)) { 200 200 i2o_pci_free(c); 201 201 return -ENOMEM; 202 202 } 203 203 204 - if (i2o_dma_alloc(dev, &c->status_block, sizeof(i2o_status_block), 205 - GFP_KERNEL)) { 204 + if (i2o_dma_alloc(dev, &c->status_block, sizeof(i2o_status_block))) { 206 205 i2o_pci_free(c); 207 206 return -ENOMEM; 208 207 } 209 208 210 - if (i2o_dma_alloc 211 - (dev, &c->out_queue, 212 - I2O_MAX_OUTBOUND_MSG_FRAMES * I2O_OUTBOUND_MSG_FRAME_SIZE * 213 - sizeof(u32), GFP_KERNEL)) { 209 + if (i2o_dma_alloc(dev, &c->out_queue, 210 + I2O_MAX_OUTBOUND_MSG_FRAMES * I2O_OUTBOUND_MSG_FRAME_SIZE * 211 + sizeof(u32))) { 214 212 i2o_pci_free(c); 215 213 return -ENOMEM; 216 214 }
+12 -280
include/linux/i2o.h
··· 570 570 #endif 571 571 spinlock_t lock; /* lock for controller 572 572 configuration */ 573 - 574 573 void *driver_data[I2O_MAX_DRIVERS]; /* storage for drivers */ 575 574 }; 576 575 ··· 690 691 }; 691 692 #endif 692 693 693 - /** 694 - * i2o_sg_tablesize - Calculate the maximum number of elements in a SGL 695 - * @c: I2O controller for which the calculation should be done 696 - * @body_size: maximum body size used for message in 32-bit words. 697 - * 698 - * Return the maximum number of SG elements in a SG list. 699 - */ 700 - static inline u16 i2o_sg_tablesize(struct i2o_controller *c, u16 body_size) 701 - { 702 - i2o_status_block *sb = c->status_block.virt; 703 - u16 sg_count = 704 - (sb->inbound_frame_size - sizeof(struct i2o_message) / 4) - 705 - body_size; 706 - 707 - if (c->pae_support) { 708 - /* 709 - * for 64-bit a SG attribute element must be added and each 710 - * SG element needs 12 bytes instead of 8. 711 - */ 712 - sg_count -= 2; 713 - sg_count /= 3; 714 - } else 715 - sg_count /= 2; 716 - 717 - if (c->short_req && (sg_count > 8)) 718 - sg_count = 8; 719 - 720 - return sg_count; 721 - }; 722 - 723 - /** 724 - * i2o_dma_map_single - Map pointer to controller and fill in I2O message. 725 - * @c: I2O controller 726 - * @ptr: pointer to the data which should be mapped 727 - * @size: size of data in bytes 728 - * @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE 729 - * @sg_ptr: pointer to the SG list inside the I2O message 730 - * 731 - * This function does all necessary DMA handling and also writes the I2O 732 - * SGL elements into the I2O message. For details on DMA handling see also 733 - * dma_map_single(). The pointer sg_ptr will only be set to the end of the 734 - * SG list if the allocation was successful. 735 - * 736 - * Returns DMA address which must be checked for failures using 737 - * dma_mapping_error(). 
738 - */ 739 - static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr, 694 + extern u16 i2o_sg_tablesize(struct i2o_controller *c, u16 body_size); 695 + extern dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr, 740 696 size_t size, 741 697 enum dma_data_direction direction, 742 - u32 ** sg_ptr) 743 - { 744 - u32 sg_flags; 745 - u32 *mptr = *sg_ptr; 746 - dma_addr_t dma_addr; 747 - 748 - switch (direction) { 749 - case DMA_TO_DEVICE: 750 - sg_flags = 0xd4000000; 751 - break; 752 - case DMA_FROM_DEVICE: 753 - sg_flags = 0xd0000000; 754 - break; 755 - default: 756 - return 0; 757 - } 758 - 759 - dma_addr = dma_map_single(&c->pdev->dev, ptr, size, direction); 760 - if (!dma_mapping_error(&c->pdev->dev, dma_addr)) { 761 - #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 762 - if ((sizeof(dma_addr_t) > 4) && c->pae_support) { 763 - *mptr++ = cpu_to_le32(0x7C020002); 764 - *mptr++ = cpu_to_le32(PAGE_SIZE); 765 - } 766 - #endif 767 - 768 - *mptr++ = cpu_to_le32(sg_flags | size); 769 - *mptr++ = cpu_to_le32(i2o_dma_low(dma_addr)); 770 - #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 771 - if ((sizeof(dma_addr_t) > 4) && c->pae_support) 772 - *mptr++ = cpu_to_le32(i2o_dma_high(dma_addr)); 773 - #endif 774 - *sg_ptr = mptr; 775 - } 776 - return dma_addr; 777 - }; 778 - 779 - /** 780 - * i2o_dma_map_sg - Map a SG List to controller and fill in I2O message. 781 - * @c: I2O controller 782 - * @sg: SG list to be mapped 783 - * @sg_count: number of elements in the SG list 784 - * @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE 785 - * @sg_ptr: pointer to the SG list inside the I2O message 786 - * 787 - * This function does all necessary DMA handling and also writes the I2O 788 - * SGL elements into the I2O message. For details on DMA handling see also 789 - * dma_map_sg(). The pointer sg_ptr will only be set to the end of the SG 790 - * list if the allocation was successful. 791 - * 792 - * Returns 0 on failure or 1 on success. 
793 - */ 794 - static inline int i2o_dma_map_sg(struct i2o_controller *c, 698 + u32 ** sg_ptr); 699 + extern int i2o_dma_map_sg(struct i2o_controller *c, 795 700 struct scatterlist *sg, int sg_count, 796 701 enum dma_data_direction direction, 797 - u32 ** sg_ptr) 798 - { 799 - u32 sg_flags; 800 - u32 *mptr = *sg_ptr; 801 - 802 - switch (direction) { 803 - case DMA_TO_DEVICE: 804 - sg_flags = 0x14000000; 805 - break; 806 - case DMA_FROM_DEVICE: 807 - sg_flags = 0x10000000; 808 - break; 809 - default: 810 - return 0; 811 - } 812 - 813 - sg_count = dma_map_sg(&c->pdev->dev, sg, sg_count, direction); 814 - if (!sg_count) 815 - return 0; 816 - 817 - #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 818 - if ((sizeof(dma_addr_t) > 4) && c->pae_support) { 819 - *mptr++ = cpu_to_le32(0x7C020002); 820 - *mptr++ = cpu_to_le32(PAGE_SIZE); 821 - } 822 - #endif 823 - 824 - while (sg_count-- > 0) { 825 - if (!sg_count) 826 - sg_flags |= 0xC0000000; 827 - *mptr++ = cpu_to_le32(sg_flags | sg_dma_len(sg)); 828 - *mptr++ = cpu_to_le32(i2o_dma_low(sg_dma_address(sg))); 829 - #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 830 - if ((sizeof(dma_addr_t) > 4) && c->pae_support) 831 - *mptr++ = cpu_to_le32(i2o_dma_high(sg_dma_address(sg))); 832 - #endif 833 - sg = sg_next(sg); 834 - } 835 - *sg_ptr = mptr; 836 - 837 - return 1; 838 - }; 839 - 840 - /** 841 - * i2o_dma_alloc - Allocate DMA memory 842 - * @dev: struct device pointer to the PCI device of the I2O controller 843 - * @addr: i2o_dma struct which should get the DMA buffer 844 - * @len: length of the new DMA memory 845 - * @gfp_mask: GFP mask 846 - * 847 - * Allocate a coherent DMA memory and write the pointers into addr. 848 - * 849 - * Returns 0 on success or -ENOMEM on failure. 
850 - */ 851 - static inline int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr, 852 - size_t len, gfp_t gfp_mask) 853 - { 854 - struct pci_dev *pdev = to_pci_dev(dev); 855 - int dma_64 = 0; 856 - 857 - if ((sizeof(dma_addr_t) > 4) && (pdev->dma_mask == DMA_64BIT_MASK)) { 858 - dma_64 = 1; 859 - if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) 860 - return -ENOMEM; 861 - } 862 - 863 - addr->virt = dma_alloc_coherent(dev, len, &addr->phys, gfp_mask); 864 - 865 - if ((sizeof(dma_addr_t) > 4) && dma_64) 866 - if (pci_set_dma_mask(pdev, DMA_64BIT_MASK)) 867 - printk(KERN_WARNING "i2o: unable to set 64-bit DMA"); 868 - 869 - if (!addr->virt) 870 - return -ENOMEM; 871 - 872 - memset(addr->virt, 0, len); 873 - addr->len = len; 874 - 875 - return 0; 876 - }; 877 - 878 - /** 879 - * i2o_dma_free - Free DMA memory 880 - * @dev: struct device pointer to the PCI device of the I2O controller 881 - * @addr: i2o_dma struct which contains the DMA buffer 882 - * 883 - * Free a coherent DMA memory and set virtual address of addr to NULL. 884 - */ 885 - static inline void i2o_dma_free(struct device *dev, struct i2o_dma *addr) 886 - { 887 - if (addr->virt) { 888 - if (addr->phys) 889 - dma_free_coherent(dev, addr->len, addr->virt, 890 - addr->phys); 891 - else 892 - kfree(addr->virt); 893 - addr->virt = NULL; 894 - } 895 - }; 896 - 897 - /** 898 - * i2o_dma_realloc - Realloc DMA memory 899 - * @dev: struct device pointer to the PCI device of the I2O controller 900 - * @addr: pointer to a i2o_dma struct DMA buffer 901 - * @len: new length of memory 902 - * @gfp_mask: GFP mask 903 - * 904 - * If there was something allocated in the addr, free it first. If len > 0 905 - * than try to allocate it and write the addresses back to the addr 906 - * structure. If len == 0 set the virtual address to NULL. 907 - * 908 - * Returns the 0 on success or negative error code on failure. 
909 - */ 910 - static inline int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr, 911 - size_t len, gfp_t gfp_mask) 912 - { 913 - i2o_dma_free(dev, addr); 914 - 915 - if (len) 916 - return i2o_dma_alloc(dev, addr, len, gfp_mask); 917 - 918 - return 0; 919 - }; 920 - 921 - /* 922 - * i2o_pool_alloc - Allocate an slab cache and mempool 923 - * @mempool: pointer to struct i2o_pool to write data into. 924 - * @name: name which is used to identify cache 925 - * @size: size of each object 926 - * @min_nr: minimum number of objects 927 - * 928 - * First allocates a slab cache with name and size. Then allocates a 929 - * mempool which uses the slab cache for allocation and freeing. 930 - * 931 - * Returns 0 on success or negative error code on failure. 932 - */ 933 - static inline int i2o_pool_alloc(struct i2o_pool *pool, const char *name, 934 - size_t size, int min_nr) 935 - { 936 - pool->name = kmalloc(strlen(name) + 1, GFP_KERNEL); 937 - if (!pool->name) 938 - goto exit; 939 - strcpy(pool->name, name); 940 - 941 - pool->slab = 942 - kmem_cache_create(pool->name, size, 0, SLAB_HWCACHE_ALIGN, NULL); 943 - if (!pool->slab) 944 - goto free_name; 945 - 946 - pool->mempool = mempool_create_slab_pool(min_nr, pool->slab); 947 - if (!pool->mempool) 948 - goto free_slab; 949 - 950 - return 0; 951 - 952 - free_slab: 953 - kmem_cache_destroy(pool->slab); 954 - 955 - free_name: 956 - kfree(pool->name); 957 - 958 - exit: 959 - return -ENOMEM; 960 - }; 961 - 962 - /* 963 - * i2o_pool_free - Free slab cache and mempool again 964 - * @mempool: pointer to struct i2o_pool which should be freed 965 - * 966 - * Note that you have to return all objects to the mempool again before 967 - * calling i2o_pool_free(). 
968 - */ 969 - static inline void i2o_pool_free(struct i2o_pool *pool) 970 - { 971 - mempool_destroy(pool->mempool); 972 - kmem_cache_destroy(pool->slab); 973 - kfree(pool->name); 974 - }; 975 - 702 + u32 ** sg_ptr); 703 + extern int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr, size_t len); 704 + extern void i2o_dma_free(struct device *dev, struct i2o_dma *addr); 705 + extern int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr, 706 + size_t len); 707 + extern int i2o_pool_alloc(struct i2o_pool *pool, const char *name, 708 + size_t size, int min_nr); 709 + extern void i2o_pool_free(struct i2o_pool *pool); 976 710 /* I2O driver (OSM) functions */ 977 711 extern int i2o_driver_register(struct i2o_driver *); 978 712 extern void i2o_driver_unregister(struct i2o_driver *);