Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] I2O: new sysfs attributes and Adaptec specific block device access and 64-bit DMA support

Changes:
- Added Bus-OSM which could be used by user space programs to reset a
channel on the controller
- Make ioctls in Config-OSM obsolete in favor of sysfs attributes and
move those to its own file
- Added sysfs attribute for firmware read and write access for I2O
controllers
- Added special handling of firmware read and write access for Adaptec
controllers
- Added vendor id and product id as sysfs-attribute to Executive classes
- Added automatic notification of LCT change handling to Exec-OSM
- Added flushing function to Block-OSM for later barrier implementation
- Use PRIVATE messages for Block access on Adaptec controllers, which are
faster than BLOCK class access
- Cleaned up support for Promise controller
- New messages are now detected using the IRQ status register as
suggested by the I2O spec
- Added i2o_dma_high() and i2o_dma_low() functions
- Added facility for SG tablesize calculation when using 32-bit and
64-bit DMA addresses
- Added i2o_dma_map_single() and i2o_dma_map_sg() which could build the
SG list for 32-bit as well as 64-bit DMA addresses

Signed-off-by: Markus Lidel <Markus.Lidel@shadowconnect.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

authored by

Markus Lidel and committed by
Linus Torvalds
f10378ff f88e119c

+1473 -559
+18
drivers/message/i2o/Kconfig
··· 35 35 To compile this support as a module, choose M here: the 36 36 module will be called i2o_config. 37 37 38 + config I2O_CONFIG_OLD_IOCTL 39 + bool "Enable ioctls (OBSOLETE)" 40 + depends on I2O_CONFIG 41 + default y 42 + ---help--- 43 + Enables old ioctls. 44 + 45 + config I2O_BUS 46 + tristate "I2O Bus Adapter OSM" 47 + depends on I2O 48 + ---help--- 49 + Include support for the I2O Bus Adapter OSM. The Bus Adapter OSM 50 + provides access to the busses on the I2O controller. The main purpose 51 + is to rescan the bus to find new devices. 52 + 53 + To compile this support as a module, choose M here: the 54 + module will be called i2o_bus. 55 + 38 56 config I2O_BLOCK 39 57 tristate "I2O Block OSM" 40 58 depends on I2O
+3
drivers/message/i2o/Makefile
··· 6 6 # 7 7 8 8 i2o_core-y += iop.o driver.o device.o debug.o pci.o exec-osm.o 9 + i2o_bus-y += bus-osm.o 10 + i2o_config-y += config-osm.o 9 11 obj-$(CONFIG_I2O) += i2o_core.o 10 12 obj-$(CONFIG_I2O_CONFIG)+= i2o_config.o 13 + obj-$(CONFIG_I2O_BUS) += i2o_bus.o 11 14 obj-$(CONFIG_I2O_BLOCK) += i2o_block.o 12 15 obj-$(CONFIG_I2O_SCSI) += i2o_scsi.o 13 16 obj-$(CONFIG_I2O_PROC) += i2o_proc.o
+164
drivers/message/i2o/bus-osm.c
··· 1 + /* 2 + * Bus Adapter OSM 3 + * 4 + * Copyright (C) 2005 Markus Lidel <Markus.Lidel@shadowconnect.com> 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms of the GNU General Public License as published by the 8 + * Free Software Foundation; either version 2 of the License, or (at your 9 + * option) any later version. 10 + * 11 + * Fixes/additions: 12 + * Markus Lidel <Markus.Lidel@shadowconnect.com> 13 + * initial version. 14 + */ 15 + 16 + #include <linux/module.h> 17 + #include <linux/i2o.h> 18 + 19 + #define OSM_NAME "bus-osm" 20 + #define OSM_VERSION "$Rev$" 21 + #define OSM_DESCRIPTION "I2O Bus Adapter OSM" 22 + 23 + static struct i2o_driver i2o_bus_driver; 24 + 25 + /* Bus OSM class handling definition */ 26 + static struct i2o_class_id i2o_bus_class_id[] = { 27 + {I2O_CLASS_BUS_ADAPTER}, 28 + {I2O_CLASS_END} 29 + }; 30 + 31 + /** 32 + * i2o_bus_scan - Scan the bus for new devices 33 + * @dev: I2O device of the bus, which should be scanned 34 + * 35 + * Scans the bus dev for new / removed devices. After the scan a new LCT 36 + * will be fetched automatically. 37 + * 38 + * Returns 0 on success or negative error code on failure. 39 + */ 40 + static int i2o_bus_scan(struct i2o_device *dev) 41 + { 42 + struct i2o_message __iomem *msg; 43 + u32 m; 44 + 45 + m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); 46 + if (m == I2O_QUEUE_EMPTY) 47 + return -ETIMEDOUT; 48 + 49 + writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 50 + writel(I2O_CMD_BUS_SCAN << 24 | HOST_TID << 12 | dev->lct_data.tid, 51 + &msg->u.head[1]); 52 + 53 + return i2o_msg_post_wait(dev->iop, m, 60); 54 + }; 55 + 56 + /** 57 + * i2o_bus_store_scan - Scan the I2O Bus Adapter 58 + * @d: device which should be scanned 59 + * 60 + * Returns count. 
61 + */ 62 + static ssize_t i2o_bus_store_scan(struct device *d, const char *buf, 63 + size_t count) 64 + { 65 + struct i2o_device *i2o_dev = to_i2o_device(d); 66 + int rc; 67 + 68 + if ((rc = i2o_bus_scan(i2o_dev))) 69 + osm_warn("bus scan failed %d\n", rc); 70 + 71 + return count; 72 + } 73 + 74 + /* Bus Adapter OSM device attributes */ 75 + static DEVICE_ATTR(scan, S_IWUSR, NULL, i2o_bus_store_scan); 76 + 77 + /** 78 + * i2o_bus_probe - verify if dev is a I2O Bus Adapter device and install it 79 + * @dev: device to verify if it is a I2O Bus Adapter device 80 + * 81 + * Because we want all Bus Adapters always return 0. 82 + * 83 + * Returns 0. 84 + */ 85 + static int i2o_bus_probe(struct device *dev) 86 + { 87 + struct i2o_device *i2o_dev = to_i2o_device(get_device(dev)); 88 + 89 + device_create_file(dev, &dev_attr_scan); 90 + 91 + osm_info("device added (TID: %03x)\n", i2o_dev->lct_data.tid); 92 + 93 + return 0; 94 + }; 95 + 96 + /** 97 + * i2o_bus_remove - remove the I2O Bus Adapter device from the system again 98 + * @dev: I2O Bus Adapter device which should be removed 99 + * 100 + * Always returns 0. 101 + */ 102 + static int i2o_bus_remove(struct device *dev) 103 + { 104 + struct i2o_device *i2o_dev = to_i2o_device(dev); 105 + 106 + device_remove_file(dev, &dev_attr_scan); 107 + 108 + put_device(dev); 109 + 110 + osm_info("device removed (TID: %03x)\n", i2o_dev->lct_data.tid); 111 + 112 + return 0; 113 + }; 114 + 115 + /* Bus Adapter OSM driver struct */ 116 + static struct i2o_driver i2o_bus_driver = { 117 + .name = OSM_NAME, 118 + .classes = i2o_bus_class_id, 119 + .driver = { 120 + .probe = i2o_bus_probe, 121 + .remove = i2o_bus_remove, 122 + }, 123 + }; 124 + 125 + /** 126 + * i2o_bus_init - Bus Adapter OSM initialization function 127 + * 128 + * Only register the Bus Adapter OSM in the I2O core. 129 + * 130 + * Returns 0 on success or negative error code on failure. 
131 + */ 132 + static int __init i2o_bus_init(void) 133 + { 134 + int rc; 135 + 136 + printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n"); 137 + 138 + /* Register Bus Adapter OSM into I2O core */ 139 + rc = i2o_driver_register(&i2o_bus_driver); 140 + if (rc) { 141 + osm_err("Could not register Bus Adapter OSM\n"); 142 + return rc; 143 + } 144 + 145 + return 0; 146 + }; 147 + 148 + /** 149 + * i2o_bus_exit - Bus Adapter OSM exit function 150 + * 151 + * Unregisters Bus Adapter OSM from I2O core. 152 + */ 153 + static void __exit i2o_bus_exit(void) 154 + { 155 + i2o_driver_unregister(&i2o_bus_driver); 156 + }; 157 + 158 + MODULE_AUTHOR("Markus Lidel <Markus.Lidel@shadowconnect.com>"); 159 + MODULE_LICENSE("GPL"); 160 + MODULE_DESCRIPTION(OSM_DESCRIPTION); 161 + MODULE_VERSION(OSM_VERSION); 162 + 163 + module_init(i2o_bus_init); 164 + module_exit(i2o_bus_exit);
+579
drivers/message/i2o/config-osm.c
··· 1 + /* 2 + * Configuration OSM 3 + * 4 + * Copyright (C) 2005 Markus Lidel <Markus.Lidel@shadowconnect.com> 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms of the GNU General Public License as published by the 8 + * Free Software Foundation; either version 2 of the License, or (at your 9 + * option) any later version. 10 + * 11 + * Fixes/additions: 12 + * Markus Lidel <Markus.Lidel@shadowconnect.com> 13 + * initial version. 14 + */ 15 + 16 + #include <linux/module.h> 17 + #include <linux/i2o.h> 18 + #include <linux/namei.h> 19 + 20 + #include <asm/uaccess.h> 21 + 22 + #define OSM_NAME "config-osm" 23 + #define OSM_VERSION "1.248" 24 + #define OSM_DESCRIPTION "I2O Configuration OSM" 25 + 26 + /* access mode user rw */ 27 + #define S_IWRSR (S_IRUSR | S_IWUSR) 28 + 29 + static struct i2o_driver i2o_config_driver; 30 + 31 + /* Special file operations for sysfs */ 32 + struct fops_attribute { 33 + struct bin_attribute bin; 34 + struct file_operations fops; 35 + }; 36 + 37 + /** 38 + * sysfs_read_dummy 39 + */ 40 + static ssize_t sysfs_read_dummy(struct kobject *kobj, char *buf, loff_t offset, 41 + size_t count) 42 + { 43 + return 0; 44 + }; 45 + 46 + /** 47 + * sysfs_write_dummy 48 + */ 49 + static ssize_t sysfs_write_dummy(struct kobject *kobj, char *buf, loff_t offset, 50 + size_t count) 51 + { 52 + return 0; 53 + }; 54 + 55 + /** 56 + * sysfs_create_fops_file - Creates attribute with special file operations 57 + * @kobj: kobject which should contains the attribute 58 + * @attr: attributes which should be used to create file 59 + * 60 + * First creates attribute @attr in kobject @kobj. If it is the first time 61 + * this function is called, merge old fops from sysfs with new one and 62 + * write it back. Afterwords the new fops will be set for the created 63 + * attribute. 64 + * 65 + * Returns 0 on success or negative error code on failure. 
66 + */ 67 + static int sysfs_create_fops_file(struct kobject *kobj, 68 + struct fops_attribute *attr) 69 + { 70 + struct file_operations tmp, *fops; 71 + struct dentry *d; 72 + struct qstr qstr; 73 + int rc; 74 + 75 + fops = &attr->fops; 76 + 77 + if (fops->read) 78 + attr->bin.read = sysfs_read_dummy; 79 + 80 + if (fops->write) 81 + attr->bin.write = sysfs_write_dummy; 82 + 83 + if ((rc = sysfs_create_bin_file(kobj, &attr->bin))) 84 + return rc; 85 + 86 + qstr.name = attr->bin.attr.name; 87 + qstr.len = strlen(qstr.name); 88 + qstr.hash = full_name_hash(qstr.name, qstr.len); 89 + 90 + if ((d = lookup_hash(&qstr, kobj->dentry))) { 91 + if (!fops->owner) { 92 + memcpy(&tmp, d->d_inode->i_fop, sizeof(tmp)); 93 + if (fops->read) 94 + tmp.read = fops->read; 95 + if (fops->write) 96 + tmp.write = fops->write; 97 + memcpy(fops, &tmp, sizeof(tmp)); 98 + } 99 + 100 + d->d_inode->i_fop = fops; 101 + } else 102 + sysfs_remove_bin_file(kobj, &attr->bin); 103 + 104 + return -ENOENT; 105 + }; 106 + 107 + /** 108 + * sysfs_remove_fops_file - Remove attribute with special file operations 109 + * @kobj: kobject which contains the attribute 110 + * @attr: attributes which are used to create file 111 + * 112 + * Only wrapper arround sysfs_remove_bin_file() 113 + * 114 + * Returns 0 on success or negative error code on failure. 115 + */ 116 + static inline int sysfs_remove_fops_file(struct kobject *kobj, 117 + struct fops_attribute *attr) 118 + { 119 + return sysfs_remove_bin_file(kobj, &attr->bin); 120 + }; 121 + 122 + /** 123 + * i2o_config_read_hrt - Returns the HRT of the controller 124 + * @kob: kernel object handle 125 + * @buf: buffer into which the HRT should be copied 126 + * @off: file offset 127 + * @count: number of bytes to read 128 + * 129 + * Put @count bytes starting at @off into @buf from the HRT of the I2O 130 + * controller corresponding to @kobj. 131 + * 132 + * Returns number of bytes copied into buffer. 
133 + */ 134 + static ssize_t i2o_config_read_hrt(struct kobject *kobj, char *buf, 135 + loff_t offset, size_t count) 136 + { 137 + struct i2o_controller *c = kobj_to_i2o_device(kobj)->iop; 138 + i2o_hrt *hrt = c->hrt.virt; 139 + 140 + u32 size = (hrt->num_entries * hrt->entry_len + 2) * 4; 141 + 142 + if (offset > size) 143 + return 0; 144 + 145 + if (offset + count > size) 146 + count = size - offset; 147 + 148 + memcpy(buf, (u8 *) hrt + offset, count); 149 + 150 + return count; 151 + }; 152 + 153 + /** 154 + * i2o_config_read_lct - Returns the LCT of the controller 155 + * @kob: kernel object handle 156 + * @buf: buffer into which the LCT should be copied 157 + * @off: file offset 158 + * @count: number of bytes to read 159 + * 160 + * Put @count bytes starting at @off into @buf from the LCT of the I2O 161 + * controller corresponding to @kobj. 162 + * 163 + * Returns number of bytes copied into buffer. 164 + */ 165 + static ssize_t i2o_config_read_lct(struct kobject *kobj, char *buf, 166 + loff_t offset, size_t count) 167 + { 168 + struct i2o_controller *c = kobj_to_i2o_device(kobj)->iop; 169 + u32 size = c->lct->table_size * 4; 170 + 171 + if (offset > size) 172 + return 0; 173 + 174 + if (offset + count > size) 175 + count = size - offset; 176 + 177 + memcpy(buf, (u8 *) c->lct + offset, count); 178 + 179 + return count; 180 + }; 181 + 182 + #define I2O_CONFIG_SW_ATTR(_name,_mode,_type,_swid) \ 183 + static ssize_t i2o_config_##_name##_read(struct file *file, char __user *buf, size_t count, loff_t * offset) { \ 184 + return i2o_config_sw_read(file, buf, count, offset, _type, _swid); \ 185 + };\ 186 + \ 187 + static ssize_t i2o_config_##_name##_write(struct file *file, const char __user *buf, size_t count, loff_t * offset) { \ 188 + return i2o_config_sw_write(file, buf, count, offset, _type, _swid); \ 189 + }; \ 190 + \ 191 + static struct fops_attribute i2o_config_attr_##_name = { \ 192 + .bin = { .attr = { .name = __stringify(_name), .mode = _mode, \ 193 + 
.owner = THIS_MODULE }, \ 194 + .size = 0, }, \ 195 + .fops = { .write = i2o_config_##_name##_write, \ 196 + .read = i2o_config_##_name##_read} \ 197 + }; 198 + 199 + #ifdef CONFIG_I2O_EXT_ADAPTEC 200 + 201 + /** 202 + * i2o_config_dpt_reagion - Converts type and id to flash region 203 + * @swtype: type of software module reading 204 + * @swid: id of software which should be read 205 + * 206 + * Converts type and id from I2O spec to the matching region for DPT / 207 + * Adaptec controllers. 208 + * 209 + * Returns region which match type and id or -1 on error. 210 + */ 211 + static u32 i2o_config_dpt_region(u8 swtype, u8 swid) 212 + { 213 + switch (swtype) { 214 + case I2O_SOFTWARE_MODULE_IRTOS: 215 + /* 216 + * content: operation firmware 217 + * region size: 218 + * 0xbc000 for 2554, 3754, 2564, 3757 219 + * 0x170000 for 2865 220 + * 0x17c000 for 3966 221 + */ 222 + if (!swid) 223 + return 0; 224 + 225 + break; 226 + 227 + case I2O_SOFTWARE_MODULE_IOP_PRIVATE: 228 + /* 229 + * content: BIOS and SMOR 230 + * BIOS size: first 0x8000 bytes 231 + * region size: 232 + * 0x40000 for 2554, 3754, 2564, 3757 233 + * 0x80000 for 2865, 3966 234 + */ 235 + if (!swid) 236 + return 1; 237 + 238 + break; 239 + 240 + case I2O_SOFTWARE_MODULE_IOP_CONFIG: 241 + switch (swid) { 242 + case 0: 243 + /* 244 + * content: NVRAM defaults 245 + * region size: 0x2000 bytes 246 + */ 247 + return 2; 248 + case 1: 249 + /* 250 + * content: serial number 251 + * region size: 0x2000 bytes 252 + */ 253 + return 3; 254 + } 255 + break; 256 + } 257 + 258 + return -1; 259 + }; 260 + 261 + #endif 262 + 263 + /** 264 + * i2o_config_sw_read - Read a software module from controller 265 + * @file: file pointer 266 + * @buf: buffer into which the data should be copied 267 + * @count: number of bytes to read 268 + * @off: file offset 269 + * @swtype: type of software module reading 270 + * @swid: id of software which should be read 271 + * 272 + * Transfers @count bytes at offset @offset from IOP into 
buffer using 273 + * type @swtype and id @swid as described in I2O spec. 274 + * 275 + * Returns number of bytes copied into buffer or error code on failure. 276 + */ 277 + static ssize_t i2o_config_sw_read(struct file *file, char __user * buf, 278 + size_t count, loff_t * offset, u8 swtype, 279 + u32 swid) 280 + { 281 + struct sysfs_dirent *sd = file->f_dentry->d_parent->d_fsdata; 282 + struct kobject *kobj = sd->s_element; 283 + struct i2o_controller *c = kobj_to_i2o_device(kobj)->iop; 284 + u32 m, function = I2O_CMD_SW_UPLOAD; 285 + struct i2o_dma buffer; 286 + struct i2o_message __iomem *msg; 287 + u32 __iomem *mptr; 288 + int rc, status; 289 + 290 + m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 291 + if (m == I2O_QUEUE_EMPTY) 292 + return -EBUSY; 293 + 294 + mptr = &msg->body[3]; 295 + 296 + if ((rc = i2o_dma_alloc(&c->pdev->dev, &buffer, count, GFP_KERNEL))) { 297 + i2o_msg_nop(c, m); 298 + return rc; 299 + } 300 + #ifdef CONFIG_I2O_EXT_ADAPTEC 301 + if (c->adaptec) { 302 + mptr = &msg->body[4]; 303 + function = I2O_CMD_PRIVATE; 304 + 305 + writel(TEN_WORD_MSG_SIZE | SGL_OFFSET_8, &msg->u.head[0]); 306 + 307 + writel(I2O_VENDOR_DPT << 16 | I2O_DPT_FLASH_READ, 308 + &msg->body[0]); 309 + writel(i2o_config_dpt_region(swtype, swid), &msg->body[1]); 310 + writel(*offset, &msg->body[2]); 311 + writel(count, &msg->body[3]); 312 + } else 313 + #endif 314 + writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_7, &msg->u.head[0]); 315 + 316 + writel(0xD0000000 | count, mptr++); 317 + writel(buffer.phys, mptr); 318 + 319 + writel(function << 24 | HOST_TID << 12 | ADAPTER_TID, &msg->u.head[1]); 320 + writel(i2o_config_driver.context, &msg->u.head[2]); 321 + writel(0, &msg->u.head[3]); 322 + 323 + #ifdef CONFIG_I2O_EXT_ADAPTEC 324 + if (!c->adaptec) 325 + #endif 326 + { 327 + writel((u32) swtype << 16 | (u32) 1 << 8, &msg->body[0]); 328 + writel(0, &msg->body[1]); 329 + writel(swid, &msg->body[2]); 330 + } 331 + 332 + status = i2o_msg_post_wait_mem(c, m, 60, &buffer); 333 + 
334 + if (status == I2O_POST_WAIT_OK) { 335 + if (!(rc = copy_to_user(buf, buffer.virt, count))) { 336 + rc = count; 337 + *offset += count; 338 + } 339 + } else 340 + rc = -EIO; 341 + 342 + if (status != -ETIMEDOUT) 343 + i2o_dma_free(&c->pdev->dev, &buffer); 344 + 345 + return rc; 346 + }; 347 + 348 + /** 349 + * i2o_config_sw_write - Write a software module to controller 350 + * @file: file pointer 351 + * @buf: buffer into which the data should be copied 352 + * @count: number of bytes to read 353 + * @off: file offset 354 + * @swtype: type of software module writing 355 + * @swid: id of software which should be written 356 + * 357 + * Transfers @count bytes at offset @offset from buffer to IOP using 358 + * type @swtype and id @swid as described in I2O spec. 359 + * 360 + * Returns number of bytes copied from buffer or error code on failure. 361 + */ 362 + static ssize_t i2o_config_sw_write(struct file *file, const char __user * buf, 363 + size_t count, loff_t * offset, u8 swtype, 364 + u32 swid) 365 + { 366 + struct sysfs_dirent *sd = file->f_dentry->d_parent->d_fsdata; 367 + struct kobject *kobj = sd->s_element; 368 + struct i2o_controller *c = kobj_to_i2o_device(kobj)->iop; 369 + u32 m, function = I2O_CMD_SW_DOWNLOAD; 370 + struct i2o_dma buffer; 371 + struct i2o_message __iomem *msg; 372 + u32 __iomem *mptr; 373 + int rc, status; 374 + 375 + m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 376 + if (m == I2O_QUEUE_EMPTY) 377 + return -EBUSY; 378 + 379 + mptr = &msg->body[3]; 380 + 381 + if ((rc = i2o_dma_alloc(&c->pdev->dev, &buffer, count, GFP_KERNEL))) 382 + goto nop_msg; 383 + 384 + if ((rc = copy_from_user(buffer.virt, buf, count))) 385 + goto free_buffer; 386 + 387 + #ifdef CONFIG_I2O_EXT_ADAPTEC 388 + if (c->adaptec) { 389 + mptr = &msg->body[4]; 390 + function = I2O_CMD_PRIVATE; 391 + 392 + writel(TEN_WORD_MSG_SIZE | SGL_OFFSET_8, &msg->u.head[0]); 393 + 394 + writel(I2O_VENDOR_DPT << 16 | I2O_DPT_FLASH_WRITE, 395 + &msg->body[0]); 396 + 
writel(i2o_config_dpt_region(swtype, swid), &msg->body[1]); 397 + writel(*offset, &msg->body[2]); 398 + writel(count, &msg->body[3]); 399 + } else 400 + #endif 401 + writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_7, &msg->u.head[0]); 402 + 403 + writel(0xD4000000 | count, mptr++); 404 + writel(buffer.phys, mptr); 405 + 406 + writel(function << 24 | HOST_TID << 12 | ADAPTER_TID, &msg->u.head[1]); 407 + writel(i2o_config_driver.context, &msg->u.head[2]); 408 + writel(0, &msg->u.head[3]); 409 + 410 + #ifdef CONFIG_I2O_EXT_ADAPTEC 411 + if (!c->adaptec) 412 + #endif 413 + { 414 + writel((u32) swtype << 16 | (u32) 1 << 8, &msg->body[0]); 415 + writel(0, &msg->body[1]); 416 + writel(swid, &msg->body[2]); 417 + } 418 + 419 + status = i2o_msg_post_wait_mem(c, m, 60, &buffer); 420 + 421 + if (status != -ETIMEDOUT) 422 + i2o_dma_free(&c->pdev->dev, &buffer); 423 + 424 + if (status != I2O_POST_WAIT_OK) 425 + return -EIO; 426 + 427 + *offset += count; 428 + 429 + return count; 430 + 431 + free_buffer: 432 + i2o_dma_free(&c->pdev->dev, &buffer); 433 + 434 + nop_msg: 435 + i2o_msg_nop(c, m); 436 + 437 + return rc; 438 + }; 439 + 440 + /* attribute for HRT in sysfs */ 441 + static struct bin_attribute i2o_config_hrt_attr = { 442 + .attr = { 443 + .name = "hrt", 444 + .mode = S_IRUGO, 445 + .owner = THIS_MODULE}, 446 + .size = 0, 447 + .read = i2o_config_read_hrt 448 + }; 449 + 450 + /* attribute for LCT in sysfs */ 451 + static struct bin_attribute i2o_config_lct_attr = { 452 + .attr = { 453 + .name = "lct", 454 + .mode = S_IRUGO, 455 + .owner = THIS_MODULE}, 456 + .size = 0, 457 + .read = i2o_config_read_lct 458 + }; 459 + 460 + /* IRTOS firmware access */ 461 + I2O_CONFIG_SW_ATTR(irtos, S_IWRSR, I2O_SOFTWARE_MODULE_IRTOS, 0); 462 + 463 + #ifdef CONFIG_I2O_EXT_ADAPTEC 464 + 465 + /* 466 + * attribute for BIOS / SMOR, nvram and serial number access on DPT / Adaptec 467 + * controllers 468 + */ 469 + I2O_CONFIG_SW_ATTR(bios, S_IWRSR, I2O_SOFTWARE_MODULE_IOP_PRIVATE, 0); 470 + 
I2O_CONFIG_SW_ATTR(nvram, S_IWRSR, I2O_SOFTWARE_MODULE_IOP_CONFIG, 0); 471 + I2O_CONFIG_SW_ATTR(serial, S_IWRSR, I2O_SOFTWARE_MODULE_IOP_CONFIG, 1); 472 + 473 + #endif 474 + 475 + /** 476 + * i2o_config_notify_controller_add - Notify of added controller 477 + * @c: the controller which was added 478 + * 479 + * If a I2O controller is added, we catch the notification to add sysfs 480 + * entries. 481 + */ 482 + static void i2o_config_notify_controller_add(struct i2o_controller *c) 483 + { 484 + struct kobject *kobj = &c->exec->device.kobj; 485 + 486 + sysfs_create_bin_file(kobj, &i2o_config_hrt_attr); 487 + sysfs_create_bin_file(kobj, &i2o_config_lct_attr); 488 + 489 + sysfs_create_fops_file(kobj, &i2o_config_attr_irtos); 490 + #ifdef CONFIG_I2O_EXT_ADAPTEC 491 + if (c->adaptec) { 492 + sysfs_create_fops_file(kobj, &i2o_config_attr_bios); 493 + sysfs_create_fops_file(kobj, &i2o_config_attr_nvram); 494 + sysfs_create_fops_file(kobj, &i2o_config_attr_serial); 495 + } 496 + #endif 497 + }; 498 + 499 + /** 500 + * i2o_config_notify_controller_remove - Notify of removed controller 501 + * @c: the controller which was removed 502 + * 503 + * If a I2O controller is removed, we catch the notification to remove the 504 + * sysfs entries. 
505 + */ 506 + static void i2o_config_notify_controller_remove(struct i2o_controller *c) 507 + { 508 + struct kobject *kobj = &c->exec->device.kobj; 509 + 510 + #ifdef CONFIG_I2O_EXT_ADAPTEC 511 + if (c->adaptec) { 512 + sysfs_remove_fops_file(kobj, &i2o_config_attr_serial); 513 + sysfs_remove_fops_file(kobj, &i2o_config_attr_nvram); 514 + sysfs_remove_fops_file(kobj, &i2o_config_attr_bios); 515 + } 516 + #endif 517 + sysfs_remove_fops_file(kobj, &i2o_config_attr_irtos); 518 + 519 + sysfs_remove_bin_file(kobj, &i2o_config_lct_attr); 520 + sysfs_remove_bin_file(kobj, &i2o_config_hrt_attr); 521 + }; 522 + 523 + /* Config OSM driver struct */ 524 + static struct i2o_driver i2o_config_driver = { 525 + .name = OSM_NAME, 526 + .notify_controller_add = i2o_config_notify_controller_add, 527 + .notify_controller_remove = i2o_config_notify_controller_remove 528 + }; 529 + 530 + #ifdef CONFIG_I2O_CONFIG_OLD_IOCTL 531 + #include "i2o_config.c" 532 + #endif 533 + 534 + /** 535 + * i2o_config_init - Configuration OSM initialization function 536 + * 537 + * Registers Configuration OSM in the I2O core and if old ioctl's are 538 + * compiled in initialize them. 539 + * 540 + * Returns 0 on success or negative error code on failure. 541 + */ 542 + static int __init i2o_config_init(void) 543 + { 544 + printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n"); 545 + 546 + if (i2o_driver_register(&i2o_config_driver)) { 547 + osm_err("handler register failed.\n"); 548 + return -EBUSY; 549 + } 550 + #ifdef CONFIG_I2O_CONFIG_OLD_IOCTL 551 + if (i2o_config_old_init()) 552 + i2o_driver_unregister(&i2o_config_driver); 553 + #endif 554 + 555 + return 0; 556 + } 557 + 558 + /** 559 + * i2o_config_exit - Configuration OSM exit function 560 + * 561 + * If old ioctl's are compiled in exit remove them and unregisters 562 + * Configuration OSM from I2O core. 
563 + */ 564 + static void i2o_config_exit(void) 565 + { 566 + #ifdef CONFIG_I2O_CONFIG_OLD_IOCTL 567 + i2o_config_old_exit(); 568 + #endif 569 + 570 + i2o_driver_unregister(&i2o_config_driver); 571 + } 572 + 573 + MODULE_AUTHOR("Markus Lidel <Markus.Lidel@shadowconnect.com>"); 574 + MODULE_LICENSE("GPL"); 575 + MODULE_DESCRIPTION(OSM_DESCRIPTION); 576 + MODULE_VERSION(OSM_VERSION); 577 + 578 + module_init(i2o_config_init); 579 + module_exit(i2o_config_exit);
+9 -3
drivers/message/i2o/driver.c
··· 180 180 { 181 181 struct i2o_driver *drv; 182 182 struct i2o_message __iomem *msg = i2o_msg_out_to_virt(c, m); 183 - u32 context = readl(&msg->u.s.icntxt); 183 + u32 context; 184 + unsigned long flags; 185 + 186 + if(unlikely(!msg)) 187 + return -EIO; 188 + 189 + context = readl(&msg->u.s.icntxt); 184 190 185 191 if (unlikely(context >= i2o_max_drivers)) { 186 192 osm_warn("%s: Spurious reply to unknown driver %d\n", c->name, ··· 194 188 return -EIO; 195 189 } 196 190 197 - spin_lock(&i2o_drivers_lock); 191 + spin_lock_irqsave(&i2o_drivers_lock, flags); 198 192 drv = i2o_drivers[context]; 199 - spin_unlock(&i2o_drivers_lock); 193 + spin_unlock_irqrestore(&i2o_drivers_lock, flags); 200 194 201 195 if (unlikely(!drv)) { 202 196 osm_warn("%s: Spurious reply to unknown driver %d\n", c->name,
+66 -8
drivers/message/i2o/exec-osm.c
··· 206 206 u32 context) 207 207 { 208 208 struct i2o_exec_wait *wait, *tmp; 209 + unsigned long flags; 209 210 static spinlock_t lock = SPIN_LOCK_UNLOCKED; 210 211 int rc = 1; 211 212 ··· 217 216 * already expired. Not much we can do about that except log it for 218 217 * debug purposes, increase timeout, and recompile. 219 218 */ 220 - spin_lock(&lock); 219 + spin_lock_irqsave(&lock, flags); 221 220 list_for_each_entry_safe(wait, tmp, &i2o_exec_wait_list, list) { 222 221 if (wait->tcntxt == context) { 223 222 list_del(&wait->list); 223 + 224 + spin_unlock_irqrestore(&lock, flags); 224 225 225 226 wait->m = m; 226 227 wait->msg = msg; ··· 245 242 rc = -1; 246 243 } 247 244 248 - spin_unlock(&lock); 249 - 250 245 return rc; 251 246 } 252 247 } 253 248 254 - spin_unlock(&lock); 249 + spin_unlock_irqrestore(&lock, flags); 255 250 256 251 osm_warn("%s: Bogus reply in POST WAIT (tr-context: %08x)!\n", c->name, 257 252 context); 258 253 259 254 return -1; 260 255 }; 256 + 257 + /** 258 + * i2o_exec_show_vendor_id - Displays Vendor ID of controller 259 + * @d: device of which the Vendor ID should be displayed 260 + * @buf: buffer into which the Vendor ID should be printed 261 + * 262 + * Returns number of bytes printed into buffer. 263 + */ 264 + static ssize_t i2o_exec_show_vendor_id(struct device *d, char *buf) 265 + { 266 + struct i2o_device *dev = to_i2o_device(d); 267 + u16 id; 268 + 269 + if (i2o_parm_field_get(dev, 0x0000, 0, &id, 2)) { 270 + sprintf(buf, "0x%04x", id); 271 + return strlen(buf) + 1; 272 + } 273 + 274 + return 0; 275 + }; 276 + 277 + /** 278 + * i2o_exec_show_product_id - Displays Product ID of controller 279 + * @d: device of which the Product ID should be displayed 280 + * @buf: buffer into which the Product ID should be printed 281 + * 282 + * Returns number of bytes printed into buffer. 
283 + */ 284 + static ssize_t i2o_exec_show_product_id(struct device *d, char *buf) 285 + { 286 + struct i2o_device *dev = to_i2o_device(d); 287 + u16 id; 288 + 289 + if (i2o_parm_field_get(dev, 0x0000, 1, &id, 2)) { 290 + sprintf(buf, "0x%04x", id); 291 + return strlen(buf) + 1; 292 + } 293 + 294 + return 0; 295 + }; 296 + 297 + /* Exec-OSM device attributes */ 298 + static DEVICE_ATTR(vendor_id, S_IRUGO, i2o_exec_show_vendor_id, NULL); 299 + static DEVICE_ATTR(product_id, S_IRUGO, i2o_exec_show_product_id, NULL); 261 300 262 301 /** 263 302 * i2o_exec_probe - Called if a new I2O device (executive class) appears ··· 313 268 static int i2o_exec_probe(struct device *dev) 314 269 { 315 270 struct i2o_device *i2o_dev = to_i2o_device(dev); 271 + struct i2o_controller *c = i2o_dev->iop; 316 272 317 273 i2o_event_register(i2o_dev, &i2o_exec_driver, 0, 0xffffffff); 318 274 319 - i2o_dev->iop->exec = i2o_dev; 275 + c->exec = i2o_dev; 276 + 277 + i2o_exec_lct_notify(c, c->lct->change_ind + 1); 278 + 279 + device_create_file(dev, &dev_attr_vendor_id); 280 + device_create_file(dev, &dev_attr_product_id); 320 281 321 282 return 0; 322 283 }; ··· 337 286 */ 338 287 static int i2o_exec_remove(struct device *dev) 339 288 { 289 + device_remove_file(dev, &dev_attr_product_id); 290 + device_remove_file(dev, &dev_attr_vendor_id); 291 + 340 292 i2o_event_register(to_i2o_device(dev), &i2o_exec_driver, 0, 0); 341 293 342 294 return 0; ··· 351 297 * 352 298 * This function handles asynchronus LCT NOTIFY replies. It parses the 353 299 * new LCT and if the buffer for the LCT was to small sends a LCT NOTIFY 354 - * again. 300 + * again, otherwise send LCT NOTIFY to get informed on next LCT change. 
355 301 */ 356 302 static void i2o_exec_lct_modified(struct i2o_controller *c) 357 303 { 358 - if (i2o_device_parse_lct(c) == -EAGAIN) 359 - i2o_exec_lct_notify(c, 0); 304 + u32 change_ind = 0; 305 + 306 + if (i2o_device_parse_lct(c) != -EAGAIN) 307 + change_ind = c->lct->change_ind + 1; 308 + 309 + i2o_exec_lct_notify(c, change_ind); 360 310 }; 361 311 362 312 /**
+202 -129
drivers/message/i2o/i2o_block.c
··· 147 147 }; 148 148 149 149 /** 150 + * i2o_block_issue_flush - device-flush interface for block-layer 151 + * @queue: the request queue of the device which should be flushed 152 + * @disk: gendisk 153 + * @error_sector: error offset 154 + * 155 + * Helper function to provide flush functionality to block-layer. 156 + * 157 + * Returns 0 on success or negative error code on failure. 158 + */ 159 + 160 + static int i2o_block_issue_flush(request_queue_t * queue, struct gendisk *disk, 161 + sector_t * error_sector) 162 + { 163 + struct i2o_block_device *i2o_blk_dev = queue->queuedata; 164 + int rc = -ENODEV; 165 + 166 + if (likely(i2o_blk_dev)) 167 + rc = i2o_block_device_flush(i2o_blk_dev->i2o_dev); 168 + 169 + return rc; 170 + } 171 + 172 + /** 150 173 * i2o_block_device_mount - Mount (load) the media of device dev 151 174 * @dev: I2O device which should receive the mount request 152 175 * @media_id: Media Identifier ··· 322 299 323 300 /** 324 301 * i2o_block_sglist_alloc - Allocate the SG list and map it 302 + * @c: I2O controller to which the request belongs 325 303 * @ireq: I2O block request 326 304 * 327 - * Builds the SG list and map it into to be accessable by the controller. 305 + * Builds the SG list and map it to be accessable by the controller. 328 306 * 329 - * Returns the number of elements in the SG list or 0 on failure. 307 + * Returns 0 on failure or 1 on success. 
330 308 */ 331 - static inline int i2o_block_sglist_alloc(struct i2o_block_request *ireq) 309 + static inline int i2o_block_sglist_alloc(struct i2o_controller *c, 310 + struct i2o_block_request *ireq, 311 + u32 __iomem ** mptr) 332 312 { 333 - struct device *dev = &ireq->i2o_blk_dev->i2o_dev->iop->pdev->dev; 334 313 int nents; 314 + enum dma_data_direction direction; 335 315 316 + ireq->dev = &c->pdev->dev; 336 317 nents = blk_rq_map_sg(ireq->req->q, ireq->req, ireq->sg_table); 337 318 338 319 if (rq_data_dir(ireq->req) == READ) 339 - ireq->sg_dma_direction = PCI_DMA_FROMDEVICE; 320 + direction = PCI_DMA_FROMDEVICE; 340 321 else 341 - ireq->sg_dma_direction = PCI_DMA_TODEVICE; 322 + direction = PCI_DMA_TODEVICE; 342 323 343 - ireq->sg_nents = dma_map_sg(dev, ireq->sg_table, nents, 344 - ireq->sg_dma_direction); 324 + ireq->sg_nents = nents; 345 325 346 - return ireq->sg_nents; 326 + return i2o_dma_map_sg(c, ireq->sg_table, nents, direction, mptr); 347 327 }; 348 328 349 329 /** ··· 357 331 */ 358 332 static inline void i2o_block_sglist_free(struct i2o_block_request *ireq) 359 333 { 360 - struct device *dev = &ireq->i2o_blk_dev->i2o_dev->iop->pdev->dev; 334 + enum dma_data_direction direction; 361 335 362 - dma_unmap_sg(dev, ireq->sg_table, ireq->sg_nents, 363 - ireq->sg_dma_direction); 336 + if (rq_data_dir(ireq->req) == READ) 337 + direction = PCI_DMA_FROMDEVICE; 338 + else 339 + direction = PCI_DMA_TODEVICE; 340 + 341 + dma_unmap_sg(ireq->dev, ireq->sg_table, ireq->sg_nents, direction); 364 342 }; 365 343 366 344 /** ··· 381 351 { 382 352 struct i2o_block_device *i2o_blk_dev = q->queuedata; 383 353 struct i2o_block_request *ireq; 354 + 355 + if (unlikely(!i2o_blk_dev)) { 356 + osm_err("block device already removed\n"); 357 + return BLKPREP_KILL; 358 + } 384 359 385 360 /* request is already processed by us, so return */ 386 361 if (req->flags & REQ_SPECIAL) { ··· 449 414 { 450 415 struct i2o_block_request *ireq = req->special; 451 416 struct i2o_block_device *dev 
= ireq->i2o_blk_dev; 452 - request_queue_t *q = dev->gd->queue; 417 + request_queue_t *q = req->q; 453 418 unsigned long flags; 454 419 455 420 if (end_that_request_chunk(req, uptodate, nr_bytes)) { 456 - int leftover = (req->hard_nr_sectors << 9); 421 + int leftover = (req->hard_nr_sectors << KERNEL_SECTOR_SHIFT); 457 422 458 423 if (blk_pc_request(req)) 459 424 leftover = req->data_len; ··· 467 432 spin_lock_irqsave(q->queue_lock, flags); 468 433 469 434 end_that_request_last(req); 470 - dev->open_queue_depth--; 471 - list_del(&ireq->queue); 435 + 436 + if (likely(dev)) { 437 + dev->open_queue_depth--; 438 + list_del(&ireq->queue); 439 + } 472 440 473 441 blk_start_queue(q); 474 442 ··· 521 483 * Don't stick a supertrak100 into cache aggressive modes 522 484 */ 523 485 524 - osm_err("%03x error status: %02x, detailed status: %04x\n", 525 - (le32_to_cpu(msg->u.head[1]) >> 12 & 0xfff), 486 + osm_err("TID %03x error status: 0x%02x, detailed status: " 487 + "0x%04x\n", (le32_to_cpu(msg->u.head[1]) >> 12 & 0xfff), 526 488 status >> 24, status & 0xffff); 527 489 528 490 req->errors++; ··· 743 705 static int i2o_block_transfer(struct request *req) 744 706 { 745 707 struct i2o_block_device *dev = req->rq_disk->private_data; 746 - struct i2o_controller *c = dev->i2o_dev->iop; 708 + struct i2o_controller *c; 747 709 int tid = dev->i2o_dev->lct_data.tid; 748 710 struct i2o_message __iomem *msg; 749 - void __iomem *mptr; 711 + u32 __iomem *mptr; 750 712 struct i2o_block_request *ireq = req->special; 751 - struct scatterlist *sg; 752 - int sgnum; 753 - int i; 754 713 u32 m; 755 714 u32 tcntxt; 756 - u32 sg_flags; 715 + u32 sgl_offset = SGL_OFFSET_8; 716 + u32 ctl_flags = 0x00000000; 757 717 int rc; 718 + u32 cmd; 719 + 720 + if (unlikely(!dev->i2o_dev)) { 721 + osm_err("transfer to removed drive\n"); 722 + rc = -ENODEV; 723 + goto exit; 724 + } 725 + 726 + c = dev->i2o_dev->iop; 758 727 759 728 m = i2o_msg_get(c, &msg); 760 729 if (m == I2O_QUEUE_EMPTY) { ··· 775 730 goto 
nop_msg; 776 731 } 777 732 778 - if ((sgnum = i2o_block_sglist_alloc(ireq)) <= 0) { 733 + writel(i2o_block_driver.context, &msg->u.s.icntxt); 734 + writel(tcntxt, &msg->u.s.tcntxt); 735 + 736 + mptr = &msg->body[0]; 737 + 738 + if (rq_data_dir(req) == READ) { 739 + cmd = I2O_CMD_BLOCK_READ << 24; 740 + 741 + switch (dev->rcache) { 742 + case CACHE_PREFETCH: 743 + ctl_flags = 0x201F0008; 744 + break; 745 + 746 + case CACHE_SMARTFETCH: 747 + if (req->nr_sectors > 16) 748 + ctl_flags = 0x201F0008; 749 + else 750 + ctl_flags = 0x001F0000; 751 + break; 752 + 753 + default: 754 + break; 755 + } 756 + } else { 757 + cmd = I2O_CMD_BLOCK_WRITE << 24; 758 + 759 + switch (dev->wcache) { 760 + case CACHE_WRITETHROUGH: 761 + ctl_flags = 0x001F0008; 762 + break; 763 + case CACHE_WRITEBACK: 764 + ctl_flags = 0x001F0010; 765 + break; 766 + case CACHE_SMARTBACK: 767 + if (req->nr_sectors > 16) 768 + ctl_flags = 0x001F0004; 769 + else 770 + ctl_flags = 0x001F0010; 771 + break; 772 + case CACHE_SMARTTHROUGH: 773 + if (req->nr_sectors > 16) 774 + ctl_flags = 0x001F0004; 775 + else 776 + ctl_flags = 0x001F0010; 777 + default: 778 + break; 779 + } 780 + } 781 + 782 + #ifdef CONFIG_I2O_EXT_ADAPTEC 783 + if (c->adaptec) { 784 + u8 cmd[10]; 785 + u32 scsi_flags; 786 + u16 hwsec = queue_hardsect_size(req->q) >> KERNEL_SECTOR_SHIFT; 787 + 788 + memset(cmd, 0, 10); 789 + 790 + sgl_offset = SGL_OFFSET_12; 791 + 792 + writel(I2O_CMD_PRIVATE << 24 | HOST_TID << 12 | tid, 793 + &msg->u.head[1]); 794 + 795 + writel(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC, mptr++); 796 + writel(tid, mptr++); 797 + 798 + /* 799 + * ENABLE_DISCONNECT 800 + * SIMPLE_TAG 801 + * RETURN_SENSE_DATA_IN_REPLY_MESSAGE_FRAME 802 + */ 803 + if (rq_data_dir(req) == READ) { 804 + cmd[0] = 0x28; 805 + scsi_flags = 0x60a0000a; 806 + } else { 807 + cmd[0] = 0x2A; 808 + scsi_flags = 0xa0a0000a; 809 + } 810 + 811 + writel(scsi_flags, mptr++); 812 + 813 + *((u32 *) & cmd[2]) = cpu_to_be32(req->sector * hwsec); 814 + *((u16 *) & 
cmd[7]) = cpu_to_be16(req->nr_sectors * hwsec); 815 + 816 + memcpy_toio(mptr, cmd, 10); 817 + mptr += 4; 818 + writel(req->nr_sectors << KERNEL_SECTOR_SHIFT, mptr++); 819 + } else 820 + #endif 821 + { 822 + writel(cmd | HOST_TID << 12 | tid, &msg->u.head[1]); 823 + writel(ctl_flags, mptr++); 824 + writel(req->nr_sectors << KERNEL_SECTOR_SHIFT, mptr++); 825 + writel((u32) (req->sector << KERNEL_SECTOR_SHIFT), mptr++); 826 + writel(req->sector >> (32 - KERNEL_SECTOR_SHIFT), mptr++); 827 + } 828 + 829 + if (!i2o_block_sglist_alloc(c, ireq, &mptr)) { 779 830 rc = -ENOMEM; 780 831 goto context_remove; 781 832 } 782 833 783 - /* Build the message based on the request. */ 784 - writel(i2o_block_driver.context, &msg->u.s.icntxt); 785 - writel(tcntxt, &msg->u.s.tcntxt); 786 - writel(req->nr_sectors << 9, &msg->body[1]); 787 - 788 - writel((((u64) req->sector) << 9) & 0xffffffff, &msg->body[2]); 789 - writel(req->sector >> 23, &msg->body[3]); 790 - 791 - mptr = &msg->body[4]; 792 - 793 - sg = ireq->sg_table; 794 - 795 - if (rq_data_dir(req) == READ) { 796 - writel(I2O_CMD_BLOCK_READ << 24 | HOST_TID << 12 | tid, 797 - &msg->u.head[1]); 798 - sg_flags = 0x10000000; 799 - switch (dev->rcache) { 800 - case CACHE_NULL: 801 - writel(0, &msg->body[0]); 802 - break; 803 - case CACHE_PREFETCH: 804 - writel(0x201F0008, &msg->body[0]); 805 - break; 806 - case CACHE_SMARTFETCH: 807 - if (req->nr_sectors > 16) 808 - writel(0x201F0008, &msg->body[0]); 809 - else 810 - writel(0x001F0000, &msg->body[0]); 811 - break; 812 - } 813 - } else { 814 - writel(I2O_CMD_BLOCK_WRITE << 24 | HOST_TID << 12 | tid, 815 - &msg->u.head[1]); 816 - sg_flags = 0x14000000; 817 - switch (dev->wcache) { 818 - case CACHE_NULL: 819 - writel(0, &msg->body[0]); 820 - break; 821 - case CACHE_WRITETHROUGH: 822 - writel(0x001F0008, &msg->body[0]); 823 - break; 824 - case CACHE_WRITEBACK: 825 - writel(0x001F0010, &msg->body[0]); 826 - break; 827 - case CACHE_SMARTBACK: 828 - if (req->nr_sectors > 16) 829 - 
writel(0x001F0004, &msg->body[0]); 830 - else 831 - writel(0x001F0010, &msg->body[0]); 832 - break; 833 - case CACHE_SMARTTHROUGH: 834 - if (req->nr_sectors > 16) 835 - writel(0x001F0004, &msg->body[0]); 836 - else 837 - writel(0x001F0010, &msg->body[0]); 838 - } 839 - } 840 - 841 - for (i = sgnum; i > 0; i--) { 842 - if (i == 1) 843 - sg_flags |= 0x80000000; 844 - writel(sg_flags | sg_dma_len(sg), mptr); 845 - writel(sg_dma_address(sg), mptr + 4); 846 - mptr += 8; 847 - sg++; 848 - } 849 - 850 - writel(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | SGL_OFFSET_8, 851 - &msg->u.head[0]); 834 + writel(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | 835 + sgl_offset, &msg->u.head[0]); 852 836 853 837 list_add_tail(&ireq->queue, &dev->open_queue); 854 838 dev->open_queue_depth++; ··· 920 846 921 847 queue_depth = ireq->i2o_blk_dev->open_queue_depth; 922 848 923 - if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS) 849 + if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS) { 924 850 if (!i2o_block_transfer(req)) { 925 851 blkdev_dequeue_request(req); 926 852 continue; 927 - } 853 + } else 854 + osm_info("transfer error\n"); 855 + } 928 856 929 857 if (queue_depth) 930 858 break; ··· 1009 933 } 1010 934 1011 935 blk_queue_prep_rq(queue, i2o_block_prep_req_fn); 936 + blk_queue_issue_flush_fn(queue, i2o_block_issue_flush); 1012 937 1013 938 gd->major = I2O_MAJOR; 1014 939 gd->queue = queue; ··· 1051 974 u64 size; 1052 975 u32 blocksize; 1053 976 u32 flags, status; 1054 - int segments; 977 + u16 body_size = 4; 978 + unsigned short max_sectors; 979 + 980 + #ifdef CONFIG_I2O_EXT_ADAPTEC 981 + if (c->adaptec) 982 + body_size = 8; 983 + #endif 984 + 985 + if (c->limit_sectors) 986 + max_sectors = I2O_MAX_SECTORS_LIMITED; 987 + else 988 + max_sectors = I2O_MAX_SECTORS; 1055 989 1056 990 /* skip devices which are used by IOP */ 1057 991 if (i2o_dev->lct_data.user_tid != 0xfff) { ··· 1097 1009 queue = gd->queue; 1098 1010 queue->queuedata = i2o_blk_dev; 1099 1011 1100 - 
blk_queue_max_phys_segments(queue, I2O_MAX_SEGMENTS); 1101 - blk_queue_max_sectors(queue, I2O_MAX_SECTORS); 1012 + blk_queue_max_phys_segments(queue, I2O_MAX_PHYS_SEGMENTS); 1013 + blk_queue_max_sectors(queue, max_sectors); 1014 + blk_queue_max_hw_segments(queue, i2o_sg_tablesize(c, body_size)); 1102 1015 1103 - if (c->short_req) 1104 - segments = 8; 1105 - else { 1106 - i2o_status_block *sb; 1107 - 1108 - sb = c->status_block.virt; 1109 - 1110 - segments = (sb->inbound_frame_size - 1111 - sizeof(struct i2o_message) / 4 - 4) / 2; 1112 - } 1113 - 1114 - blk_queue_max_hw_segments(queue, segments); 1115 - 1116 - osm_debug("max sectors = %d\n", I2O_MAX_SECTORS); 1117 - osm_debug("phys segments = %d\n", I2O_MAX_SEGMENTS); 1118 - osm_debug("hw segments = %d\n", segments); 1016 + osm_debug("max sectors = %d\n", queue->max_phys_segments); 1017 + osm_debug("phys segments = %d\n", queue->max_sectors); 1018 + osm_debug("max hw segments = %d\n", queue->max_hw_segments); 1119 1019 1120 1020 /* 1121 1021 * Ask for the current media data. 
If that isn't supported 1122 1022 * then we ask for the device capacity data 1123 1023 */ 1124 - if (!i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8)) 1125 - if (!i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8)) { 1126 - osm_warn("could not get size of %s\n", gd->disk_name); 1127 - size = 0; 1128 - } 1024 + if (i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) || 1025 + i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) { 1026 + blk_queue_hardsect_size(queue, blocksize); 1027 + } else 1028 + osm_warn("unable to get blocksize of %s\n", gd->disk_name); 1129 1029 1130 - if (!i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4)) 1131 - if (!i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) { 1132 - osm_warn("unable to get blocksize of %s\n", 1133 - gd->disk_name); 1134 - blocksize = 0; 1135 - } 1030 + if (i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) || 1031 + i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8)) { 1032 + set_capacity(gd, size >> KERNEL_SECTOR_SHIFT); 1033 + } else 1034 + osm_warn("could not get size of %s\n", gd->disk_name); 1136 1035 1137 1036 if (!i2o_parm_field_get(i2o_dev, 0x0000, 2, &i2o_blk_dev->power, 2)) 1138 1037 i2o_blk_dev->power = 0; 1139 1038 i2o_parm_field_get(i2o_dev, 0x0000, 5, &flags, 4); 1140 1039 i2o_parm_field_get(i2o_dev, 0x0000, 6, &status, 4); 1141 - 1142 - set_capacity(gd, size >> 9); 1143 1040 1144 1041 i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0xffffffff); 1145 1042 ··· 1182 1109 goto exit; 1183 1110 } 1184 1111 1185 - i2o_blk_req_pool.pool = mempool_create(I2O_REQ_MEMPOOL_SIZE, 1112 + i2o_blk_req_pool.pool = mempool_create(I2O_BLOCK_REQ_MEMPOOL_SIZE, 1186 1113 mempool_alloc_slab, 1187 1114 mempool_free_slab, 1188 1115 i2o_blk_req_pool.slab);
+2 -2
drivers/message/i2o/i2o_block.h
··· 84 84 struct list_head queue; 85 85 struct request *req; /* corresponding request */ 86 86 struct i2o_block_device *i2o_blk_dev; /* I2O block device */ 87 - int sg_dma_direction; /* direction of DMA buffer read/write */ 87 + struct device *dev; /* device used for DMA */ 88 88 int sg_nents; /* number of SG elements */ 89 - struct scatterlist sg_table[I2O_MAX_SEGMENTS]; /* SG table */ 89 + struct scatterlist sg_table[I2O_MAX_PHYS_SEGMENTS]; /* SG table */ 90 90 }; 91 91 92 92 /* I2O Block device delayed request */
+2 -154
drivers/message/i2o/i2o_config.c
··· 30 30 * 2 of the License, or (at your option) any later version. 31 31 */ 32 32 33 - #include <linux/module.h> 34 - #include <linux/kernel.h> 35 - #include <linux/pci.h> 36 - #include <linux/i2o.h> 37 - #include <linux/errno.h> 38 - #include <linux/init.h> 39 - #include <linux/slab.h> 40 33 #include <linux/miscdevice.h> 41 - #include <linux/mm.h> 42 - #include <linux/spinlock.h> 43 34 #include <linux/smp_lock.h> 44 - #include <linux/ioctl32.h> 45 35 #include <linux/compat.h> 46 - #include <linux/syscalls.h> 47 36 48 37 #include <asm/uaccess.h> 49 - #include <asm/io.h> 50 - 51 - #define OSM_NAME "config-osm" 52 - #define OSM_VERSION "$Rev$" 53 - #define OSM_DESCRIPTION "I2O Configuration OSM" 54 38 55 39 extern int i2o_parm_issue(struct i2o_device *, int, void *, int, void *, int); 56 40 ··· 63 79 }; 64 80 static struct i2o_cfg_info *open_files = NULL; 65 81 static ulong i2o_cfg_info_id = 0; 66 - 67 - /** 68 - * i2o_config_read_hrt - Returns the HRT of the controller 69 - * @kob: kernel object handle 70 - * @buf: buffer into which the HRT should be copied 71 - * @off: file offset 72 - * @count: number of bytes to read 73 - * 74 - * Put @count bytes starting at @off into @buf from the HRT of the I2O 75 - * controller corresponding to @kobj. 76 - * 77 - * Returns number of bytes copied into buffer. 
78 - */ 79 - static ssize_t i2o_config_read_hrt(struct kobject *kobj, char *buf, 80 - loff_t offset, size_t count) 81 - { 82 - struct i2o_controller *c = to_i2o_controller(container_of(kobj, 83 - struct device, 84 - kobj)); 85 - i2o_hrt *hrt = c->hrt.virt; 86 - 87 - u32 size = (hrt->num_entries * hrt->entry_len + 2) * 4; 88 - 89 - if(offset > size) 90 - return 0; 91 - 92 - if(offset + count > size) 93 - count = size - offset; 94 - 95 - memcpy(buf, (u8 *) hrt + offset, count); 96 - 97 - return count; 98 - }; 99 - 100 - /** 101 - * i2o_config_read_lct - Returns the LCT of the controller 102 - * @kob: kernel object handle 103 - * @buf: buffer into which the LCT should be copied 104 - * @off: file offset 105 - * @count: number of bytes to read 106 - * 107 - * Put @count bytes starting at @off into @buf from the LCT of the I2O 108 - * controller corresponding to @kobj. 109 - * 110 - * Returns number of bytes copied into buffer. 111 - */ 112 - static ssize_t i2o_config_read_lct(struct kobject *kobj, char *buf, 113 - loff_t offset, size_t count) 114 - { 115 - struct i2o_controller *c = to_i2o_controller(container_of(kobj, 116 - struct device, 117 - kobj)); 118 - u32 size = c->lct->table_size * 4; 119 - 120 - if(offset > size) 121 - return 0; 122 - 123 - if(offset + count > size) 124 - count = size - offset; 125 - 126 - memcpy(buf, (u8 *) c->lct + offset, count); 127 - 128 - return count; 129 - }; 130 - 131 - /* attribute for HRT in sysfs */ 132 - static struct bin_attribute i2o_config_hrt_attr = { 133 - .attr = { 134 - .name = "hrt", 135 - .mode = S_IRUGO, 136 - .owner = THIS_MODULE 137 - }, 138 - .size = 0, 139 - .read = i2o_config_read_hrt 140 - }; 141 - 142 - /* attribute for LCT in sysfs */ 143 - static struct bin_attribute i2o_config_lct_attr = { 144 - .attr = { 145 - .name = "lct", 146 - .mode = S_IRUGO, 147 - .owner = THIS_MODULE 148 - }, 149 - .size = 0, 150 - .read = i2o_config_read_lct 151 - }; 152 - 153 - /** 154 - * i2o_config_notify_controller_add - Notify of 
added controller 155 - * @c: the controller which was added 156 - * 157 - * If a I2O controller is added, we catch the notification to add sysfs 158 - * entries. 159 - */ 160 - static void i2o_config_notify_controller_add(struct i2o_controller *c) 161 - { 162 - sysfs_create_bin_file(&(c->device.kobj), &i2o_config_hrt_attr); 163 - sysfs_create_bin_file(&(c->device.kobj), &i2o_config_lct_attr); 164 - }; 165 - 166 - /** 167 - * i2o_config_notify_controller_remove - Notify of removed controller 168 - * @c: the controller which was removed 169 - * 170 - * If a I2O controller is removed, we catch the notification to remove the 171 - * sysfs entries. 172 - */ 173 - static void i2o_config_notify_controller_remove(struct i2o_controller *c) 174 - { 175 - sysfs_remove_bin_file(&c->device.kobj, &i2o_config_lct_attr); 176 - sysfs_remove_bin_file(&c->device.kobj, &i2o_config_hrt_attr); 177 - }; 178 - 179 - /* Config OSM driver struct */ 180 - static struct i2o_driver i2o_config_driver = { 181 - .name = OSM_NAME, 182 - .notify_controller_add = i2o_config_notify_controller_add, 183 - .notify_controller_remove = i2o_config_notify_controller_remove 184 - }; 185 82 186 83 static int i2o_cfg_getiops(unsigned long arg) 187 84 { ··· 1122 1257 &config_fops 1123 1258 }; 1124 1259 1125 - static int __init i2o_config_init(void) 1260 + static int __init i2o_config_old_init(void) 1126 1261 { 1127 - printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n"); 1128 - 1129 1262 spin_lock_init(&i2o_config_lock); 1130 1263 1131 1264 if (misc_register(&i2o_miscdev) < 0) { 1132 1265 osm_err("can't register device.\n"); 1133 1266 return -EBUSY; 1134 1267 } 1135 - /* 1136 - * Install our handler 1137 - */ 1138 - if (i2o_driver_register(&i2o_config_driver)) { 1139 - osm_err("handler register failed.\n"); 1140 - misc_deregister(&i2o_miscdev); 1141 - return -EBUSY; 1142 - } 1143 1268 return 0; 1144 1269 } 1145 1270 1146 - static void i2o_config_exit(void) 1271 + static void i2o_config_old_exit(void) 1147 
1272 { 1148 1273 misc_deregister(&i2o_miscdev); 1149 - i2o_driver_unregister(&i2o_config_driver); 1150 1274 } 1151 1275 1152 1276 MODULE_AUTHOR("Red Hat Software"); 1153 - MODULE_LICENSE("GPL"); 1154 - MODULE_DESCRIPTION(OSM_DESCRIPTION); 1155 - MODULE_VERSION(OSM_VERSION); 1156 - 1157 - module_init(i2o_config_init); 1158 - module_exit(i2o_config_exit);
+2 -2
drivers/message/i2o/i2o_proc.c
··· 228 228 case I2O_CLASS_FLOPPY_DEVICE: 229 229 idx = 12; 230 230 break; 231 - case I2O_CLASS_BUS_ADAPTER_PORT: 231 + case I2O_CLASS_BUS_ADAPTER: 232 232 idx = 13; 233 233 break; 234 234 case I2O_CLASS_PEER_TRANSPORT_AGENT: ··· 490 490 seq_printf(seq, ", Unknown Device Type"); 491 491 break; 492 492 493 - case I2O_CLASS_BUS_ADAPTER_PORT: 493 + case I2O_CLASS_BUS_ADAPTER: 494 494 if (lct->lct_entry[i].sub_class < BUS_TABLE_SIZE) 495 495 seq_printf(seq, ", %s", 496 496 bus_ports[lct->lct_entry[i].
+20 -10
drivers/message/i2o/i2o_scsi.c
··· 103 103 i2o_status_block *sb; 104 104 105 105 list_for_each_entry(i2o_dev, &c->devices, list) 106 - if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT) { 106 + if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER) { 107 107 if (i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1) 108 108 && (type == 0x01)) /* SCSI bus */ 109 109 max_channel++; ··· 139 139 140 140 i = 0; 141 141 list_for_each_entry(i2o_dev, &c->devices, list) 142 - if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT) { 142 + if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER) { 143 143 if (i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1) || (type == 1)) /* only SCSI bus */ 144 144 i2o_shost->channel[i++] = i2o_dev; 145 145 ··· 186 186 187 187 shost_for_each_device(scsi_dev, i2o_shost->scsi_host) 188 188 if (scsi_dev->hostdata == i2o_dev) { 189 + sysfs_remove_link(&i2o_dev->device.kobj, "scsi"); 189 190 scsi_remove_device(scsi_dev); 190 191 scsi_device_put(scsi_dev); 191 192 break; ··· 260 259 scsi_dev = 261 260 __scsi_add_device(i2o_shost->scsi_host, channel, id, lun, i2o_dev); 262 261 263 - if (!scsi_dev) { 262 + if (IS_ERR(scsi_dev)) { 264 263 osm_warn("can not add SCSI device %03x\n", 265 264 i2o_dev->lct_data.tid); 266 - return -EFAULT; 265 + return PTR_ERR(scsi_dev); 267 266 } 267 + 268 + sysfs_create_link(&i2o_dev->device.kobj, &scsi_dev->sdev_gendev.kobj, "scsi"); 268 269 269 270 osm_info("device added (TID: %03x) channel: %d, id: %d, lun: %d\n", 270 271 i2o_dev->lct_data.tid, channel, id, (unsigned int)lun); ··· 548 545 int tid; 549 546 struct i2o_message __iomem *msg; 550 547 u32 m; 551 - u32 scsi_flags, sg_flags; 548 + /* 549 + * ENABLE_DISCONNECT 550 + * SIMPLE_TAG 551 + * RETURN_SENSE_DATA_IN_REPLY_MESSAGE_FRAME 552 + */ 553 + u32 scsi_flags = 0x20a00000; 554 + u32 sg_flags; 552 555 u32 __iomem *mptr; 553 556 u32 __iomem *lenptr; 554 557 u32 len; ··· 600 591 601 592 switch (SCpnt->sc_data_direction) { 602 593 case PCI_DMA_NONE: 603 - scsi_flags = 0x00000000; // 
DATA NO XFER 594 + /* DATA NO XFER */ 604 595 sg_flags = 0x00000000; 605 596 break; 606 597 607 598 case PCI_DMA_TODEVICE: 608 - scsi_flags = 0x80000000; // DATA OUT (iop-->dev) 599 + /* DATA OUT (iop-->dev) */ 600 + scsi_flags |= 0x80000000; 609 601 sg_flags = 0x14000000; 610 602 break; 611 603 612 604 case PCI_DMA_FROMDEVICE: 613 - scsi_flags = 0x40000000; // DATA IN (iop<--dev) 605 + /* DATA IN (iop<--dev) */ 606 + scsi_flags |= 0x40000000; 614 607 sg_flags = 0x10000000; 615 608 break; 616 609 ··· 650 639 } 651 640 */ 652 641 653 - /* Direction, disconnect ok, tag, CDBLen */ 654 - writel(scsi_flags | 0x20200000 | SCpnt->cmd_len, mptr ++); 642 + writel(scsi_flags | SCpnt->cmd_len, mptr++); 655 643 656 644 /* Write SCSI command into the message - always 16 byte block */ 657 645 memcpy_toio(mptr, SCpnt->cmnd, 16);
+118 -147
drivers/message/i2o/iop.c
··· 456 456 } 457 457 458 458 /** 459 + * i2o_iop_init_outbound_queue - setup the outbound message queue 460 + * @c: I2O controller 461 + * 462 + * Clear and (re)initialize IOP's outbound queue and post the message 463 + * frames to the IOP. 464 + * 465 + * Returns 0 on success or a negative errno code on failure. 466 + */ 467 + static int i2o_iop_init_outbound_queue(struct i2o_controller *c) 468 + { 469 + u8 *status = c->status.virt; 470 + u32 m; 471 + struct i2o_message __iomem *msg; 472 + ulong timeout; 473 + int i; 474 + 475 + osm_debug("%s: Initializing Outbound Queue...\n", c->name); 476 + 477 + memset(status, 0, 4); 478 + 479 + m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 480 + if (m == I2O_QUEUE_EMPTY) 481 + return -ETIMEDOUT; 482 + 483 + writel(EIGHT_WORD_MSG_SIZE | TRL_OFFSET_6, &msg->u.head[0]); 484 + writel(I2O_CMD_OUTBOUND_INIT << 24 | HOST_TID << 12 | ADAPTER_TID, 485 + &msg->u.head[1]); 486 + writel(i2o_exec_driver.context, &msg->u.s.icntxt); 487 + writel(0x0106, &msg->u.s.tcntxt); /* FIXME: why 0x0106, maybe in 488 + Spec? 
*/ 489 + writel(PAGE_SIZE, &msg->body[0]); 490 + /* Outbound msg frame size in words and Initcode */ 491 + writel(MSG_FRAME_SIZE << 16 | 0x80, &msg->body[1]); 492 + writel(0xd0000004, &msg->body[2]); 493 + writel(i2o_dma_low(c->status.phys), &msg->body[3]); 494 + writel(i2o_dma_high(c->status.phys), &msg->body[4]); 495 + 496 + i2o_msg_post(c, m); 497 + 498 + timeout = jiffies + I2O_TIMEOUT_INIT_OUTBOUND_QUEUE * HZ; 499 + while (*status <= I2O_CMD_IN_PROGRESS) { 500 + if (time_after(jiffies, timeout)) { 501 + osm_warn("%s: Timeout Initializing\n", c->name); 502 + return -ETIMEDOUT; 503 + } 504 + set_current_state(TASK_UNINTERRUPTIBLE); 505 + schedule_timeout(1); 506 + 507 + rmb(); 508 + } 509 + 510 + m = c->out_queue.phys; 511 + 512 + /* Post frames */ 513 + for (i = 0; i < NMBR_MSG_FRAMES; i++) { 514 + i2o_flush_reply(c, m); 515 + udelay(1); /* Promise */ 516 + m += MSG_FRAME_SIZE * 4; 517 + } 518 + 519 + return 0; 520 + } 521 + 522 + /** 459 523 * i2o_iop_reset - reset an I2O controller 460 524 * @c: controller to reset 461 525 * ··· 555 491 writel(0, &msg->u.s.tcntxt); //FIXME: use reasonable transaction context 556 492 writel(0, &msg->body[0]); 557 493 writel(0, &msg->body[1]); 558 - writel(i2o_ptr_low((void *)c->status.phys), &msg->body[2]); 559 - writel(i2o_ptr_high((void *)c->status.phys), &msg->body[3]); 494 + writel(i2o_dma_low(c->status.phys), &msg->body[2]); 495 + writel(i2o_dma_high(c->status.phys), &msg->body[3]); 560 496 561 497 i2o_msg_post(c, m); 562 498 563 499 /* Wait for a reply */ 564 500 timeout = jiffies + I2O_TIMEOUT_RESET * HZ; 565 501 while (!*status) { 566 - if (time_after(jiffies, timeout)) { 567 - printk(KERN_ERR "%s: IOP reset timeout.\n", c->name); 568 - rc = -ETIMEDOUT; 569 - goto exit; 570 - } 571 - 572 - /* Promise bug */ 573 - if (status[1] || status[4]) { 574 - *status = 0; 502 + if (time_after(jiffies, timeout)) 575 503 break; 576 - } 577 504 578 505 set_current_state(TASK_UNINTERRUPTIBLE); 579 506 schedule_timeout(1); ··· 572 517 
rmb(); 573 518 } 574 519 575 - if (*status == I2O_CMD_IN_PROGRESS) { 520 + switch (*status) { 521 + case I2O_CMD_REJECTED: 522 + osm_warn("%s: IOP reset rejected\n", c->name); 523 + rc = -EPERM; 524 + break; 525 + 526 + case I2O_CMD_IN_PROGRESS: 576 527 /* 577 528 * Once the reset is sent, the IOP goes into the INIT state 578 - * which is indeterminate. We need to wait until the IOP 579 - * has rebooted before we can let the system talk to 580 - * it. We read the inbound Free_List until a message is 581 - * available. If we can't read one in the given ammount of 582 - * time, we assume the IOP could not reboot properly. 529 + * which is indeterminate. We need to wait until the IOP has 530 + * rebooted before we can let the system talk to it. We read 531 + * the inbound Free_List until a message is available. If we 532 + * can't read one in the given ammount of time, we assume the 533 + * IOP could not reboot properly. 583 534 */ 584 535 pr_debug("%s: Reset in progress, waiting for reboot...\n", 585 536 c->name); ··· 604 543 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_RESET); 605 544 } 606 545 i2o_msg_nop(c, m); 546 + 547 + /* from here all quiesce commands are safe */ 548 + c->no_quiesce = 0; 549 + 550 + /* verify if controller is in state RESET */ 551 + i2o_status_get(c); 552 + 553 + if (!c->promise && (sb->iop_state != ADAPTER_STATE_RESET)) 554 + osm_warn("%s: reset completed, but adapter not in RESET" 555 + " state.\n", c->name); 556 + else 557 + osm_debug("%s: reset completed.\n", c->name); 558 + 559 + break; 560 + 561 + default: 562 + osm_err("%s: IOP reset timeout.\n", c->name); 563 + rc = -ETIMEDOUT; 564 + break; 607 565 } 608 - 609 - /* from here all quiesce commands are safe */ 610 - c->no_quiesce = 0; 611 - 612 - /* If IopReset was rejected or didn't perform reset, try IopClear */ 613 - i2o_status_get(c); 614 - if (*status == I2O_CMD_REJECTED || sb->iop_state != ADAPTER_STATE_RESET) { 615 - printk(KERN_WARNING "%s: Reset rejected, trying to clear\n", 616 - 
c->name); 617 - i2o_iop_clear(c); 618 - } else 619 - pr_debug("%s: Reset completed.\n", c->name); 620 566 621 567 exit: 622 568 /* Enable all IOPs */ ··· 631 563 632 564 return rc; 633 565 }; 634 - 635 - /** 636 - * i2o_iop_init_outbound_queue - setup the outbound message queue 637 - * @c: I2O controller 638 - * 639 - * Clear and (re)initialize IOP's outbound queue and post the message 640 - * frames to the IOP. 641 - * 642 - * Returns 0 on success or a negative errno code on failure. 643 - */ 644 - static int i2o_iop_init_outbound_queue(struct i2o_controller *c) 645 - { 646 - u8 *status = c->status.virt; 647 - u32 m; 648 - struct i2o_message __iomem *msg; 649 - ulong timeout; 650 - int i; 651 - 652 - pr_debug("%s: Initializing Outbound Queue...\n", c->name); 653 - 654 - memset(status, 0, 4); 655 - 656 - m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 657 - if (m == I2O_QUEUE_EMPTY) 658 - return -ETIMEDOUT; 659 - 660 - writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg->u.head[0]); 661 - writel(I2O_CMD_OUTBOUND_INIT << 24 | HOST_TID << 12 | ADAPTER_TID, 662 - &msg->u.head[1]); 663 - writel(i2o_exec_driver.context, &msg->u.s.icntxt); 664 - writel(0x00000000, &msg->u.s.tcntxt); 665 - writel(PAGE_SIZE, &msg->body[0]); 666 - writel(MSG_FRAME_SIZE << 16 | 0x80, &msg->body[1]); /* Outbound msg frame 667 - size in words and Initcode */ 668 - writel(0xd0000004, &msg->body[2]); 669 - writel(i2o_ptr_low((void *)c->status.phys), &msg->body[3]); 670 - writel(i2o_ptr_high((void *)c->status.phys), &msg->body[4]); 671 - 672 - i2o_msg_post(c, m); 673 - 674 - timeout = jiffies + I2O_TIMEOUT_INIT_OUTBOUND_QUEUE * HZ; 675 - while (*status <= I2O_CMD_IN_PROGRESS) { 676 - if (time_after(jiffies, timeout)) { 677 - printk(KERN_WARNING "%s: Timeout Initializing\n", 678 - c->name); 679 - return -ETIMEDOUT; 680 - } 681 - set_current_state(TASK_UNINTERRUPTIBLE); 682 - schedule_timeout(1); 683 - 684 - rmb(); 685 - } 686 - 687 - m = c->out_queue.phys; 688 - 689 - /* Post frames */ 690 - 
for (i = 0; i < NMBR_MSG_FRAMES; i++) { 691 - i2o_flush_reply(c, m); 692 - udelay(1); /* Promise */ 693 - m += MSG_FRAME_SIZE * 4; 694 - } 695 - 696 - return 0; 697 - } 698 - 699 - /** 700 - * i2o_iop_send_nop - send a core NOP message 701 - * @c: controller 702 - * 703 - * Send a no-operation message with a reply set to cause no 704 - * action either. Needed for bringing up promise controllers. 705 - */ 706 - static int i2o_iop_send_nop(struct i2o_controller *c) 707 - { 708 - struct i2o_message __iomem *msg; 709 - u32 m = i2o_msg_get_wait(c, &msg, HZ); 710 - if (m == I2O_QUEUE_EMPTY) 711 - return -ETIMEDOUT; 712 - i2o_msg_nop(c, m); 713 - return 0; 714 - } 715 566 716 567 /** 717 568 * i2o_iop_activate - Bring controller up to HOLD ··· 643 656 */ 644 657 static int i2o_iop_activate(struct i2o_controller *c) 645 658 { 646 - struct pci_dev *i960 = NULL; 647 659 i2o_status_block *sb = c->status_block.virt; 648 660 int rc; 649 - 650 - if (c->promise) { 651 - /* Beat up the hardware first of all */ 652 - i960 = 653 - pci_find_slot(c->pdev->bus->number, 654 - PCI_DEVFN(PCI_SLOT(c->pdev->devfn), 0)); 655 - if (i960) 656 - pci_write_config_word(i960, 0x42, 0); 657 - 658 - /* Follow this sequence precisely or the controller 659 - ceases to perform useful functions until reboot */ 660 - if ((rc = i2o_iop_send_nop(c))) 661 - return rc; 662 - 663 - if ((rc = i2o_iop_reset(c))) 664 - return rc; 665 - } 661 + int state; 666 662 667 663 /* In INIT state, Wait Inbound Q to initialize (in i2o_status_get) */ 668 664 /* In READY state, Get status */ ··· 654 684 if (rc) { 655 685 printk(KERN_INFO "%s: Unable to obtain status, " 656 686 "attempting a reset.\n", c->name); 657 - if (i2o_iop_reset(c)) 687 + rc = i2o_iop_reset(c); 688 + if (rc) 658 689 return rc; 659 690 } 660 691 ··· 668 697 switch (sb->iop_state) { 669 698 case ADAPTER_STATE_FAULTED: 670 699 printk(KERN_CRIT "%s: hardware fault\n", c->name); 671 - return -ENODEV; 700 + return -EFAULT; 672 701 673 702 case 
ADAPTER_STATE_READY: 674 703 case ADAPTER_STATE_OPERATIONAL: 675 704 case ADAPTER_STATE_HOLD: 676 705 case ADAPTER_STATE_FAILED: 677 706 pr_debug("%s: already running, trying to reset...\n", c->name); 678 - if (i2o_iop_reset(c)) 679 - return -ENODEV; 707 + rc = i2o_iop_reset(c); 708 + if (rc) 709 + return rc; 680 710 } 711 + 712 + /* preserve state */ 713 + state = sb->iop_state; 681 714 682 715 rc = i2o_iop_init_outbound_queue(c); 683 716 if (rc) 684 717 return rc; 685 718 686 - if (c->promise) { 687 - if ((rc = i2o_iop_send_nop(c))) 688 - return rc; 719 + /* if adapter was not in RESET state clear now */ 720 + if (state != ADAPTER_STATE_RESET) 721 + i2o_iop_clear(c); 689 722 690 - if ((rc = i2o_status_get(c))) 691 - return rc; 723 + i2o_status_get(c); 692 724 693 - if (i960) 694 - pci_write_config_word(i960, 0x42, 0x3FF); 725 + if (sb->iop_state != ADAPTER_STATE_HOLD) { 726 + osm_err("%s: failed to bring IOP into HOLD state\n", c->name); 727 + return -EIO; 695 728 } 696 729 697 - /* In HOLD state */ 698 - 699 - rc = i2o_hrt_get(c); 700 - 701 - return rc; 730 + return i2o_hrt_get(c); 702 731 }; 703 732 704 733 /** ··· 1001 1030 writel(0, &msg->u.s.tcntxt); // FIXME: use resonable transaction context 1002 1031 writel(0, &msg->body[0]); 1003 1032 writel(0, &msg->body[1]); 1004 - writel(i2o_ptr_low((void *)c->status_block.phys), &msg->body[2]); 1005 - writel(i2o_ptr_high((void *)c->status_block.phys), &msg->body[3]); 1033 + writel(i2o_dma_low(c->status_block.phys), &msg->body[2]); 1034 + writel(i2o_dma_high(c->status_block.phys), &msg->body[3]); 1006 1035 writel(sizeof(i2o_status_block), &msg->body[4]); /* always 88 bytes */ 1007 1036 1008 1037 i2o_msg_post(c, m);
+18 -47
drivers/message/i2o/pci.c
··· 50 50 }; 51 51 52 52 /** 53 - * i2o_dma_realloc - Realloc DMA memory 54 - * @dev: struct device pointer to the PCI device of the I2O controller 55 - * @addr: pointer to a i2o_dma struct DMA buffer 56 - * @len: new length of memory 57 - * @gfp_mask: GFP mask 58 - * 59 - * If there was something allocated in the addr, free it first. If len > 0 60 - * than try to allocate it and write the addresses back to the addr 61 - * structure. If len == 0 set the virtual address to NULL. 62 - * 63 - * Returns the 0 on success or negative error code on failure. 64 - */ 65 - int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr, size_t len, 66 - unsigned int gfp_mask) 67 - { 68 - i2o_dma_free(dev, addr); 69 - 70 - if (len) 71 - return i2o_dma_alloc(dev, addr, len, gfp_mask); 72 - 73 - return 0; 74 - }; 75 - 76 - /** 77 53 * i2o_pci_free - Frees the DMA memory for the I2O controller 78 54 * @c: I2O controller to free 79 55 * ··· 161 185 } else 162 186 c->in_queue = c->base; 163 187 188 + c->irq_status = c->base.virt + I2O_IRQ_STATUS; 164 189 c->irq_mask = c->base.virt + I2O_IRQ_MASK; 165 190 c->in_port = c->base.virt + I2O_IN_PORT; 166 191 c->out_port = c->base.virt + I2O_OUT_PORT; ··· 209 232 static irqreturn_t i2o_pci_interrupt(int irq, void *dev_id, struct pt_regs *r) 210 233 { 211 234 struct i2o_controller *c = dev_id; 212 - struct device *dev = &c->pdev->dev; 213 - u32 mv = readl(c->out_port); 235 + u32 m; 236 + irqreturn_t rc = IRQ_NONE; 214 237 215 - /* 216 - * Old 960 steppings had a bug in the I2O unit that caused 217 - * the queue to appear empty when it wasn't. 
218 - */ 219 - if (mv == I2O_QUEUE_EMPTY) { 220 - mv = readl(c->out_port); 221 - if (unlikely(mv == I2O_QUEUE_EMPTY)) 222 - return IRQ_NONE; 223 - else 224 - pr_debug("%s: 960 bug detected\n", c->name); 225 - } 238 + while (readl(c->irq_status) & I2O_IRQ_OUTBOUND_POST) { 239 + m = readl(c->out_port); 240 + if (m == I2O_QUEUE_EMPTY) { 241 + /* 242 + * Old 960 steppings had a bug in the I2O unit that 243 + * caused the queue to appear empty when it wasn't. 244 + */ 245 + m = readl(c->out_port); 246 + if (unlikely(m == I2O_QUEUE_EMPTY)) 247 + break; 248 + } 226 249 227 - while (mv != I2O_QUEUE_EMPTY) { 228 250 /* dispatch it */ 229 - if (i2o_driver_dispatch(c, mv)) 251 + if (i2o_driver_dispatch(c, m)) 230 252 /* flush it if result != 0 */ 231 - i2o_flush_reply(c, mv); 253 + i2o_flush_reply(c, m); 232 254 233 - /* 234 - * That 960 bug again... 235 - */ 236 - mv = readl(c->out_port); 237 - if (mv == I2O_QUEUE_EMPTY) 238 - mv = readl(c->out_port); 255 + rc = IRQ_HANDLED; 239 256 } 240 257 241 - return IRQ_HANDLED; 258 + return rc; 242 259 } 243 260 244 261 /**
+5 -1
include/linux/i2o-dev.h
··· 32 32 33 33 #endif /* __KERNEL__ */ 34 34 35 + /* 36 + * Vendors 37 + */ 38 + #define I2O_VENDOR_DPT 0x001b 35 39 36 40 /* 37 41 * I2O Control IOCTLs and structures ··· 337 333 #define I2O_CLASS_ATE_PERIPHERAL 0x061 338 334 #define I2O_CLASS_FLOPPY_CONTROLLER 0x070 339 335 #define I2O_CLASS_FLOPPY_DEVICE 0x071 340 - #define I2O_CLASS_BUS_ADAPTER_PORT 0x080 336 + #define I2O_CLASS_BUS_ADAPTER 0x080 341 337 #define I2O_CLASS_PEER_TRANSPORT_AGENT 0x090 342 338 #define I2O_CLASS_PEER_TRANSPORT 0x091 343 339 #define I2O_CLASS_END 0xfff
+265 -56
include/linux/i2o.h
··· 157 157 158 158 void __iomem *in_port; /* Inbout port address */ 159 159 void __iomem *out_port; /* Outbound port address */ 160 - void __iomem *irq_mask; /* Interrupt register address */ 160 + void __iomem *irq_status; /* Interrupt status register address */ 161 + void __iomem *irq_mask; /* Interrupt mask register address */ 161 162 162 163 /* Dynamic LCT related data */ 163 164 ··· 243 242 extern void i2o_msg_nop(struct i2o_controller *, u32); 244 243 static inline void i2o_flush_reply(struct i2o_controller *, u32); 245 244 246 - /* DMA handling functions */ 247 - static inline int i2o_dma_alloc(struct device *, struct i2o_dma *, size_t, 248 - unsigned int); 249 - static inline void i2o_dma_free(struct device *, struct i2o_dma *); 250 - int i2o_dma_realloc(struct device *, struct i2o_dma *, size_t, unsigned int); 251 - 252 - static inline int i2o_dma_map(struct device *, struct i2o_dma *); 253 - static inline void i2o_dma_unmap(struct device *, struct i2o_dma *); 254 - 255 245 /* IOP functions */ 256 246 extern int i2o_status_get(struct i2o_controller *); 257 247 ··· 266 274 static inline u32 i2o_ptr_high(void *ptr) 267 275 { 268 276 return (u32) ((u64) ptr >> 32); 277 + }; 278 + 279 + static inline u32 i2o_dma_low(dma_addr_t dma_addr) 280 + { 281 + return (u32) (u64) dma_addr; 282 + }; 283 + 284 + static inline u32 i2o_dma_high(dma_addr_t dma_addr) 285 + { 286 + return (u32) ((u64) dma_addr >> 32); 269 287 }; 270 288 #else 271 289 static inline u32 i2o_cntxt_list_add(struct i2o_controller *c, void *ptr) ··· 307 305 { 308 306 return 0; 309 307 }; 308 + 309 + static inline u32 i2o_dma_low(dma_addr_t dma_addr) 310 + { 311 + return (u32) dma_addr; 312 + }; 313 + 314 + static inline u32 i2o_dma_high(dma_addr_t dma_addr) 315 + { 316 + return 0; 317 + }; 310 318 #endif 319 + 320 + /** 321 + * i2o_sg_tablesize - Calculate the maximum number of elements in a SGL 322 + * @c: I2O controller for which the calculation should be done 323 + * @body_size: maximum body size 
used for message in 32-bit words. 324 + * 325 + * Return the maximum number of SG elements in a SG list. 326 + */ 327 + static inline u16 i2o_sg_tablesize(struct i2o_controller *c, u16 body_size) 328 + { 329 + i2o_status_block *sb = c->status_block.virt; 330 + u16 sg_count = 331 + (sb->inbound_frame_size - sizeof(struct i2o_message) / 4) - 332 + body_size; 333 + 334 + if (c->pae_support) { 335 + /* 336 + * for 64-bit a SG attribute element must be added and each 337 + * SG element needs 12 bytes instead of 8. 338 + */ 339 + sg_count -= 2; 340 + sg_count /= 3; 341 + } else 342 + sg_count /= 2; 343 + 344 + if (c->short_req && (sg_count > 8)) 345 + sg_count = 8; 346 + 347 + return sg_count; 348 + }; 349 + 350 + /** 351 + * i2o_dma_map_single - Map pointer to controller and fill in I2O message. 352 + * @c: I2O controller 353 + * @ptr: pointer to the data which should be mapped 354 + * @size: size of data in bytes 355 + * @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE 356 + * @sg_ptr: pointer to the SG list inside the I2O message 357 + * 358 + * This function does all necessary DMA handling and also writes the I2O 359 + * SGL elements into the I2O message. For details on DMA handling see also 360 + * dma_map_single(). The pointer sg_ptr will only be set to the end of the 361 + * SG list if the allocation was successful. 362 + * 363 + * Returns DMA address which must be checked for failures using 364 + * dma_mapping_error(). 
365 + */ 366 + static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr, 367 + size_t size, 368 + enum dma_data_direction direction, 369 + u32 __iomem ** sg_ptr) 370 + { 371 + u32 sg_flags; 372 + u32 __iomem *mptr = *sg_ptr; 373 + dma_addr_t dma_addr; 374 + 375 + switch (direction) { 376 + case DMA_TO_DEVICE: 377 + sg_flags = 0xd4000000; 378 + break; 379 + case DMA_FROM_DEVICE: 380 + sg_flags = 0xd0000000; 381 + break; 382 + default: 383 + return 0; 384 + } 385 + 386 + dma_addr = dma_map_single(&c->pdev->dev, ptr, size, direction); 387 + if (!dma_mapping_error(dma_addr)) { 388 + #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 389 + if ((sizeof(dma_addr_t) > 4) && c->pae_support) { 390 + writel(0x7C020002, mptr++); 391 + writel(PAGE_SIZE, mptr++); 392 + } 393 + #endif 394 + 395 + writel(sg_flags | size, mptr++); 396 + writel(i2o_dma_low(dma_addr), mptr++); 397 + #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 398 + if ((sizeof(dma_addr_t) > 4) && c->pae_support) 399 + writel(i2o_dma_high(dma_addr), mptr++); 400 + #endif 401 + *sg_ptr = mptr; 402 + } 403 + return dma_addr; 404 + }; 405 + 406 + /** 407 + * i2o_dma_map_sg - Map a SG List to controller and fill in I2O message. 408 + * @c: I2O controller 409 + * @sg: SG list to be mapped 410 + * @sg_count: number of elements in the SG list 411 + * @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE 412 + * @sg_ptr: pointer to the SG list inside the I2O message 413 + * 414 + * This function does all necessary DMA handling and also writes the I2O 415 + * SGL elements into the I2O message. For details on DMA handling see also 416 + * dma_map_sg(). The pointer sg_ptr will only be set to the end of the SG 417 + * list if the allocation was successful. 418 + * 419 + * Returns 0 on failure or 1 on success. 
420 + */ 421 + static inline int i2o_dma_map_sg(struct i2o_controller *c, 422 + struct scatterlist *sg, int sg_count, 423 + enum dma_data_direction direction, 424 + u32 __iomem ** sg_ptr) 425 + { 426 + u32 sg_flags; 427 + u32 __iomem *mptr = *sg_ptr; 428 + 429 + switch (direction) { 430 + case DMA_TO_DEVICE: 431 + sg_flags = 0x14000000; 432 + break; 433 + case DMA_FROM_DEVICE: 434 + sg_flags = 0x10000000; 435 + break; 436 + default: 437 + return 0; 438 + } 439 + 440 + sg_count = dma_map_sg(&c->pdev->dev, sg, sg_count, direction); 441 + if (!sg_count) 442 + return 0; 443 + 444 + #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 445 + if ((sizeof(dma_addr_t) > 4) && c->pae_support) { 446 + writel(0x7C020002, mptr++); 447 + writel(PAGE_SIZE, mptr++); 448 + } 449 + #endif 450 + 451 + while (sg_count-- > 0) { 452 + if (!sg_count) 453 + sg_flags |= 0xC0000000; 454 + writel(sg_flags | sg_dma_len(sg), mptr++); 455 + writel(i2o_dma_low(sg_dma_address(sg)), mptr++); 456 + #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 457 + if ((sizeof(dma_addr_t) > 4) && c->pae_support) 458 + writel(i2o_dma_high(sg_dma_address(sg)), mptr++); 459 + #endif 460 + sg++; 461 + } 462 + *sg_ptr = mptr; 463 + 464 + return 1; 465 + }; 466 + 467 + /** 468 + * i2o_dma_alloc - Allocate DMA memory 469 + * @dev: struct device pointer to the PCI device of the I2O controller 470 + * @addr: i2o_dma struct which should get the DMA buffer 471 + * @len: length of the new DMA memory 472 + * @gfp_mask: GFP mask 473 + * 474 + * Allocate a coherent DMA memory and write the pointers into addr. 475 + * 476 + * Returns 0 on success or -ENOMEM on failure. 
477 + */ 478 + static inline int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr, 479 + size_t len, unsigned int gfp_mask) 480 + { 481 + struct pci_dev *pdev = to_pci_dev(dev); 482 + int dma_64 = 0; 483 + 484 + if ((sizeof(dma_addr_t) > 4) && (pdev->dma_mask == DMA_64BIT_MASK)) { 485 + dma_64 = 1; 486 + if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) 487 + return -ENOMEM; 488 + } 489 + 490 + addr->virt = dma_alloc_coherent(dev, len, &addr->phys, gfp_mask); 491 + 492 + if ((sizeof(dma_addr_t) > 4) && dma_64) 493 + if (pci_set_dma_mask(pdev, DMA_64BIT_MASK)) 494 + printk(KERN_WARNING "i2o: unable to set 64-bit DMA"); 495 + 496 + if (!addr->virt) 497 + return -ENOMEM; 498 + 499 + memset(addr->virt, 0, len); 500 + addr->len = len; 501 + 502 + return 0; 503 + }; 504 + 505 + /** 506 + * i2o_dma_free - Free DMA memory 507 + * @dev: struct device pointer to the PCI device of the I2O controller 508 + * @addr: i2o_dma struct which contains the DMA buffer 509 + * 510 + * Free a coherent DMA memory and set virtual address of addr to NULL. 511 + */ 512 + static inline void i2o_dma_free(struct device *dev, struct i2o_dma *addr) 513 + { 514 + if (addr->virt) { 515 + if (addr->phys) 516 + dma_free_coherent(dev, addr->len, addr->virt, 517 + addr->phys); 518 + else 519 + kfree(addr->virt); 520 + addr->virt = NULL; 521 + } 522 + }; 523 + 524 + /** 525 + * i2o_dma_realloc - Realloc DMA memory 526 + * @dev: struct device pointer to the PCI device of the I2O controller 527 + * @addr: pointer to a i2o_dma struct DMA buffer 528 + * @len: new length of memory 529 + * @gfp_mask: GFP mask 530 + * 531 + * If there was something allocated in the addr, free it first. If len > 0 532 + * than try to allocate it and write the addresses back to the addr 533 + * structure. If len == 0 set the virtual address to NULL. 534 + * 535 + * Returns the 0 on success or negative error code on failure. 
536 + */ 537 + static inline int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr, 538 + size_t len, unsigned int gfp_mask) 539 + { 540 + i2o_dma_free(dev, addr); 541 + 542 + if (len) 543 + return i2o_dma_alloc(dev, addr, len, gfp_mask); 544 + 545 + return 0; 546 + }; 311 547 312 548 /* I2O driver (OSM) functions */ 313 549 extern int i2o_driver_register(struct i2o_driver *); ··· 615 375 /* Exec OSM functions */ 616 376 extern int i2o_exec_lct_get(struct i2o_controller *); 617 377 618 - /* device / driver conversion functions */ 378 + /* device / driver / kobject conversion functions */ 619 379 #define to_i2o_driver(drv) container_of(drv,struct i2o_driver, driver) 620 380 #define to_i2o_device(dev) container_of(dev, struct i2o_device, device) 621 381 #define to_i2o_controller(dev) container_of(dev, struct i2o_controller, device) 382 + #define kobj_to_i2o_device(kobj) to_i2o_device(container_of(kobj, struct device, kobj)) 622 383 623 384 /** 624 385 * i2o_msg_get - obtain an I2O message from the IOP ··· 707 466 i2o_controller *c, 708 467 u32 m) 709 468 { 710 - BUG_ON(m < c->out_queue.phys 711 - || m >= c->out_queue.phys + c->out_queue.len); 469 + if (unlikely 470 + (m < c->out_queue.phys 471 + || m >= c->out_queue.phys + c->out_queue.len)) 472 + return NULL; 712 473 713 474 return c->out_queue.virt + (m - c->out_queue.phys); 714 475 }; ··· 772 529 else 773 530 kfree(addr->virt); 774 531 addr->virt = NULL; 775 - } 776 - }; 777 - 778 - /** 779 - * i2o_dma_map - Map the memory to DMA 780 - * @dev: struct device pointer to the PCI device of the I2O controller 781 - * @addr: i2o_dma struct which should be mapped 782 - * 783 - * Map the memory in addr->virt to coherent DMA memory and write the 784 - * physical address into addr->phys. 785 - * 786 - * Returns 0 on success or -ENOMEM on failure. 
787 - */ 788 - static inline int i2o_dma_map(struct device *dev, struct i2o_dma *addr) 789 - { 790 - if (!addr->virt) 791 - return -EFAULT; 792 - 793 - if (!addr->phys) 794 - addr->phys = dma_map_single(dev, addr->virt, addr->len, 795 - DMA_BIDIRECTIONAL); 796 - if (!addr->phys) 797 - return -ENOMEM; 798 - 799 - return 0; 800 - }; 801 - 802 - /** 803 - * i2o_dma_unmap - Unmap the DMA memory 804 - * @dev: struct device pointer to the PCI device of the I2O controller 805 - * @addr: i2o_dma struct which should be unmapped 806 - * 807 - * Unmap the memory in addr->virt from DMA memory. 808 - */ 809 - static inline void i2o_dma_unmap(struct device *dev, struct i2o_dma *addr) 810 - { 811 - if (!addr->virt) 812 - return; 813 - 814 - if (addr->phys) { 815 - dma_unmap_single(dev, addr->phys, addr->len, DMA_BIDIRECTIONAL); 816 - addr->phys = 0; 817 532 } 818 533 }; 819 534 ··· 925 724 #define I2O_CMD_SCSI_EXEC 0x81 926 725 #define I2O_CMD_SCSI_ABORT 0x83 927 726 #define I2O_CMD_SCSI_BUSRESET 0x27 727 + 728 + /* 729 + * Bus Adapter Class 730 + */ 731 + #define I2O_CMD_BUS_ADAPTER_RESET 0x85 732 + #define I2O_CMD_BUS_RESET 0x87 733 + #define I2O_CMD_BUS_SCAN 0x89 734 + #define I2O_CMD_BUS_QUIESCE 0x8b 928 735 929 736 /* 930 737 * Random Block Storage Class ··· 1157 948 1158 949 /* request queue sizes */ 1159 950 #define I2O_MAX_SECTORS 1024 1160 - #define I2O_MAX_SEGMENTS 128 951 + #define I2O_MAX_PHYS_SEGMENTS MAX_PHYS_SEGMENTS 1161 952 1162 953 #define I2O_REQ_MEMPOOL_SIZE 32 1163 954