Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (48 commits)
[SCSI] aacraid: do not set valid bit in sense information
[SCSI] ses: add new Enclosure ULD
[SCSI] enclosure: add support for enclosure services
[SCSI] sr: fix test unit ready responses
[SCSI] u14-34f: fix data direction bug
[SCSI] aacraid: pci_set_dma_max_seg_size opened up for late model controllers
[SCSI] fix BUG when sum(scatterlist) > bufflen
[SCSI] arcmsr: updates (1.20.00.15)
[SCSI] advansys: make 3 functions static
[SCSI] Small cleanups for scsi_host.h
[SCSI] dc395x: fix uninitialized var warning
[SCSI] NCR53C9x: remove driver
[SCSI] remove m68k NCR53C9x based drivers
[SCSI] dec_esp: Remove driver
[SCSI] kernel-doc: fix scsi docbook
[SCSI] update my email address
[SCSI] add protocol definitions
[SCSI] sd: handle bad lba in sense information
[SCSI] qla2xxx: Update version number to 8.02.00-k8.
[SCSI] qla2xxx: Correct issue where incorrect init-fw mailbox command was used on non-NPIV capable ISPs.
...

+2515 -10055
+1 -1
Documentation/DocBook/scsi.tmpl
··· 12 12 <surname>Bottomley</surname> 13 13 <affiliation> 14 14 <address> 15 - <email>James.Bottomley@steeleye.com</email> 15 + <email>James.Bottomley@hansenpartnership.com</email> 16 16 </address> 17 17 </affiliation> 18 18 </author>
+41
Documentation/scsi/ChangeLog.arcmsr
··· 68 68 ** 2. modify the arcmsr_pci_slot_reset function 69 69 ** 3. modify the arcmsr_pci_ers_disconnect_forepart function 70 70 ** 4. modify the arcmsr_pci_ers_need_reset_forepart function 71 + ** 1.20.00.15 09/27/2007 Erich Chen & Nick Cheng 72 + ** 1. add arcmsr_enable_eoi_mode() on adapter Type B 73 + ** 2. add readl(reg->iop2drv_doorbell_reg) in arcmsr_handle_hbb_isr() 74 + ** in case of the doorbell interrupt clearance is cached 75 + ** 1.20.00.15 10/01/2007 Erich Chen & Nick Cheng 76 + ** 1. modify acb->devstate[i][j] 77 + ** as ARECA_RAID_GOOD instead of 78 + ** ARECA_RAID_GONE in arcmsr_alloc_ccb_pool 79 + ** 1.20.00.15 11/06/2007 Erich Chen & Nick Cheng 80 + ** 1. add conditional declaration for 81 + ** arcmsr_pci_error_detected() and 82 + ** arcmsr_pci_slot_reset 83 + ** 1.20.00.15 11/23/2007 Erich Chen & Nick Cheng 84 + ** 1.check if the sg list member number 85 + ** exceeds arcmsr default limit in arcmsr_build_ccb() 86 + ** 2.change the returned value type of arcmsr_build_ccb() 87 + ** from "void" to "int" 88 + ** 3.add the conditional check if arcmsr_build_ccb() 89 + ** returns FAILED 90 + ** 1.20.00.15 12/04/2007 Erich Chen & Nick Cheng 91 + ** 1. modify arcmsr_drain_donequeue() to ignore unknown 92 + ** command and let kernel process command timeout. 93 + ** This could handle IO request violating max. segments 94 + ** while Linux XFS over DM-CRYPT. 95 + ** Thanks to Milan Broz's comments <mbroz@redhat.com> 96 + ** 1.20.00.15 12/24/2007 Erich Chen & Nick Cheng 97 + ** 1.fix the portability problems 98 + ** 2.fix type B where we should _not_ iounmap() acb->pmu; 99 + ** it's not ioremapped. 100 + ** 3.add return -ENOMEM if ioremap() fails 101 + ** 4.transfer IS_SG64_ADDR w/ cpu_to_le32() 102 + ** in arcmsr_build_ccb 103 + ** 5. 
modify acb->devstate[i][j] as ARECA_RAID_GONE instead of 104 + ** ARECA_RAID_GOOD in arcmsr_alloc_ccb_pool() 105 + ** 6.fix arcmsr_cdb->Context as (unsigned long)arcmsr_cdb 106 + ** 7.add the checking state of 107 + ** (outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT) == 0 108 + ** in arcmsr_handle_hba_isr 109 + ** 8.replace pci_alloc_consistent()/pci_free_consistent() with kmalloc()/kfree() in arcmsr_iop_message_xfer() 110 + ** 9. fix the release of dma memory for type B in arcmsr_free_ccb_pool() 111 + ** 10.fix the arcmsr_polling_hbb_ccbdone() 71 112 **************************************************************************
+1 -1
Documentation/scsi/scsi_mid_low_api.txt
··· 1407 1407 ======= 1408 1408 The following people have contributed to this document: 1409 1409 Mike Anderson <andmike at us dot ibm dot com> 1410 - James Bottomley <James dot Bottomley at steeleye dot com> 1410 + James Bottomley <James dot Bottomley at hansenpartnership dot com> 1411 1411 Patrick Mansfield <patmans at us dot ibm dot com> 1412 1412 Christoph Hellwig <hch at infradead dot org> 1413 1413 Doug Ledford <dledford at redhat dot com>
+9
drivers/misc/Kconfig
··· 285 285 286 286 If unsure, say N. 287 287 288 + config ENCLOSURE_SERVICES 289 + tristate "Enclosure Services" 290 + default n 291 + help 292 + Provides support for intelligent enclosures (bays which 293 + contain storage devices). You also need either a host 294 + driver (SCSI/ATA) which supports enclosures 295 + or a SCSI enclosure device (SES) to use these services. 296 + 288 297 endif # MISC_DEVICES
+1
drivers/misc/Makefile
··· 20 20 obj-$(CONFIG_FUJITSU_LAPTOP) += fujitsu-laptop.o 21 21 obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o 22 22 obj-$(CONFIG_INTEL_MENLOW) += intel_menlow.o 23 + obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
+484
drivers/misc/enclosure.c
··· 1 + /* 2 + * Enclosure Services 3 + * 4 + * Copyright (C) 2008 James Bottomley <James.Bottomley@HansenPartnership.com> 5 + * 6 + **----------------------------------------------------------------------------- 7 + ** 8 + ** This program is free software; you can redistribute it and/or 9 + ** modify it under the terms of the GNU General Public License 10 + ** version 2 as published by the Free Software Foundation. 11 + ** 12 + ** This program is distributed in the hope that it will be useful, 13 + ** but WITHOUT ANY WARRANTY; without even the implied warranty of 14 + ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 + ** GNU General Public License for more details. 16 + ** 17 + ** You should have received a copy of the GNU General Public License 18 + ** along with this program; if not, write to the Free Software 19 + ** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 20 + ** 21 + **----------------------------------------------------------------------------- 22 + */ 23 + #include <linux/device.h> 24 + #include <linux/enclosure.h> 25 + #include <linux/err.h> 26 + #include <linux/list.h> 27 + #include <linux/kernel.h> 28 + #include <linux/module.h> 29 + #include <linux/mutex.h> 30 + 31 + static LIST_HEAD(container_list); 32 + static DEFINE_MUTEX(container_list_lock); 33 + static struct class enclosure_class; 34 + static struct class enclosure_component_class; 35 + 36 + /** 37 + * enclosure_find - find an enclosure given a device 38 + * @dev: the device to find for 39 + * 40 + * Looks through the list of registered enclosures to see 41 + * if it can find a match for a device. Returns NULL if no 42 + * enclosure is found. Obtains a reference to the enclosure class 43 + * device which must be released with class_device_put(). 
44 + */ 45 + struct enclosure_device *enclosure_find(struct device *dev) 46 + { 47 + struct enclosure_device *edev = NULL; 48 + 49 + mutex_lock(&container_list_lock); 50 + list_for_each_entry(edev, &container_list, node) { 51 + if (edev->cdev.dev == dev) { 52 + class_device_get(&edev->cdev); 53 + mutex_unlock(&container_list_lock); 54 + return edev; 55 + } 56 + } 57 + mutex_unlock(&container_list_lock); 58 + 59 + return NULL; 60 + } 61 + EXPORT_SYMBOL_GPL(enclosure_find); 62 + 63 + /** 64 + * enclosure_for_each_device - calls a function for each enclosure 65 + * @fn: the function to call 66 + * @data: the data to pass to each call 67 + * 68 + * Loops over all the enclosures calling the function. 69 + * 70 + * Note, this function uses a mutex which will be held across calls to 71 + * @fn, so it must have non atomic context, and @fn may (although it 72 + * should not) sleep or otherwise cause the mutex to be held for 73 + * indefinite periods 74 + */ 75 + int enclosure_for_each_device(int (*fn)(struct enclosure_device *, void *), 76 + void *data) 77 + { 78 + int error = 0; 79 + struct enclosure_device *edev; 80 + 81 + mutex_lock(&container_list_lock); 82 + list_for_each_entry(edev, &container_list, node) { 83 + error = fn(edev, data); 84 + if (error) 85 + break; 86 + } 87 + mutex_unlock(&container_list_lock); 88 + 89 + return error; 90 + } 91 + EXPORT_SYMBOL_GPL(enclosure_for_each_device); 92 + 93 + /** 94 + * enclosure_register - register device as an enclosure 95 + * 96 + * @dev: device containing the enclosure 97 + * @components: number of components in the enclosure 98 + * 99 + * This sets up the device for being an enclosure. Note that @dev does 100 + * not have to be a dedicated enclosure device. 
It may be some other type 101 + * of device that additionally responds to enclosure services 102 + */ 103 + struct enclosure_device * 104 + enclosure_register(struct device *dev, const char *name, int components, 105 + struct enclosure_component_callbacks *cb) 106 + { 107 + struct enclosure_device *edev = 108 + kzalloc(sizeof(struct enclosure_device) + 109 + sizeof(struct enclosure_component)*components, 110 + GFP_KERNEL); 111 + int err, i; 112 + 113 + BUG_ON(!cb); 114 + 115 + if (!edev) 116 + return ERR_PTR(-ENOMEM); 117 + 118 + edev->components = components; 119 + 120 + edev->cdev.class = &enclosure_class; 121 + edev->cdev.dev = get_device(dev); 122 + edev->cb = cb; 123 + snprintf(edev->cdev.class_id, BUS_ID_SIZE, "%s", name); 124 + err = class_device_register(&edev->cdev); 125 + if (err) 126 + goto err; 127 + 128 + for (i = 0; i < components; i++) 129 + edev->component[i].number = -1; 130 + 131 + mutex_lock(&container_list_lock); 132 + list_add_tail(&edev->node, &container_list); 133 + mutex_unlock(&container_list_lock); 134 + 135 + return edev; 136 + 137 + err: 138 + put_device(edev->cdev.dev); 139 + kfree(edev); 140 + return ERR_PTR(err); 141 + } 142 + EXPORT_SYMBOL_GPL(enclosure_register); 143 + 144 + static struct enclosure_component_callbacks enclosure_null_callbacks; 145 + 146 + /** 147 + * enclosure_unregister - remove an enclosure 148 + * 149 + * @edev: the registered enclosure to remove; 150 + */ 151 + void enclosure_unregister(struct enclosure_device *edev) 152 + { 153 + int i; 154 + 155 + mutex_lock(&container_list_lock); 156 + list_del(&edev->node); 157 + mutex_unlock(&container_list_lock); 158 + 159 + for (i = 0; i < edev->components; i++) 160 + if (edev->component[i].number != -1) 161 + class_device_unregister(&edev->component[i].cdev); 162 + 163 + /* prevent any callbacks into service user */ 164 + edev->cb = &enclosure_null_callbacks; 165 + class_device_unregister(&edev->cdev); 166 + } 167 + EXPORT_SYMBOL_GPL(enclosure_unregister); 168 + 169 + 
static void enclosure_release(struct class_device *cdev) 170 + { 171 + struct enclosure_device *edev = to_enclosure_device(cdev); 172 + 173 + put_device(cdev->dev); 174 + kfree(edev); 175 + } 176 + 177 + static void enclosure_component_release(struct class_device *cdev) 178 + { 179 + if (cdev->dev) 180 + put_device(cdev->dev); 181 + class_device_put(cdev->parent); 182 + } 183 + 184 + /** 185 + * enclosure_component_register - add a particular component to an enclosure 186 + * @edev: the enclosure to add the component 187 + * @num: the device number 188 + * @type: the type of component being added 189 + * @name: an optional name to appear in sysfs (leave NULL if none) 190 + * 191 + * Registers the component. The name is optional for enclosures that 192 + * give their components a unique name. If not, leave the field NULL 193 + * and a name will be assigned. 194 + * 195 + * Returns a pointer to the enclosure component or an error. 196 + */ 197 + struct enclosure_component * 198 + enclosure_component_register(struct enclosure_device *edev, 199 + unsigned int number, 200 + enum enclosure_component_type type, 201 + const char *name) 202 + { 203 + struct enclosure_component *ecomp; 204 + struct class_device *cdev; 205 + int err; 206 + 207 + if (number >= edev->components) 208 + return ERR_PTR(-EINVAL); 209 + 210 + ecomp = &edev->component[number]; 211 + 212 + if (ecomp->number != -1) 213 + return ERR_PTR(-EINVAL); 214 + 215 + ecomp->type = type; 216 + ecomp->number = number; 217 + cdev = &ecomp->cdev; 218 + cdev->parent = class_device_get(&edev->cdev); 219 + cdev->class = &enclosure_component_class; 220 + if (name) 221 + snprintf(cdev->class_id, BUS_ID_SIZE, "%s", name); 222 + else 223 + snprintf(cdev->class_id, BUS_ID_SIZE, "%u", number); 224 + 225 + err = class_device_register(cdev); 226 + if (err) 227 + ERR_PTR(err); 228 + 229 + return ecomp; 230 + } 231 + EXPORT_SYMBOL_GPL(enclosure_component_register); 232 + 233 + /** 234 + * enclosure_add_device - add a device as 
being part of an enclosure 235 + * @edev: the enclosure device being added to. 236 + * @num: the number of the component 237 + * @dev: the device being added 238 + * 239 + * Declares a real device to reside in slot (or identifier) @num of an 240 + * enclosure. This will cause the relevant sysfs links to appear. 241 + * This function may also be used to change a device associated with 242 + * an enclosure without having to call enclosure_remove_device() in 243 + * between. 244 + * 245 + * Returns zero on success or an error. 246 + */ 247 + int enclosure_add_device(struct enclosure_device *edev, int component, 248 + struct device *dev) 249 + { 250 + struct class_device *cdev; 251 + 252 + if (!edev || component >= edev->components) 253 + return -EINVAL; 254 + 255 + cdev = &edev->component[component].cdev; 256 + 257 + class_device_del(cdev); 258 + if (cdev->dev) 259 + put_device(cdev->dev); 260 + cdev->dev = get_device(dev); 261 + return class_device_add(cdev); 262 + } 263 + EXPORT_SYMBOL_GPL(enclosure_add_device); 264 + 265 + /** 266 + * enclosure_remove_device - remove a device from an enclosure 267 + * @edev: the enclosure device 268 + * @num: the number of the component to remove 269 + * 270 + * Returns zero on success or an error. 
271 + * 272 + */ 273 + int enclosure_remove_device(struct enclosure_device *edev, int component) 274 + { 275 + struct class_device *cdev; 276 + 277 + if (!edev || component >= edev->components) 278 + return -EINVAL; 279 + 280 + cdev = &edev->component[component].cdev; 281 + 282 + class_device_del(cdev); 283 + if (cdev->dev) 284 + put_device(cdev->dev); 285 + cdev->dev = NULL; 286 + return class_device_add(cdev); 287 + } 288 + EXPORT_SYMBOL_GPL(enclosure_remove_device); 289 + 290 + /* 291 + * sysfs pieces below 292 + */ 293 + 294 + static ssize_t enclosure_show_components(struct class_device *cdev, char *buf) 295 + { 296 + struct enclosure_device *edev = to_enclosure_device(cdev); 297 + 298 + return snprintf(buf, 40, "%d\n", edev->components); 299 + } 300 + 301 + static struct class_device_attribute enclosure_attrs[] = { 302 + __ATTR(components, S_IRUGO, enclosure_show_components, NULL), 303 + __ATTR_NULL 304 + }; 305 + 306 + static struct class enclosure_class = { 307 + .name = "enclosure", 308 + .owner = THIS_MODULE, 309 + .release = enclosure_release, 310 + .class_dev_attrs = enclosure_attrs, 311 + }; 312 + 313 + static const char *const enclosure_status [] = { 314 + [ENCLOSURE_STATUS_UNSUPPORTED] = "unsupported", 315 + [ENCLOSURE_STATUS_OK] = "OK", 316 + [ENCLOSURE_STATUS_CRITICAL] = "critical", 317 + [ENCLOSURE_STATUS_NON_CRITICAL] = "non-critical", 318 + [ENCLOSURE_STATUS_UNRECOVERABLE] = "unrecoverable", 319 + [ENCLOSURE_STATUS_NOT_INSTALLED] = "not installed", 320 + [ENCLOSURE_STATUS_UNKNOWN] = "unknown", 321 + [ENCLOSURE_STATUS_UNAVAILABLE] = "unavailable", 322 + }; 323 + 324 + static const char *const enclosure_type [] = { 325 + [ENCLOSURE_COMPONENT_DEVICE] = "device", 326 + [ENCLOSURE_COMPONENT_ARRAY_DEVICE] = "array device", 327 + }; 328 + 329 + static ssize_t get_component_fault(struct class_device *cdev, char *buf) 330 + { 331 + struct enclosure_device *edev = to_enclosure_device(cdev->parent); 332 + struct enclosure_component *ecomp = 
to_enclosure_component(cdev); 333 + 334 + if (edev->cb->get_fault) 335 + edev->cb->get_fault(edev, ecomp); 336 + return snprintf(buf, 40, "%d\n", ecomp->fault); 337 + } 338 + 339 + static ssize_t set_component_fault(struct class_device *cdev, const char *buf, 340 + size_t count) 341 + { 342 + struct enclosure_device *edev = to_enclosure_device(cdev->parent); 343 + struct enclosure_component *ecomp = to_enclosure_component(cdev); 344 + int val = simple_strtoul(buf, NULL, 0); 345 + 346 + if (edev->cb->set_fault) 347 + edev->cb->set_fault(edev, ecomp, val); 348 + return count; 349 + } 350 + 351 + static ssize_t get_component_status(struct class_device *cdev, char *buf) 352 + { 353 + struct enclosure_device *edev = to_enclosure_device(cdev->parent); 354 + struct enclosure_component *ecomp = to_enclosure_component(cdev); 355 + 356 + if (edev->cb->get_status) 357 + edev->cb->get_status(edev, ecomp); 358 + return snprintf(buf, 40, "%s\n", enclosure_status[ecomp->status]); 359 + } 360 + 361 + static ssize_t set_component_status(struct class_device *cdev, const char *buf, 362 + size_t count) 363 + { 364 + struct enclosure_device *edev = to_enclosure_device(cdev->parent); 365 + struct enclosure_component *ecomp = to_enclosure_component(cdev); 366 + int i; 367 + 368 + for (i = 0; enclosure_status[i]; i++) { 369 + if (strncmp(buf, enclosure_status[i], 370 + strlen(enclosure_status[i])) == 0 && 371 + (buf[strlen(enclosure_status[i])] == '\n' || 372 + buf[strlen(enclosure_status[i])] == '\0')) 373 + break; 374 + } 375 + 376 + if (enclosure_status[i] && edev->cb->set_status) { 377 + edev->cb->set_status(edev, ecomp, i); 378 + return count; 379 + } else 380 + return -EINVAL; 381 + } 382 + 383 + static ssize_t get_component_active(struct class_device *cdev, char *buf) 384 + { 385 + struct enclosure_device *edev = to_enclosure_device(cdev->parent); 386 + struct enclosure_component *ecomp = to_enclosure_component(cdev); 387 + 388 + if (edev->cb->get_active) 389 + 
edev->cb->get_active(edev, ecomp); 390 + return snprintf(buf, 40, "%d\n", ecomp->active); 391 + } 392 + 393 + static ssize_t set_component_active(struct class_device *cdev, const char *buf, 394 + size_t count) 395 + { 396 + struct enclosure_device *edev = to_enclosure_device(cdev->parent); 397 + struct enclosure_component *ecomp = to_enclosure_component(cdev); 398 + int val = simple_strtoul(buf, NULL, 0); 399 + 400 + if (edev->cb->set_active) 401 + edev->cb->set_active(edev, ecomp, val); 402 + return count; 403 + } 404 + 405 + static ssize_t get_component_locate(struct class_device *cdev, char *buf) 406 + { 407 + struct enclosure_device *edev = to_enclosure_device(cdev->parent); 408 + struct enclosure_component *ecomp = to_enclosure_component(cdev); 409 + 410 + if (edev->cb->get_locate) 411 + edev->cb->get_locate(edev, ecomp); 412 + return snprintf(buf, 40, "%d\n", ecomp->locate); 413 + } 414 + 415 + static ssize_t set_component_locate(struct class_device *cdev, const char *buf, 416 + size_t count) 417 + { 418 + struct enclosure_device *edev = to_enclosure_device(cdev->parent); 419 + struct enclosure_component *ecomp = to_enclosure_component(cdev); 420 + int val = simple_strtoul(buf, NULL, 0); 421 + 422 + if (edev->cb->set_locate) 423 + edev->cb->set_locate(edev, ecomp, val); 424 + return count; 425 + } 426 + 427 + static ssize_t get_component_type(struct class_device *cdev, char *buf) 428 + { 429 + struct enclosure_component *ecomp = to_enclosure_component(cdev); 430 + 431 + return snprintf(buf, 40, "%s\n", enclosure_type[ecomp->type]); 432 + } 433 + 434 + 435 + static struct class_device_attribute enclosure_component_attrs[] = { 436 + __ATTR(fault, S_IRUGO | S_IWUSR, get_component_fault, 437 + set_component_fault), 438 + __ATTR(status, S_IRUGO | S_IWUSR, get_component_status, 439 + set_component_status), 440 + __ATTR(active, S_IRUGO | S_IWUSR, get_component_active, 441 + set_component_active), 442 + __ATTR(locate, S_IRUGO | S_IWUSR, get_component_locate, 443 + 
set_component_locate), 444 + __ATTR(type, S_IRUGO, get_component_type, NULL), 445 + __ATTR_NULL 446 + }; 447 + 448 + static struct class enclosure_component_class = { 449 + .name = "enclosure_component", 450 + .owner = THIS_MODULE, 451 + .class_dev_attrs = enclosure_component_attrs, 452 + .release = enclosure_component_release, 453 + }; 454 + 455 + static int __init enclosure_init(void) 456 + { 457 + int err; 458 + 459 + err = class_register(&enclosure_class); 460 + if (err) 461 + return err; 462 + err = class_register(&enclosure_component_class); 463 + if (err) 464 + goto err_out; 465 + 466 + return 0; 467 + err_out: 468 + class_unregister(&enclosure_class); 469 + 470 + return err; 471 + } 472 + 473 + static void __exit enclosure_exit(void) 474 + { 475 + class_unregister(&enclosure_component_class); 476 + class_unregister(&enclosure_class); 477 + } 478 + 479 + module_init(enclosure_init); 480 + module_exit(enclosure_exit); 481 + 482 + MODULE_AUTHOR("James Bottomley"); 483 + MODULE_DESCRIPTION("Enclosure Services"); 484 + MODULE_LICENSE("GPL v2");
+10 -83
drivers/scsi/Kconfig
··· 179 179 say M here and read <file:Documentation/kbuild/modules.txt> and 180 180 <file:Documentation/scsi/scsi.txt>. The module will be called ch.o. 181 181 If unsure, say N. 182 - 182 + 183 + config SCSI_ENCLOSURE 184 + tristate "SCSI Enclosure Support" 185 + depends on SCSI && ENCLOSURE_SERVICES 186 + help 187 + Enclosures are devices sitting on or in SCSI backplanes that 188 + manage devices. If you have a disk cage, the chances are that 189 + it has an enclosure device. Selecting this option will just allow 190 + certain enclosure conditions to be reported and is not required. 183 191 184 192 comment "Some SCSI devices (e.g. CD jukebox) support multiple LUNs" 185 193 depends on SCSI ··· 357 349 help 358 350 If you have a Western Digital WD93 SCSI controller on 359 351 an SGI MIPS system, say Y. Otherwise, say N. 360 - 361 - config SCSI_DECNCR 362 - tristate "DEC NCR53C94 Scsi Driver" 363 - depends on MACH_DECSTATION && SCSI && TC 364 - help 365 - Say Y here to support the NCR53C94 SCSI controller chips on IOASIC 366 - based TURBOchannel DECstations and TURBOchannel PMAZ-A cards. 367 - 368 - config SCSI_DECSII 369 - tristate "DEC SII Scsi Driver" 370 - depends on MACH_DECSTATION && SCSI && 32BIT 371 352 372 353 config BLK_DEV_3W_XXXX_RAID 373 354 tristate "3ware 5/6/7/8xxx ATA-RAID support" ··· 1260 1263 not allow targets to disconnect is not reasonable if there is more 1261 1264 than 1 device on a SCSI bus. The normal answer therefore is N. 1262 1265 1263 - config SCSI_MCA_53C9X 1264 - tristate "NCR MCA 53C9x SCSI support" 1265 - depends on MCA_LEGACY && SCSI && BROKEN_ON_SMP 1266 - help 1267 - Some MicroChannel machines, notably the NCR 35xx line, use a SCSI 1268 - controller based on the NCR 53C94. This driver will allow use of 1269 - the controller on the 3550, and very possibly others. 1270 - 1271 - To compile this driver as a module, choose M here: the 1272 - module will be called mca_53c9x. 
1273 - 1274 1266 config SCSI_PAS16 1275 1267 tristate "PAS16 SCSI support" 1276 1268 depends on ISA && SCSI ··· 1586 1600 To compile this driver as a module, choose M here: the 1587 1601 module will be called gvp11. 1588 1602 1589 - config CYBERSTORM_SCSI 1590 - tristate "CyberStorm SCSI support" 1591 - depends on ZORRO && SCSI 1592 - help 1593 - If you have an Amiga with an original (MkI) Phase5 Cyberstorm 1594 - accelerator board and the optional Cyberstorm SCSI controller, 1595 - answer Y. Otherwise, say N. 1596 - 1597 - config CYBERSTORMII_SCSI 1598 - tristate "CyberStorm Mk II SCSI support" 1599 - depends on ZORRO && SCSI 1600 - help 1601 - If you have an Amiga with a Phase5 Cyberstorm MkII accelerator board 1602 - and the optional Cyberstorm SCSI controller, say Y. Otherwise, 1603 - answer N. 1604 - 1605 - config BLZ2060_SCSI 1606 - tristate "Blizzard 2060 SCSI support" 1607 - depends on ZORRO && SCSI 1608 - help 1609 - If you have an Amiga with a Phase5 Blizzard 2060 accelerator board 1610 - and want to use the onboard SCSI controller, say Y. Otherwise, 1611 - answer N. 1612 - 1613 - config BLZ1230_SCSI 1614 - tristate "Blizzard 1230IV/1260 SCSI support" 1615 - depends on ZORRO && SCSI 1616 - help 1617 - If you have an Amiga 1200 with a Phase5 Blizzard 1230IV or Blizzard 1618 - 1260 accelerator, and the optional SCSI module, say Y. Otherwise, 1619 - say N. 1620 - 1621 - config FASTLANE_SCSI 1622 - tristate "Fastlane SCSI support" 1623 - depends on ZORRO && SCSI 1624 - help 1625 - If you have the Phase5 Fastlane Z3 SCSI controller, or plan to use 1626 - one in the near future, say Y to this question. Otherwise, say N. 1627 - 1628 1603 config SCSI_A4000T 1629 1604 tristate "A4000T NCR53c710 SCSI support (EXPERIMENTAL)" 1630 1605 depends on AMIGA && SCSI && EXPERIMENTAL ··· 1612 1665 - the SCSI controller on the Phase5 Blizzard PowerUP 603e+ 1613 1666 accelerator card for the Amiga 1200, 1614 1667 - the SCSI controller on the GVP Turbo 040/060 accelerator. 
1615 - 1616 - config OKTAGON_SCSI 1617 - tristate "BSC Oktagon SCSI support (EXPERIMENTAL)" 1618 - depends on ZORRO && SCSI && EXPERIMENTAL 1619 - help 1620 - If you have the BSC Oktagon SCSI disk controller for the Amiga, say 1621 - Y to this question. If you're in doubt about whether you have one, 1622 - see the picture at 1623 - <http://amiga.resource.cx/exp/search.pl?product=oktagon>. 1624 1668 1625 1669 config ATARI_SCSI 1626 1670 tristate "Atari native SCSI support" ··· 1665 1727 SCSI-HOWTO, available from 1666 1728 <http://www.tldp.org/docs.html#howto>. 1667 1729 1668 - config SCSI_MAC_ESP 1669 - tristate "Macintosh NCR53c9[46] SCSI" 1670 - depends on MAC && SCSI 1671 - help 1672 - This is the NCR 53c9x SCSI controller found on most of the 68040 1673 - based Macintoshes. If you have one of these say Y and read the 1674 - SCSI-HOWTO, available from 1675 - <http://www.tldp.org/docs.html#howto>. 1676 - 1677 - To compile this driver as a module, choose M here: the 1678 - module will be called mac_esp. 1679 - 1680 1730 config MVME147_SCSI 1681 1731 bool "WD33C93 SCSI driver for MVME147" 1682 1732 depends on MVME147 && SCSI=y ··· 1705 1779 config SUN3X_ESP 1706 1780 bool "Sun3x ESP SCSI" 1707 1781 depends on SUN3X && SCSI=y 1782 + select SCSI_SPI_ATTRS 1708 1783 help 1709 1784 The ESP was an on-board SCSI controller used on Sun 3/80 1710 1785 machines. Say Y here to compile in support for it.
+2 -10
drivers/scsi/Makefile
··· 44 44 obj-$(CONFIG_GVP11_SCSI) += gvp11.o wd33c93.o 45 45 obj-$(CONFIG_MVME147_SCSI) += mvme147.o wd33c93.o 46 46 obj-$(CONFIG_SGIWD93_SCSI) += sgiwd93.o wd33c93.o 47 - obj-$(CONFIG_CYBERSTORM_SCSI) += NCR53C9x.o cyberstorm.o 48 - obj-$(CONFIG_CYBERSTORMII_SCSI) += NCR53C9x.o cyberstormII.o 49 - obj-$(CONFIG_BLZ2060_SCSI) += NCR53C9x.o blz2060.o 50 - obj-$(CONFIG_BLZ1230_SCSI) += NCR53C9x.o blz1230.o 51 - obj-$(CONFIG_FASTLANE_SCSI) += NCR53C9x.o fastlane.o 52 - obj-$(CONFIG_OKTAGON_SCSI) += NCR53C9x.o oktagon_esp_mod.o 53 47 obj-$(CONFIG_ATARI_SCSI) += atari_scsi.o 54 48 obj-$(CONFIG_MAC_SCSI) += mac_scsi.o 55 - obj-$(CONFIG_SCSI_MAC_ESP) += mac_esp.o NCR53C9x.o 56 49 obj-$(CONFIG_SUN3_SCSI) += sun3_scsi.o sun3_scsi_vme.o 57 50 obj-$(CONFIG_MVME16x_SCSI) += 53c700.o mvme16x_scsi.o 58 51 obj-$(CONFIG_BVME6000_SCSI) += 53c700.o bvme6000_scsi.o ··· 88 95 obj-$(CONFIG_SCSI_ZALON) += zalon7xx.o 89 96 obj-$(CONFIG_SCSI_EATA_PIO) += eata_pio.o 90 97 obj-$(CONFIG_SCSI_7000FASST) += wd7000.o 91 - obj-$(CONFIG_SCSI_MCA_53C9X) += NCR53C9x.o mca_53c9x.o 92 98 obj-$(CONFIG_SCSI_IBMMCA) += ibmmca.o 93 99 obj-$(CONFIG_SCSI_EATA) += eata.o 94 100 obj-$(CONFIG_SCSI_DC395x) += dc395x.o ··· 104 112 obj-$(CONFIG_BLK_DEV_IDESCSI) += ide-scsi.o 105 113 obj-$(CONFIG_SCSI_MESH) += mesh.o 106 114 obj-$(CONFIG_SCSI_MAC53C94) += mac53c94.o 107 - obj-$(CONFIG_SCSI_DECNCR) += NCR53C9x.o dec_esp.o 108 115 obj-$(CONFIG_BLK_DEV_3W_XXXX_RAID) += 3w-xxxx.o 109 116 obj-$(CONFIG_SCSI_3W_9XXX) += 3w-9xxx.o 110 117 obj-$(CONFIG_SCSI_PPA) += ppa.o 111 118 obj-$(CONFIG_SCSI_IMM) += imm.o 112 119 obj-$(CONFIG_JAZZ_ESP) += esp_scsi.o jazz_esp.o 113 - obj-$(CONFIG_SUN3X_ESP) += NCR53C9x.o sun3x_esp.o 120 + obj-$(CONFIG_SUN3X_ESP) += esp_scsi.o sun3x_esp.o 114 121 obj-$(CONFIG_SCSI_LASI700) += 53c700.o lasi700.o 115 122 obj-$(CONFIG_SCSI_SNI_53C710) += 53c700.o sni_53c710.o 116 123 obj-$(CONFIG_SCSI_NSP32) += nsp32.o ··· 129 138 obj-$(CONFIG_BLK_DEV_SR) += sr_mod.o 130 139 obj-$(CONFIG_CHR_DEV_SG) += 
sg.o 131 140 obj-$(CONFIG_CHR_DEV_SCH) += ch.o 141 + obj-$(CONFIG_SCSI_ENCLOSURE) += ses.o 132 142 133 143 # This goes last, so that "real" scsi devices probe earlier 134 144 obj-$(CONFIG_SCSI_DEBUG) += scsi_debug.o
-3654
drivers/scsi/NCR53C9x.c
··· 1 - /* NCR53C9x.c: Generic SCSI driver code for NCR53C9x chips. 2 - * 3 - * Originally esp.c : EnhancedScsiProcessor Sun SCSI driver code. 4 - * 5 - * Copyright (C) 1995, 1998 David S. Miller (davem@caip.rutgers.edu) 6 - * 7 - * Most DMA dependencies put in driver specific files by 8 - * Jesper Skov (jskov@cygnus.co.uk) 9 - * 10 - * Set up to use esp_read/esp_write (preprocessor macros in NCR53c9x.h) by 11 - * Tymm Twillman (tymm@coe.missouri.edu) 12 - */ 13 - 14 - /* TODO: 15 - * 16 - * 1) Maybe disable parity checking in config register one for SCSI1 17 - * targets. (Gilmore says parity error on the SBus can lock up 18 - * old sun4c's) 19 - * 2) Add support for DMA2 pipelining. 20 - * 3) Add tagged queueing. 21 - * 4) Maybe change use of "esp" to something more "NCR"'ish. 22 - */ 23 - 24 - #include <linux/module.h> 25 - 26 - #include <linux/kernel.h> 27 - #include <linux/delay.h> 28 - #include <linux/types.h> 29 - #include <linux/string.h> 30 - #include <linux/slab.h> 31 - #include <linux/blkdev.h> 32 - #include <linux/interrupt.h> 33 - #include <linux/proc_fs.h> 34 - #include <linux/stat.h> 35 - #include <linux/init.h> 36 - 37 - #include "scsi.h" 38 - #include <scsi/scsi_host.h> 39 - #include "NCR53C9x.h" 40 - 41 - #include <asm/system.h> 42 - #include <asm/ptrace.h> 43 - #include <asm/pgtable.h> 44 - #include <asm/io.h> 45 - #include <asm/irq.h> 46 - 47 - /* Command phase enumeration. */ 48 - enum { 49 - not_issued = 0x00, /* Still in the issue_SC queue. */ 50 - 51 - /* Various forms of selecting a target. */ 52 - #define in_slct_mask 0x10 53 - in_slct_norm = 0x10, /* ESP is arbitrating, normal selection */ 54 - in_slct_stop = 0x11, /* ESP will select, then stop with IRQ */ 55 - in_slct_msg = 0x12, /* select, then send a message */ 56 - in_slct_tag = 0x13, /* select and send tagged queue msg */ 57 - in_slct_sneg = 0x14, /* select and acquire sync capabilities */ 58 - 59 - /* Any post selection activity. 
*/ 60 - #define in_phases_mask 0x20 61 - in_datain = 0x20, /* Data is transferring from the bus */ 62 - in_dataout = 0x21, /* Data is transferring to the bus */ 63 - in_data_done = 0x22, /* Last DMA data operation done (maybe) */ 64 - in_msgin = 0x23, /* Eating message from target */ 65 - in_msgincont = 0x24, /* Eating more msg bytes from target */ 66 - in_msgindone = 0x25, /* Decide what to do with what we got */ 67 - in_msgout = 0x26, /* Sending message to target */ 68 - in_msgoutdone = 0x27, /* Done sending msg out */ 69 - in_cmdbegin = 0x28, /* Sending cmd after abnormal selection */ 70 - in_cmdend = 0x29, /* Done sending slow cmd */ 71 - in_status = 0x2a, /* Was in status phase, finishing cmd */ 72 - in_freeing = 0x2b, /* freeing the bus for cmd cmplt or disc */ 73 - in_the_dark = 0x2c, /* Don't know what bus phase we are in */ 74 - 75 - /* Special states, ie. not normal bus transitions... */ 76 - #define in_spec_mask 0x80 77 - in_abortone = 0x80, /* Aborting one command currently */ 78 - in_abortall = 0x81, /* Blowing away all commands we have */ 79 - in_resetdev = 0x82, /* SCSI target reset in progress */ 80 - in_resetbus = 0x83, /* SCSI bus reset in progress */ 81 - in_tgterror = 0x84, /* Target did something stupid */ 82 - }; 83 - 84 - enum { 85 - /* Zero has special meaning, see skipahead[12]. */ 86 - /*0*/ do_never, 87 - 88 - /*1*/ do_phase_determine, 89 - /*2*/ do_reset_bus, 90 - /*3*/ do_reset_complete, 91 - /*4*/ do_work_bus, 92 - /*5*/ do_intr_end 93 - }; 94 - 95 - /* The master ring of all esp hosts we are managing in this driver. 
*/ 96 - static struct NCR_ESP *espchain; 97 - int nesps = 0, esps_in_use = 0, esps_running = 0; 98 - EXPORT_SYMBOL(nesps); 99 - EXPORT_SYMBOL(esps_running); 100 - 101 - irqreturn_t esp_intr(int irq, void *dev_id); 102 - 103 - /* Debugging routines */ 104 - static struct esp_cmdstrings { 105 - unchar cmdchar; 106 - char *text; 107 - } esp_cmd_strings[] = { 108 - /* Miscellaneous */ 109 - { ESP_CMD_NULL, "ESP_NOP", }, 110 - { ESP_CMD_FLUSH, "FIFO_FLUSH", }, 111 - { ESP_CMD_RC, "RSTESP", }, 112 - { ESP_CMD_RS, "RSTSCSI", }, 113 - /* Disconnected State Group */ 114 - { ESP_CMD_RSEL, "RESLCTSEQ", }, 115 - { ESP_CMD_SEL, "SLCTNATN", }, 116 - { ESP_CMD_SELA, "SLCTATN", }, 117 - { ESP_CMD_SELAS, "SLCTATNSTOP", }, 118 - { ESP_CMD_ESEL, "ENSLCTRESEL", }, 119 - { ESP_CMD_DSEL, "DISSELRESEL", }, 120 - { ESP_CMD_SA3, "SLCTATN3", }, 121 - { ESP_CMD_RSEL3, "RESLCTSEQ", }, 122 - /* Target State Group */ 123 - { ESP_CMD_SMSG, "SNDMSG", }, 124 - { ESP_CMD_SSTAT, "SNDSTATUS", }, 125 - { ESP_CMD_SDATA, "SNDDATA", }, 126 - { ESP_CMD_DSEQ, "DISCSEQ", }, 127 - { ESP_CMD_TSEQ, "TERMSEQ", }, 128 - { ESP_CMD_TCCSEQ, "TRGTCMDCOMPSEQ", }, 129 - { ESP_CMD_DCNCT, "DISC", }, 130 - { ESP_CMD_RMSG, "RCVMSG", }, 131 - { ESP_CMD_RCMD, "RCVCMD", }, 132 - { ESP_CMD_RDATA, "RCVDATA", }, 133 - { ESP_CMD_RCSEQ, "RCVCMDSEQ", }, 134 - /* Initiator State Group */ 135 - { ESP_CMD_TI, "TRANSINFO", }, 136 - { ESP_CMD_ICCSEQ, "INICMDSEQCOMP", }, 137 - { ESP_CMD_MOK, "MSGACCEPTED", }, 138 - { ESP_CMD_TPAD, "TPAD", }, 139 - { ESP_CMD_SATN, "SATN", }, 140 - { ESP_CMD_RATN, "RATN", }, 141 - }; 142 - #define NUM_ESP_COMMANDS ((sizeof(esp_cmd_strings)) / (sizeof(struct esp_cmdstrings))) 143 - 144 - /* Print textual representation of an ESP command */ 145 - static inline void esp_print_cmd(unchar espcmd) 146 - { 147 - unchar dma_bit = espcmd & ESP_CMD_DMA; 148 - int i; 149 - 150 - espcmd &= ~dma_bit; 151 - for(i=0; i<NUM_ESP_COMMANDS; i++) 152 - if(esp_cmd_strings[i].cmdchar == espcmd) 153 - break; 154 - 
if(i==NUM_ESP_COMMANDS) 155 - printk("ESP_Unknown"); 156 - else 157 - printk("%s%s", esp_cmd_strings[i].text, 158 - ((dma_bit) ? "+DMA" : "")); 159 - } 160 - 161 - /* Print the status register's value */ 162 - static inline void esp_print_statreg(unchar statreg) 163 - { 164 - unchar phase; 165 - 166 - printk("STATUS<"); 167 - phase = statreg & ESP_STAT_PMASK; 168 - printk("%s,", (phase == ESP_DOP ? "DATA-OUT" : 169 - (phase == ESP_DIP ? "DATA-IN" : 170 - (phase == ESP_CMDP ? "COMMAND" : 171 - (phase == ESP_STATP ? "STATUS" : 172 - (phase == ESP_MOP ? "MSG-OUT" : 173 - (phase == ESP_MIP ? "MSG_IN" : 174 - "unknown"))))))); 175 - if(statreg & ESP_STAT_TDONE) 176 - printk("TRANS_DONE,"); 177 - if(statreg & ESP_STAT_TCNT) 178 - printk("TCOUNT_ZERO,"); 179 - if(statreg & ESP_STAT_PERR) 180 - printk("P_ERROR,"); 181 - if(statreg & ESP_STAT_SPAM) 182 - printk("SPAM,"); 183 - if(statreg & ESP_STAT_INTR) 184 - printk("IRQ,"); 185 - printk(">"); 186 - } 187 - 188 - /* Print the interrupt register's value */ 189 - static inline void esp_print_ireg(unchar intreg) 190 - { 191 - printk("INTREG< "); 192 - if(intreg & ESP_INTR_S) 193 - printk("SLCT_NATN "); 194 - if(intreg & ESP_INTR_SATN) 195 - printk("SLCT_ATN "); 196 - if(intreg & ESP_INTR_RSEL) 197 - printk("RSLCT "); 198 - if(intreg & ESP_INTR_FDONE) 199 - printk("FDONE "); 200 - if(intreg & ESP_INTR_BSERV) 201 - printk("BSERV "); 202 - if(intreg & ESP_INTR_DC) 203 - printk("DISCNCT "); 204 - if(intreg & ESP_INTR_IC) 205 - printk("ILL_CMD "); 206 - if(intreg & ESP_INTR_SR) 207 - printk("SCSI_BUS_RESET "); 208 - printk(">"); 209 - } 210 - 211 - /* Print the sequence step registers contents */ 212 - static inline void esp_print_seqreg(unchar stepreg) 213 - { 214 - stepreg &= ESP_STEP_VBITS; 215 - printk("STEP<%s>", 216 - (stepreg == ESP_STEP_ASEL ? "SLCT_ARB_CMPLT" : 217 - (stepreg == ESP_STEP_SID ? "1BYTE_MSG_SENT" : 218 - (stepreg == ESP_STEP_NCMD ? "NOT_IN_CMD_PHASE" : 219 - (stepreg == ESP_STEP_PPC ? 
"CMD_BYTES_LOST" : 220 - (stepreg == ESP_STEP_FINI4 ? "CMD_SENT_OK" : 221 - "UNKNOWN")))))); 222 - } 223 - 224 - static char *phase_string(int phase) 225 - { 226 - switch(phase) { 227 - case not_issued: 228 - return "UNISSUED"; 229 - case in_slct_norm: 230 - return "SLCTNORM"; 231 - case in_slct_stop: 232 - return "SLCTSTOP"; 233 - case in_slct_msg: 234 - return "SLCTMSG"; 235 - case in_slct_tag: 236 - return "SLCTTAG"; 237 - case in_slct_sneg: 238 - return "SLCTSNEG"; 239 - case in_datain: 240 - return "DATAIN"; 241 - case in_dataout: 242 - return "DATAOUT"; 243 - case in_data_done: 244 - return "DATADONE"; 245 - case in_msgin: 246 - return "MSGIN"; 247 - case in_msgincont: 248 - return "MSGINCONT"; 249 - case in_msgindone: 250 - return "MSGINDONE"; 251 - case in_msgout: 252 - return "MSGOUT"; 253 - case in_msgoutdone: 254 - return "MSGOUTDONE"; 255 - case in_cmdbegin: 256 - return "CMDBEGIN"; 257 - case in_cmdend: 258 - return "CMDEND"; 259 - case in_status: 260 - return "STATUS"; 261 - case in_freeing: 262 - return "FREEING"; 263 - case in_the_dark: 264 - return "CLUELESS"; 265 - case in_abortone: 266 - return "ABORTONE"; 267 - case in_abortall: 268 - return "ABORTALL"; 269 - case in_resetdev: 270 - return "RESETDEV"; 271 - case in_resetbus: 272 - return "RESETBUS"; 273 - case in_tgterror: 274 - return "TGTERROR"; 275 - default: 276 - return "UNKNOWN"; 277 - }; 278 - } 279 - 280 - #ifdef DEBUG_STATE_MACHINE 281 - static inline void esp_advance_phase(Scsi_Cmnd *s, int newphase) 282 - { 283 - ESPLOG(("<%s>", phase_string(newphase))); 284 - s->SCp.sent_command = s->SCp.phase; 285 - s->SCp.phase = newphase; 286 - } 287 - #else 288 - #define esp_advance_phase(__s, __newphase) \ 289 - (__s)->SCp.sent_command = (__s)->SCp.phase; \ 290 - (__s)->SCp.phase = (__newphase); 291 - #endif 292 - 293 - #ifdef DEBUG_ESP_CMDS 294 - static inline void esp_cmd(struct NCR_ESP *esp, struct ESP_regs *eregs, 295 - unchar cmd) 296 - { 297 - esp->espcmdlog[esp->espcmdent] = cmd; 298 - 
esp->espcmdent = (esp->espcmdent + 1) & 31; 299 - esp_write(eregs->esp_cmnd, cmd); 300 - } 301 - #else 302 - #define esp_cmd(__esp, __eregs, __cmd) esp_write((__eregs)->esp_cmnd, (__cmd)) 303 - #endif 304 - 305 - /* How we use the various Linux SCSI data structures for operation. 306 - * 307 - * struct scsi_cmnd: 308 - * 309 - * We keep track of the syncronous capabilities of a target 310 - * in the device member, using sync_min_period and 311 - * sync_max_offset. These are the values we directly write 312 - * into the ESP registers while running a command. If offset 313 - * is zero the ESP will use asynchronous transfers. 314 - * If the borken flag is set we assume we shouldn't even bother 315 - * trying to negotiate for synchronous transfer as this target 316 - * is really stupid. If we notice the target is dropping the 317 - * bus, and we have been allowing it to disconnect, we clear 318 - * the disconnect flag. 319 - */ 320 - 321 - /* Manipulation of the ESP command queues. Thanks to the aha152x driver 322 - * and its author, Juergen E. Fischer, for the methods used here. 323 - * Note that these are per-ESP queues, not global queues like 324 - * the aha152x driver uses. 
325 - */ 326 - static inline void append_SC(Scsi_Cmnd **SC, Scsi_Cmnd *new_SC) 327 - { 328 - Scsi_Cmnd *end; 329 - 330 - new_SC->host_scribble = (unsigned char *) NULL; 331 - if(!*SC) 332 - *SC = new_SC; 333 - else { 334 - for(end=*SC;end->host_scribble;end=(Scsi_Cmnd *)end->host_scribble) 335 - ; 336 - end->host_scribble = (unsigned char *) new_SC; 337 - } 338 - } 339 - 340 - static inline void prepend_SC(Scsi_Cmnd **SC, Scsi_Cmnd *new_SC) 341 - { 342 - new_SC->host_scribble = (unsigned char *) *SC; 343 - *SC = new_SC; 344 - } 345 - 346 - static inline Scsi_Cmnd *remove_first_SC(Scsi_Cmnd **SC) 347 - { 348 - Scsi_Cmnd *ptr; 349 - 350 - ptr = *SC; 351 - if(ptr) 352 - *SC = (Scsi_Cmnd *) (*SC)->host_scribble; 353 - return ptr; 354 - } 355 - 356 - static inline Scsi_Cmnd *remove_SC(Scsi_Cmnd **SC, int target, int lun) 357 - { 358 - Scsi_Cmnd *ptr, *prev; 359 - 360 - for(ptr = *SC, prev = NULL; 361 - ptr && ((ptr->device->id != target) || (ptr->device->lun != lun)); 362 - prev = ptr, ptr = (Scsi_Cmnd *) ptr->host_scribble) 363 - ; 364 - if(ptr) { 365 - if(prev) 366 - prev->host_scribble=ptr->host_scribble; 367 - else 368 - *SC=(Scsi_Cmnd *)ptr->host_scribble; 369 - } 370 - return ptr; 371 - } 372 - 373 - /* Resetting various pieces of the ESP scsi driver chipset */ 374 - 375 - /* Reset the ESP chip, _not_ the SCSI bus. */ 376 - static void esp_reset_esp(struct NCR_ESP *esp, struct ESP_regs *eregs) 377 - { 378 - int family_code, version, i; 379 - volatile int trash; 380 - 381 - /* Now reset the ESP chip */ 382 - esp_cmd(esp, eregs, ESP_CMD_RC); 383 - esp_cmd(esp, eregs, ESP_CMD_NULL | ESP_CMD_DMA); 384 - if(esp->erev == fast) 385 - esp_write(eregs->esp_cfg2, ESP_CONFIG2_FENAB); 386 - esp_cmd(esp, eregs, ESP_CMD_NULL | ESP_CMD_DMA); 387 - 388 - /* This is the only point at which it is reliable to read 389 - * the ID-code for a fast ESP chip variant. 
390 - */ 391 - esp->max_period = ((35 * esp->ccycle) / 1000); 392 - if(esp->erev == fast) { 393 - char *erev2string[] = { 394 - "Emulex FAS236", 395 - "Emulex FPESP100A", 396 - "fast", 397 - "QLogic FAS366", 398 - "Emulex FAS216", 399 - "Symbios Logic 53CF9x-2", 400 - "unknown!" 401 - }; 402 - 403 - version = esp_read(eregs->esp_uid); 404 - family_code = (version & 0xf8) >> 3; 405 - if(family_code == 0x02) { 406 - if ((version & 7) == 2) 407 - esp->erev = fas216; 408 - else 409 - esp->erev = fas236; 410 - } else if(family_code == 0x0a) 411 - esp->erev = fas366; /* Version is usually '5'. */ 412 - else if(family_code == 0x00) { 413 - if ((version & 7) == 2) 414 - esp->erev = fas100a; /* NCR53C9X */ 415 - else 416 - esp->erev = espunknown; 417 - } else if(family_code == 0x14) { 418 - if ((version & 7) == 2) 419 - esp->erev = fsc; 420 - else 421 - esp->erev = espunknown; 422 - } else if(family_code == 0x00) { 423 - if ((version & 7) == 2) 424 - esp->erev = fas100a; /* NCR53C9X */ 425 - else 426 - esp->erev = espunknown; 427 - } else 428 - esp->erev = espunknown; 429 - ESPLOG(("esp%d: FAST chip is %s (family=%d, version=%d)\n", 430 - esp->esp_id, erev2string[esp->erev - fas236], 431 - family_code, (version & 7))); 432 - 433 - esp->min_period = ((4 * esp->ccycle) / 1000); 434 - } else { 435 - esp->min_period = ((5 * esp->ccycle) / 1000); 436 - } 437 - 438 - /* Reload the configuration registers */ 439 - esp_write(eregs->esp_cfact, esp->cfact); 440 - esp->prev_stp = 0; 441 - esp_write(eregs->esp_stp, 0); 442 - esp->prev_soff = 0; 443 - esp_write(eregs->esp_soff, 0); 444 - esp_write(eregs->esp_timeo, esp->neg_defp); 445 - esp->max_period = (esp->max_period + 3)>>2; 446 - esp->min_period = (esp->min_period + 3)>>2; 447 - 448 - esp_write(eregs->esp_cfg1, esp->config1); 449 - switch(esp->erev) { 450 - case esp100: 451 - /* nothing to do */ 452 - break; 453 - case esp100a: 454 - esp_write(eregs->esp_cfg2, esp->config2); 455 - break; 456 - case esp236: 457 - /* Slow 236 */ 458 
- esp_write(eregs->esp_cfg2, esp->config2); 459 - esp->prev_cfg3 = esp->config3[0]; 460 - esp_write(eregs->esp_cfg3, esp->prev_cfg3); 461 - break; 462 - case fas366: 463 - panic("esp: FAS366 support not present, please notify " 464 - "jongk@cs.utwente.nl"); 465 - break; 466 - case fas216: 467 - case fas236: 468 - case fsc: 469 - /* Fast ESP variants */ 470 - esp_write(eregs->esp_cfg2, esp->config2); 471 - for(i=0; i<8; i++) 472 - esp->config3[i] |= ESP_CONFIG3_FCLK; 473 - esp->prev_cfg3 = esp->config3[0]; 474 - esp_write(eregs->esp_cfg3, esp->prev_cfg3); 475 - if(esp->diff) 476 - esp->radelay = 0; 477 - else 478 - esp->radelay = 16; 479 - /* Different timeout constant for these chips */ 480 - esp->neg_defp = 481 - FSC_NEG_DEFP(esp->cfreq, 482 - (esp->cfact == ESP_CCF_F0 ? 483 - ESP_CCF_F7 + 1 : esp->cfact)); 484 - esp_write(eregs->esp_timeo, esp->neg_defp); 485 - /* Enable Active Negotiation if possible */ 486 - if((esp->erev == fsc) && !esp->diff) 487 - esp_write(eregs->esp_cfg4, ESP_CONFIG4_EAN); 488 - break; 489 - case fas100a: 490 - /* Fast 100a */ 491 - esp_write(eregs->esp_cfg2, esp->config2); 492 - for(i=0; i<8; i++) 493 - esp->config3[i] |= ESP_CONFIG3_FCLOCK; 494 - esp->prev_cfg3 = esp->config3[0]; 495 - esp_write(eregs->esp_cfg3, esp->prev_cfg3); 496 - esp->radelay = 32; 497 - break; 498 - default: 499 - panic("esp: what could it be... I wonder..."); 500 - break; 501 - }; 502 - 503 - /* Eat any bitrot in the chip */ 504 - trash = esp_read(eregs->esp_intrpt); 505 - udelay(100); 506 - } 507 - 508 - /* This places the ESP into a known state at boot time. 
*/ 509 - void esp_bootup_reset(struct NCR_ESP *esp, struct ESP_regs *eregs) 510 - { 511 - volatile unchar trash; 512 - 513 - /* Reset the DMA */ 514 - if(esp->dma_reset) 515 - esp->dma_reset(esp); 516 - 517 - /* Reset the ESP */ 518 - esp_reset_esp(esp, eregs); 519 - 520 - /* Reset the SCSI bus, but tell ESP not to generate an irq */ 521 - esp_write(eregs->esp_cfg1, (esp_read(eregs->esp_cfg1) | ESP_CONFIG1_SRRDISAB)); 522 - esp_cmd(esp, eregs, ESP_CMD_RS); 523 - udelay(400); 524 - esp_write(eregs->esp_cfg1, esp->config1); 525 - 526 - /* Eat any bitrot in the chip and we are done... */ 527 - trash = esp_read(eregs->esp_intrpt); 528 - } 529 - EXPORT_SYMBOL(esp_bootup_reset); 530 - 531 - /* Allocate structure and insert basic data such as SCSI chip frequency 532 - * data and a pointer to the device 533 - */ 534 - struct NCR_ESP* esp_allocate(struct scsi_host_template *tpnt, void *esp_dev, 535 - int hotplug) 536 - { 537 - struct NCR_ESP *esp, *elink; 538 - struct Scsi_Host *esp_host; 539 - 540 - if (hotplug) 541 - esp_host = scsi_host_alloc(tpnt, sizeof(struct NCR_ESP)); 542 - else 543 - esp_host = scsi_register(tpnt, sizeof(struct NCR_ESP)); 544 - if(!esp_host) 545 - panic("Cannot register ESP SCSI host"); 546 - esp = (struct NCR_ESP *) esp_host->hostdata; 547 - if(!esp) 548 - panic("No esp in hostdata"); 549 - esp->ehost = esp_host; 550 - esp->edev = esp_dev; 551 - esp->esp_id = nesps++; 552 - 553 - /* Set bitshift value (only used on Amiga with multiple ESPs) */ 554 - esp->shift = 2; 555 - 556 - /* Put into the chain of esp chips detected */ 557 - if(espchain) { 558 - elink = espchain; 559 - while(elink->next) elink = elink->next; 560 - elink->next = esp; 561 - } else { 562 - espchain = esp; 563 - } 564 - esp->next = NULL; 565 - 566 - return esp; 567 - } 568 - 569 - void esp_deallocate(struct NCR_ESP *esp) 570 - { 571 - struct NCR_ESP *elink; 572 - 573 - if(espchain == esp) { 574 - espchain = NULL; 575 - } else { 576 - for(elink = espchain; elink && (elink->next != 
esp); elink = elink->next); 577 - if(elink) 578 - elink->next = esp->next; 579 - } 580 - nesps--; 581 - } 582 - 583 - /* Complete initialization of ESP structure and device 584 - * Caller must have initialized appropriate parts of the ESP structure 585 - * between the call to esp_allocate and this function. 586 - */ 587 - void esp_initialize(struct NCR_ESP *esp) 588 - { 589 - struct ESP_regs *eregs = esp->eregs; 590 - unsigned int fmhz; 591 - unchar ccf; 592 - int i; 593 - 594 - /* Check out the clock properties of the chip. */ 595 - 596 - /* This is getting messy but it has to be done 597 - * correctly or else you get weird behavior all 598 - * over the place. We are trying to basically 599 - * figure out three pieces of information. 600 - * 601 - * a) Clock Conversion Factor 602 - * 603 - * This is a representation of the input 604 - * crystal clock frequency going into the 605 - * ESP on this machine. Any operation whose 606 - * timing is longer than 400ns depends on this 607 - * value being correct. For example, you'll 608 - * get blips for arbitration/selection during 609 - * high load or with multiple targets if this 610 - * is not set correctly. 611 - * 612 - * b) Selection Time-Out 613 - * 614 - * The ESP isn't very bright and will arbitrate 615 - * for the bus and try to select a target 616 - * forever if you let it. This value tells 617 - * the ESP when it has taken too long to 618 - * negotiate and that it should interrupt 619 - * the CPU so we can see what happened. 620 - * The value is computed as follows (from 621 - * NCR/Symbios chip docs). 622 - * 623 - * (Time Out Period) * (Input Clock) 624 - * STO = ---------------------------------- 625 - * (8192) * (Clock Conversion Factor) 626 - * 627 - * You usually want the time out period to be 628 - * around 250ms, I think we'll set it a little 629 - * bit higher to account for fully loaded SCSI 630 - * bus's and slow devices that don't respond so 631 - * quickly to selection attempts. 
(yeah, I know 632 - * this is out of spec. but there is a lot of 633 - * buggy pieces of firmware out there so bite me) 634 - * 635 - * c) Imperical constants for synchronous offset 636 - * and transfer period register values 637 - * 638 - * This entails the smallest and largest sync 639 - * period we could ever handle on this ESP. 640 - */ 641 - 642 - fmhz = esp->cfreq; 643 - 644 - if(fmhz <= (5000000)) 645 - ccf = 0; 646 - else 647 - ccf = (((5000000 - 1) + (fmhz))/(5000000)); 648 - if(!ccf || ccf > 8) { 649 - /* If we can't find anything reasonable, 650 - * just assume 20MHZ. This is the clock 651 - * frequency of the older sun4c's where I've 652 - * been unable to find the clock-frequency 653 - * PROM property. All other machines provide 654 - * useful values it seems. 655 - */ 656 - ccf = ESP_CCF_F4; 657 - fmhz = (20000000); 658 - } 659 - if(ccf==(ESP_CCF_F7+1)) 660 - esp->cfact = ESP_CCF_F0; 661 - else if(ccf == ESP_CCF_NEVER) 662 - esp->cfact = ESP_CCF_F2; 663 - else 664 - esp->cfact = ccf; 665 - esp->cfreq = fmhz; 666 - esp->ccycle = ESP_MHZ_TO_CYCLE(fmhz); 667 - esp->ctick = ESP_TICK(ccf, esp->ccycle); 668 - esp->neg_defp = ESP_NEG_DEFP(fmhz, ccf); 669 - esp->sync_defp = SYNC_DEFP_SLOW; 670 - 671 - printk("SCSI ID %d Clk %dMHz CCF=%d TOut %d ", 672 - esp->scsi_id, (esp->cfreq / 1000000), 673 - ccf, (int) esp->neg_defp); 674 - 675 - /* Fill in ehost data */ 676 - esp->ehost->base = (unsigned long)eregs; 677 - esp->ehost->this_id = esp->scsi_id; 678 - esp->ehost->irq = esp->irq; 679 - 680 - /* SCSI id mask */ 681 - esp->scsi_id_mask = (1 << esp->scsi_id); 682 - 683 - /* Probe the revision of this esp */ 684 - esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7)); 685 - esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY); 686 - esp_write(eregs->esp_cfg2, esp->config2); 687 - if((esp_read(eregs->esp_cfg2) & ~(ESP_CONFIG2_MAGIC)) != 688 - (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) { 689 - printk("NCR53C90(esp100)\n"); 690 - esp->erev = 
esp100; 691 - } else { 692 - esp->config2 = 0; 693 - esp_write(eregs->esp_cfg2, 0); 694 - esp_write(eregs->esp_cfg3, 5); 695 - if(esp_read(eregs->esp_cfg3) != 5) { 696 - printk("NCR53C90A(esp100a)\n"); 697 - esp->erev = esp100a; 698 - } else { 699 - int target; 700 - 701 - for(target=0; target<8; target++) 702 - esp->config3[target] = 0; 703 - esp->prev_cfg3 = 0; 704 - esp_write(eregs->esp_cfg3, 0); 705 - if(ccf > ESP_CCF_F5) { 706 - printk("NCR53C9XF(espfast)\n"); 707 - esp->erev = fast; 708 - esp->sync_defp = SYNC_DEFP_FAST; 709 - } else { 710 - printk("NCR53C9x(esp236)\n"); 711 - esp->erev = esp236; 712 - } 713 - } 714 - } 715 - 716 - /* Initialize the command queues */ 717 - esp->current_SC = NULL; 718 - esp->disconnected_SC = NULL; 719 - esp->issue_SC = NULL; 720 - 721 - /* Clear the state machines. */ 722 - esp->targets_present = 0; 723 - esp->resetting_bus = 0; 724 - esp->snip = 0; 725 - 726 - init_waitqueue_head(&esp->reset_queue); 727 - 728 - esp->fas_premature_intr_workaround = 0; 729 - for(i = 0; i < 32; i++) 730 - esp->espcmdlog[i] = 0; 731 - esp->espcmdent = 0; 732 - for(i = 0; i < 16; i++) { 733 - esp->cur_msgout[i] = 0; 734 - esp->cur_msgin[i] = 0; 735 - } 736 - esp->prevmsgout = esp->prevmsgin = 0; 737 - esp->msgout_len = esp->msgin_len = 0; 738 - 739 - /* Clear the one behind caches to hold unmatchable values. */ 740 - esp->prev_soff = esp->prev_stp = esp->prev_cfg3 = 0xff; 741 - 742 - /* Reset the thing before we try anything... */ 743 - esp_bootup_reset(esp, eregs); 744 - 745 - esps_in_use++; 746 - } 747 - 748 - /* The info function will return whatever useful 749 - * information the developer sees fit. If not provided, then 750 - * the name field will be used instead. 
751 - */ 752 - const char *esp_info(struct Scsi_Host *host) 753 - { 754 - struct NCR_ESP *esp; 755 - 756 - esp = (struct NCR_ESP *) host->hostdata; 757 - switch(esp->erev) { 758 - case esp100: 759 - return "ESP100 (NCR53C90)"; 760 - case esp100a: 761 - return "ESP100A (NCR53C90A)"; 762 - case esp236: 763 - return "ESP236 (NCR53C9x)"; 764 - case fas216: 765 - return "Emulex FAS216"; 766 - case fas236: 767 - return "Emulex FAS236"; 768 - case fas366: 769 - return "QLogic FAS366"; 770 - case fas100a: 771 - return "FPESP100A"; 772 - case fsc: 773 - return "Symbios Logic 53CF9x-2"; 774 - default: 775 - panic("Bogon ESP revision"); 776 - }; 777 - } 778 - EXPORT_SYMBOL(esp_info); 779 - 780 - /* From Wolfgang Stanglmeier's NCR scsi driver. */ 781 - struct info_str 782 - { 783 - char *buffer; 784 - int length; 785 - int offset; 786 - int pos; 787 - }; 788 - 789 - static void copy_mem_info(struct info_str *info, char *data, int len) 790 - { 791 - if (info->pos + len > info->length) 792 - len = info->length - info->pos; 793 - 794 - if (info->pos + len < info->offset) { 795 - info->pos += len; 796 - return; 797 - } 798 - if (info->pos < info->offset) { 799 - data += (info->offset - info->pos); 800 - len -= (info->offset - info->pos); 801 - } 802 - 803 - if (len > 0) { 804 - memcpy(info->buffer + info->pos, data, len); 805 - info->pos += len; 806 - } 807 - } 808 - 809 - static int copy_info(struct info_str *info, char *fmt, ...) 
810 - { 811 - va_list args; 812 - char buf[81]; 813 - int len; 814 - 815 - va_start(args, fmt); 816 - len = vsprintf(buf, fmt, args); 817 - va_end(args); 818 - 819 - copy_mem_info(info, buf, len); 820 - return len; 821 - } 822 - 823 - static int esp_host_info(struct NCR_ESP *esp, char *ptr, off_t offset, int len) 824 - { 825 - struct scsi_device *sdev; 826 - struct info_str info; 827 - int i; 828 - 829 - info.buffer = ptr; 830 - info.length = len; 831 - info.offset = offset; 832 - info.pos = 0; 833 - 834 - copy_info(&info, "ESP Host Adapter:\n"); 835 - copy_info(&info, "\tESP Model\t\t"); 836 - switch(esp->erev) { 837 - case esp100: 838 - copy_info(&info, "ESP100 (NCR53C90)\n"); 839 - break; 840 - case esp100a: 841 - copy_info(&info, "ESP100A (NCR53C90A)\n"); 842 - break; 843 - case esp236: 844 - copy_info(&info, "ESP236 (NCR53C9x)\n"); 845 - break; 846 - case fas216: 847 - copy_info(&info, "Emulex FAS216\n"); 848 - break; 849 - case fas236: 850 - copy_info(&info, "Emulex FAS236\n"); 851 - break; 852 - case fas100a: 853 - copy_info(&info, "FPESP100A\n"); 854 - break; 855 - case fast: 856 - copy_info(&info, "Generic FAST\n"); 857 - break; 858 - case fas366: 859 - copy_info(&info, "QLogic FAS366\n"); 860 - break; 861 - case fsc: 862 - copy_info(&info, "Symbios Logic 53C9x-2\n"); 863 - break; 864 - case espunknown: 865 - default: 866 - copy_info(&info, "Unknown!\n"); 867 - break; 868 - }; 869 - copy_info(&info, "\tLive Targets\t\t[ "); 870 - for(i = 0; i < 15; i++) { 871 - if(esp->targets_present & (1 << i)) 872 - copy_info(&info, "%d ", i); 873 - } 874 - copy_info(&info, "]\n\n"); 875 - 876 - /* Now describe the state of each existing target. 
*/ 877 - copy_info(&info, "Target #\tconfig3\t\tSync Capabilities\tDisconnect\n"); 878 - 879 - shost_for_each_device(sdev, esp->ehost) { 880 - struct esp_device *esp_dev = sdev->hostdata; 881 - uint id = sdev->id; 882 - 883 - if (!(esp->targets_present & (1 << id))) 884 - continue; 885 - 886 - copy_info(&info, "%d\t\t", id); 887 - copy_info(&info, "%08lx\t", esp->config3[id]); 888 - copy_info(&info, "[%02lx,%02lx]\t\t\t", 889 - esp_dev->sync_max_offset, 890 - esp_dev->sync_min_period); 891 - copy_info(&info, "%s\n", esp_dev->disconnect ? "yes" : "no"); 892 - } 893 - 894 - return info.pos > info.offset? info.pos - info.offset : 0; 895 - } 896 - 897 - /* ESP proc filesystem code. */ 898 - int esp_proc_info(struct Scsi_Host *shost, char *buffer, char **start, off_t offset, int length, 899 - int inout) 900 - { 901 - struct NCR_ESP *esp = (struct NCR_ESP *)shost->hostdata; 902 - 903 - if(inout) 904 - return -EINVAL; /* not yet */ 905 - if(start) 906 - *start = buffer; 907 - return esp_host_info(esp, buffer, offset, length); 908 - } 909 - EXPORT_SYMBOL(esp_proc_info); 910 - 911 - static void esp_get_dmabufs(struct NCR_ESP *esp, Scsi_Cmnd *sp) 912 - { 913 - if(sp->use_sg == 0) { 914 - sp->SCp.this_residual = sp->request_bufflen; 915 - sp->SCp.buffer = (struct scatterlist *) sp->request_buffer; 916 - sp->SCp.buffers_residual = 0; 917 - if (esp->dma_mmu_get_scsi_one) 918 - esp->dma_mmu_get_scsi_one(esp, sp); 919 - else 920 - sp->SCp.ptr = 921 - (char *) virt_to_phys(sp->request_buffer); 922 - } else { 923 - sp->SCp.buffer = (struct scatterlist *) sp->request_buffer; 924 - sp->SCp.buffers_residual = sp->use_sg - 1; 925 - sp->SCp.this_residual = sp->SCp.buffer->length; 926 - if (esp->dma_mmu_get_scsi_sgl) 927 - esp->dma_mmu_get_scsi_sgl(esp, sp); 928 - else 929 - sp->SCp.ptr = 930 - (char *) virt_to_phys(sg_virt(sp->SCp.buffer)); 931 - } 932 - } 933 - 934 - static void esp_release_dmabufs(struct NCR_ESP *esp, Scsi_Cmnd *sp) 935 - { 936 - if(sp->use_sg == 0) { 937 - if 
(esp->dma_mmu_release_scsi_one) 938 - esp->dma_mmu_release_scsi_one(esp, sp); 939 - } else { 940 - if (esp->dma_mmu_release_scsi_sgl) 941 - esp->dma_mmu_release_scsi_sgl(esp, sp); 942 - } 943 - } 944 - 945 - static void esp_restore_pointers(struct NCR_ESP *esp, Scsi_Cmnd *sp) 946 - { 947 - struct esp_pointers *ep = &esp->data_pointers[scmd_id(sp)]; 948 - 949 - sp->SCp.ptr = ep->saved_ptr; 950 - sp->SCp.buffer = ep->saved_buffer; 951 - sp->SCp.this_residual = ep->saved_this_residual; 952 - sp->SCp.buffers_residual = ep->saved_buffers_residual; 953 - } 954 - 955 - static void esp_save_pointers(struct NCR_ESP *esp, Scsi_Cmnd *sp) 956 - { 957 - struct esp_pointers *ep = &esp->data_pointers[scmd_id(sp)]; 958 - 959 - ep->saved_ptr = sp->SCp.ptr; 960 - ep->saved_buffer = sp->SCp.buffer; 961 - ep->saved_this_residual = sp->SCp.this_residual; 962 - ep->saved_buffers_residual = sp->SCp.buffers_residual; 963 - } 964 - 965 - /* Some rules: 966 - * 967 - * 1) Never ever panic while something is live on the bus. 968 - * If there is to be any chance of syncing the disks this 969 - * rule is to be obeyed. 970 - * 971 - * 2) Any target that causes a foul condition will no longer 972 - * have synchronous transfers done to it, no questions 973 - * asked. 974 - * 975 - * 3) Keep register accesses to a minimum. Think about some 976 - * day when we have Xbus machines this is running on and 977 - * the ESP chip is on the other end of the machine on a 978 - * different board from the cpu where this is running. 979 - */ 980 - 981 - /* Fire off a command. We assume the bus is free and that the only 982 - * case where we could see an interrupt is where we have disconnected 983 - * commands active and they are trying to reselect us. 
984 - */ 985 - static inline void esp_check_cmd(struct NCR_ESP *esp, Scsi_Cmnd *sp) 986 - { 987 - switch(sp->cmd_len) { 988 - case 6: 989 - case 10: 990 - case 12: 991 - esp->esp_slowcmd = 0; 992 - break; 993 - 994 - default: 995 - esp->esp_slowcmd = 1; 996 - esp->esp_scmdleft = sp->cmd_len; 997 - esp->esp_scmdp = &sp->cmnd[0]; 998 - break; 999 - }; 1000 - } 1001 - 1002 - static inline void build_sync_nego_msg(struct NCR_ESP *esp, int period, int offset) 1003 - { 1004 - esp->cur_msgout[0] = EXTENDED_MESSAGE; 1005 - esp->cur_msgout[1] = 3; 1006 - esp->cur_msgout[2] = EXTENDED_SDTR; 1007 - esp->cur_msgout[3] = period; 1008 - esp->cur_msgout[4] = offset; 1009 - esp->msgout_len = 5; 1010 - } 1011 - 1012 - static void esp_exec_cmd(struct NCR_ESP *esp) 1013 - { 1014 - struct ESP_regs *eregs = esp->eregs; 1015 - struct esp_device *esp_dev; 1016 - Scsi_Cmnd *SCptr; 1017 - struct scsi_device *SDptr; 1018 - volatile unchar *cmdp = esp->esp_command; 1019 - unsigned char the_esp_command; 1020 - int lun, target; 1021 - int i; 1022 - 1023 - /* Hold off if we have disconnected commands and 1024 - * an IRQ is showing... 1025 - */ 1026 - if(esp->disconnected_SC && esp->dma_irq_p(esp)) 1027 - return; 1028 - 1029 - /* Grab first member of the issue queue. */ 1030 - SCptr = esp->current_SC = remove_first_SC(&esp->issue_SC); 1031 - 1032 - /* Safe to panic here because current_SC is null. */ 1033 - if(!SCptr) 1034 - panic("esp: esp_exec_cmd and issue queue is NULL"); 1035 - 1036 - SDptr = SCptr->device; 1037 - esp_dev = SDptr->hostdata; 1038 - lun = SCptr->device->lun; 1039 - target = SCptr->device->id; 1040 - 1041 - esp->snip = 0; 1042 - esp->msgout_len = 0; 1043 - 1044 - /* Send it out whole, or piece by piece? The ESP 1045 - * only knows how to automatically send out 6, 10, 1046 - * and 12 byte commands. 
I used to think that the 1047 - * Linux SCSI code would never throw anything other 1048 - * than that to us, but then again there is the 1049 - * SCSI generic driver which can send us anything. 1050 - */ 1051 - esp_check_cmd(esp, SCptr); 1052 - 1053 - /* If arbitration/selection is successful, the ESP will leave 1054 - * ATN asserted, causing the target to go into message out 1055 - * phase. The ESP will feed the target the identify and then 1056 - * the target can only legally go to one of command, 1057 - * datain/out, status, or message in phase, or stay in message 1058 - * out phase (should we be trying to send a sync negotiation 1059 - * message after the identify). It is not allowed to drop 1060 - * BSY, but some buggy targets do and we check for this 1061 - * condition in the selection complete code. Most of the time 1062 - * we'll make the command bytes available to the ESP and it 1063 - * will not interrupt us until it finishes command phase, we 1064 - * cannot do this for command sizes the ESP does not 1065 - * understand and in this case we'll get interrupted right 1066 - * when the target goes into command phase. 1067 - * 1068 - * It is absolutely _illegal_ in the presence of SCSI-2 devices 1069 - * to use the ESP select w/o ATN command. When SCSI-2 devices are 1070 - * present on the bus we _must_ always go straight to message out 1071 - * phase with an identify message for the target. Being that 1072 - * selection attempts in SCSI-1 w/o ATN was an option, doing SCSI-2 1073 - * selections should not confuse SCSI-1 we hope. 
1074 - */ 1075 - 1076 - if(esp_dev->sync) { 1077 - /* this targets sync is known */ 1078 - #ifdef CONFIG_SCSI_MAC_ESP 1079 - do_sync_known: 1080 - #endif 1081 - if(esp_dev->disconnect) 1082 - *cmdp++ = IDENTIFY(1, lun); 1083 - else 1084 - *cmdp++ = IDENTIFY(0, lun); 1085 - 1086 - if(esp->esp_slowcmd) { 1087 - the_esp_command = (ESP_CMD_SELAS | ESP_CMD_DMA); 1088 - esp_advance_phase(SCptr, in_slct_stop); 1089 - } else { 1090 - the_esp_command = (ESP_CMD_SELA | ESP_CMD_DMA); 1091 - esp_advance_phase(SCptr, in_slct_norm); 1092 - } 1093 - } else if(!(esp->targets_present & (1<<target)) || !(esp_dev->disconnect)) { 1094 - /* After the bootup SCSI code sends both the 1095 - * TEST_UNIT_READY and INQUIRY commands we want 1096 - * to at least attempt allowing the device to 1097 - * disconnect. 1098 - */ 1099 - ESPMISC(("esp: Selecting device for first time. target=%d " 1100 - "lun=%d\n", target, SCptr->device->lun)); 1101 - if(!SDptr->borken && !esp_dev->disconnect) 1102 - esp_dev->disconnect = 1; 1103 - 1104 - *cmdp++ = IDENTIFY(0, lun); 1105 - esp->prevmsgout = NOP; 1106 - esp_advance_phase(SCptr, in_slct_norm); 1107 - the_esp_command = (ESP_CMD_SELA | ESP_CMD_DMA); 1108 - 1109 - /* Take no chances... */ 1110 - esp_dev->sync_max_offset = 0; 1111 - esp_dev->sync_min_period = 0; 1112 - } else { 1113 - int toshiba_cdrom_hwbug_wkaround = 0; 1114 - 1115 - #ifdef CONFIG_SCSI_MAC_ESP 1116 - /* Never allow synchronous transfers (disconnect OK) on 1117 - * Macintosh. Well, maybe later when we figured out how to 1118 - * do DMA on the machines that support it ... 1119 - */ 1120 - esp_dev->disconnect = 1; 1121 - esp_dev->sync_max_offset = 0; 1122 - esp_dev->sync_min_period = 0; 1123 - esp_dev->sync = 1; 1124 - esp->snip = 0; 1125 - goto do_sync_known; 1126 - #endif 1127 - /* We've talked to this guy before, 1128 - * but never negotiated. Let's try 1129 - * sync negotiation. 
1130 - */ 1131 - if(!SDptr->borken) { 1132 - if((SDptr->type == TYPE_ROM) && 1133 - (!strncmp(SDptr->vendor, "TOSHIBA", 7))) { 1134 - /* Nice try sucker... */ 1135 - ESPMISC(("esp%d: Disabling sync for buggy " 1136 - "Toshiba CDROM.\n", esp->esp_id)); 1137 - toshiba_cdrom_hwbug_wkaround = 1; 1138 - build_sync_nego_msg(esp, 0, 0); 1139 - } else { 1140 - build_sync_nego_msg(esp, esp->sync_defp, 15); 1141 - } 1142 - } else { 1143 - build_sync_nego_msg(esp, 0, 0); 1144 - } 1145 - esp_dev->sync = 1; 1146 - esp->snip = 1; 1147 - 1148 - /* A fix for broken SCSI1 targets, when they disconnect 1149 - * they lock up the bus and confuse ESP. So disallow 1150 - * disconnects for SCSI1 targets for now until we 1151 - * find a better fix. 1152 - * 1153 - * Addendum: This is funny, I figured out what was going 1154 - * on. The blotzed SCSI1 target would disconnect, 1155 - * one of the other SCSI2 targets or both would be 1156 - * disconnected as well. The SCSI1 target would 1157 - * stay disconnected long enough that we start 1158 - * up a command on one of the SCSI2 targets. As 1159 - * the ESP is arbitrating for the bus the SCSI1 1160 - * target begins to arbitrate as well to reselect 1161 - * the ESP. The SCSI1 target refuses to drop it's 1162 - * ID bit on the data bus even though the ESP is 1163 - * at ID 7 and is the obvious winner for any 1164 - * arbitration. The ESP is a poor sport and refuses 1165 - * to lose arbitration, it will continue indefinitely 1166 - * trying to arbitrate for the bus and can only be 1167 - * stopped via a chip reset or SCSI bus reset. 1168 - * Therefore _no_ disconnects for SCSI1 targets 1169 - * thank you very much. 
;-) 1170 - */ 1171 - if(((SDptr->scsi_level < 3) && (SDptr->type != TYPE_TAPE)) || 1172 - toshiba_cdrom_hwbug_wkaround || SDptr->borken) { 1173 - ESPMISC((KERN_INFO "esp%d: Disabling DISCONNECT for target %d " 1174 - "lun %d\n", esp->esp_id, SCptr->device->id, SCptr->device->lun)); 1175 - esp_dev->disconnect = 0; 1176 - *cmdp++ = IDENTIFY(0, lun); 1177 - } else { 1178 - *cmdp++ = IDENTIFY(1, lun); 1179 - } 1180 - 1181 - /* ESP fifo is only so big... 1182 - * Make this look like a slow command. 1183 - */ 1184 - esp->esp_slowcmd = 1; 1185 - esp->esp_scmdleft = SCptr->cmd_len; 1186 - esp->esp_scmdp = &SCptr->cmnd[0]; 1187 - 1188 - the_esp_command = (ESP_CMD_SELAS | ESP_CMD_DMA); 1189 - esp_advance_phase(SCptr, in_slct_msg); 1190 - } 1191 - 1192 - if(!esp->esp_slowcmd) 1193 - for(i = 0; i < SCptr->cmd_len; i++) 1194 - *cmdp++ = SCptr->cmnd[i]; 1195 - 1196 - esp_write(eregs->esp_busid, (target & 7)); 1197 - if (esp->prev_soff != esp_dev->sync_max_offset || 1198 - esp->prev_stp != esp_dev->sync_min_period || 1199 - (esp->erev > esp100a && 1200 - esp->prev_cfg3 != esp->config3[target])) { 1201 - esp->prev_soff = esp_dev->sync_max_offset; 1202 - esp_write(eregs->esp_soff, esp->prev_soff); 1203 - esp->prev_stp = esp_dev->sync_min_period; 1204 - esp_write(eregs->esp_stp, esp->prev_stp); 1205 - if(esp->erev > esp100a) { 1206 - esp->prev_cfg3 = esp->config3[target]; 1207 - esp_write(eregs->esp_cfg3, esp->prev_cfg3); 1208 - } 1209 - } 1210 - i = (cmdp - esp->esp_command); 1211 - 1212 - /* Set up the DMA and ESP counters */ 1213 - if(esp->do_pio_cmds){ 1214 - int j = 0; 1215 - 1216 - /* 1217 - * XXX MSch: 1218 - * 1219 - * It seems this is required, at least to clean up 1220 - * after failed commands when using PIO mode ... 1221 - */ 1222 - esp_cmd(esp, eregs, ESP_CMD_FLUSH); 1223 - 1224 - for(;j<i;j++) 1225 - esp_write(eregs->esp_fdata, esp->esp_command[j]); 1226 - the_esp_command &= ~ESP_CMD_DMA; 1227 - 1228 - /* Tell ESP to "go". 
*/ 1229 - esp_cmd(esp, eregs, the_esp_command); 1230 - } else { 1231 - /* Set up the ESP counters */ 1232 - esp_write(eregs->esp_tclow, i); 1233 - esp_write(eregs->esp_tcmed, 0); 1234 - esp->dma_init_write(esp, esp->esp_command_dvma, i); 1235 - 1236 - /* Tell ESP to "go". */ 1237 - esp_cmd(esp, eregs, the_esp_command); 1238 - } 1239 - } 1240 - 1241 - /* Queue a SCSI command delivered from the mid-level Linux SCSI code. */ 1242 - int esp_queue(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)) 1243 - { 1244 - struct NCR_ESP *esp; 1245 - 1246 - /* Set up func ptr and initial driver cmd-phase. */ 1247 - SCpnt->scsi_done = done; 1248 - SCpnt->SCp.phase = not_issued; 1249 - 1250 - esp = (struct NCR_ESP *) SCpnt->device->host->hostdata; 1251 - 1252 - if(esp->dma_led_on) 1253 - esp->dma_led_on(esp); 1254 - 1255 - /* We use the scratch area. */ 1256 - ESPQUEUE(("esp_queue: target=%d lun=%d ", SCpnt->device->id, SCpnt->lun)); 1257 - ESPDISC(("N<%02x,%02x>", SCpnt->device->id, SCpnt->lun)); 1258 - 1259 - esp_get_dmabufs(esp, SCpnt); 1260 - esp_save_pointers(esp, SCpnt); /* FIXME for tag queueing */ 1261 - 1262 - SCpnt->SCp.Status = CHECK_CONDITION; 1263 - SCpnt->SCp.Message = 0xff; 1264 - SCpnt->SCp.sent_command = 0; 1265 - 1266 - /* Place into our queue. */ 1267 - if(SCpnt->cmnd[0] == REQUEST_SENSE) { 1268 - ESPQUEUE(("RQSENSE\n")); 1269 - prepend_SC(&esp->issue_SC, SCpnt); 1270 - } else { 1271 - ESPQUEUE(("\n")); 1272 - append_SC(&esp->issue_SC, SCpnt); 1273 - } 1274 - 1275 - /* Run it now if we can. */ 1276 - if(!esp->current_SC && !esp->resetting_bus) 1277 - esp_exec_cmd(esp); 1278 - 1279 - return 0; 1280 - } 1281 - 1282 - /* Dump driver state. 
*/ 1283 - static void esp_dump_cmd(Scsi_Cmnd *SCptr) 1284 - { 1285 - ESPLOG(("[tgt<%02x> lun<%02x> " 1286 - "pphase<%s> cphase<%s>]", 1287 - SCptr->device->id, SCptr->device->lun, 1288 - phase_string(SCptr->SCp.sent_command), 1289 - phase_string(SCptr->SCp.phase))); 1290 - } 1291 - 1292 - static void esp_dump_state(struct NCR_ESP *esp, 1293 - struct ESP_regs *eregs) 1294 - { 1295 - Scsi_Cmnd *SCptr = esp->current_SC; 1296 - #ifdef DEBUG_ESP_CMDS 1297 - int i; 1298 - #endif 1299 - 1300 - ESPLOG(("esp%d: dumping state\n", esp->esp_id)); 1301 - 1302 - /* Print DMA status */ 1303 - esp->dma_dump_state(esp); 1304 - 1305 - ESPLOG(("esp%d: SW [sreg<%02x> sstep<%02x> ireg<%02x>]\n", 1306 - esp->esp_id, esp->sreg, esp->seqreg, esp->ireg)); 1307 - ESPLOG(("esp%d: HW reread [sreg<%02x> sstep<%02x> ireg<%02x>]\n", 1308 - esp->esp_id, esp_read(eregs->esp_status), esp_read(eregs->esp_sstep), 1309 - esp_read(eregs->esp_intrpt))); 1310 - #ifdef DEBUG_ESP_CMDS 1311 - printk("esp%d: last ESP cmds [", esp->esp_id); 1312 - i = (esp->espcmdent - 1) & 31; 1313 - printk("<"); 1314 - esp_print_cmd(esp->espcmdlog[i]); 1315 - printk(">"); 1316 - i = (i - 1) & 31; 1317 - printk("<"); 1318 - esp_print_cmd(esp->espcmdlog[i]); 1319 - printk(">"); 1320 - i = (i - 1) & 31; 1321 - printk("<"); 1322 - esp_print_cmd(esp->espcmdlog[i]); 1323 - printk(">"); 1324 - i = (i - 1) & 31; 1325 - printk("<"); 1326 - esp_print_cmd(esp->espcmdlog[i]); 1327 - printk(">"); 1328 - printk("]\n"); 1329 - #endif /* (DEBUG_ESP_CMDS) */ 1330 - 1331 - if(SCptr) { 1332 - ESPLOG(("esp%d: current command ", esp->esp_id)); 1333 - esp_dump_cmd(SCptr); 1334 - } 1335 - ESPLOG(("\n")); 1336 - SCptr = esp->disconnected_SC; 1337 - ESPLOG(("esp%d: disconnected ", esp->esp_id)); 1338 - while(SCptr) { 1339 - esp_dump_cmd(SCptr); 1340 - SCptr = (Scsi_Cmnd *) SCptr->host_scribble; 1341 - } 1342 - ESPLOG(("\n")); 1343 - } 1344 - 1345 - /* Abort a command. The host_lock is acquired by caller. 
*/ 1346 - int esp_abort(Scsi_Cmnd *SCptr) 1347 - { 1348 - struct NCR_ESP *esp = (struct NCR_ESP *) SCptr->device->host->hostdata; 1349 - struct ESP_regs *eregs = esp->eregs; 1350 - int don; 1351 - 1352 - ESPLOG(("esp%d: Aborting command\n", esp->esp_id)); 1353 - esp_dump_state(esp, eregs); 1354 - 1355 - /* Wheee, if this is the current command on the bus, the 1356 - * best we can do is assert ATN and wait for msgout phase. 1357 - * This should even fix a hung SCSI bus when we lose state 1358 - * in the driver and timeout because the eventual phase change 1359 - * will cause the ESP to (eventually) give an interrupt. 1360 - */ 1361 - if(esp->current_SC == SCptr) { 1362 - esp->cur_msgout[0] = ABORT; 1363 - esp->msgout_len = 1; 1364 - esp->msgout_ctr = 0; 1365 - esp_cmd(esp, eregs, ESP_CMD_SATN); 1366 - return SUCCESS; 1367 - } 1368 - 1369 - /* If it is still in the issue queue then we can safely 1370 - * call the completion routine and report abort success. 1371 - */ 1372 - don = esp->dma_ports_p(esp); 1373 - if(don) { 1374 - esp->dma_ints_off(esp); 1375 - synchronize_irq(esp->irq); 1376 - } 1377 - if(esp->issue_SC) { 1378 - Scsi_Cmnd **prev, *this; 1379 - for(prev = (&esp->issue_SC), this = esp->issue_SC; 1380 - this; 1381 - prev = (Scsi_Cmnd **) &(this->host_scribble), 1382 - this = (Scsi_Cmnd *) this->host_scribble) { 1383 - if(this == SCptr) { 1384 - *prev = (Scsi_Cmnd *) this->host_scribble; 1385 - this->host_scribble = NULL; 1386 - esp_release_dmabufs(esp, this); 1387 - this->result = DID_ABORT << 16; 1388 - this->scsi_done(this); 1389 - if(don) 1390 - esp->dma_ints_on(esp); 1391 - return SUCCESS; 1392 - } 1393 - } 1394 - } 1395 - 1396 - /* Yuck, the command to abort is disconnected, it is not 1397 - * worth trying to abort it now if something else is live 1398 - * on the bus at this time. So, we let the SCSI code wait 1399 - * a little bit and try again later. 
1400 - */ 1401 - if(esp->current_SC) { 1402 - if(don) 1403 - esp->dma_ints_on(esp); 1404 - return FAILED; 1405 - } 1406 - 1407 - /* It's disconnected, we have to reconnect to re-establish 1408 - * the nexus and tell the device to abort. However, we really 1409 - * cannot 'reconnect' per se. Don't try to be fancy, just 1410 - * indicate failure, which causes our caller to reset the whole 1411 - * bus. 1412 - */ 1413 - 1414 - if(don) 1415 - esp->dma_ints_on(esp); 1416 - return FAILED; 1417 - } 1418 - 1419 - /* We've sent ESP_CMD_RS to the ESP, the interrupt had just 1420 - * arrived indicating the end of the SCSI bus reset. Our job 1421 - * is to clean out the command queues and begin re-execution 1422 - * of SCSI commands once more. 1423 - */ 1424 - static int esp_finish_reset(struct NCR_ESP *esp, 1425 - struct ESP_regs *eregs) 1426 - { 1427 - Scsi_Cmnd *sp = esp->current_SC; 1428 - 1429 - /* Clean up currently executing command, if any. */ 1430 - if (sp != NULL) { 1431 - esp_release_dmabufs(esp, sp); 1432 - sp->result = (DID_RESET << 16); 1433 - sp->scsi_done(sp); 1434 - esp->current_SC = NULL; 1435 - } 1436 - 1437 - /* Clean up disconnected queue, they have been invalidated 1438 - * by the bus reset. 1439 - */ 1440 - if (esp->disconnected_SC) { 1441 - while((sp = remove_first_SC(&esp->disconnected_SC)) != NULL) { 1442 - esp_release_dmabufs(esp, sp); 1443 - sp->result = (DID_RESET << 16); 1444 - sp->scsi_done(sp); 1445 - } 1446 - } 1447 - 1448 - /* SCSI bus reset is complete. */ 1449 - esp->resetting_bus = 0; 1450 - wake_up(&esp->reset_queue); 1451 - 1452 - /* Ok, now it is safe to get commands going once more. 
*/ 1453 - if(esp->issue_SC) 1454 - esp_exec_cmd(esp); 1455 - 1456 - return do_intr_end; 1457 - } 1458 - 1459 - static int esp_do_resetbus(struct NCR_ESP *esp, 1460 - struct ESP_regs *eregs) 1461 - { 1462 - ESPLOG(("esp%d: Resetting scsi bus\n", esp->esp_id)); 1463 - esp->resetting_bus = 1; 1464 - esp_cmd(esp, eregs, ESP_CMD_RS); 1465 - 1466 - return do_intr_end; 1467 - } 1468 - 1469 - /* Reset ESP chip, reset hanging bus, then kill active and 1470 - * disconnected commands for targets without soft reset. 1471 - * 1472 - * The host_lock is acquired by caller. 1473 - */ 1474 - int esp_reset(Scsi_Cmnd *SCptr) 1475 - { 1476 - struct NCR_ESP *esp = (struct NCR_ESP *) SCptr->device->host->hostdata; 1477 - 1478 - spin_lock_irq(esp->ehost->host_lock); 1479 - (void) esp_do_resetbus(esp, esp->eregs); 1480 - spin_unlock_irq(esp->ehost->host_lock); 1481 - 1482 - wait_event(esp->reset_queue, (esp->resetting_bus == 0)); 1483 - 1484 - return SUCCESS; 1485 - } 1486 - 1487 - /* Internal ESP done function. */ 1488 - static void esp_done(struct NCR_ESP *esp, int error) 1489 - { 1490 - Scsi_Cmnd *done_SC; 1491 - 1492 - if(esp->current_SC) { 1493 - done_SC = esp->current_SC; 1494 - esp->current_SC = NULL; 1495 - esp_release_dmabufs(esp, done_SC); 1496 - done_SC->result = error; 1497 - done_SC->scsi_done(done_SC); 1498 - 1499 - /* Bus is free, issue any commands in the queue. */ 1500 - if(esp->issue_SC && !esp->current_SC) 1501 - esp_exec_cmd(esp); 1502 - } else { 1503 - /* Panic is safe as current_SC is null so we may still 1504 - * be able to accept more commands to sync disk buffers. 1505 - */ 1506 - ESPLOG(("panicing\n")); 1507 - panic("esp: done() called with NULL esp->current_SC"); 1508 - } 1509 - } 1510 - 1511 - /* Wheee, ESP interrupt engine. */ 1512 - 1513 - /* Forward declarations. 
*/ 1514 - static int esp_do_phase_determine(struct NCR_ESP *esp, 1515 - struct ESP_regs *eregs); 1516 - static int esp_do_data_finale(struct NCR_ESP *esp, struct ESP_regs *eregs); 1517 - static int esp_select_complete(struct NCR_ESP *esp, struct ESP_regs *eregs); 1518 - static int esp_do_status(struct NCR_ESP *esp, struct ESP_regs *eregs); 1519 - static int esp_do_msgin(struct NCR_ESP *esp, struct ESP_regs *eregs); 1520 - static int esp_do_msgindone(struct NCR_ESP *esp, struct ESP_regs *eregs); 1521 - static int esp_do_msgout(struct NCR_ESP *esp, struct ESP_regs *eregs); 1522 - static int esp_do_cmdbegin(struct NCR_ESP *esp, struct ESP_regs *eregs); 1523 - 1524 - #define sreg_datainp(__sreg) (((__sreg) & ESP_STAT_PMASK) == ESP_DIP) 1525 - #define sreg_dataoutp(__sreg) (((__sreg) & ESP_STAT_PMASK) == ESP_DOP) 1526 - 1527 - /* We try to avoid some interrupts by jumping ahead and see if the ESP 1528 - * has gotten far enough yet. Hence the following. 1529 - */ 1530 - static inline int skipahead1(struct NCR_ESP *esp, struct ESP_regs *eregs, 1531 - Scsi_Cmnd *scp, int prev_phase, int new_phase) 1532 - { 1533 - if(scp->SCp.sent_command != prev_phase) 1534 - return 0; 1535 - 1536 - if(esp->dma_irq_p(esp)) { 1537 - /* Yes, we are able to save an interrupt. */ 1538 - esp->sreg = (esp_read(eregs->esp_status) & ~(ESP_STAT_INTR)); 1539 - esp->ireg = esp_read(eregs->esp_intrpt); 1540 - if(!(esp->ireg & ESP_INTR_SR)) 1541 - return 0; 1542 - else 1543 - return do_reset_complete; 1544 - } 1545 - /* Ho hum, target is taking forever... */ 1546 - scp->SCp.sent_command = new_phase; /* so we don't recurse... 
*/ 1547 - return do_intr_end; 1548 - } 1549 - 1550 - static inline int skipahead2(struct NCR_ESP *esp, 1551 - struct ESP_regs *eregs, 1552 - Scsi_Cmnd *scp, int prev_phase1, int prev_phase2, 1553 - int new_phase) 1554 - { 1555 - if(scp->SCp.sent_command != prev_phase1 && 1556 - scp->SCp.sent_command != prev_phase2) 1557 - return 0; 1558 - if(esp->dma_irq_p(esp)) { 1559 - /* Yes, we are able to save an interrupt. */ 1560 - esp->sreg = (esp_read(eregs->esp_status) & ~(ESP_STAT_INTR)); 1561 - esp->ireg = esp_read(eregs->esp_intrpt); 1562 - if(!(esp->ireg & ESP_INTR_SR)) 1563 - return 0; 1564 - else 1565 - return do_reset_complete; 1566 - } 1567 - /* Ho hum, target is taking forever... */ 1568 - scp->SCp.sent_command = new_phase; /* so we don't recurse... */ 1569 - return do_intr_end; 1570 - } 1571 - 1572 - /* Misc. esp helper macros. */ 1573 - #define esp_setcount(__eregs, __cnt) \ 1574 - esp_write((__eregs)->esp_tclow, ((__cnt) & 0xff)); \ 1575 - esp_write((__eregs)->esp_tcmed, (((__cnt) >> 8) & 0xff)) 1576 - 1577 - #define esp_getcount(__eregs) \ 1578 - ((esp_read((__eregs)->esp_tclow)&0xff) | \ 1579 - ((esp_read((__eregs)->esp_tcmed)&0xff) << 8)) 1580 - 1581 - #define fcount(__esp, __eregs) \ 1582 - (esp_read((__eregs)->esp_fflags) & ESP_FF_FBYTES) 1583 - 1584 - #define fnzero(__esp, __eregs) \ 1585 - (esp_read((__eregs)->esp_fflags) & ESP_FF_ONOTZERO) 1586 - 1587 - /* XXX speculative nops unnecessary when continuing amidst a data phase 1588 - * XXX even on esp100!!! another case of flooding the bus with I/O reg 1589 - * XXX writes... 1590 - */ 1591 - #define esp_maybe_nop(__esp, __eregs) \ 1592 - if((__esp)->erev == esp100) \ 1593 - esp_cmd((__esp), (__eregs), ESP_CMD_NULL) 1594 - 1595 - #define sreg_to_dataphase(__sreg) \ 1596 - ((((__sreg) & ESP_STAT_PMASK) == ESP_DOP) ? 
in_dataout : in_datain) 1597 - 1598 - /* The ESP100 when in synchronous data phase, can mistake a long final 1599 - * REQ pulse from the target as an extra byte, it places whatever is on 1600 - * the data lines into the fifo. For now, we will assume when this 1601 - * happens that the target is a bit quirky and we don't want to 1602 - * be talking synchronously to it anyways. Regardless, we need to 1603 - * tell the ESP to eat the extraneous byte so that we can proceed 1604 - * to the next phase. 1605 - */ 1606 - static inline int esp100_sync_hwbug(struct NCR_ESP *esp, struct ESP_regs *eregs, 1607 - Scsi_Cmnd *sp, int fifocnt) 1608 - { 1609 - /* Do not touch this piece of code. */ 1610 - if((!(esp->erev == esp100)) || 1611 - (!(sreg_datainp((esp->sreg = esp_read(eregs->esp_status))) && !fifocnt) && 1612 - !(sreg_dataoutp(esp->sreg) && !fnzero(esp, eregs)))) { 1613 - if(sp->SCp.phase == in_dataout) 1614 - esp_cmd(esp, eregs, ESP_CMD_FLUSH); 1615 - return 0; 1616 - } else { 1617 - /* Async mode for this guy. */ 1618 - build_sync_nego_msg(esp, 0, 0); 1619 - 1620 - /* Ack the bogus byte, but set ATN first. */ 1621 - esp_cmd(esp, eregs, ESP_CMD_SATN); 1622 - esp_cmd(esp, eregs, ESP_CMD_MOK); 1623 - return 1; 1624 - } 1625 - } 1626 - 1627 - /* This closes the window during a selection with a reselect pending, because 1628 - * we use DMA for the selection process the FIFO should hold the correct 1629 - * contents if we get reselected during this process. So we just need to 1630 - * ack the possible illegal cmd interrupt pending on the esp100. 
1631 - */ 1632 - static inline int esp100_reconnect_hwbug(struct NCR_ESP *esp, 1633 - struct ESP_regs *eregs) 1634 - { 1635 - volatile unchar junk; 1636 - 1637 - if(esp->erev != esp100) 1638 - return 0; 1639 - junk = esp_read(eregs->esp_intrpt); 1640 - 1641 - if(junk & ESP_INTR_SR) 1642 - return 1; 1643 - return 0; 1644 - } 1645 - 1646 - /* This verifies the BUSID bits during a reselection so that we know which 1647 - * target is talking to us. 1648 - */ 1649 - static inline int reconnect_target(struct NCR_ESP *esp, struct ESP_regs *eregs) 1650 - { 1651 - int it, me = esp->scsi_id_mask, targ = 0; 1652 - 1653 - if(2 != fcount(esp, eregs)) 1654 - return -1; 1655 - it = esp_read(eregs->esp_fdata); 1656 - if(!(it & me)) 1657 - return -1; 1658 - it &= ~me; 1659 - if(it & (it - 1)) 1660 - return -1; 1661 - while(!(it & 1)) 1662 - targ++, it >>= 1; 1663 - return targ; 1664 - } 1665 - 1666 - /* This verifies the identify from the target so that we know which lun is 1667 - * being reconnected. 1668 - */ 1669 - static inline int reconnect_lun(struct NCR_ESP *esp, struct ESP_regs *eregs) 1670 - { 1671 - int lun; 1672 - 1673 - if((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) 1674 - return -1; 1675 - lun = esp_read(eregs->esp_fdata); 1676 - 1677 - /* Yes, you read this correctly. We report lun of zero 1678 - * if we see parity error. ESP reports parity error for 1679 - * the lun byte, and this is the only way to hope to recover 1680 - * because the target is connected. 1681 - */ 1682 - if(esp->sreg & ESP_STAT_PERR) 1683 - return 0; 1684 - 1685 - /* Check for illegal bits being set in the lun. */ 1686 - if((lun & 0x40) || !(lun & 0x80)) 1687 - return -1; 1688 - 1689 - return lun & 7; 1690 - } 1691 - 1692 - /* This puts the driver in a state where it can revitalize a command that 1693 - * is being continued due to reselection. 
1694 - */ 1695 - static inline void esp_connect(struct NCR_ESP *esp, struct ESP_regs *eregs, 1696 - Scsi_Cmnd *sp) 1697 - { 1698 - struct scsi_device *dp = sp->device; 1699 - struct esp_device *esp_dev = dp->hostdata; 1700 - 1701 - if(esp->prev_soff != esp_dev->sync_max_offset || 1702 - esp->prev_stp != esp_dev->sync_min_period || 1703 - (esp->erev > esp100a && 1704 - esp->prev_cfg3 != esp->config3[scmd_id(sp)])) { 1705 - esp->prev_soff = esp_dev->sync_max_offset; 1706 - esp_write(eregs->esp_soff, esp->prev_soff); 1707 - esp->prev_stp = esp_dev->sync_min_period; 1708 - esp_write(eregs->esp_stp, esp->prev_stp); 1709 - if(esp->erev > esp100a) { 1710 - esp->prev_cfg3 = esp->config3[scmd_id(sp)]; 1711 - esp_write(eregs->esp_cfg3, esp->prev_cfg3); 1712 - } 1713 - } 1714 - esp->current_SC = sp; 1715 - } 1716 - 1717 - /* This will place the current working command back into the issue queue 1718 - * if we are to receive a reselection amidst a selection attempt. 1719 - */ 1720 - static inline void esp_reconnect(struct NCR_ESP *esp, Scsi_Cmnd *sp) 1721 - { 1722 - if(!esp->disconnected_SC) 1723 - ESPLOG(("esp%d: Weird, being reselected but disconnected " 1724 - "command queue is empty.\n", esp->esp_id)); 1725 - esp->snip = 0; 1726 - esp->current_SC = NULL; 1727 - sp->SCp.phase = not_issued; 1728 - append_SC(&esp->issue_SC, sp); 1729 - } 1730 - 1731 - /* Begin message in phase. 
*/ 1732 - static int esp_do_msgin(struct NCR_ESP *esp, struct ESP_regs *eregs) 1733 - { 1734 - esp_cmd(esp, eregs, ESP_CMD_FLUSH); 1735 - esp_maybe_nop(esp, eregs); 1736 - esp_cmd(esp, eregs, ESP_CMD_TI); 1737 - esp->msgin_len = 1; 1738 - esp->msgin_ctr = 0; 1739 - esp_advance_phase(esp->current_SC, in_msgindone); 1740 - return do_work_bus; 1741 - } 1742 - 1743 - static inline void advance_sg(struct NCR_ESP *esp, Scsi_Cmnd *sp) 1744 - { 1745 - ++sp->SCp.buffer; 1746 - --sp->SCp.buffers_residual; 1747 - sp->SCp.this_residual = sp->SCp.buffer->length; 1748 - if (esp->dma_advance_sg) 1749 - esp->dma_advance_sg (sp); 1750 - else 1751 - sp->SCp.ptr = (char *) virt_to_phys(sg_virt(sp->SCp.buffer)); 1752 - 1753 - } 1754 - 1755 - /* Please note that the way I've coded these routines is that I _always_ 1756 - * check for a disconnect during any and all information transfer 1757 - * phases. The SCSI standard states that the target _can_ cause a BUS 1758 - * FREE condition by dropping all MSG/CD/IO/BSY signals. Also note 1759 - * that during information transfer phases the target controls every 1760 - * change in phase, the only thing the initiator can do is "ask" for 1761 - * a message out phase by driving ATN true. The target can, and sometimes 1762 - * will, completely ignore this request so we cannot assume anything when 1763 - * we try to force a message out phase to abort/reset a target. Most of 1764 - * the time the target will eventually be nice and go to message out, so 1765 - * we may have to hold on to our state about what we want to tell the target 1766 - * for some period of time. 1767 - */ 1768 - 1769 - /* I think I have things working here correctly. Even partial transfers 1770 - * within a buffer or sub-buffer should not upset us at all no matter 1771 - * how bad the target and/or ESP fucks things up. 
1772 - */ 1773 - static int esp_do_data(struct NCR_ESP *esp, struct ESP_regs *eregs) 1774 - { 1775 - Scsi_Cmnd *SCptr = esp->current_SC; 1776 - int thisphase, hmuch; 1777 - 1778 - ESPDATA(("esp_do_data: ")); 1779 - esp_maybe_nop(esp, eregs); 1780 - thisphase = sreg_to_dataphase(esp->sreg); 1781 - esp_advance_phase(SCptr, thisphase); 1782 - ESPDATA(("newphase<%s> ", (thisphase == in_datain) ? "DATAIN" : "DATAOUT")); 1783 - hmuch = esp->dma_can_transfer(esp, SCptr); 1784 - 1785 - /* 1786 - * XXX MSch: cater for PIO transfer here; PIO used if hmuch == 0 1787 - */ 1788 - if (hmuch) { /* DMA */ 1789 - /* 1790 - * DMA 1791 - */ 1792 - ESPDATA(("hmuch<%d> ", hmuch)); 1793 - esp->current_transfer_size = hmuch; 1794 - esp_setcount(eregs, (esp->fas_premature_intr_workaround ? 1795 - (hmuch + 0x40) : hmuch)); 1796 - esp->dma_setup(esp, (__u32)((unsigned long)SCptr->SCp.ptr), 1797 - hmuch, (thisphase == in_datain)); 1798 - ESPDATA(("DMA|TI --> do_intr_end\n")); 1799 - esp_cmd(esp, eregs, ESP_CMD_DMA | ESP_CMD_TI); 1800 - return do_intr_end; 1801 - /* 1802 - * end DMA 1803 - */ 1804 - } else { 1805 - /* 1806 - * PIO 1807 - */ 1808 - int oldphase, i = 0; /* or where we left off last time ?? esp->current_data ?? */ 1809 - int fifocnt = 0; 1810 - unsigned char *p = phys_to_virt((unsigned long)SCptr->SCp.ptr); 1811 - 1812 - oldphase = esp_read(eregs->esp_status) & ESP_STAT_PMASK; 1813 - 1814 - /* 1815 - * polled transfer; ugly, can we make this happen in a DRQ 1816 - * interrupt handler ?? 1817 - * requires keeping track of state information in host or 1818 - * command struct! 1819 - * Problem: I've never seen a DRQ happen on Mac, not even 1820 - * with ESP_CMD_DMA ... 1821 - */ 1822 - 1823 - /* figure out how much needs to be transferred */ 1824 - hmuch = SCptr->SCp.this_residual; 1825 - ESPDATA(("hmuch<%d> pio ", hmuch)); 1826 - esp->current_transfer_size = hmuch; 1827 - 1828 - /* tell the ESP ... 
*/ 1829 - esp_setcount(eregs, hmuch); 1830 - 1831 - /* loop */ 1832 - while (hmuch) { 1833 - int j, fifo_stuck = 0, newphase; 1834 - unsigned long timeout; 1835 - #if 0 1836 - unsigned long flags; 1837 - #endif 1838 - #if 0 1839 - if ( i % 10 ) 1840 - ESPDATA(("\r")); 1841 - else 1842 - ESPDATA(( /*"\n"*/ "\r")); 1843 - #endif 1844 - #if 0 1845 - local_irq_save(flags); 1846 - #endif 1847 - if(thisphase == in_datain) { 1848 - /* 'go' ... */ 1849 - esp_cmd(esp, eregs, ESP_CMD_TI); 1850 - 1851 - /* wait for data */ 1852 - timeout = 1000000; 1853 - while (!((esp->sreg=esp_read(eregs->esp_status)) & ESP_STAT_INTR) && --timeout) 1854 - udelay(2); 1855 - if (timeout == 0) 1856 - printk("DRQ datain timeout! \n"); 1857 - 1858 - newphase = esp->sreg & ESP_STAT_PMASK; 1859 - 1860 - /* see how much we got ... */ 1861 - fifocnt = (esp_read(eregs->esp_fflags) & ESP_FF_FBYTES); 1862 - 1863 - if (!fifocnt) 1864 - fifo_stuck++; 1865 - else 1866 - fifo_stuck = 0; 1867 - 1868 - ESPDATA(("\rgot %d st %x ph %x", fifocnt, esp->sreg, newphase)); 1869 - 1870 - /* read fifo */ 1871 - for(j=0;j<fifocnt;j++) 1872 - p[i++] = esp_read(eregs->esp_fdata); 1873 - 1874 - ESPDATA(("(%d) ", i)); 1875 - 1876 - /* how many to go ?? */ 1877 - hmuch -= fifocnt; 1878 - 1879 - /* break if status phase !! */ 1880 - if(newphase == ESP_STATP) { 1881 - /* clear int. */ 1882 - esp->ireg = esp_read(eregs->esp_intrpt); 1883 - break; 1884 - } 1885 - } else { 1886 - #define MAX_FIFO 8 1887 - /* how much will fit ? */ 1888 - int this_count = MAX_FIFO - fifocnt; 1889 - if (this_count > hmuch) 1890 - this_count = hmuch; 1891 - 1892 - /* fill fifo */ 1893 - for(j=0;j<this_count;j++) 1894 - esp_write(eregs->esp_fdata, p[i++]); 1895 - 1896 - /* how many left if this goes out ?? */ 1897 - hmuch -= this_count; 1898 - 1899 - /* 'go' ... 
*/ 1900 - esp_cmd(esp, eregs, ESP_CMD_TI); 1901 - 1902 - /* wait for 'got it' */ 1903 - timeout = 1000000; 1904 - while (!((esp->sreg=esp_read(eregs->esp_status)) & ESP_STAT_INTR) && --timeout) 1905 - udelay(2); 1906 - if (timeout == 0) 1907 - printk("DRQ dataout timeout! \n"); 1908 - 1909 - newphase = esp->sreg & ESP_STAT_PMASK; 1910 - 1911 - /* need to check how much was sent ?? */ 1912 - fifocnt = (esp_read(eregs->esp_fflags) & ESP_FF_FBYTES); 1913 - 1914 - ESPDATA(("\rsent %d st %x ph %x", this_count - fifocnt, esp->sreg, newphase)); 1915 - 1916 - ESPDATA(("(%d) ", i)); 1917 - 1918 - /* break if status phase !! */ 1919 - if(newphase == ESP_STATP) { 1920 - /* clear int. */ 1921 - esp->ireg = esp_read(eregs->esp_intrpt); 1922 - break; 1923 - } 1924 - 1925 - } 1926 - 1927 - /* clear int. */ 1928 - esp->ireg = esp_read(eregs->esp_intrpt); 1929 - 1930 - ESPDATA(("ir %x ... ", esp->ireg)); 1931 - 1932 - if (hmuch == 0) 1933 - ESPDATA(("done! \n")); 1934 - 1935 - #if 0 1936 - local_irq_restore(flags); 1937 - #endif 1938 - 1939 - /* check new bus phase */ 1940 - if (newphase != oldphase && i < esp->current_transfer_size) { 1941 - /* something happened; disconnect ?? */ 1942 - ESPDATA(("phase change, dropped out with %d done ... ", i)); 1943 - break; 1944 - } 1945 - 1946 - /* check int. status */ 1947 - if (esp->ireg & ESP_INTR_DC) { 1948 - /* disconnect */ 1949 - ESPDATA(("disconnect; %d transferred ... ", i)); 1950 - break; 1951 - } else if (esp->ireg & ESP_INTR_FDONE) { 1952 - /* function done */ 1953 - ESPDATA(("function done; %d transferred ... ", i)); 1954 - break; 1955 - } 1956 - 1957 - /* XXX fixme: bail out on stall */ 1958 - if (fifo_stuck > 10) { 1959 - /* we're stuck */ 1960 - ESPDATA(("fifo stall; %d transferred ... ", i)); 1961 - break; 1962 - } 1963 - } 1964 - 1965 - ESPDATA(("\n")); 1966 - /* check successful completion ?? */ 1967 - 1968 - if (thisphase == in_dataout) 1969 - hmuch += fifocnt; /* stuck?? 
adjust data pointer ...*/ 1970 - 1971 - /* tell do_data_finale how much was transferred */ 1972 - esp->current_transfer_size -= hmuch; 1973 - 1974 - /* still not completely sure on this one ... */ 1975 - return /*do_intr_end*/ do_work_bus /*do_phase_determine*/ ; 1976 - 1977 - /* 1978 - * end PIO 1979 - */ 1980 - } 1981 - return do_intr_end; 1982 - } 1983 - 1984 - /* See how successful the data transfer was. */ 1985 - static int esp_do_data_finale(struct NCR_ESP *esp, 1986 - struct ESP_regs *eregs) 1987 - { 1988 - Scsi_Cmnd *SCptr = esp->current_SC; 1989 - struct esp_device *esp_dev = SCptr->device->hostdata; 1990 - int bogus_data = 0, bytes_sent = 0, fifocnt, ecount = 0; 1991 - 1992 - if(esp->dma_led_off) 1993 - esp->dma_led_off(esp); 1994 - 1995 - ESPDATA(("esp_do_data_finale: ")); 1996 - 1997 - if(SCptr->SCp.phase == in_datain) { 1998 - if(esp->sreg & ESP_STAT_PERR) { 1999 - /* Yuck, parity error. The ESP asserts ATN 2000 - * so that we can go to message out phase 2001 - * immediately and inform the target that 2002 - * something bad happened. 2003 - */ 2004 - ESPLOG(("esp%d: data bad parity detected.\n", 2005 - esp->esp_id)); 2006 - esp->cur_msgout[0] = INITIATOR_ERROR; 2007 - esp->msgout_len = 1; 2008 - } 2009 - if(esp->dma_drain) 2010 - esp->dma_drain(esp); 2011 - } 2012 - if(esp->dma_invalidate) 2013 - esp->dma_invalidate(esp); 2014 - 2015 - /* This could happen for the above parity error case. */ 2016 - if(!(esp->ireg == ESP_INTR_BSERV)) { 2017 - /* Please go to msgout phase, please please please... */ 2018 - ESPLOG(("esp%d: !BSERV after data, probably to msgout\n", 2019 - esp->esp_id)); 2020 - return esp_do_phase_determine(esp, eregs); 2021 - } 2022 - 2023 - /* Check for partial transfers and other horrible events. 
*/ 2024 - fifocnt = (esp_read(eregs->esp_fflags) & ESP_FF_FBYTES); 2025 - ecount = esp_getcount(eregs); 2026 - if(esp->fas_premature_intr_workaround) 2027 - ecount -= 0x40; 2028 - bytes_sent = esp->current_transfer_size; 2029 - 2030 - ESPDATA(("trans_sz=%d, ", bytes_sent)); 2031 - if(!(esp->sreg & ESP_STAT_TCNT)) 2032 - bytes_sent -= ecount; 2033 - if(SCptr->SCp.phase == in_dataout) 2034 - bytes_sent -= fifocnt; 2035 - 2036 - ESPDATA(("bytes_sent=%d (ecount=%d, fifocnt=%d), ", bytes_sent, 2037 - ecount, fifocnt)); 2038 - 2039 - /* If we were in synchronous mode, check for peculiarities. */ 2040 - if(esp_dev->sync_max_offset) 2041 - bogus_data = esp100_sync_hwbug(esp, eregs, SCptr, fifocnt); 2042 - else 2043 - esp_cmd(esp, eregs, ESP_CMD_FLUSH); 2044 - 2045 - /* Until we are sure of what has happened, we are certainly 2046 - * in the dark. 2047 - */ 2048 - esp_advance_phase(SCptr, in_the_dark); 2049 - 2050 - /* Check for premature interrupt condition. Can happen on FAS2x6 2051 - * chips. QLogic recommends a workaround by overprogramming the 2052 - * transfer counters, but this makes doing scatter-gather impossible. 2053 - * Until there is a way to disable scatter-gather for a single target, 2054 - * and not only for the entire host adapter as it is now, the workaround 2055 - * is way to expensive performance wise. 2056 - * Instead, it turns out that when this happens the target has disconnected 2057 - * already but it doesn't show in the interrupt register. Compensate for 2058 - * that here to try and avoid a SCSI bus reset. 2059 - */ 2060 - if(!esp->fas_premature_intr_workaround && (fifocnt == 1) && 2061 - sreg_dataoutp(esp->sreg)) { 2062 - ESPLOG(("esp%d: Premature interrupt, enabling workaround\n", 2063 - esp->esp_id)); 2064 - #if 0 2065 - /* Disable scatter-gather operations, they are not possible 2066 - * when using this workaround. 
2067 - */ 2068 - esp->ehost->sg_tablesize = 0; 2069 - esp->ehost->use_clustering = ENABLE_CLUSTERING; 2070 - esp->fas_premature_intr_workaround = 1; 2071 - bytes_sent = 0; 2072 - if(SCptr->use_sg) { 2073 - ESPLOG(("esp%d: Aborting scatter-gather operation\n", 2074 - esp->esp_id)); 2075 - esp->cur_msgout[0] = ABORT; 2076 - esp->msgout_len = 1; 2077 - esp->msgout_ctr = 0; 2078 - esp_cmd(esp, eregs, ESP_CMD_SATN); 2079 - esp_setcount(eregs, 0xffff); 2080 - esp_cmd(esp, eregs, ESP_CMD_NULL); 2081 - esp_cmd(esp, eregs, ESP_CMD_TPAD | ESP_CMD_DMA); 2082 - return do_intr_end; 2083 - } 2084 - #else 2085 - /* Just set the disconnected bit. That's what appears to 2086 - * happen anyway. The state machine will pick it up when 2087 - * we return. 2088 - */ 2089 - esp->ireg |= ESP_INTR_DC; 2090 - #endif 2091 - } 2092 - 2093 - if(bytes_sent < 0) { 2094 - /* I've seen this happen due to lost state in this 2095 - * driver. No idea why it happened, but allowing 2096 - * this value to be negative caused things to 2097 - * lock up. This allows greater chance of recovery. 2098 - * In fact every time I've seen this, it has been 2099 - * a driver bug without question. 2100 - */ 2101 - ESPLOG(("esp%d: yieee, bytes_sent < 0!\n", esp->esp_id)); 2102 - ESPLOG(("esp%d: csz=%d fifocount=%d ecount=%d\n", 2103 - esp->esp_id, 2104 - esp->current_transfer_size, fifocnt, ecount)); 2105 - ESPLOG(("esp%d: use_sg=%d ptr=%p this_residual=%d\n", 2106 - esp->esp_id, 2107 - SCptr->use_sg, SCptr->SCp.ptr, SCptr->SCp.this_residual)); 2108 - ESPLOG(("esp%d: Forcing async for target %d\n", esp->esp_id, 2109 - SCptr->device->id)); 2110 - SCptr->device->borken = 1; 2111 - esp_dev->sync = 0; 2112 - bytes_sent = 0; 2113 - } 2114 - 2115 - /* Update the state of our transfer. 
*/ 2116 - SCptr->SCp.ptr += bytes_sent; 2117 - SCptr->SCp.this_residual -= bytes_sent; 2118 - if(SCptr->SCp.this_residual < 0) { 2119 - /* shit */ 2120 - ESPLOG(("esp%d: Data transfer overrun.\n", esp->esp_id)); 2121 - SCptr->SCp.this_residual = 0; 2122 - } 2123 - 2124 - /* Maybe continue. */ 2125 - if(!bogus_data) { 2126 - ESPDATA(("!bogus_data, ")); 2127 - /* NO MATTER WHAT, we advance the scatterlist, 2128 - * if the target should decide to disconnect 2129 - * in between scatter chunks (which is common) 2130 - * we could die horribly! I used to have the sg 2131 - * advance occur only if we are going back into 2132 - * (or are staying in) a data phase, you can 2133 - * imagine the hell I went through trying to 2134 - * figure this out. 2135 - */ 2136 - if(!SCptr->SCp.this_residual && SCptr->SCp.buffers_residual) 2137 - advance_sg(esp, SCptr); 2138 - #ifdef DEBUG_ESP_DATA 2139 - if(sreg_datainp(esp->sreg) || sreg_dataoutp(esp->sreg)) { 2140 - ESPDATA(("to more data\n")); 2141 - } else { 2142 - ESPDATA(("to new phase\n")); 2143 - } 2144 - #endif 2145 - return esp_do_phase_determine(esp, eregs); 2146 - } 2147 - /* Bogus data, just wait for next interrupt. */ 2148 - ESPLOG(("esp%d: bogus_data during end of data phase\n", 2149 - esp->esp_id)); 2150 - return do_intr_end; 2151 - } 2152 - 2153 - /* We received a non-good status return at the end of 2154 - * running a SCSI command. This is used to decide if 2155 - * we should clear our synchronous transfer state for 2156 - * such a device when that happens. 2157 - * 2158 - * The idea is that when spinning up a disk or rewinding 2159 - * a tape, we don't want to go into a loop re-negotiating 2160 - * synchronous capabilities over and over. 2161 - */ 2162 - static int esp_should_clear_sync(Scsi_Cmnd *sp) 2163 - { 2164 - unchar cmd = sp->cmnd[0]; 2165 - 2166 - /* These cases are for spinning up a disk and 2167 - * waiting for that spinup to complete. 
2168 - */ 2169 - if(cmd == START_STOP) 2170 - return 0; 2171 - 2172 - if(cmd == TEST_UNIT_READY) 2173 - return 0; 2174 - 2175 - /* One more special case for SCSI tape drives, 2176 - * this is what is used to probe the device for 2177 - * completion of a rewind or tape load operation. 2178 - */ 2179 - if(sp->device->type == TYPE_TAPE && cmd == MODE_SENSE) 2180 - return 0; 2181 - 2182 - return 1; 2183 - } 2184 - 2185 - /* Either a command is completing or a target is dropping off the bus 2186 - * to continue the command in the background so we can do other work. 2187 - */ 2188 - static int esp_do_freebus(struct NCR_ESP *esp, struct ESP_regs *eregs) 2189 - { 2190 - Scsi_Cmnd *SCptr = esp->current_SC; 2191 - int rval; 2192 - 2193 - rval = skipahead2(esp, eregs, SCptr, in_status, in_msgindone, in_freeing); 2194 - if(rval) 2195 - return rval; 2196 - 2197 - if(esp->ireg != ESP_INTR_DC) { 2198 - ESPLOG(("esp%d: Target will not disconnect\n", esp->esp_id)); 2199 - return do_reset_bus; /* target will not drop BSY... */ 2200 - } 2201 - esp->msgout_len = 0; 2202 - esp->prevmsgout = NOP; 2203 - if(esp->prevmsgin == COMMAND_COMPLETE) { 2204 - struct esp_device *esp_dev = SCptr->device->hostdata; 2205 - /* Normal end of nexus. */ 2206 - if(esp->disconnected_SC) 2207 - esp_cmd(esp, eregs, ESP_CMD_ESEL); 2208 - 2209 - if(SCptr->SCp.Status != GOOD && 2210 - SCptr->SCp.Status != CONDITION_GOOD && 2211 - ((1<<scmd_id(SCptr)) & esp->targets_present) && 2212 - esp_dev->sync && esp_dev->sync_max_offset) { 2213 - /* SCSI standard says that the synchronous capabilities 2214 - * should be renegotiated at this point. Most likely 2215 - * we are about to request sense from this target 2216 - * in which case we want to avoid using sync 2217 - * transfers until we are sure of the current target 2218 - * state. 
2219 - */ 2220 - ESPMISC(("esp: Status <%d> for target %d lun %d\n", 2221 - SCptr->SCp.Status, SCptr->device->id, SCptr->device->lun)); 2222 - 2223 - /* But don't do this when spinning up a disk at 2224 - * boot time while we poll for completion as it 2225 - * fills up the console with messages. Also, tapes 2226 - * can report not ready many times right after 2227 - * loading up a tape. 2228 - */ 2229 - if(esp_should_clear_sync(SCptr) != 0) 2230 - esp_dev->sync = 0; 2231 - } 2232 - ESPDISC(("F<%02x,%02x>", SCptr->device->id, SCptr->device->lun)); 2233 - esp_done(esp, ((SCptr->SCp.Status & 0xff) | 2234 - ((SCptr->SCp.Message & 0xff)<<8) | 2235 - (DID_OK << 16))); 2236 - } else if(esp->prevmsgin == DISCONNECT) { 2237 - /* Normal disconnect. */ 2238 - esp_cmd(esp, eregs, ESP_CMD_ESEL); 2239 - ESPDISC(("D<%02x,%02x>", SCptr->device->id, SCptr->device->lun)); 2240 - append_SC(&esp->disconnected_SC, SCptr); 2241 - esp->current_SC = NULL; 2242 - if(esp->issue_SC) 2243 - esp_exec_cmd(esp); 2244 - } else { 2245 - /* Driver bug, we do not expect a disconnect here 2246 - * and should not have advanced the state engine 2247 - * to in_freeing. 2248 - */ 2249 - ESPLOG(("esp%d: last msg not disc and not cmd cmplt.\n", 2250 - esp->esp_id)); 2251 - return do_reset_bus; 2252 - } 2253 - return do_intr_end; 2254 - } 2255 - 2256 - /* When a reselect occurs, and we cannot find the command to 2257 - * reconnect to in our queues, we do this. 
2258 - */ 2259 - static int esp_bad_reconnect(struct NCR_ESP *esp) 2260 - { 2261 - Scsi_Cmnd *sp; 2262 - 2263 - ESPLOG(("esp%d: Eieeee, reconnecting unknown command!\n", 2264 - esp->esp_id)); 2265 - ESPLOG(("QUEUE DUMP\n")); 2266 - sp = esp->issue_SC; 2267 - ESPLOG(("esp%d: issue_SC[", esp->esp_id)); 2268 - while(sp) { 2269 - ESPLOG(("<%02x,%02x>", sp->device->id, sp->device->lun)); 2270 - sp = (Scsi_Cmnd *) sp->host_scribble; 2271 - } 2272 - ESPLOG(("]\n")); 2273 - sp = esp->current_SC; 2274 - ESPLOG(("esp%d: current_SC[", esp->esp_id)); 2275 - while(sp) { 2276 - ESPLOG(("<%02x,%02x>", sp->device->id, sp->device->lun)); 2277 - sp = (Scsi_Cmnd *) sp->host_scribble; 2278 - } 2279 - ESPLOG(("]\n")); 2280 - sp = esp->disconnected_SC; 2281 - ESPLOG(("esp%d: disconnected_SC[", esp->esp_id)); 2282 - while(sp) { 2283 - ESPLOG(("<%02x,%02x>", sp->device->id, sp->device->lun)); 2284 - sp = (Scsi_Cmnd *) sp->host_scribble; 2285 - } 2286 - ESPLOG(("]\n")); 2287 - return do_reset_bus; 2288 - } 2289 - 2290 - /* Do the needy when a target tries to reconnect to us. */ 2291 - static int esp_do_reconnect(struct NCR_ESP *esp, 2292 - struct ESP_regs *eregs) 2293 - { 2294 - int lun, target; 2295 - Scsi_Cmnd *SCptr; 2296 - 2297 - /* Check for all bogus conditions first. */ 2298 - target = reconnect_target(esp, eregs); 2299 - if(target < 0) { 2300 - ESPDISC(("bad bus bits\n")); 2301 - return do_reset_bus; 2302 - } 2303 - lun = reconnect_lun(esp, eregs); 2304 - if(lun < 0) { 2305 - ESPDISC(("target=%2x, bad identify msg\n", target)); 2306 - return do_reset_bus; 2307 - } 2308 - 2309 - /* Things look ok... 
*/ 2310 - ESPDISC(("R<%02x,%02x>", target, lun)); 2311 - 2312 - esp_cmd(esp, eregs, ESP_CMD_FLUSH); 2313 - if(esp100_reconnect_hwbug(esp, eregs)) 2314 - return do_reset_bus; 2315 - esp_cmd(esp, eregs, ESP_CMD_NULL); 2316 - 2317 - SCptr = remove_SC(&esp->disconnected_SC, (unchar) target, (unchar) lun); 2318 - if(!SCptr) 2319 - return esp_bad_reconnect(esp); 2320 - 2321 - esp_connect(esp, eregs, SCptr); 2322 - esp_cmd(esp, eregs, ESP_CMD_MOK); 2323 - 2324 - /* Reconnect implies a restore pointers operation. */ 2325 - esp_restore_pointers(esp, SCptr); 2326 - 2327 - esp->snip = 0; 2328 - esp_advance_phase(SCptr, in_the_dark); 2329 - return do_intr_end; 2330 - } 2331 - 2332 - /* End of NEXUS (hopefully), pick up status + message byte then leave if 2333 - * all goes well. 2334 - */ 2335 - static int esp_do_status(struct NCR_ESP *esp, struct ESP_regs *eregs) 2336 - { 2337 - Scsi_Cmnd *SCptr = esp->current_SC; 2338 - int intr, rval; 2339 - 2340 - rval = skipahead1(esp, eregs, SCptr, in_the_dark, in_status); 2341 - if(rval) 2342 - return rval; 2343 - 2344 - intr = esp->ireg; 2345 - ESPSTAT(("esp_do_status: ")); 2346 - if(intr != ESP_INTR_DC) { 2347 - int message_out = 0; /* for parity problems */ 2348 - 2349 - /* Ack the message. 
*/ 2350 - ESPSTAT(("ack msg, ")); 2351 - esp_cmd(esp, eregs, ESP_CMD_MOK); 2352 - 2353 - if(esp->dma_poll) 2354 - esp->dma_poll(esp, (unsigned char *) esp->esp_command); 2355 - 2356 - ESPSTAT(("got something, ")); 2357 - /* ESP chimes in with one of 2358 - * 2359 - * 1) function done interrupt: 2360 - * both status and message in bytes 2361 - * are available 2362 - * 2363 - * 2) bus service interrupt: 2364 - * only status byte was acquired 2365 - * 2366 - * 3) Anything else: 2367 - * can't happen, but we test for it 2368 - * anyways 2369 - * 2370 - * ALSO: If bad parity was detected on either 2371 - * the status _or_ the message byte then 2372 - * the ESP has asserted ATN on the bus 2373 - * and we must therefore wait for the 2374 - * next phase change. 2375 - */ 2376 - if(intr & ESP_INTR_FDONE) { 2377 - /* We got it all, hallejulia. */ 2378 - ESPSTAT(("got both, ")); 2379 - SCptr->SCp.Status = esp->esp_command[0]; 2380 - SCptr->SCp.Message = esp->esp_command[1]; 2381 - esp->prevmsgin = SCptr->SCp.Message; 2382 - esp->cur_msgin[0] = SCptr->SCp.Message; 2383 - if(esp->sreg & ESP_STAT_PERR) { 2384 - /* There was bad parity for the 2385 - * message byte, the status byte 2386 - * was ok. 2387 - */ 2388 - message_out = MSG_PARITY_ERROR; 2389 - } 2390 - } else if(intr == ESP_INTR_BSERV) { 2391 - /* Only got status byte. */ 2392 - ESPLOG(("esp%d: got status only, ", esp->esp_id)); 2393 - if(!(esp->sreg & ESP_STAT_PERR)) { 2394 - SCptr->SCp.Status = esp->esp_command[0]; 2395 - SCptr->SCp.Message = 0xff; 2396 - } else { 2397 - /* The status byte had bad parity. 2398 - * we leave the scsi_pointer Status 2399 - * field alone as we set it to a default 2400 - * of CHECK_CONDITION in esp_queue. 2401 - */ 2402 - message_out = INITIATOR_ERROR; 2403 - } 2404 - } else { 2405 - /* This shouldn't happen ever. 
*/ 2406 - ESPSTAT(("got bolixed\n")); 2407 - esp_advance_phase(SCptr, in_the_dark); 2408 - return esp_do_phase_determine(esp, eregs); 2409 - } 2410 - 2411 - if(!message_out) { 2412 - ESPSTAT(("status=%2x msg=%2x, ", SCptr->SCp.Status, 2413 - SCptr->SCp.Message)); 2414 - if(SCptr->SCp.Message == COMMAND_COMPLETE) { 2415 - ESPSTAT(("and was COMMAND_COMPLETE\n")); 2416 - esp_advance_phase(SCptr, in_freeing); 2417 - return esp_do_freebus(esp, eregs); 2418 - } else { 2419 - ESPLOG(("esp%d: and _not_ COMMAND_COMPLETE\n", 2420 - esp->esp_id)); 2421 - esp->msgin_len = esp->msgin_ctr = 1; 2422 - esp_advance_phase(SCptr, in_msgindone); 2423 - return esp_do_msgindone(esp, eregs); 2424 - } 2425 - } else { 2426 - /* With luck we'll be able to let the target 2427 - * know that bad parity happened, it will know 2428 - * which byte caused the problems and send it 2429 - * again. For the case where the status byte 2430 - * receives bad parity, I do not believe most 2431 - * targets recover very well. We'll see. 2432 - */ 2433 - ESPLOG(("esp%d: bad parity somewhere mout=%2x\n", 2434 - esp->esp_id, message_out)); 2435 - esp->cur_msgout[0] = message_out; 2436 - esp->msgout_len = esp->msgout_ctr = 1; 2437 - esp_advance_phase(SCptr, in_the_dark); 2438 - return esp_do_phase_determine(esp, eregs); 2439 - } 2440 - } else { 2441 - /* If we disconnect now, all hell breaks loose. 
*/ 2442 - ESPLOG(("esp%d: whoops, disconnect\n", esp->esp_id)); 2443 - esp_advance_phase(SCptr, in_the_dark); 2444 - return esp_do_phase_determine(esp, eregs); 2445 - } 2446 - } 2447 - 2448 - static int esp_enter_status(struct NCR_ESP *esp, 2449 - struct ESP_regs *eregs) 2450 - { 2451 - unchar thecmd = ESP_CMD_ICCSEQ; 2452 - 2453 - esp_cmd(esp, eregs, ESP_CMD_FLUSH); 2454 - 2455 - if(esp->do_pio_cmds) { 2456 - esp_advance_phase(esp->current_SC, in_status); 2457 - esp_cmd(esp, eregs, thecmd); 2458 - while(!(esp_read(esp->eregs->esp_status) & ESP_STAT_INTR)); 2459 - esp->esp_command[0] = esp_read(eregs->esp_fdata); 2460 - while(!(esp_read(esp->eregs->esp_status) & ESP_STAT_INTR)); 2461 - esp->esp_command[1] = esp_read(eregs->esp_fdata); 2462 - } else { 2463 - esp->esp_command[0] = esp->esp_command[1] = 0xff; 2464 - esp_write(eregs->esp_tclow, 2); 2465 - esp_write(eregs->esp_tcmed, 0); 2466 - esp->dma_init_read(esp, esp->esp_command_dvma, 2); 2467 - thecmd |= ESP_CMD_DMA; 2468 - esp_cmd(esp, eregs, thecmd); 2469 - esp_advance_phase(esp->current_SC, in_status); 2470 - } 2471 - 2472 - return esp_do_status(esp, eregs); 2473 - } 2474 - 2475 - static int esp_disconnect_amidst_phases(struct NCR_ESP *esp, 2476 - struct ESP_regs *eregs) 2477 - { 2478 - Scsi_Cmnd *sp = esp->current_SC; 2479 - struct esp_device *esp_dev = sp->device->hostdata; 2480 - 2481 - /* This means real problems if we see this 2482 - * here. Unless we were actually trying 2483 - * to force the device to abort/reset. 2484 - */ 2485 - ESPLOG(("esp%d: Disconnect amidst phases, ", esp->esp_id)); 2486 - ESPLOG(("pphase<%s> cphase<%s>, ", 2487 - phase_string(sp->SCp.phase), 2488 - phase_string(sp->SCp.sent_command))); 2489 - 2490 - if(esp->disconnected_SC) 2491 - esp_cmd(esp, eregs, ESP_CMD_ESEL); 2492 - 2493 - switch(esp->cur_msgout[0]) { 2494 - default: 2495 - /* We didn't expect this to happen at all. 
*/ 2496 - ESPLOG(("device is bolixed\n")); 2497 - esp_advance_phase(sp, in_tgterror); 2498 - esp_done(esp, (DID_ERROR << 16)); 2499 - break; 2500 - 2501 - case BUS_DEVICE_RESET: 2502 - ESPLOG(("device reset successful\n")); 2503 - esp_dev->sync_max_offset = 0; 2504 - esp_dev->sync_min_period = 0; 2505 - esp_dev->sync = 0; 2506 - esp_advance_phase(sp, in_resetdev); 2507 - esp_done(esp, (DID_RESET << 16)); 2508 - break; 2509 - 2510 - case ABORT: 2511 - ESPLOG(("device abort successful\n")); 2512 - esp_advance_phase(sp, in_abortone); 2513 - esp_done(esp, (DID_ABORT << 16)); 2514 - break; 2515 - 2516 - }; 2517 - return do_intr_end; 2518 - } 2519 - 2520 - static int esp_enter_msgout(struct NCR_ESP *esp, 2521 - struct ESP_regs *eregs) 2522 - { 2523 - esp_advance_phase(esp->current_SC, in_msgout); 2524 - return esp_do_msgout(esp, eregs); 2525 - } 2526 - 2527 - static int esp_enter_msgin(struct NCR_ESP *esp, 2528 - struct ESP_regs *eregs) 2529 - { 2530 - esp_advance_phase(esp->current_SC, in_msgin); 2531 - return esp_do_msgin(esp, eregs); 2532 - } 2533 - 2534 - static int esp_enter_cmd(struct NCR_ESP *esp, 2535 - struct ESP_regs *eregs) 2536 - { 2537 - esp_advance_phase(esp->current_SC, in_cmdbegin); 2538 - return esp_do_cmdbegin(esp, eregs); 2539 - } 2540 - 2541 - static int esp_enter_badphase(struct NCR_ESP *esp, 2542 - struct ESP_regs *eregs) 2543 - { 2544 - ESPLOG(("esp%d: Bizarre bus phase %2x.\n", esp->esp_id, 2545 - esp->sreg & ESP_STAT_PMASK)); 2546 - return do_reset_bus; 2547 - } 2548 - 2549 - typedef int (*espfunc_t)(struct NCR_ESP *, 2550 - struct ESP_regs *); 2551 - 2552 - static espfunc_t phase_vector[] = { 2553 - esp_do_data, /* ESP_DOP */ 2554 - esp_do_data, /* ESP_DIP */ 2555 - esp_enter_cmd, /* ESP_CMDP */ 2556 - esp_enter_status, /* ESP_STATP */ 2557 - esp_enter_badphase, /* ESP_STAT_PMSG */ 2558 - esp_enter_badphase, /* ESP_STAT_PMSG | ESP_STAT_PIO */ 2559 - esp_enter_msgout, /* ESP_MOP */ 2560 - esp_enter_msgin, /* ESP_MIP */ 2561 - }; 2562 - 2563 - /* 
The target has control of the bus and we have to see where it has 2564 - * taken us. 2565 - */ 2566 - static int esp_do_phase_determine(struct NCR_ESP *esp, 2567 - struct ESP_regs *eregs) 2568 - { 2569 - if ((esp->ireg & ESP_INTR_DC) != 0) 2570 - return esp_disconnect_amidst_phases(esp, eregs); 2571 - return phase_vector[esp->sreg & ESP_STAT_PMASK](esp, eregs); 2572 - } 2573 - 2574 - /* First interrupt after exec'ing a cmd comes here. */ 2575 - static int esp_select_complete(struct NCR_ESP *esp, struct ESP_regs *eregs) 2576 - { 2577 - Scsi_Cmnd *SCptr = esp->current_SC; 2578 - struct esp_device *esp_dev = SCptr->device->hostdata; 2579 - int cmd_bytes_sent, fcnt; 2580 - 2581 - fcnt = (esp_read(eregs->esp_fflags) & ESP_FF_FBYTES); 2582 - cmd_bytes_sent = esp->dma_bytes_sent(esp, fcnt); 2583 - if(esp->dma_invalidate) 2584 - esp->dma_invalidate(esp); 2585 - 2586 - /* Let's check to see if a reselect happened 2587 - * while we we're trying to select. This must 2588 - * be checked first. 2589 - */ 2590 - if(esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) { 2591 - esp_reconnect(esp, SCptr); 2592 - return esp_do_reconnect(esp, eregs); 2593 - } 2594 - 2595 - /* Looks like things worked, we should see a bus service & 2596 - * a function complete interrupt at this point. Note we 2597 - * are doing a direct comparison because we don't want to 2598 - * be fooled into thinking selection was successful if 2599 - * ESP_INTR_DC is set, see below. 2600 - */ 2601 - if(esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) { 2602 - /* target speaks... */ 2603 - esp->targets_present |= (1<<scmd_id(SCptr)); 2604 - 2605 - /* What if the target ignores the sdtr? */ 2606 - if(esp->snip) 2607 - esp_dev->sync = 1; 2608 - 2609 - /* See how far, if at all, we got in getting 2610 - * the information out to the target. 
2611 - */ 2612 - switch(esp->seqreg) { 2613 - default: 2614 - 2615 - case ESP_STEP_ASEL: 2616 - /* Arbitration won, target selected, but 2617 - * we are in some phase which is not command 2618 - * phase nor is it message out phase. 2619 - * 2620 - * XXX We've confused the target, obviously. 2621 - * XXX So clear it's state, but we also end 2622 - * XXX up clearing everyone elses. That isn't 2623 - * XXX so nice. I'd like to just reset this 2624 - * XXX target, but if I cannot even get it's 2625 - * XXX attention and finish selection to talk 2626 - * XXX to it, there is not much more I can do. 2627 - * XXX If we have a loaded bus we're going to 2628 - * XXX spend the next second or so renegotiating 2629 - * XXX for synchronous transfers. 2630 - */ 2631 - ESPLOG(("esp%d: STEP_ASEL for tgt %d\n", 2632 - esp->esp_id, SCptr->device->id)); 2633 - 2634 - case ESP_STEP_SID: 2635 - /* Arbitration won, target selected, went 2636 - * to message out phase, sent one message 2637 - * byte, then we stopped. ATN is asserted 2638 - * on the SCSI bus and the target is still 2639 - * there hanging on. This is a legal 2640 - * sequence step if we gave the ESP a select 2641 - * and stop command. 2642 - * 2643 - * XXX See above, I could set the borken flag 2644 - * XXX in the device struct and retry the 2645 - * XXX command. But would that help for 2646 - * XXX tagged capable targets? 2647 - */ 2648 - 2649 - case ESP_STEP_NCMD: 2650 - /* Arbitration won, target selected, maybe 2651 - * sent the one message byte in message out 2652 - * phase, but we did not go to command phase 2653 - * in the end. Actually, we could have sent 2654 - * only some of the message bytes if we tried 2655 - * to send out the entire identify and tag 2656 - * message using ESP_CMD_SA3. 2657 - */ 2658 - cmd_bytes_sent = 0; 2659 - break; 2660 - 2661 - case ESP_STEP_PPC: 2662 - /* No, not the powerPC pinhead. 
Arbitration 2663 - * won, all message bytes sent if we went to 2664 - * message out phase, went to command phase 2665 - * but only part of the command was sent. 2666 - * 2667 - * XXX I've seen this, but usually in conjunction 2668 - * XXX with a gross error which appears to have 2669 - * XXX occurred between the time I told the 2670 - * XXX ESP to arbitrate and when I got the 2671 - * XXX interrupt. Could I have misloaded the 2672 - * XXX command bytes into the fifo? Actually, 2673 - * XXX I most likely missed a phase, and therefore 2674 - * XXX went into never never land and didn't even 2675 - * XXX know it. That was the old driver though. 2676 - * XXX What is even more peculiar is that the ESP 2677 - * XXX showed the proper function complete and 2678 - * XXX bus service bits in the interrupt register. 2679 - */ 2680 - 2681 - case ESP_STEP_FINI4: 2682 - case ESP_STEP_FINI5: 2683 - case ESP_STEP_FINI6: 2684 - case ESP_STEP_FINI7: 2685 - /* Account for the identify message */ 2686 - if(SCptr->SCp.phase == in_slct_norm) 2687 - cmd_bytes_sent -= 1; 2688 - }; 2689 - esp_cmd(esp, eregs, ESP_CMD_NULL); 2690 - 2691 - /* Be careful, we could really get fucked during synchronous 2692 - * data transfers if we try to flush the fifo now. 2693 - */ 2694 - if(!fcnt && /* Fifo is empty and... */ 2695 - /* either we are not doing synchronous transfers or... */ 2696 - (!esp_dev->sync_max_offset || 2697 - /* We are not going into data in phase. */ 2698 - ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP))) 2699 - esp_cmd(esp, eregs, ESP_CMD_FLUSH); /* flush is safe */ 2700 - 2701 - /* See how far we got if this is not a slow command. */ 2702 - if(!esp->esp_slowcmd) { 2703 - if(cmd_bytes_sent < 0) 2704 - cmd_bytes_sent = 0; 2705 - if(cmd_bytes_sent != SCptr->cmd_len) { 2706 - /* Crapola, mark it as a slowcmd 2707 - * so that we have some chance of 2708 - * keeping the command alive with 2709 - * good luck. 
2710 - * 2711 - * XXX Actually, if we didn't send it all 2712 - * XXX this means either we didn't set things 2713 - * XXX up properly (driver bug) or the target 2714 - * XXX or the ESP detected parity on one of 2715 - * XXX the command bytes. This makes much 2716 - * XXX more sense, and therefore this code 2717 - * XXX should be changed to send out a 2718 - * XXX parity error message or if the status 2719 - * XXX register shows no parity error then 2720 - * XXX just expect the target to bring the 2721 - * XXX bus into message in phase so that it 2722 - * XXX can send us the parity error message. 2723 - * XXX SCSI sucks... 2724 - */ 2725 - esp->esp_slowcmd = 1; 2726 - esp->esp_scmdp = &(SCptr->cmnd[cmd_bytes_sent]); 2727 - esp->esp_scmdleft = (SCptr->cmd_len - cmd_bytes_sent); 2728 - } 2729 - } 2730 - 2731 - /* Now figure out where we went. */ 2732 - esp_advance_phase(SCptr, in_the_dark); 2733 - return esp_do_phase_determine(esp, eregs); 2734 - } 2735 - 2736 - /* Did the target even make it? */ 2737 - if(esp->ireg == ESP_INTR_DC) { 2738 - /* wheee... nobody there or they didn't like 2739 - * what we told it to do, clean up. 2740 - */ 2741 - 2742 - /* If anyone is off the bus, but working on 2743 - * a command in the background for us, tell 2744 - * the ESP to listen for them. 
2745 - */ 2746 - if(esp->disconnected_SC) 2747 - esp_cmd(esp, eregs, ESP_CMD_ESEL); 2748 - 2749 - if(((1<<SCptr->device->id) & esp->targets_present) && 2750 - esp->seqreg && esp->cur_msgout[0] == EXTENDED_MESSAGE && 2751 - (SCptr->SCp.phase == in_slct_msg || 2752 - SCptr->SCp.phase == in_slct_stop)) { 2753 - /* shit */ 2754 - esp->snip = 0; 2755 - ESPLOG(("esp%d: Failed synchronous negotiation for target %d " 2756 - "lun %d\n", esp->esp_id, SCptr->device->id, SCptr->device->lun)); 2757 - esp_dev->sync_max_offset = 0; 2758 - esp_dev->sync_min_period = 0; 2759 - esp_dev->sync = 1; /* so we don't negotiate again */ 2760 - 2761 - /* Run the command again, this time though we 2762 - * won't try to negotiate for synchronous transfers. 2763 - * 2764 - * XXX I'd like to do something like send an 2765 - * XXX INITIATOR_ERROR or ABORT message to the 2766 - * XXX target to tell it, "Sorry I confused you, 2767 - * XXX please come back and I will be nicer next 2768 - * XXX time". But that requires having the target 2769 - * XXX on the bus, and it has dropped BSY on us. 2770 - */ 2771 - esp->current_SC = NULL; 2772 - esp_advance_phase(SCptr, not_issued); 2773 - prepend_SC(&esp->issue_SC, SCptr); 2774 - esp_exec_cmd(esp); 2775 - return do_intr_end; 2776 - } 2777 - 2778 - /* Ok, this is normal, this is what we see during boot 2779 - * or whenever when we are scanning the bus for targets. 2780 - * But first make sure that is really what is happening. 2781 - */ 2782 - if(((1<<SCptr->device->id) & esp->targets_present)) { 2783 - ESPLOG(("esp%d: Warning, live target %d not responding to " 2784 - "selection.\n", esp->esp_id, SCptr->device->id)); 2785 - 2786 - /* This _CAN_ happen. The SCSI standard states that 2787 - * the target is to _not_ respond to selection if 2788 - * _it_ detects bad parity on the bus for any reason. 2789 - * Therefore, we assume that if we've talked successfully 2790 - * to this target before, bad parity is the problem. 
2791 - */ 2792 - esp_done(esp, (DID_PARITY << 16)); 2793 - } else { 2794 - /* Else, there really isn't anyone there. */ 2795 - ESPMISC(("esp: selection failure, maybe nobody there?\n")); 2796 - ESPMISC(("esp: target %d lun %d\n", 2797 - SCptr->device->id, SCptr->device->lun)); 2798 - esp_done(esp, (DID_BAD_TARGET << 16)); 2799 - } 2800 - return do_intr_end; 2801 - } 2802 - 2803 - 2804 - ESPLOG(("esp%d: Selection failure.\n", esp->esp_id)); 2805 - printk("esp%d: Currently -- ", esp->esp_id); 2806 - esp_print_ireg(esp->ireg); 2807 - printk(" "); 2808 - esp_print_statreg(esp->sreg); 2809 - printk(" "); 2810 - esp_print_seqreg(esp->seqreg); 2811 - printk("\n"); 2812 - printk("esp%d: New -- ", esp->esp_id); 2813 - esp->sreg = esp_read(eregs->esp_status); 2814 - esp->seqreg = esp_read(eregs->esp_sstep); 2815 - esp->ireg = esp_read(eregs->esp_intrpt); 2816 - esp_print_ireg(esp->ireg); 2817 - printk(" "); 2818 - esp_print_statreg(esp->sreg); 2819 - printk(" "); 2820 - esp_print_seqreg(esp->seqreg); 2821 - printk("\n"); 2822 - ESPLOG(("esp%d: resetting bus\n", esp->esp_id)); 2823 - return do_reset_bus; /* ugh... */ 2824 - } 2825 - 2826 - /* Continue reading bytes for msgin phase. */ 2827 - static int esp_do_msgincont(struct NCR_ESP *esp, struct ESP_regs *eregs) 2828 - { 2829 - if(esp->ireg & ESP_INTR_BSERV) { 2830 - /* in the right phase too? */ 2831 - if((esp->sreg & ESP_STAT_PMASK) == ESP_MIP) { 2832 - /* phew... */ 2833 - esp_cmd(esp, eregs, ESP_CMD_TI); 2834 - esp_advance_phase(esp->current_SC, in_msgindone); 2835 - return do_intr_end; 2836 - } 2837 - 2838 - /* We changed phase but ESP shows bus service, 2839 - * in this case it is most likely that we, the 2840 - * hacker who has been up for 20hrs straight 2841 - * staring at the screen, drowned in coffee 2842 - * smelling like retched cigarette ashes 2843 - * have miscoded something..... so, try to 2844 - * recover as best we can. 
2845 - */ 2846 - ESPLOG(("esp%d: message in mis-carriage.\n", esp->esp_id)); 2847 - } 2848 - esp_advance_phase(esp->current_SC, in_the_dark); 2849 - return do_phase_determine; 2850 - } 2851 - 2852 - static int check_singlebyte_msg(struct NCR_ESP *esp, 2853 - struct ESP_regs *eregs) 2854 - { 2855 - esp->prevmsgin = esp->cur_msgin[0]; 2856 - if(esp->cur_msgin[0] & 0x80) { 2857 - /* wheee... */ 2858 - ESPLOG(("esp%d: target sends identify amidst phases\n", 2859 - esp->esp_id)); 2860 - esp_advance_phase(esp->current_SC, in_the_dark); 2861 - return 0; 2862 - } else if(((esp->cur_msgin[0] & 0xf0) == 0x20) || 2863 - (esp->cur_msgin[0] == EXTENDED_MESSAGE)) { 2864 - esp->msgin_len = 2; 2865 - esp_advance_phase(esp->current_SC, in_msgincont); 2866 - return 0; 2867 - } 2868 - esp_advance_phase(esp->current_SC, in_the_dark); 2869 - switch(esp->cur_msgin[0]) { 2870 - default: 2871 - /* We don't want to hear about it. */ 2872 - ESPLOG(("esp%d: msg %02x which we don't know about\n", esp->esp_id, 2873 - esp->cur_msgin[0])); 2874 - return MESSAGE_REJECT; 2875 - 2876 - case NOP: 2877 - ESPLOG(("esp%d: target %d sends a nop\n", esp->esp_id, 2878 - esp->current_SC->device->id)); 2879 - return 0; 2880 - 2881 - case RESTORE_POINTERS: 2882 - /* In this case we might also have to backup the 2883 - * "slow command" pointer. It is rare to get such 2884 - * a save/restore pointer sequence so early in the 2885 - * bus transition sequences, but cover it. 2886 - */ 2887 - if(esp->esp_slowcmd) { 2888 - esp->esp_scmdleft = esp->current_SC->cmd_len; 2889 - esp->esp_scmdp = &esp->current_SC->cmnd[0]; 2890 - } 2891 - esp_restore_pointers(esp, esp->current_SC); 2892 - return 0; 2893 - 2894 - case SAVE_POINTERS: 2895 - esp_save_pointers(esp, esp->current_SC); 2896 - return 0; 2897 - 2898 - case COMMAND_COMPLETE: 2899 - case DISCONNECT: 2900 - /* Freeing the bus, let it go. 
*/ 2901 - esp->current_SC->SCp.phase = in_freeing; 2902 - return 0; 2903 - 2904 - case MESSAGE_REJECT: 2905 - ESPMISC(("msg reject, ")); 2906 - if(esp->prevmsgout == EXTENDED_MESSAGE) { 2907 - struct esp_device *esp_dev = esp->current_SC->device->hostdata; 2908 - 2909 - /* Doesn't look like this target can 2910 - * do synchronous or WIDE transfers. 2911 - */ 2912 - ESPSDTR(("got reject, was trying nego, clearing sync/WIDE\n")); 2913 - esp_dev->sync = 1; 2914 - esp_dev->wide = 1; 2915 - esp_dev->sync_min_period = 0; 2916 - esp_dev->sync_max_offset = 0; 2917 - return 0; 2918 - } else { 2919 - ESPMISC(("not sync nego, sending ABORT\n")); 2920 - return ABORT; 2921 - } 2922 - }; 2923 - } 2924 - 2925 - /* Target negotiates for synchronous transfers before we do, this 2926 - * is legal although very strange. What is even funnier is that 2927 - * the SCSI2 standard specifically recommends against targets doing 2928 - * this because so many initiators cannot cope with this occurring. 2929 - */ 2930 - static int target_with_ants_in_pants(struct NCR_ESP *esp, 2931 - Scsi_Cmnd *SCptr, 2932 - struct esp_device *esp_dev) 2933 - { 2934 - if(esp_dev->sync || SCptr->device->borken) { 2935 - /* sorry, no can do */ 2936 - ESPSDTR(("forcing to async, ")); 2937 - build_sync_nego_msg(esp, 0, 0); 2938 - esp_dev->sync = 1; 2939 - esp->snip = 1; 2940 - ESPLOG(("esp%d: hoping for msgout\n", esp->esp_id)); 2941 - esp_advance_phase(SCptr, in_the_dark); 2942 - return EXTENDED_MESSAGE; 2943 - } 2944 - 2945 - /* Ok, we'll check them out... 
*/ 2946 - return 0; 2947 - } 2948 - 2949 - static void sync_report(struct NCR_ESP *esp) 2950 - { 2951 - int msg3, msg4; 2952 - char *type; 2953 - 2954 - msg3 = esp->cur_msgin[3]; 2955 - msg4 = esp->cur_msgin[4]; 2956 - if(msg4) { 2957 - int hz = 1000000000 / (msg3 * 4); 2958 - int integer = hz / 1000000; 2959 - int fraction = (hz - (integer * 1000000)) / 10000; 2960 - if((msg3 * 4) < 200) { 2961 - type = "FAST"; 2962 - } else { 2963 - type = "synchronous"; 2964 - } 2965 - 2966 - /* Do not transform this back into one big printk 2967 - * again, it triggers a bug in our sparc64-gcc272 2968 - * sibling call optimization. -DaveM 2969 - */ 2970 - ESPLOG((KERN_INFO "esp%d: target %d ", 2971 - esp->esp_id, esp->current_SC->device->id)); 2972 - ESPLOG(("[period %dns offset %d %d.%02dMHz ", 2973 - (int) msg3 * 4, (int) msg4, 2974 - integer, fraction)); 2975 - ESPLOG(("%s SCSI%s]\n", type, 2976 - (((msg3 * 4) < 200) ? "-II" : ""))); 2977 - } else { 2978 - ESPLOG((KERN_INFO "esp%d: target %d asynchronous\n", 2979 - esp->esp_id, esp->current_SC->device->id)); 2980 - } 2981 - } 2982 - 2983 - static int check_multibyte_msg(struct NCR_ESP *esp, 2984 - struct ESP_regs *eregs) 2985 - { 2986 - Scsi_Cmnd *SCptr = esp->current_SC; 2987 - struct esp_device *esp_dev = SCptr->device->hostdata; 2988 - unchar regval = 0; 2989 - int message_out = 0; 2990 - 2991 - ESPSDTR(("chk multibyte msg: ")); 2992 - if(esp->cur_msgin[2] == EXTENDED_SDTR) { 2993 - int period = esp->cur_msgin[3]; 2994 - int offset = esp->cur_msgin[4]; 2995 - 2996 - ESPSDTR(("is sync nego response, ")); 2997 - if(!esp->snip) { 2998 - int rval; 2999 - 3000 - /* Target negotiates first! */ 3001 - ESPSDTR(("target jumps the gun, ")); 3002 - message_out = EXTENDED_MESSAGE; /* we must respond */ 3003 - rval = target_with_ants_in_pants(esp, SCptr, esp_dev); 3004 - if(rval) 3005 - return rval; 3006 - } 3007 - 3008 - ESPSDTR(("examining sdtr, ")); 3009 - 3010 - /* Offset cannot be larger than ESP fifo size. 
*/ 3011 - if(offset > 15) { 3012 - ESPSDTR(("offset too big %2x, ", offset)); 3013 - offset = 15; 3014 - ESPSDTR(("sending back new offset\n")); 3015 - build_sync_nego_msg(esp, period, offset); 3016 - return EXTENDED_MESSAGE; 3017 - } 3018 - 3019 - if(offset && period > esp->max_period) { 3020 - /* Yeee, async for this slow device. */ 3021 - ESPSDTR(("period too long %2x, ", period)); 3022 - build_sync_nego_msg(esp, 0, 0); 3023 - ESPSDTR(("hoping for msgout\n")); 3024 - esp_advance_phase(esp->current_SC, in_the_dark); 3025 - return EXTENDED_MESSAGE; 3026 - } else if (offset && period < esp->min_period) { 3027 - ESPSDTR(("period too short %2x, ", period)); 3028 - period = esp->min_period; 3029 - if(esp->erev > esp236) 3030 - regval = 4; 3031 - else 3032 - regval = 5; 3033 - } else if(offset) { 3034 - int tmp; 3035 - 3036 - ESPSDTR(("period is ok, ")); 3037 - tmp = esp->ccycle / 1000; 3038 - regval = (((period << 2) + tmp - 1) / tmp); 3039 - if(regval && (esp->erev > esp236)) { 3040 - if(period >= 50) 3041 - regval--; 3042 - } 3043 - } 3044 - 3045 - if(offset) { 3046 - unchar bit; 3047 - 3048 - esp_dev->sync_min_period = (regval & 0x1f); 3049 - esp_dev->sync_max_offset = (offset | esp->radelay); 3050 - if(esp->erev > esp236) { 3051 - if(esp->erev == fas100a) 3052 - bit = ESP_CONFIG3_FAST; 3053 - else 3054 - bit = ESP_CONFIG3_FSCSI; 3055 - if(period < 50) 3056 - esp->config3[SCptr->device->id] |= bit; 3057 - else 3058 - esp->config3[SCptr->device->id] &= ~bit; 3059 - esp->prev_cfg3 = esp->config3[SCptr->device->id]; 3060 - esp_write(eregs->esp_cfg3, esp->prev_cfg3); 3061 - } 3062 - esp->prev_soff = esp_dev->sync_min_period; 3063 - esp_write(eregs->esp_soff, esp->prev_soff); 3064 - esp->prev_stp = esp_dev->sync_max_offset; 3065 - esp_write(eregs->esp_stp, esp->prev_stp); 3066 - 3067 - ESPSDTR(("soff=%2x stp=%2x cfg3=%2x\n", 3068 - esp_dev->sync_max_offset, 3069 - esp_dev->sync_min_period, 3070 - esp->config3[scmd_id(SCptr)])); 3071 - 3072 - esp->snip = 0; 3073 - } else 
if(esp_dev->sync_max_offset) { 3074 - unchar bit; 3075 - 3076 - /* back to async mode */ 3077 - ESPSDTR(("unaccaptable sync nego, forcing async\n")); 3078 - esp_dev->sync_max_offset = 0; 3079 - esp_dev->sync_min_period = 0; 3080 - esp->prev_soff = 0; 3081 - esp_write(eregs->esp_soff, 0); 3082 - esp->prev_stp = 0; 3083 - esp_write(eregs->esp_stp, 0); 3084 - if(esp->erev > esp236) { 3085 - if(esp->erev == fas100a) 3086 - bit = ESP_CONFIG3_FAST; 3087 - else 3088 - bit = ESP_CONFIG3_FSCSI; 3089 - esp->config3[SCptr->device->id] &= ~bit; 3090 - esp->prev_cfg3 = esp->config3[SCptr->device->id]; 3091 - esp_write(eregs->esp_cfg3, esp->prev_cfg3); 3092 - } 3093 - } 3094 - 3095 - sync_report(esp); 3096 - 3097 - ESPSDTR(("chk multibyte msg: sync is known, ")); 3098 - esp_dev->sync = 1; 3099 - 3100 - if(message_out) { 3101 - ESPLOG(("esp%d: sending sdtr back, hoping for msgout\n", 3102 - esp->esp_id)); 3103 - build_sync_nego_msg(esp, period, offset); 3104 - esp_advance_phase(SCptr, in_the_dark); 3105 - return EXTENDED_MESSAGE; 3106 - } 3107 - 3108 - ESPSDTR(("returning zero\n")); 3109 - esp_advance_phase(SCptr, in_the_dark); /* ...or else! 
*/ 3110 - return 0; 3111 - } else if(esp->cur_msgin[2] == EXTENDED_WDTR) { 3112 - ESPLOG(("esp%d: AIEEE wide msg received\n", esp->esp_id)); 3113 - message_out = MESSAGE_REJECT; 3114 - } else if(esp->cur_msgin[2] == EXTENDED_MODIFY_DATA_POINTER) { 3115 - ESPLOG(("esp%d: rejecting modify data ptr msg\n", esp->esp_id)); 3116 - message_out = MESSAGE_REJECT; 3117 - } 3118 - esp_advance_phase(SCptr, in_the_dark); 3119 - return message_out; 3120 - } 3121 - 3122 - static int esp_do_msgindone(struct NCR_ESP *esp, struct ESP_regs *eregs) 3123 - { 3124 - Scsi_Cmnd *SCptr = esp->current_SC; 3125 - int message_out = 0, it = 0, rval; 3126 - 3127 - rval = skipahead1(esp, eregs, SCptr, in_msgin, in_msgindone); 3128 - if(rval) 3129 - return rval; 3130 - if(SCptr->SCp.sent_command != in_status) { 3131 - if(!(esp->ireg & ESP_INTR_DC)) { 3132 - if(esp->msgin_len && (esp->sreg & ESP_STAT_PERR)) { 3133 - message_out = MSG_PARITY_ERROR; 3134 - esp_cmd(esp, eregs, ESP_CMD_FLUSH); 3135 - } else if((it = (esp_read(eregs->esp_fflags) & ESP_FF_FBYTES))!=1) { 3136 - /* We certainly dropped the ball somewhere. 
*/ 3137 - message_out = INITIATOR_ERROR; 3138 - esp_cmd(esp, eregs, ESP_CMD_FLUSH); 3139 - } else if(!esp->msgin_len) { 3140 - it = esp_read(eregs->esp_fdata); 3141 - esp_advance_phase(SCptr, in_msgincont); 3142 - } else { 3143 - /* it is ok and we want it */ 3144 - it = esp->cur_msgin[esp->msgin_ctr] = 3145 - esp_read(eregs->esp_fdata); 3146 - esp->msgin_ctr++; 3147 - } 3148 - } else { 3149 - esp_advance_phase(SCptr, in_the_dark); 3150 - return do_work_bus; 3151 - } 3152 - } else { 3153 - it = esp->cur_msgin[0]; 3154 - } 3155 - if(!message_out && esp->msgin_len) { 3156 - if(esp->msgin_ctr < esp->msgin_len) { 3157 - esp_advance_phase(SCptr, in_msgincont); 3158 - } else if(esp->msgin_len == 1) { 3159 - message_out = check_singlebyte_msg(esp, eregs); 3160 - } else if(esp->msgin_len == 2) { 3161 - if(esp->cur_msgin[0] == EXTENDED_MESSAGE) { 3162 - if((it+2) >= 15) { 3163 - message_out = MESSAGE_REJECT; 3164 - } else { 3165 - esp->msgin_len = (it + 2); 3166 - esp_advance_phase(SCptr, in_msgincont); 3167 - } 3168 - } else { 3169 - message_out = MESSAGE_REJECT; /* foo on you */ 3170 - } 3171 - } else { 3172 - message_out = check_multibyte_msg(esp, eregs); 3173 - } 3174 - } 3175 - if(message_out < 0) { 3176 - return -message_out; 3177 - } else if(message_out) { 3178 - if(((message_out != 1) && 3179 - ((message_out < 0x20) || (message_out & 0x80)))) 3180 - esp->msgout_len = 1; 3181 - esp->cur_msgout[0] = message_out; 3182 - esp_cmd(esp, eregs, ESP_CMD_SATN); 3183 - esp_advance_phase(SCptr, in_the_dark); 3184 - esp->msgin_len = 0; 3185 - } 3186 - esp->sreg = esp_read(eregs->esp_status); 3187 - esp->sreg &= ~(ESP_STAT_INTR); 3188 - if((esp->sreg & (ESP_STAT_PMSG|ESP_STAT_PCD)) == (ESP_STAT_PMSG|ESP_STAT_PCD)) 3189 - esp_cmd(esp, eregs, ESP_CMD_MOK); 3190 - if((SCptr->SCp.sent_command == in_msgindone) && 3191 - (SCptr->SCp.phase == in_freeing)) 3192 - return esp_do_freebus(esp, eregs); 3193 - return do_intr_end; 3194 - } 3195 - 3196 - static int esp_do_cmdbegin(struct NCR_ESP 
*esp, struct ESP_regs *eregs) 3197 - { 3198 - unsigned char tmp; 3199 - Scsi_Cmnd *SCptr = esp->current_SC; 3200 - 3201 - esp_advance_phase(SCptr, in_cmdend); 3202 - esp_cmd(esp, eregs, ESP_CMD_FLUSH); 3203 - tmp = *esp->esp_scmdp++; 3204 - esp->esp_scmdleft--; 3205 - esp_write(eregs->esp_fdata, tmp); 3206 - esp_cmd(esp, eregs, ESP_CMD_TI); 3207 - return do_intr_end; 3208 - } 3209 - 3210 - static int esp_do_cmddone(struct NCR_ESP *esp, struct ESP_regs *eregs) 3211 - { 3212 - esp_cmd(esp, eregs, ESP_CMD_NULL); 3213 - if(esp->ireg & ESP_INTR_BSERV) { 3214 - esp_advance_phase(esp->current_SC, in_the_dark); 3215 - return esp_do_phase_determine(esp, eregs); 3216 - } 3217 - ESPLOG(("esp%d: in do_cmddone() but didn't get BSERV interrupt.\n", 3218 - esp->esp_id)); 3219 - return do_reset_bus; 3220 - } 3221 - 3222 - static int esp_do_msgout(struct NCR_ESP *esp, struct ESP_regs *eregs) 3223 - { 3224 - esp_cmd(esp, eregs, ESP_CMD_FLUSH); 3225 - switch(esp->msgout_len) { 3226 - case 1: 3227 - esp_write(eregs->esp_fdata, esp->cur_msgout[0]); 3228 - esp_cmd(esp, eregs, ESP_CMD_TI); 3229 - break; 3230 - 3231 - case 2: 3232 - if(esp->do_pio_cmds){ 3233 - esp_write(eregs->esp_fdata, esp->cur_msgout[0]); 3234 - esp_write(eregs->esp_fdata, esp->cur_msgout[1]); 3235 - esp_cmd(esp, eregs, ESP_CMD_TI); 3236 - } else { 3237 - esp->esp_command[0] = esp->cur_msgout[0]; 3238 - esp->esp_command[1] = esp->cur_msgout[1]; 3239 - esp->dma_setup(esp, esp->esp_command_dvma, 2, 0); 3240 - esp_setcount(eregs, 2); 3241 - esp_cmd(esp, eregs, ESP_CMD_DMA | ESP_CMD_TI); 3242 - } 3243 - break; 3244 - 3245 - case 4: 3246 - esp->snip = 1; 3247 - if(esp->do_pio_cmds){ 3248 - esp_write(eregs->esp_fdata, esp->cur_msgout[0]); 3249 - esp_write(eregs->esp_fdata, esp->cur_msgout[1]); 3250 - esp_write(eregs->esp_fdata, esp->cur_msgout[2]); 3251 - esp_write(eregs->esp_fdata, esp->cur_msgout[3]); 3252 - esp_cmd(esp, eregs, ESP_CMD_TI); 3253 - } else { 3254 - esp->esp_command[0] = esp->cur_msgout[0]; 3255 - 
esp->esp_command[1] = esp->cur_msgout[1]; 3256 - esp->esp_command[2] = esp->cur_msgout[2]; 3257 - esp->esp_command[3] = esp->cur_msgout[3]; 3258 - esp->dma_setup(esp, esp->esp_command_dvma, 4, 0); 3259 - esp_setcount(eregs, 4); 3260 - esp_cmd(esp, eregs, ESP_CMD_DMA | ESP_CMD_TI); 3261 - } 3262 - break; 3263 - 3264 - case 5: 3265 - esp->snip = 1; 3266 - if(esp->do_pio_cmds){ 3267 - esp_write(eregs->esp_fdata, esp->cur_msgout[0]); 3268 - esp_write(eregs->esp_fdata, esp->cur_msgout[1]); 3269 - esp_write(eregs->esp_fdata, esp->cur_msgout[2]); 3270 - esp_write(eregs->esp_fdata, esp->cur_msgout[3]); 3271 - esp_write(eregs->esp_fdata, esp->cur_msgout[4]); 3272 - esp_cmd(esp, eregs, ESP_CMD_TI); 3273 - } else { 3274 - esp->esp_command[0] = esp->cur_msgout[0]; 3275 - esp->esp_command[1] = esp->cur_msgout[1]; 3276 - esp->esp_command[2] = esp->cur_msgout[2]; 3277 - esp->esp_command[3] = esp->cur_msgout[3]; 3278 - esp->esp_command[4] = esp->cur_msgout[4]; 3279 - esp->dma_setup(esp, esp->esp_command_dvma, 5, 0); 3280 - esp_setcount(eregs, 5); 3281 - esp_cmd(esp, eregs, ESP_CMD_DMA | ESP_CMD_TI); 3282 - } 3283 - break; 3284 - 3285 - default: 3286 - /* whoops */ 3287 - ESPMISC(("bogus msgout sending NOP\n")); 3288 - esp->cur_msgout[0] = NOP; 3289 - esp_write(eregs->esp_fdata, esp->cur_msgout[0]); 3290 - esp->msgout_len = 1; 3291 - esp_cmd(esp, eregs, ESP_CMD_TI); 3292 - break; 3293 - } 3294 - esp_advance_phase(esp->current_SC, in_msgoutdone); 3295 - return do_intr_end; 3296 - } 3297 - 3298 - static int esp_do_msgoutdone(struct NCR_ESP *esp, 3299 - struct ESP_regs *eregs) 3300 - { 3301 - if((esp->msgout_len > 1) && esp->dma_barrier) 3302 - esp->dma_barrier(esp); 3303 - 3304 - if(!(esp->ireg & ESP_INTR_DC)) { 3305 - esp_cmd(esp, eregs, ESP_CMD_NULL); 3306 - switch(esp->sreg & ESP_STAT_PMASK) { 3307 - case ESP_MOP: 3308 - /* whoops, parity error */ 3309 - ESPLOG(("esp%d: still in msgout, parity error assumed\n", 3310 - esp->esp_id)); 3311 - if(esp->msgout_len > 1) 3312 - 
esp_cmd(esp, eregs, ESP_CMD_SATN); 3313 - esp_advance_phase(esp->current_SC, in_msgout); 3314 - return do_work_bus; 3315 - 3316 - case ESP_DIP: 3317 - break; 3318 - 3319 - default: 3320 - if(!fcount(esp, eregs) && 3321 - !(((struct esp_device *)esp->current_SC->device->hostdata)->sync_max_offset)) 3322 - esp_cmd(esp, eregs, ESP_CMD_FLUSH); 3323 - break; 3324 - 3325 - }; 3326 - } 3327 - 3328 - /* If we sent out a synchronous negotiation message, update 3329 - * our state. 3330 - */ 3331 - if(esp->cur_msgout[2] == EXTENDED_MESSAGE && 3332 - esp->cur_msgout[4] == EXTENDED_SDTR) { 3333 - esp->snip = 1; /* anal retentiveness... */ 3334 - } 3335 - 3336 - esp->prevmsgout = esp->cur_msgout[0]; 3337 - esp->msgout_len = 0; 3338 - esp_advance_phase(esp->current_SC, in_the_dark); 3339 - return esp_do_phase_determine(esp, eregs); 3340 - } 3341 - 3342 - static int esp_bus_unexpected(struct NCR_ESP *esp, struct ESP_regs *eregs) 3343 - { 3344 - ESPLOG(("esp%d: command in weird state %2x\n", 3345 - esp->esp_id, esp->current_SC->SCp.phase)); 3346 - return do_reset_bus; 3347 - } 3348 - 3349 - static espfunc_t bus_vector[] = { 3350 - esp_do_data_finale, 3351 - esp_do_data_finale, 3352 - esp_bus_unexpected, 3353 - esp_do_msgin, 3354 - esp_do_msgincont, 3355 - esp_do_msgindone, 3356 - esp_do_msgout, 3357 - esp_do_msgoutdone, 3358 - esp_do_cmdbegin, 3359 - esp_do_cmddone, 3360 - esp_do_status, 3361 - esp_do_freebus, 3362 - esp_do_phase_determine, 3363 - esp_bus_unexpected, 3364 - esp_bus_unexpected, 3365 - esp_bus_unexpected, 3366 - }; 3367 - 3368 - /* This is the second tier in our dual-level SCSI state machine. 
*/ 3369 - static int esp_work_bus(struct NCR_ESP *esp, struct ESP_regs *eregs) 3370 - { 3371 - Scsi_Cmnd *SCptr = esp->current_SC; 3372 - unsigned int phase; 3373 - 3374 - ESPBUS(("esp_work_bus: ")); 3375 - if(!SCptr) { 3376 - ESPBUS(("reconnect\n")); 3377 - return esp_do_reconnect(esp, eregs); 3378 - } 3379 - phase = SCptr->SCp.phase; 3380 - if ((phase & 0xf0) == in_phases_mask) 3381 - return bus_vector[(phase & 0x0f)](esp, eregs); 3382 - else if((phase & 0xf0) == in_slct_mask) 3383 - return esp_select_complete(esp, eregs); 3384 - else 3385 - return esp_bus_unexpected(esp, eregs); 3386 - } 3387 - 3388 - static espfunc_t isvc_vector[] = { 3389 - NULL, 3390 - esp_do_phase_determine, 3391 - esp_do_resetbus, 3392 - esp_finish_reset, 3393 - esp_work_bus 3394 - }; 3395 - 3396 - /* Main interrupt handler for an esp adapter. */ 3397 - void esp_handle(struct NCR_ESP *esp) 3398 - { 3399 - struct ESP_regs *eregs; 3400 - Scsi_Cmnd *SCptr; 3401 - int what_next = do_intr_end; 3402 - eregs = esp->eregs; 3403 - SCptr = esp->current_SC; 3404 - 3405 - if(esp->dma_irq_entry) 3406 - esp->dma_irq_entry(esp); 3407 - 3408 - /* Check for errors. 
*/ 3409 - esp->sreg = esp_read(eregs->esp_status); 3410 - esp->sreg &= (~ESP_STAT_INTR); 3411 - esp->seqreg = (esp_read(eregs->esp_sstep) & ESP_STEP_VBITS); 3412 - esp->ireg = esp_read(eregs->esp_intrpt); /* Unlatch intr and stat regs */ 3413 - ESPIRQ(("handle_irq: [sreg<%02x> sstep<%02x> ireg<%02x>]\n", 3414 - esp->sreg, esp->seqreg, esp->ireg)); 3415 - if(esp->sreg & (ESP_STAT_SPAM)) { 3416 - /* Gross error, could be due to one of: 3417 - * 3418 - * - top of fifo overwritten, could be because 3419 - * we tried to do a synchronous transfer with 3420 - * an offset greater than ESP fifo size 3421 - * 3422 - * - top of command register overwritten 3423 - * 3424 - * - DMA setup to go in one direction, SCSI 3425 - * bus points in the other, whoops 3426 - * 3427 - * - weird phase change during asynchronous 3428 - * data phase while we are initiator 3429 - */ 3430 - ESPLOG(("esp%d: Gross error sreg=%2x\n", esp->esp_id, esp->sreg)); 3431 - 3432 - /* If a command is live on the bus we cannot safely 3433 - * reset the bus, so we'll just let the pieces fall 3434 - * where they may. Here we are hoping that the 3435 - * target will be able to cleanly go away soon 3436 - * so we can safely reset things. 3437 - */ 3438 - if(!SCptr) { 3439 - ESPLOG(("esp%d: No current cmd during gross error, " 3440 - "resetting bus\n", esp->esp_id)); 3441 - what_next = do_reset_bus; 3442 - goto state_machine; 3443 - } 3444 - } 3445 - 3446 - /* No current cmd is only valid at this point when there are 3447 - * commands off the bus or we are trying a reset. 3448 - */ 3449 - if(!SCptr && !esp->disconnected_SC && !(esp->ireg & ESP_INTR_SR)) { 3450 - /* Panic is safe, since current_SC is null. */ 3451 - ESPLOG(("esp%d: no command in esp_handle()\n", esp->esp_id)); 3452 - panic("esp_handle: current_SC == penguin within interrupt!"); 3453 - } 3454 - 3455 - if(esp->ireg & (ESP_INTR_IC)) { 3456 - /* Illegal command fed to ESP. 
Outside of obvious 3457 - * software bugs that could cause this, there is 3458 - * a condition with ESP100 where we can confuse the 3459 - * ESP into an erroneous illegal command interrupt 3460 - * because it does not scrape the FIFO properly 3461 - * for reselection. See esp100_reconnect_hwbug() 3462 - * to see how we try very hard to avoid this. 3463 - */ 3464 - ESPLOG(("esp%d: invalid command\n", esp->esp_id)); 3465 - 3466 - esp_dump_state(esp, eregs); 3467 - 3468 - if(SCptr) { 3469 - /* Devices with very buggy firmware can drop BSY 3470 - * during a scatter list interrupt when using sync 3471 - * mode transfers. We continue the transfer as 3472 - * expected, the target drops the bus, the ESP 3473 - * gets confused, and we get a illegal command 3474 - * interrupt because the bus is in the disconnected 3475 - * state now and ESP_CMD_TI is only allowed when 3476 - * a nexus is alive on the bus. 3477 - */ 3478 - ESPLOG(("esp%d: Forcing async and disabling disconnect for " 3479 - "target %d\n", esp->esp_id, SCptr->device->id)); 3480 - SCptr->device->borken = 1; /* foo on you */ 3481 - } 3482 - 3483 - what_next = do_reset_bus; 3484 - } else if(!(esp->ireg & ~(ESP_INTR_FDONE | ESP_INTR_BSERV | ESP_INTR_DC))) { 3485 - int phase; 3486 - 3487 - if(SCptr) { 3488 - phase = SCptr->SCp.phase; 3489 - if(phase & in_phases_mask) { 3490 - what_next = esp_work_bus(esp, eregs); 3491 - } else if(phase & in_slct_mask) { 3492 - what_next = esp_select_complete(esp, eregs); 3493 - } else { 3494 - ESPLOG(("esp%d: interrupt for no good reason...\n", 3495 - esp->esp_id)); 3496 - what_next = do_intr_end; 3497 - } 3498 - } else { 3499 - ESPLOG(("esp%d: BSERV or FDONE or DC while SCptr==NULL\n", 3500 - esp->esp_id)); 3501 - what_next = do_reset_bus; 3502 - } 3503 - } else if(esp->ireg & ESP_INTR_SR) { 3504 - ESPLOG(("esp%d: SCSI bus reset interrupt\n", esp->esp_id)); 3505 - what_next = do_reset_complete; 3506 - } else if(esp->ireg & (ESP_INTR_S | ESP_INTR_SATN)) { 3507 - ESPLOG(("esp%d: 
AIEEE we have been selected by another initiator!\n", 3508 - esp->esp_id)); 3509 - what_next = do_reset_bus; 3510 - } else if(esp->ireg & ESP_INTR_RSEL) { 3511 - if(!SCptr) { 3512 - /* This is ok. */ 3513 - what_next = esp_do_reconnect(esp, eregs); 3514 - } else if(SCptr->SCp.phase & in_slct_mask) { 3515 - /* Only selection code knows how to clean 3516 - * up properly. 3517 - */ 3518 - ESPDISC(("Reselected during selection attempt\n")); 3519 - what_next = esp_select_complete(esp, eregs); 3520 - } else { 3521 - ESPLOG(("esp%d: Reselected while bus is busy\n", 3522 - esp->esp_id)); 3523 - what_next = do_reset_bus; 3524 - } 3525 - } 3526 - 3527 - /* This is tier-one in our dual level SCSI state machine. */ 3528 - state_machine: 3529 - while(what_next != do_intr_end) { 3530 - if (what_next >= do_phase_determine && 3531 - what_next < do_intr_end) 3532 - what_next = isvc_vector[what_next](esp, eregs); 3533 - else { 3534 - /* state is completely lost ;-( */ 3535 - ESPLOG(("esp%d: interrupt engine loses state, resetting bus\n", 3536 - esp->esp_id)); 3537 - what_next = do_reset_bus; 3538 - } 3539 - } 3540 - if(esp->dma_irq_exit) 3541 - esp->dma_irq_exit(esp); 3542 - } 3543 - EXPORT_SYMBOL(esp_handle); 3544 - 3545 - #ifndef CONFIG_SMP 3546 - irqreturn_t esp_intr(int irq, void *dev_id) 3547 - { 3548 - struct NCR_ESP *esp; 3549 - unsigned long flags; 3550 - int again; 3551 - struct Scsi_Host *dev = dev_id; 3552 - 3553 - /* Handle all ESP interrupts showing at this IRQ level. 
*/ 3554 - spin_lock_irqsave(dev->host_lock, flags); 3555 - repeat: 3556 - again = 0; 3557 - for_each_esp(esp) { 3558 - #ifndef __mips__ 3559 - if(((esp)->irq & 0xff) == irq) { 3560 - #endif 3561 - if(esp->dma_irq_p(esp)) { 3562 - again = 1; 3563 - 3564 - esp->dma_ints_off(esp); 3565 - 3566 - ESPIRQ(("I%d(", esp->esp_id)); 3567 - esp_handle(esp); 3568 - ESPIRQ((")")); 3569 - 3570 - esp->dma_ints_on(esp); 3571 - } 3572 - #ifndef __mips__ 3573 - } 3574 - #endif 3575 - } 3576 - if(again) 3577 - goto repeat; 3578 - spin_unlock_irqrestore(dev->host_lock, flags); 3579 - return IRQ_HANDLED; 3580 - } 3581 - #else 3582 - /* For SMP we only service one ESP on the list list at our IRQ level! */ 3583 - irqreturn_t esp_intr(int irq, void *dev_id) 3584 - { 3585 - struct NCR_ESP *esp; 3586 - unsigned long flags; 3587 - struct Scsi_Host *dev = dev_id; 3588 - 3589 - /* Handle all ESP interrupts showing at this IRQ level. */ 3590 - spin_lock_irqsave(dev->host_lock, flags); 3591 - for_each_esp(esp) { 3592 - if(((esp)->irq & 0xf) == irq) { 3593 - if(esp->dma_irq_p(esp)) { 3594 - esp->dma_ints_off(esp); 3595 - 3596 - ESPIRQ(("I[%d:%d](", 3597 - smp_processor_id(), esp->esp_id)); 3598 - esp_handle(esp); 3599 - ESPIRQ((")")); 3600 - 3601 - esp->dma_ints_on(esp); 3602 - goto out; 3603 - } 3604 - } 3605 - } 3606 - out: 3607 - spin_unlock_irqrestore(dev->host_lock, flags); 3608 - return IRQ_HANDLED; 3609 - } 3610 - #endif 3611 - 3612 - int esp_slave_alloc(struct scsi_device *SDptr) 3613 - { 3614 - struct esp_device *esp_dev = 3615 - kzalloc(sizeof(struct esp_device), GFP_ATOMIC); 3616 - 3617 - if (!esp_dev) 3618 - return -ENOMEM; 3619 - SDptr->hostdata = esp_dev; 3620 - return 0; 3621 - } 3622 - 3623 - void esp_slave_destroy(struct scsi_device *SDptr) 3624 - { 3625 - struct NCR_ESP *esp = (struct NCR_ESP *) SDptr->host->hostdata; 3626 - 3627 - esp->targets_present &= ~(1 << sdev_id(SDptr)); 3628 - kfree(SDptr->hostdata); 3629 - SDptr->hostdata = NULL; 3630 - } 3631 - 3632 - #ifdef MODULE 
3633 - int init_module(void) { return 0; } 3634 - void cleanup_module(void) {} 3635 - void esp_release(void) 3636 - { 3637 - esps_in_use--; 3638 - esps_running = esps_in_use; 3639 - } 3640 - EXPORT_SYMBOL(esp_release); 3641 - #endif 3642 - 3643 - EXPORT_SYMBOL(esp_abort); 3644 - EXPORT_SYMBOL(esp_allocate); 3645 - EXPORT_SYMBOL(esp_deallocate); 3646 - EXPORT_SYMBOL(esp_initialize); 3647 - EXPORT_SYMBOL(esp_intr); 3648 - EXPORT_SYMBOL(esp_queue); 3649 - EXPORT_SYMBOL(esp_reset); 3650 - EXPORT_SYMBOL(esp_slave_alloc); 3651 - EXPORT_SYMBOL(esp_slave_destroy); 3652 - EXPORT_SYMBOL(esps_in_use); 3653 - 3654 - MODULE_LICENSE("GPL");
-668
drivers/scsi/NCR53C9x.h
··· 1 - /* NCR53C9x.c: Defines and structures for the NCR53C9x generic driver. 2 - * 3 - * Originally esp.h: Defines and structures for the Sparc ESP 4 - * (Enhanced SCSI Processor) driver under Linux. 5 - * 6 - * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) 7 - * 8 - * Generalization by Jesper Skov (jskov@cygnus.co.uk) 9 - * 10 - * More generalization (for i386 stuff) by Tymm Twillman (tymm@computer.org) 11 - */ 12 - 13 - #ifndef NCR53C9X_H 14 - #define NCR53C9X_H 15 - 16 - #include <linux/interrupt.h> 17 - 18 - /* djweis for mac driver */ 19 - #if defined(CONFIG_MAC) 20 - #define PAD_SIZE 15 21 - #else 22 - #define PAD_SIZE 3 23 - #endif 24 - 25 - /* Handle multiple hostadapters on Amiga 26 - * generally PAD_SIZE = 3 27 - * but there is one exception: Oktagon (PAD_SIZE = 1) */ 28 - #if defined(CONFIG_OKTAGON_SCSI) || defined(CONFIG_OKTAGON_SCSI_MODULE) 29 - #undef PAD_SIZE 30 - #if defined(CONFIG_BLZ1230_SCSI) || defined(CONFIG_BLZ1230_SCSI_MODULE) || \ 31 - defined(CONFIG_BLZ2060_SCSI) || defined(CONFIG_BLZ2060_SCSI_MODULE) || \ 32 - defined(CONFIG_CYBERSTORM_SCSI) || defined(CONFIG_CYBERSTORM_SCSI_MODULE) || \ 33 - defined(CONFIG_CYBERSTORMII_SCSI) || defined(CONFIG_CYBERSTORMII_SCSI_MODULE) || \ 34 - defined(CONFIG_FASTLANE_SCSI) || defined(CONFIG_FASTLANE_SCSI_MODULE) 35 - #define MULTIPLE_PAD_SIZES 36 - #else 37 - #define PAD_SIZE 1 38 - #endif 39 - #endif 40 - 41 - /* Macros for debugging messages */ 42 - 43 - #define DEBUG_ESP 44 - /* #define DEBUG_ESP_DATA */ 45 - /* #define DEBUG_ESP_QUEUE */ 46 - /* #define DEBUG_ESP_DISCONNECT */ 47 - /* #define DEBUG_ESP_STATUS */ 48 - /* #define DEBUG_ESP_PHASES */ 49 - /* #define DEBUG_ESP_WORKBUS */ 50 - /* #define DEBUG_STATE_MACHINE */ 51 - /* #define DEBUG_ESP_CMDS */ 52 - /* #define DEBUG_ESP_IRQS */ 53 - /* #define DEBUG_SDTR */ 54 - /* #define DEBUG_ESP_SG */ 55 - 56 - /* Use the following to sprinkle debugging messages in a way which 57 - * suits you if combinations of the above become too 
verbose when 58 - * trying to track down a specific problem. 59 - */ 60 - /* #define DEBUG_ESP_MISC */ 61 - 62 - #if defined(DEBUG_ESP) 63 - #define ESPLOG(foo) printk foo 64 - #else 65 - #define ESPLOG(foo) 66 - #endif /* (DEBUG_ESP) */ 67 - 68 - #if defined(DEBUG_ESP_DATA) 69 - #define ESPDATA(foo) printk foo 70 - #else 71 - #define ESPDATA(foo) 72 - #endif 73 - 74 - #if defined(DEBUG_ESP_QUEUE) 75 - #define ESPQUEUE(foo) printk foo 76 - #else 77 - #define ESPQUEUE(foo) 78 - #endif 79 - 80 - #if defined(DEBUG_ESP_DISCONNECT) 81 - #define ESPDISC(foo) printk foo 82 - #else 83 - #define ESPDISC(foo) 84 - #endif 85 - 86 - #if defined(DEBUG_ESP_STATUS) 87 - #define ESPSTAT(foo) printk foo 88 - #else 89 - #define ESPSTAT(foo) 90 - #endif 91 - 92 - #if defined(DEBUG_ESP_PHASES) 93 - #define ESPPHASE(foo) printk foo 94 - #else 95 - #define ESPPHASE(foo) 96 - #endif 97 - 98 - #if defined(DEBUG_ESP_WORKBUS) 99 - #define ESPBUS(foo) printk foo 100 - #else 101 - #define ESPBUS(foo) 102 - #endif 103 - 104 - #if defined(DEBUG_ESP_IRQS) 105 - #define ESPIRQ(foo) printk foo 106 - #else 107 - #define ESPIRQ(foo) 108 - #endif 109 - 110 - #if defined(DEBUG_SDTR) 111 - #define ESPSDTR(foo) printk foo 112 - #else 113 - #define ESPSDTR(foo) 114 - #endif 115 - 116 - #if defined(DEBUG_ESP_MISC) 117 - #define ESPMISC(foo) printk foo 118 - #else 119 - #define ESPMISC(foo) 120 - #endif 121 - 122 - /* 123 - * padding for register structure 124 - */ 125 - #ifdef CONFIG_JAZZ_ESP 126 - #define EREGS_PAD(n) 127 - #else 128 - #ifndef MULTIPLE_PAD_SIZES 129 - #define EREGS_PAD(n) unchar n[PAD_SIZE]; 130 - #endif 131 - #endif 132 - 133 - /* The ESP SCSI controllers have their register sets in three 134 - * "classes": 135 - * 136 - * 1) Registers which are both read and write. 137 - * 2) Registers which are read only. 138 - * 3) Registers which are write only. 139 - * 140 - * Yet, they all live within the same IO space. 
141 - */ 142 - 143 - #if !defined(__i386__) && !defined(__x86_64__) 144 - 145 - #ifndef MULTIPLE_PAD_SIZES 146 - 147 - #ifdef CONFIG_CPU_HAS_WB 148 - #include <asm/wbflush.h> 149 - #define esp_write(__reg, __val) do{(__reg) = (__val); wbflush();} while(0) 150 - #else 151 - #define esp_write(__reg, __val) ((__reg) = (__val)) 152 - #endif 153 - #define esp_read(__reg) (__reg) 154 - 155 - struct ESP_regs { 156 - /* Access Description Offset */ 157 - volatile unchar esp_tclow; /* rw Low bits of the transfer count 0x00 */ 158 - EREGS_PAD(tlpad1); 159 - volatile unchar esp_tcmed; /* rw Mid bits of the transfer count 0x04 */ 160 - EREGS_PAD(fdpad); 161 - volatile unchar esp_fdata; /* rw FIFO data bits 0x08 */ 162 - EREGS_PAD(cbpad); 163 - volatile unchar esp_cmnd; /* rw SCSI command bits 0x0c */ 164 - EREGS_PAD(stpad); 165 - volatile unchar esp_status; /* ro ESP status register 0x10 */ 166 - #define esp_busid esp_status /* wo Bus ID for select/reselect 0x10 */ 167 - EREGS_PAD(irqpd); 168 - volatile unchar esp_intrpt; /* ro Kind of interrupt 0x14 */ 169 - #define esp_timeo esp_intrpt /* wo Timeout value for select/resel 0x14 */ 170 - EREGS_PAD(sspad); 171 - volatile unchar esp_sstep; /* ro Sequence step register 0x18 */ 172 - #define esp_stp esp_sstep /* wo Transfer period per sync 0x18 */ 173 - EREGS_PAD(ffpad); 174 - volatile unchar esp_fflags; /* ro Bits of current FIFO info 0x1c */ 175 - #define esp_soff esp_fflags /* wo Sync offset 0x1c */ 176 - EREGS_PAD(cf1pd); 177 - volatile unchar esp_cfg1; /* rw First configuration register 0x20 */ 178 - EREGS_PAD(cfpad); 179 - volatile unchar esp_cfact; /* wo Clock conversion factor 0x24 */ 180 - EREGS_PAD(ctpad); 181 - volatile unchar esp_ctest; /* wo Chip test register 0x28 */ 182 - EREGS_PAD(cf2pd); 183 - volatile unchar esp_cfg2; /* rw Second configuration register 0x2c */ 184 - EREGS_PAD(cf3pd); 185 - 186 - /* The following is only found on the 53C9X series SCSI chips */ 187 - volatile unchar esp_cfg3; /* rw Third 
configuration register 0x30 */ 188 - EREGS_PAD(cf4pd); 189 - volatile unchar esp_cfg4; /* rw Fourth configuration register 0x34 */ 190 - EREGS_PAD(thpd); 191 - /* The following is found on all chips except the NCR53C90 (ESP100) */ 192 - volatile unchar esp_tchi; /* rw High bits of transfer count 0x38 */ 193 - #define esp_uid esp_tchi /* ro Unique ID code 0x38 */ 194 - EREGS_PAD(fgpad); 195 - volatile unchar esp_fgrnd; /* rw Data base for fifo 0x3c */ 196 - }; 197 - 198 - #else /* MULTIPLE_PAD_SIZES */ 199 - 200 - #define esp_write(__reg, __val) (*(__reg) = (__val)) 201 - #define esp_read(__reg) (*(__reg)) 202 - 203 - struct ESP_regs { 204 - unsigned char io_addr[64]; /* dummy */ 205 - /* Access Description Offset */ 206 - #define esp_tclow io_addr /* rw Low bits of the transfer count 0x00 */ 207 - #define esp_tcmed io_addr + (1<<(esp->shift)) /* rw Mid bits of the transfer count 0x04 */ 208 - #define esp_fdata io_addr + (2<<(esp->shift)) /* rw FIFO data bits 0x08 */ 209 - #define esp_cmnd io_addr + (3<<(esp->shift)) /* rw SCSI command bits 0x0c */ 210 - #define esp_status io_addr + (4<<(esp->shift)) /* ro ESP status register 0x10 */ 211 - #define esp_busid esp_status /* wo Bus ID for select/reselect 0x10 */ 212 - #define esp_intrpt io_addr + (5<<(esp->shift)) /* ro Kind of interrupt 0x14 */ 213 - #define esp_timeo esp_intrpt /* wo Timeout value for select/resel 0x14 */ 214 - #define esp_sstep io_addr + (6<<(esp->shift)) /* ro Sequence step register 0x18 */ 215 - #define esp_stp esp_sstep /* wo Transfer period per sync 0x18 */ 216 - #define esp_fflags io_addr + (7<<(esp->shift)) /* ro Bits of current FIFO info 0x1c */ 217 - #define esp_soff esp_fflags /* wo Sync offset 0x1c */ 218 - #define esp_cfg1 io_addr + (8<<(esp->shift)) /* rw First configuration register 0x20 */ 219 - #define esp_cfact io_addr + (9<<(esp->shift)) /* wo Clock conversion factor 0x24 */ 220 - #define esp_ctest io_addr + (10<<(esp->shift)) /* wo Chip test register 0x28 */ 221 - #define esp_cfg2 
io_addr + (11<<(esp->shift)) /* rw Second configuration register 0x2c */ 222 - 223 - /* The following is only found on the 53C9X series SCSI chips */ 224 - #define esp_cfg3 io_addr + (12<<(esp->shift)) /* rw Third configuration register 0x30 */ 225 - #define esp_cfg4 io_addr + (13<<(esp->shift)) /* rw Fourth configuration register 0x34 */ 226 - 227 - /* The following is found on all chips except the NCR53C90 (ESP100) */ 228 - #define esp_tchi io_addr + (14<<(esp->shift)) /* rw High bits of transfer count 0x38 */ 229 - #define esp_uid esp_tchi /* ro Unique ID code 0x38 */ 230 - #define esp_fgrnd io_addr + (15<<(esp->shift)) /* rw Data base for fifo 0x3c */ 231 - }; 232 - 233 - #endif 234 - 235 - #else /* !defined(__i386__) && !defined(__x86_64__) */ 236 - 237 - #define esp_write(__reg, __val) outb((__val), (__reg)) 238 - #define esp_read(__reg) inb((__reg)) 239 - 240 - struct ESP_regs { 241 - unsigned int io_addr; 242 - /* Access Description Offset */ 243 - #define esp_tclow io_addr /* rw Low bits of the transfer count 0x00 */ 244 - #define esp_tcmed io_addr + 1 /* rw Mid bits of the transfer count 0x04 */ 245 - #define esp_fdata io_addr + 2 /* rw FIFO data bits 0x08 */ 246 - #define esp_cmnd io_addr + 3 /* rw SCSI command bits 0x0c */ 247 - #define esp_status io_addr + 4 /* ro ESP status register 0x10 */ 248 - #define esp_busid esp_status /* wo Bus ID for select/reselect 0x10 */ 249 - #define esp_intrpt io_addr + 5 /* ro Kind of interrupt 0x14 */ 250 - #define esp_timeo esp_intrpt /* wo Timeout value for select/resel 0x14 */ 251 - #define esp_sstep io_addr + 6 /* ro Sequence step register 0x18 */ 252 - #define esp_stp esp_sstep /* wo Transfer period per sync 0x18 */ 253 - #define esp_fflags io_addr + 7 /* ro Bits of current FIFO info 0x1c */ 254 - #define esp_soff esp_fflags /* wo Sync offset 0x1c */ 255 - #define esp_cfg1 io_addr + 8 /* rw First configuration register 0x20 */ 256 - #define esp_cfact io_addr + 9 /* wo Clock conversion factor 0x24 */ 257 - #define 
esp_ctest io_addr + 10 /* wo Chip test register 0x28 */ 258 - #define esp_cfg2 io_addr + 11 /* rw Second configuration register 0x2c */ 259 - 260 - /* The following is only found on the 53C9X series SCSI chips */ 261 - #define esp_cfg3 io_addr + 12 /* rw Third configuration register 0x30 */ 262 - #define esp_cfg4 io_addr + 13 /* rw Fourth configuration register 0x34 */ 263 - 264 - /* The following is found on all chips except the NCR53C90 (ESP100) */ 265 - #define esp_tchi io_addr + 14 /* rw High bits of transfer count 0x38 */ 266 - #define esp_uid esp_tchi /* ro Unique ID code 0x38 */ 267 - #define esp_fgrnd io_addr + 15 /* rw Data base for fifo 0x3c */ 268 - }; 269 - 270 - #endif /* !defined(__i386__) && !defined(__x86_64__) */ 271 - 272 - /* Various revisions of the ESP board. */ 273 - enum esp_rev { 274 - esp100 = 0x00, /* NCR53C90 - very broken */ 275 - esp100a = 0x01, /* NCR53C90A */ 276 - esp236 = 0x02, 277 - fas236 = 0x03, 278 - fas100a = 0x04, 279 - fast = 0x05, 280 - fas366 = 0x06, 281 - fas216 = 0x07, 282 - fsc = 0x08, /* SYM53C94-2 */ 283 - espunknown = 0x09 284 - }; 285 - 286 - /* We allocate one of these for each scsi device and attach it to 287 - * SDptr->hostdata for use in the driver 288 - */ 289 - struct esp_device { 290 - unsigned char sync_min_period; 291 - unsigned char sync_max_offset; 292 - unsigned sync:1; 293 - unsigned wide:1; 294 - unsigned disconnect:1; 295 - }; 296 - 297 - /* We get one of these for each ESP probed. */ 298 - struct NCR_ESP { 299 - struct NCR_ESP *next; /* Next ESP on probed or NULL */ 300 - struct ESP_regs *eregs; /* All esp registers */ 301 - int dma; /* Who I do transfers with. */ 302 - void *dregs; /* And his registers. 
*/ 303 - struct Scsi_Host *ehost; /* Backpointer to SCSI Host */ 304 - 305 - void *edev; /* Pointer to controller base/SBus */ 306 - int esp_id; /* Unique per-ESP ID number */ 307 - 308 - /* ESP Configuration Registers */ 309 - unsigned char config1; /* Copy of the 1st config register */ 310 - unsigned char config2; /* Copy of the 2nd config register */ 311 - unsigned char config3[16]; /* Copy of the 3rd config register */ 312 - 313 - /* The current command we are sending to the ESP chip. This esp_command 314 - * ptr needs to be mapped in DVMA area so we can send commands and read 315 - * from the ESP fifo without burning precious CPU cycles. Programmed I/O 316 - * sucks when we have the DVMA to do it for us. The ESP is stupid and will 317 - * only send out 6, 10, and 12 byte SCSI commands, others we need to send 318 - * one byte at a time. esp_slowcmd being set says that we are doing one 319 - * of the command types ESP doesn't understand, esp_scmdp keeps track of 320 - * which byte we are sending, esp_scmdleft says how many bytes to go. 321 - */ 322 - volatile unchar *esp_command; /* Location of command (CPU view) */ 323 - __u32 esp_command_dvma; /* Location of command (DVMA view) */ 324 - unsigned char esp_clen; /* Length of this command */ 325 - unsigned char esp_slowcmd; 326 - unsigned char *esp_scmdp; 327 - unsigned char esp_scmdleft; 328 - 329 - /* The following are used to determine the cause of an IRQ. Upon every 330 - * IRQ entry we synchronize these with the hardware registers. 331 - */ 332 - unchar ireg; /* Copy of ESP interrupt register */ 333 - unchar sreg; /* Same for ESP status register */ 334 - unchar seqreg; /* The ESP sequence register */ 335 - 336 - /* The following is set when a premature interrupt condition is detected 337 - * in some FAS revisions. 
338 - */ 339 - unchar fas_premature_intr_workaround; 340 - 341 - /* To save register writes to the ESP, which can be expensive, we 342 - * keep track of the previous value that various registers had for 343 - * the last target we connected to. If they are the same for the 344 - * current target, we skip the register writes as they are not needed. 345 - */ 346 - unchar prev_soff, prev_stp, prev_cfg3; 347 - 348 - /* For each target we keep track of save/restore data 349 - * pointer information. This needs to be updated majorly 350 - * when we add support for tagged queueing. -DaveM 351 - */ 352 - struct esp_pointers { 353 - char *saved_ptr; 354 - struct scatterlist *saved_buffer; 355 - int saved_this_residual; 356 - int saved_buffers_residual; 357 - } data_pointers[16] /*XXX [MAX_TAGS_PER_TARGET]*/; 358 - 359 - /* Clock periods, frequencies, synchronization, etc. */ 360 - unsigned int cfreq; /* Clock frequency in HZ */ 361 - unsigned int cfact; /* Clock conversion factor */ 362 - unsigned int ccycle; /* One ESP clock cycle */ 363 - unsigned int ctick; /* One ESP clock time */ 364 - unsigned int radelay; /* FAST chip req/ack delay */ 365 - unsigned int neg_defp; /* Default negotiation period */ 366 - unsigned int sync_defp; /* Default sync transfer period */ 367 - unsigned int max_period; /* longest our period can be */ 368 - unsigned int min_period; /* shortest period we can withstand */ 369 - /* For slow to medium speed input clock rates we shoot for 5mb/s, 370 - * but for high input clock rates we try to do 10mb/s although I 371 - * don't think a transfer can even run that fast with an ESP even 372 - * with DMA2 scatter gather pipelining. 373 - */ 374 - #define SYNC_DEFP_SLOW 0x32 /* 5mb/s */ 375 - #define SYNC_DEFP_FAST 0x19 /* 10mb/s */ 376 - 377 - unsigned int snip; /* Sync. 
negotiation in progress */ 378 - unsigned int wnip; /* WIDE negotiation in progress */ 379 - unsigned int targets_present; /* targets spoken to before */ 380 - 381 - int current_transfer_size; /* Set at beginning of data dma */ 382 - 383 - unchar espcmdlog[32]; /* Log of current esp cmds sent. */ 384 - unchar espcmdent; /* Current entry in esp cmd log. */ 385 - 386 - /* Misc. info about this ESP */ 387 - enum esp_rev erev; /* ESP revision */ 388 - int irq; /* IRQ for this ESP */ 389 - int scsi_id; /* Who am I as initiator? */ 390 - int scsi_id_mask; /* Bitmask of 'me'. */ 391 - int diff; /* Differential SCSI bus? */ 392 - int slot; /* Slot the adapter occupies */ 393 - 394 - /* Our command queues, only one cmd lives in the current_SC queue. */ 395 - Scsi_Cmnd *issue_SC; /* Commands to be issued */ 396 - Scsi_Cmnd *current_SC; /* Who is currently working the bus */ 397 - Scsi_Cmnd *disconnected_SC; /* Commands disconnected from the bus */ 398 - 399 - /* Message goo */ 400 - unchar cur_msgout[16]; 401 - unchar cur_msgin[16]; 402 - unchar prevmsgout, prevmsgin; 403 - unchar msgout_len, msgin_len; 404 - unchar msgout_ctr, msgin_ctr; 405 - 406 - /* States that we cannot keep in the per cmd structure because they 407 - * cannot be assosciated with any specific command. 
408 - */ 409 - unchar resetting_bus; 410 - wait_queue_head_t reset_queue; 411 - 412 - unchar do_pio_cmds; /* Do command transfer with pio */ 413 - 414 - /* How much bits do we have to shift the registers */ 415 - unsigned char shift; 416 - 417 - /* Functions handling DMA 418 - */ 419 - /* Required functions */ 420 - int (*dma_bytes_sent)(struct NCR_ESP *, int); 421 - int (*dma_can_transfer)(struct NCR_ESP *, Scsi_Cmnd *); 422 - void (*dma_dump_state)(struct NCR_ESP *); 423 - void (*dma_init_read)(struct NCR_ESP *, __u32, int); 424 - void (*dma_init_write)(struct NCR_ESP *, __u32, int); 425 - void (*dma_ints_off)(struct NCR_ESP *); 426 - void (*dma_ints_on)(struct NCR_ESP *); 427 - int (*dma_irq_p)(struct NCR_ESP *); 428 - int (*dma_ports_p)(struct NCR_ESP *); 429 - void (*dma_setup)(struct NCR_ESP *, __u32, int, int); 430 - 431 - /* Optional functions (i.e. may be initialized to 0) */ 432 - void (*dma_barrier)(struct NCR_ESP *); 433 - void (*dma_drain)(struct NCR_ESP *); 434 - void (*dma_invalidate)(struct NCR_ESP *); 435 - void (*dma_irq_entry)(struct NCR_ESP *); 436 - void (*dma_irq_exit)(struct NCR_ESP *); 437 - void (*dma_led_off)(struct NCR_ESP *); 438 - void (*dma_led_on)(struct NCR_ESP *); 439 - void (*dma_poll)(struct NCR_ESP *, unsigned char *); 440 - void (*dma_reset)(struct NCR_ESP *); 441 - 442 - /* Optional virtual DMA functions */ 443 - void (*dma_mmu_get_scsi_one)(struct NCR_ESP *, Scsi_Cmnd *); 444 - void (*dma_mmu_get_scsi_sgl)(struct NCR_ESP *, Scsi_Cmnd *); 445 - void (*dma_mmu_release_scsi_one)(struct NCR_ESP *, Scsi_Cmnd *); 446 - void (*dma_mmu_release_scsi_sgl)(struct NCR_ESP *, Scsi_Cmnd *); 447 - void (*dma_advance_sg)(Scsi_Cmnd *); 448 - }; 449 - 450 - /* Bitfield meanings for the above registers. 
*/ 451 - 452 - /* ESP config reg 1, read-write, found on all ESP chips */ 453 - #define ESP_CONFIG1_ID 0x07 /* My BUS ID bits */ 454 - #define ESP_CONFIG1_CHTEST 0x08 /* Enable ESP chip tests */ 455 - #define ESP_CONFIG1_PENABLE 0x10 /* Enable parity checks */ 456 - #define ESP_CONFIG1_PARTEST 0x20 /* Parity test mode enabled? */ 457 - #define ESP_CONFIG1_SRRDISAB 0x40 /* Disable SCSI reset reports */ 458 - #define ESP_CONFIG1_SLCABLE 0x80 /* Enable slow cable mode */ 459 - 460 - /* ESP config reg 2, read-write, found only on esp100a+esp200+esp236+fsc chips */ 461 - #define ESP_CONFIG2_DMAPARITY 0x01 /* enable DMA Parity (200,236,fsc) */ 462 - #define ESP_CONFIG2_REGPARITY 0x02 /* enable reg Parity (200,236,fsc) */ 463 - #define ESP_CONFIG2_BADPARITY 0x04 /* Bad parity target abort */ 464 - #define ESP_CONFIG2_SCSI2ENAB 0x08 /* Enable SCSI-2 features (tmode only) */ 465 - #define ESP_CONFIG2_HI 0x10 /* High Impedance DREQ ??? */ 466 - #define ESP_CONFIG2_HMEFENAB 0x10 /* HME features enable */ 467 - #define ESP_CONFIG2_BCM 0x20 /* Enable byte-ctrl (236,fsc) */ 468 - #define ESP_CONFIG2_FENAB 0x40 /* Enable features (fas100,esp216,fsc) */ 469 - #define ESP_CONFIG2_SPL 0x40 /* Enable status-phase latch (esp236) */ 470 - #define ESP_CONFIG2_RFB 0x80 /* Reserve FIFO byte (fsc) */ 471 - #define ESP_CONFIG2_MAGIC 0xe0 /* Invalid bits... 
*/ 472 - 473 - /* ESP config register 3 read-write, found only esp236+fas236+fas100a+fsc chips */ 474 - #define ESP_CONFIG3_FCLOCK 0x01 /* FAST SCSI clock rate (esp100a/fas366) */ 475 - #define ESP_CONFIG3_TEM 0x01 /* Enable thresh-8 mode (esp/fas236/fsc) */ 476 - #define ESP_CONFIG3_FAST 0x02 /* Enable FAST SCSI (esp100a) */ 477 - #define ESP_CONFIG3_ADMA 0x02 /* Enable alternate-dma (esp/fas236/fsc) */ 478 - #define ESP_CONFIG3_TENB 0x04 /* group2 SCSI2 support (esp100a) */ 479 - #define ESP_CONFIG3_SRB 0x04 /* Save residual byte (esp/fas236/fsc) */ 480 - #define ESP_CONFIG3_TMS 0x08 /* Three-byte msg's ok (esp100a) */ 481 - #define ESP_CONFIG3_FCLK 0x08 /* Fast SCSI clock rate (esp/fas236/fsc) */ 482 - #define ESP_CONFIG3_IDMSG 0x10 /* ID message checking (esp100a) */ 483 - #define ESP_CONFIG3_FSCSI 0x10 /* Enable FAST SCSI (esp/fas236/fsc) */ 484 - #define ESP_CONFIG3_GTM 0x20 /* group2 SCSI2 support (esp/fas236/fsc) */ 485 - #define ESP_CONFIG3_TBMS 0x40 /* Three-byte msg's ok (esp/fas236/fsc) */ 486 - #define ESP_CONFIG3_IMS 0x80 /* ID msg chk'ng (esp/fas236/fsc) */ 487 - 488 - /* ESP config register 4 read-write, found only on fsc chips */ 489 - #define ESP_CONFIG4_BBTE 0x01 /* Back-to-Back transfer enable */ 490 - #define ESP_CONFIG4_TEST 0x02 /* Transfer counter test mode */ 491 - #define ESP_CONFIG4_EAN 0x04 /* Enable Active Negotiation */ 492 - 493 - /* ESP command register read-write */ 494 - /* Group 1 commands: These may be sent at any point in time to the ESP 495 - * chip. None of them can generate interrupts 'cept 496 - * the "SCSI bus reset" command if you have not disabled 497 - * SCSI reset interrupts in the config1 ESP register. 498 - */ 499 - #define ESP_CMD_NULL 0x00 /* Null command, ie. 
a nop */ 500 - #define ESP_CMD_FLUSH 0x01 /* FIFO Flush */ 501 - #define ESP_CMD_RC 0x02 /* Chip reset */ 502 - #define ESP_CMD_RS 0x03 /* SCSI bus reset */ 503 - 504 - /* Group 2 commands: ESP must be an initiator and connected to a target 505 - * for these commands to work. 506 - */ 507 - #define ESP_CMD_TI 0x10 /* Transfer Information */ 508 - #define ESP_CMD_ICCSEQ 0x11 /* Initiator cmd complete sequence */ 509 - #define ESP_CMD_MOK 0x12 /* Message okie-dokie */ 510 - #define ESP_CMD_TPAD 0x18 /* Transfer Pad */ 511 - #define ESP_CMD_SATN 0x1a /* Set ATN */ 512 - #define ESP_CMD_RATN 0x1b /* De-assert ATN */ 513 - 514 - /* Group 3 commands: ESP must be in the MSGOUT or MSGIN state and be connected 515 - * to a target as the initiator for these commands to work. 516 - */ 517 - #define ESP_CMD_SMSG 0x20 /* Send message */ 518 - #define ESP_CMD_SSTAT 0x21 /* Send status */ 519 - #define ESP_CMD_SDATA 0x22 /* Send data */ 520 - #define ESP_CMD_DSEQ 0x23 /* Discontinue Sequence */ 521 - #define ESP_CMD_TSEQ 0x24 /* Terminate Sequence */ 522 - #define ESP_CMD_TCCSEQ 0x25 /* Target cmd cmplt sequence */ 523 - #define ESP_CMD_DCNCT 0x27 /* Disconnect */ 524 - #define ESP_CMD_RMSG 0x28 /* Receive Message */ 525 - #define ESP_CMD_RCMD 0x29 /* Receive Command */ 526 - #define ESP_CMD_RDATA 0x2a /* Receive Data */ 527 - #define ESP_CMD_RCSEQ 0x2b /* Receive cmd sequence */ 528 - 529 - /* Group 4 commands: The ESP must be in the disconnected state and must 530 - * not be connected to any targets as initiator for 531 - * these commands to work. 
532 - */ 533 - #define ESP_CMD_RSEL 0x40 /* Reselect */ 534 - #define ESP_CMD_SEL 0x41 /* Select w/o ATN */ 535 - #define ESP_CMD_SELA 0x42 /* Select w/ATN */ 536 - #define ESP_CMD_SELAS 0x43 /* Select w/ATN & STOP */ 537 - #define ESP_CMD_ESEL 0x44 /* Enable selection */ 538 - #define ESP_CMD_DSEL 0x45 /* Disable selections */ 539 - #define ESP_CMD_SA3 0x46 /* Select w/ATN3 */ 540 - #define ESP_CMD_RSEL3 0x47 /* Reselect3 */ 541 - 542 - /* This bit enables the ESP's DMA */ 543 - #define ESP_CMD_DMA 0x80 /* Do DMA? */ 544 - 545 - /* ESP status register read-only */ 546 - #define ESP_STAT_PIO 0x01 /* IO phase bit */ 547 - #define ESP_STAT_PCD 0x02 /* CD phase bit */ 548 - #define ESP_STAT_PMSG 0x04 /* MSG phase bit */ 549 - #define ESP_STAT_PMASK 0x07 /* Mask of phase bits */ 550 - #define ESP_STAT_TDONE 0x08 /* Transfer Completed */ 551 - #define ESP_STAT_TCNT 0x10 /* Transfer Counter Is Zero */ 552 - #define ESP_STAT_PERR 0x20 /* Parity error */ 553 - #define ESP_STAT_SPAM 0x40 /* Real bad error */ 554 - /* This indicates the 'interrupt pending' condition, it is a reserved 555 - * bit on old revs of the ESP (ESP100, ESP100A, FAS100A). 556 - */ 557 - #define ESP_STAT_INTR 0x80 /* Interrupt */ 558 - 559 - /* The status register can be masked with ESP_STAT_PMASK and compared 560 - * with the following values to determine the current phase the ESP 561 - * (at least thinks it) is in. For our purposes we also add our own 562 - * software 'done' bit for our phase management engine. 
563 - */ 564 - #define ESP_DOP (0) /* Data Out */ 565 - #define ESP_DIP (ESP_STAT_PIO) /* Data In */ 566 - #define ESP_CMDP (ESP_STAT_PCD) /* Command */ 567 - #define ESP_STATP (ESP_STAT_PCD|ESP_STAT_PIO) /* Status */ 568 - #define ESP_MOP (ESP_STAT_PMSG|ESP_STAT_PCD) /* Message Out */ 569 - #define ESP_MIP (ESP_STAT_PMSG|ESP_STAT_PCD|ESP_STAT_PIO) /* Message In */ 570 - 571 - /* ESP interrupt register read-only */ 572 - #define ESP_INTR_S 0x01 /* Select w/o ATN */ 573 - #define ESP_INTR_SATN 0x02 /* Select w/ATN */ 574 - #define ESP_INTR_RSEL 0x04 /* Reselected */ 575 - #define ESP_INTR_FDONE 0x08 /* Function done */ 576 - #define ESP_INTR_BSERV 0x10 /* Bus service */ 577 - #define ESP_INTR_DC 0x20 /* Disconnect */ 578 - #define ESP_INTR_IC 0x40 /* Illegal command given */ 579 - #define ESP_INTR_SR 0x80 /* SCSI bus reset detected */ 580 - 581 - /* Interrupt status macros */ 582 - #define ESP_SRESET_IRQ(esp) ((esp)->intreg & (ESP_INTR_SR)) 583 - #define ESP_ILLCMD_IRQ(esp) ((esp)->intreg & (ESP_INTR_IC)) 584 - #define ESP_SELECT_WITH_ATN_IRQ(esp) ((esp)->intreg & (ESP_INTR_SATN)) 585 - #define ESP_SELECT_WITHOUT_ATN_IRQ(esp) ((esp)->intreg & (ESP_INTR_S)) 586 - #define ESP_SELECTION_IRQ(esp) ((ESP_SELECT_WITH_ATN_IRQ(esp)) || \ 587 - (ESP_SELECT_WITHOUT_ATN_IRQ(esp))) 588 - #define ESP_RESELECTION_IRQ(esp) ((esp)->intreg & (ESP_INTR_RSEL)) 589 - 590 - /* ESP sequence step register read-only */ 591 - #define ESP_STEP_VBITS 0x07 /* Valid bits */ 592 - #define ESP_STEP_ASEL 0x00 /* Selection&Arbitrate cmplt */ 593 - #define ESP_STEP_SID 0x01 /* One msg byte sent */ 594 - #define ESP_STEP_NCMD 0x02 /* Was not in command phase */ 595 - #define ESP_STEP_PPC 0x03 /* Early phase chg caused cmnd 596 - * bytes to be lost 597 - */ 598 - #define ESP_STEP_FINI4 0x04 /* Command was sent ok */ 599 - 600 - /* Ho hum, some ESP's set the step register to this as well... 
*/ 601 - #define ESP_STEP_FINI5 0x05 602 - #define ESP_STEP_FINI6 0x06 603 - #define ESP_STEP_FINI7 0x07 604 - #define ESP_STEP_SOM 0x08 /* Synchronous Offset Max */ 605 - 606 - /* ESP chip-test register read-write */ 607 - #define ESP_TEST_TARG 0x01 /* Target test mode */ 608 - #define ESP_TEST_INI 0x02 /* Initiator test mode */ 609 - #define ESP_TEST_TS 0x04 /* Tristate test mode */ 610 - 611 - /* ESP unique ID register read-only, found on fas236+fas100a+fsc only */ 612 - #define ESP_UID_F100A 0x00 /* FAS100A */ 613 - #define ESP_UID_F236 0x02 /* FAS236 */ 614 - #define ESP_UID_FSC 0xa2 /* NCR53CF9x-2 */ 615 - #define ESP_UID_REV 0x07 /* ESP revision */ 616 - #define ESP_UID_FAM 0xf8 /* ESP family */ 617 - 618 - /* ESP fifo flags register read-only */ 619 - /* Note that the following implies a 16 byte FIFO on the ESP. */ 620 - #define ESP_FF_FBYTES 0x1f /* Num bytes in FIFO */ 621 - #define ESP_FF_ONOTZERO 0x20 /* offset ctr not zero (esp100,fsc) */ 622 - #define ESP_FF_SSTEP 0xe0 /* Sequence step */ 623 - 624 - /* ESP clock conversion factor register write-only */ 625 - #define ESP_CCF_F0 0x00 /* 35.01MHz - 40MHz */ 626 - #define ESP_CCF_NEVER 0x01 /* Set it to this and die */ 627 - #define ESP_CCF_F2 0x02 /* 10MHz */ 628 - #define ESP_CCF_F3 0x03 /* 10.01MHz - 15MHz */ 629 - #define ESP_CCF_F4 0x04 /* 15.01MHz - 20MHz */ 630 - #define ESP_CCF_F5 0x05 /* 20.01MHz - 25MHz */ 631 - #define ESP_CCF_F6 0x06 /* 25.01MHz - 30MHz */ 632 - #define ESP_CCF_F7 0x07 /* 30.01MHz - 35MHz */ 633 - 634 - #define ESP_BUS_TIMEOUT 275 /* In milli-seconds */ 635 - #define ESP_TIMEO_CONST 8192 636 - #define FSC_TIMEO_CONST 7668 637 - #define ESP_NEG_DEFP(mhz, cfact) \ 638 - ((ESP_BUS_TIMEOUT * ((mhz) / 1000)) / (8192 * (cfact))) 639 - #define FSC_NEG_DEFP(mhz, cfact) \ 640 - ((ESP_BUS_TIMEOUT * ((mhz) / 1000)) / (7668 * (cfact))) 641 - #define ESP_MHZ_TO_CYCLE(mhertz) ((1000000000) / ((mhertz) / 1000)) 642 - #define ESP_TICK(ccf, cycle) ((7682 * (ccf) * (cycle) / 1000)) 643 - 644 - 
645 - /* UGLY, UGLY, UGLY! */ 646 - extern int nesps, esps_in_use, esps_running; 647 - 648 - /* For our interrupt engine. */ 649 - #define for_each_esp(esp) \ 650 - for((esp) = espchain; (esp); (esp) = (esp)->next) 651 - 652 - 653 - /* External functions */ 654 - extern void esp_bootup_reset(struct NCR_ESP *esp, struct ESP_regs *eregs); 655 - extern struct NCR_ESP *esp_allocate(struct scsi_host_template *, void *, int); 656 - extern void esp_deallocate(struct NCR_ESP *); 657 - extern void esp_release(void); 658 - extern void esp_initialize(struct NCR_ESP *); 659 - extern irqreturn_t esp_intr(int, void *); 660 - extern const char *esp_info(struct Scsi_Host *); 661 - extern int esp_queue(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *)); 662 - extern int esp_abort(Scsi_Cmnd *); 663 - extern int esp_reset(Scsi_Cmnd *); 664 - extern int esp_proc_info(struct Scsi_Host *shost, char *buffer, char **start, off_t offset, int length, 665 - int inout); 666 - extern int esp_slave_alloc(struct scsi_device *); 667 - extern void esp_slave_destroy(struct scsi_device *); 668 - #endif /* !(NCR53C9X_H) */
+30 -51
drivers/scsi/aacraid/aachba.c
··· 859 859 le32_to_cpu(dev->adapter_info.serial[0]), cid); 860 860 } 861 861 862 - static void set_sense(u8 *sense_buf, u8 sense_key, u8 sense_code, 863 - u8 a_sense_code, u8 incorrect_length, 864 - u8 bit_pointer, u16 field_pointer, 865 - u32 residue) 862 + static inline void set_sense(struct sense_data *sense_data, u8 sense_key, 863 + u8 sense_code, u8 a_sense_code, u8 bit_pointer, u16 field_pointer) 866 864 { 867 - sense_buf[0] = 0xF0; /* Sense data valid, err code 70h (current error) */ 865 + u8 *sense_buf = (u8 *)sense_data; 866 + /* Sense data valid, err code 70h */ 867 + sense_buf[0] = 0x70; /* No info field */ 868 868 sense_buf[1] = 0; /* Segment number, always zero */ 869 869 870 - if (incorrect_length) { 871 - sense_buf[2] = sense_key | 0x20;/* Set ILI bit | sense key */ 872 - sense_buf[3] = BYTE3(residue); 873 - sense_buf[4] = BYTE2(residue); 874 - sense_buf[5] = BYTE1(residue); 875 - sense_buf[6] = BYTE0(residue); 876 - } else 877 - sense_buf[2] = sense_key; /* Sense key */ 878 - 879 - if (sense_key == ILLEGAL_REQUEST) 880 - sense_buf[7] = 10; /* Additional sense length */ 881 - else 882 - sense_buf[7] = 6; /* Additional sense length */ 870 + sense_buf[2] = sense_key; /* Sense key */ 883 871 884 872 sense_buf[12] = sense_code; /* Additional sense code */ 885 873 sense_buf[13] = a_sense_code; /* Additional sense code qualifier */ 874 + 886 875 if (sense_key == ILLEGAL_REQUEST) { 887 - sense_buf[15] = 0; 876 + sense_buf[7] = 10; /* Additional sense length */ 888 877 889 - if (sense_code == SENCODE_INVALID_PARAM_FIELD) 890 - sense_buf[15] = 0x80;/* Std sense key specific field */ 878 + sense_buf[15] = bit_pointer; 891 879 /* Illegal parameter is in the parameter block */ 892 - 893 880 if (sense_code == SENCODE_INVALID_CDB_FIELD) 894 - sense_buf[15] = 0xc0;/* Std sense key specific field */ 881 + sense_buf[15] |= 0xc0;/* Std sense key specific field */ 895 882 /* Illegal parameter is in the CDB block */ 896 - sense_buf[15] |= bit_pointer; 897 883 
sense_buf[16] = field_pointer >> 8; /* MSB */ 898 884 sense_buf[17] = field_pointer; /* LSB */ 899 - } 885 + } else 886 + sense_buf[7] = 6; /* Additional sense length */ 900 887 } 901 888 902 889 static int aac_bounds_32(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba) ··· 893 906 dprintk((KERN_DEBUG "aacraid: Illegal lba\n")); 894 907 cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | 895 908 SAM_STAT_CHECK_CONDITION; 896 - set_sense((u8 *) &dev->fsa_dev[cid].sense_data, 897 - HARDWARE_ERROR, 898 - SENCODE_INTERNAL_TARGET_FAILURE, 899 - ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0, 900 - 0, 0); 909 + set_sense(&dev->fsa_dev[cid].sense_data, 910 + HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE, 911 + ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0); 901 912 memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data, 902 913 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), 903 914 SCSI_SENSE_BUFFERSIZE)); ··· 1505 1520 le32_to_cpu(readreply->status)); 1506 1521 #endif 1507 1522 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION; 1508 - set_sense((u8 *) &dev->fsa_dev[cid].sense_data, 1509 - HARDWARE_ERROR, 1510 - SENCODE_INTERNAL_TARGET_FAILURE, 1511 - ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0, 1512 - 0, 0); 1523 + set_sense(&dev->fsa_dev[cid].sense_data, 1524 + HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE, 1525 + ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0); 1513 1526 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, 1514 1527 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), 1515 1528 SCSI_SENSE_BUFFERSIZE)); ··· 1716 1733 le32_to_cpu(synchronizereply->status)); 1717 1734 cmd->result = DID_OK << 16 | 1718 1735 COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION; 1719 - set_sense((u8 *)&dev->fsa_dev[cid].sense_data, 1720 - HARDWARE_ERROR, 1721 - SENCODE_INTERNAL_TARGET_FAILURE, 1722 - ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0, 1723 - 0, 0); 1736 + set_sense(&dev->fsa_dev[cid].sense_data, 1737 + HARDWARE_ERROR, 
SENCODE_INTERNAL_TARGET_FAILURE, 1738 + ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0); 1724 1739 memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data, 1725 1740 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), 1726 1741 SCSI_SENSE_BUFFERSIZE)); ··· 1926 1945 { 1927 1946 dprintk((KERN_WARNING "Only INQUIRY & TUR command supported for controller, rcvd = 0x%x.\n", scsicmd->cmnd[0])); 1928 1947 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION; 1929 - set_sense((u8 *) &dev->fsa_dev[cid].sense_data, 1930 - ILLEGAL_REQUEST, 1931 - SENCODE_INVALID_COMMAND, 1932 - ASENCODE_INVALID_COMMAND, 0, 0, 0, 0); 1948 + set_sense(&dev->fsa_dev[cid].sense_data, 1949 + ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND, 1950 + ASENCODE_INVALID_COMMAND, 0, 0); 1933 1951 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, 1934 1952 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), 1935 1953 SCSI_SENSE_BUFFERSIZE)); ··· 1975 1995 scsicmd->result = DID_OK << 16 | 1976 1996 COMMAND_COMPLETE << 8 | 1977 1997 SAM_STAT_CHECK_CONDITION; 1978 - set_sense((u8 *) &dev->fsa_dev[cid].sense_data, 1979 - ILLEGAL_REQUEST, 1980 - SENCODE_INVALID_CDB_FIELD, 1981 - ASENCODE_NO_SENSE, 0, 7, 2, 0); 1998 + set_sense(&dev->fsa_dev[cid].sense_data, 1999 + ILLEGAL_REQUEST, SENCODE_INVALID_CDB_FIELD, 2000 + ASENCODE_NO_SENSE, 7, 2); 1982 2001 memcpy(scsicmd->sense_buffer, 1983 2002 &dev->fsa_dev[cid].sense_data, 1984 2003 min_t(size_t, ··· 2233 2254 */ 2234 2255 dprintk((KERN_WARNING "Unhandled SCSI Command: 0x%x.\n", scsicmd->cmnd[0])); 2235 2256 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION; 2236 - set_sense((u8 *) &dev->fsa_dev[cid].sense_data, 2237 - ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND, 2238 - ASENCODE_INVALID_COMMAND, 0, 0, 0, 0); 2257 + set_sense(&dev->fsa_dev[cid].sense_data, 2258 + ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND, 2259 + ASENCODE_INVALID_COMMAND, 0, 0); 2239 2260 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, 
2240 2261 min_t(size_t, 2241 2262 sizeof(dev->fsa_dev[cid].sense_data),
+14 -12
drivers/scsi/aacraid/commctrl.c
··· 243 243 * Search the list of AdapterFibContext addresses on the adapter 244 244 * to be sure this is a valid address 245 245 */ 246 + spin_lock_irqsave(&dev->fib_lock, flags); 246 247 entry = dev->fib_list.next; 247 248 fibctx = NULL; 248 249 ··· 252 251 /* 253 252 * Extract the AdapterFibContext from the Input parameters. 254 253 */ 255 - if (fibctx->unique == f.fibctx) { /* We found a winner */ 254 + if (fibctx->unique == f.fibctx) { /* We found a winner */ 256 255 break; 257 256 } 258 257 entry = entry->next; 259 258 fibctx = NULL; 260 259 } 261 260 if (!fibctx) { 261 + spin_unlock_irqrestore(&dev->fib_lock, flags); 262 262 dprintk ((KERN_INFO "Fib Context not found\n")); 263 263 return -EINVAL; 264 264 } 265 265 266 266 if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) || 267 267 (fibctx->size != sizeof(struct aac_fib_context))) { 268 + spin_unlock_irqrestore(&dev->fib_lock, flags); 268 269 dprintk ((KERN_INFO "Fib Context corrupt?\n")); 269 270 return -EINVAL; 270 271 } 271 272 status = 0; 272 - spin_lock_irqsave(&dev->fib_lock, flags); 273 273 /* 274 274 * If there are no fibs to send back, then either wait or return 275 275 * -EAGAIN ··· 416 414 * @arg: ioctl arguments 417 415 * 418 416 * This routine returns the driver version. 419 - * Under Linux, there have been no version incompatibilities, so this is 420 - * simple! 417 + * Under Linux, there have been no version incompatibilities, so this is 418 + * simple! 
421 419 */ 422 420 423 421 static int check_revision(struct aac_dev *dev, void __user *arg) ··· 465 463 u32 data_dir; 466 464 void __user *sg_user[32]; 467 465 void *sg_list[32]; 468 - u32 sg_indx = 0; 466 + u32 sg_indx = 0; 469 467 u32 byte_count = 0; 470 468 u32 actual_fibsize64, actual_fibsize = 0; 471 469 int i; ··· 519 517 // Fix up srb for endian and force some values 520 518 521 519 srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); // Force this 522 - srbcmd->channel = cpu_to_le32(user_srbcmd->channel); 520 + srbcmd->channel = cpu_to_le32(user_srbcmd->channel); 523 521 srbcmd->id = cpu_to_le32(user_srbcmd->id); 524 - srbcmd->lun = cpu_to_le32(user_srbcmd->lun); 525 - srbcmd->timeout = cpu_to_le32(user_srbcmd->timeout); 526 - srbcmd->flags = cpu_to_le32(flags); 522 + srbcmd->lun = cpu_to_le32(user_srbcmd->lun); 523 + srbcmd->timeout = cpu_to_le32(user_srbcmd->timeout); 524 + srbcmd->flags = cpu_to_le32(flags); 527 525 srbcmd->retry_limit = 0; // Obsolete parameter 528 526 srbcmd->cdb_size = cpu_to_le32(user_srbcmd->cdb_size); 529 527 memcpy(srbcmd->cdb, user_srbcmd->cdb, sizeof(srbcmd->cdb)); ··· 788 786 pci_info.bus = dev->pdev->bus->number; 789 787 pci_info.slot = PCI_SLOT(dev->pdev->devfn); 790 788 791 - if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) { 792 - dprintk((KERN_DEBUG "aacraid: Could not copy pci info\n")); 793 - return -EFAULT; 789 + if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) { 790 + dprintk((KERN_DEBUG "aacraid: Could not copy pci info\n")); 791 + return -EFAULT; 794 792 } 795 793 return 0; 796 794 }
+13 -15
drivers/scsi/aacraid/linit.c
··· 1130 1130 if (error < 0) 1131 1131 goto out_deinit; 1132 1132 1133 - if (!(aac->adapter_info.options & AAC_OPT_NEW_COMM)) { 1134 - error = pci_set_dma_max_seg_size(pdev, 65536); 1135 - if (error) 1136 - goto out_deinit; 1137 - } 1138 - 1139 1133 /* 1140 1134 * Lets override negotiations and drop the maximum SG limit to 34 1141 1135 */ 1142 1136 if ((aac_drivers[index].quirks & AAC_QUIRK_34SG) && 1143 - (aac->scsi_host_ptr->sg_tablesize > 34)) { 1144 - aac->scsi_host_ptr->sg_tablesize = 34; 1145 - aac->scsi_host_ptr->max_sectors 1146 - = (aac->scsi_host_ptr->sg_tablesize * 8) + 112; 1137 + (shost->sg_tablesize > 34)) { 1138 + shost->sg_tablesize = 34; 1139 + shost->max_sectors = (shost->sg_tablesize * 8) + 112; 1147 1140 } 1148 1141 1149 1142 if ((aac_drivers[index].quirks & AAC_QUIRK_17SG) && 1150 - (aac->scsi_host_ptr->sg_tablesize > 17)) { 1151 - aac->scsi_host_ptr->sg_tablesize = 17; 1152 - aac->scsi_host_ptr->max_sectors 1153 - = (aac->scsi_host_ptr->sg_tablesize * 8) + 112; 1143 + (shost->sg_tablesize > 17)) { 1144 + shost->sg_tablesize = 17; 1145 + shost->max_sectors = (shost->sg_tablesize * 8) + 112; 1154 1146 } 1155 1147 1148 + error = pci_set_dma_max_seg_size(pdev, 1149 + (aac->adapter_info.options & AAC_OPT_NEW_COMM) ? 1150 + (shost->max_sectors << 9) : 65536); 1151 + if (error) 1152 + goto out_deinit; 1153 + 1156 1154 /* 1157 - * Firware printf works only with older firmware. 1155 + * Firmware printf works only with older firmware. 1158 1156 */ 1159 1157 if (aac_drivers[index].quirks & AAC_QUIRK_34SG) 1160 1158 aac->printf_enabled = 1;
+3 -3
drivers/scsi/advansys.c
··· 12261 12261 /* 12262 12262 * Write the EEPROM from 'cfg_buf'. 12263 12263 */ 12264 - void __devinit 12264 + static void __devinit 12265 12265 AdvSet3550EEPConfig(AdvPortAddr iop_base, ADVEEP_3550_CONFIG *cfg_buf) 12266 12266 { 12267 12267 ushort *wbuf; ··· 12328 12328 /* 12329 12329 * Write the EEPROM from 'cfg_buf'. 12330 12330 */ 12331 - void __devinit 12331 + static void __devinit 12332 12332 AdvSet38C0800EEPConfig(AdvPortAddr iop_base, ADVEEP_38C0800_CONFIG *cfg_buf) 12333 12333 { 12334 12334 ushort *wbuf; ··· 12395 12395 /* 12396 12396 * Write the EEPROM from 'cfg_buf'. 12397 12397 */ 12398 - void __devinit 12398 + static void __devinit 12399 12399 AdvSet38C1600EEPConfig(AdvPortAddr iop_base, ADVEEP_38C1600_CONFIG *cfg_buf) 12400 12400 { 12401 12401 ushort *wbuf;
+3 -1
drivers/scsi/arcmsr/arcmsr.h
··· 48 48 /*The limit of outstanding scsi command that firmware can handle*/ 49 49 #define ARCMSR_MAX_OUTSTANDING_CMD 256 50 50 #define ARCMSR_MAX_FREECCB_NUM 320 51 - #define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2007/08/30" 51 + #define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2007/12/24" 52 52 #define ARCMSR_SCSI_INITIATOR_ID 255 53 53 #define ARCMSR_MAX_XFER_SECTORS 512 54 54 #define ARCMSR_MAX_XFER_SECTORS_B 4096 ··· 248 248 #define ARCMSR_MESSAGE_START_BGRB 0x00060008 249 249 #define ARCMSR_MESSAGE_START_DRIVER_MODE 0x000E0008 250 250 #define ARCMSR_MESSAGE_SET_POST_WINDOW 0x000F0008 251 + #define ARCMSR_MESSAGE_ACTIVE_EOI_MODE 0x00100008 251 252 /* ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK */ 252 253 #define ARCMSR_MESSAGE_FIRMWARE_OK 0x80000000 253 254 /* ioctl transfer */ ··· 257 256 #define ARCMSR_DRV2IOP_DATA_READ_OK 0x00000002 258 257 #define ARCMSR_DRV2IOP_CDB_POSTED 0x00000004 259 258 #define ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED 0x00000008 259 + #define ARCMSR_DRV2IOP_END_OF_INTERRUPT 0x00000010 260 260 261 261 /* data tunnel buffer between user space program and its firmware */ 262 262 /* user space data to iop 128bytes */
+61 -26
drivers/scsi/arcmsr/arcmsr_hba.c
··· 315 315 (0x20 - ((unsigned long)dma_coherent_handle & 0x1F)); 316 316 } 317 317 318 - reg = (struct MessageUnit_B *)(dma_coherent + 319 - ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock)); 320 - 321 318 dma_addr = dma_coherent_handle; 322 319 ccb_tmp = (struct CommandControlBlock *)dma_coherent; 323 320 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) { ··· 368 371 369 372 out: 370 373 dma_free_coherent(&acb->pdev->dev, 371 - ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20, 372 - acb->dma_coherent, acb->dma_coherent_handle); 374 + (ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20 + 375 + sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle); 373 376 return -ENOMEM; 374 377 } 375 378 ··· 506 509 & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) { 507 510 writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN 508 511 , reg->iop2drv_doorbell_reg); 512 + writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg); 509 513 return 0x00; 510 514 } 511 515 msleep(10); ··· 746 748 , ccb->startdone 747 749 , atomic_read(&acb->ccboutstandingcount)); 748 750 } 751 + else 749 752 arcmsr_report_ccb_state(acb, ccb, flag_ccb); 750 753 } 751 754 ··· 885 886 } 886 887 } 887 888 888 - static void arcmsr_build_ccb(struct AdapterControlBlock *acb, 889 + static int arcmsr_build_ccb(struct AdapterControlBlock *acb, 889 890 struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd) 890 891 { 891 892 struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb; ··· 905 906 memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len); 906 907 907 908 nseg = scsi_dma_map(pcmd); 909 + if (nseg > ARCMSR_MAX_SG_ENTRIES) 910 + return FAILED; 908 911 BUG_ON(nseg < 0); 909 912 910 913 if (nseg) { ··· 947 946 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE; 948 947 ccb->ccb_flags |= CCB_FLAG_WRITE; 949 948 } 949 + return SUCCESS; 950 950 } 951 951 952 952 static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb) ··· 1038 
1036 switch (acb->adapter_type) { 1039 1037 case ACB_ADAPTER_TYPE_A: { 1040 1038 iounmap(acb->pmuA); 1039 + dma_free_coherent(&acb->pdev->dev, 1040 + ARCMSR_MAX_FREECCB_NUM * sizeof (struct CommandControlBlock) + 0x20, 1041 + acb->dma_coherent, 1042 + acb->dma_coherent_handle); 1041 1043 break; 1042 1044 } 1043 1045 case ACB_ADAPTER_TYPE_B: { 1044 1046 struct MessageUnit_B *reg = acb->pmuB; 1045 1047 iounmap(reg->drv2iop_doorbell_reg - ARCMSR_DRV2IOP_DOORBELL); 1046 1048 iounmap(reg->ioctl_wbuffer_reg - ARCMSR_IOCTL_WBUFFER); 1049 + dma_free_coherent(&acb->pdev->dev, 1050 + (ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20 + 1051 + sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle); 1047 1052 } 1048 1053 } 1049 - dma_free_coherent(&acb->pdev->dev, 1050 - ARCMSR_MAX_FREECCB_NUM * sizeof (struct CommandControlBlock) + 0x20, 1051 - acb->dma_coherent, 1052 - acb->dma_coherent_handle); 1054 + 1053 1055 } 1054 1056 1055 1057 void arcmsr_iop_message_read(struct AdapterControlBlock *acb) ··· 1279 1273 return 1; 1280 1274 1281 1275 writel(~outbound_doorbell, reg->iop2drv_doorbell_reg); 1282 - 1276 + /*in case the last action of doorbell interrupt clearance is cached, this action can push HW to write down the clear bit*/ 1277 + readl(reg->iop2drv_doorbell_reg); 1278 + writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg); 1283 1279 if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) { 1284 1280 arcmsr_iop2drv_data_wrote_handle(acb); 1285 1281 } ··· 1388 1380 1389 1381 case ARCMSR_MESSAGE_READ_RQBUFFER: { 1390 1382 unsigned long *ver_addr; 1391 - dma_addr_t buf_handle; 1392 1383 uint8_t *pQbuffer, *ptmpQbuffer; 1393 1384 int32_t allxfer_len = 0; 1385 + void *tmp; 1394 1386 1395 - ver_addr = pci_alloc_consistent(acb->pdev, 1032, &buf_handle); 1396 - if (!ver_addr) { 1387 + tmp = kmalloc(1032, GFP_KERNEL|GFP_DMA); 1388 + ver_addr = (unsigned long *)tmp; 1389 + if (!tmp) { 1397 1390 retvalue = ARCMSR_MESSAGE_FAIL; 1398 
1391 goto message_out; 1399 1392 } ··· 1430 1421 memcpy(pcmdmessagefld->messagedatabuffer, (uint8_t *)ver_addr, allxfer_len); 1431 1422 pcmdmessagefld->cmdmessage.Length = allxfer_len; 1432 1423 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; 1433 - pci_free_consistent(acb->pdev, 1032, ver_addr, buf_handle); 1424 + kfree(tmp); 1434 1425 } 1435 1426 break; 1436 1427 1437 1428 case ARCMSR_MESSAGE_WRITE_WQBUFFER: { 1438 1429 unsigned long *ver_addr; 1439 - dma_addr_t buf_handle; 1440 1430 int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex; 1441 1431 uint8_t *pQbuffer, *ptmpuserbuffer; 1432 + void *tmp; 1442 1433 1443 - ver_addr = pci_alloc_consistent(acb->pdev, 1032, &buf_handle); 1444 - if (!ver_addr) { 1434 + tmp = kmalloc(1032, GFP_KERNEL|GFP_DMA); 1435 + ver_addr = (unsigned long *)tmp; 1436 + if (!tmp) { 1445 1437 retvalue = ARCMSR_MESSAGE_FAIL; 1446 1438 goto message_out; 1447 1439 } ··· 1492 1482 retvalue = ARCMSR_MESSAGE_FAIL; 1493 1483 } 1494 1484 } 1495 - pci_free_consistent(acb->pdev, 1032, ver_addr, buf_handle); 1485 + kfree(tmp); 1496 1486 } 1497 1487 break; 1498 1488 ··· 1692 1682 ccb = arcmsr_get_freeccb(acb); 1693 1683 if (!ccb) 1694 1684 return SCSI_MLQUEUE_HOST_BUSY; 1695 - 1696 - arcmsr_build_ccb(acb, ccb, cmd); 1685 + if ( arcmsr_build_ccb( acb, ccb, cmd ) == FAILED ) { 1686 + cmd->result = (DID_ERROR << 16) | (RESERVATION_CONFLICT << 1); 1687 + cmd->scsi_done(cmd); 1688 + return 0; 1689 + } 1697 1690 arcmsr_post_ccb(acb, ccb); 1698 1691 return 0; 1699 1692 } ··· 1857 1844 } 1858 1845 } 1859 1846 1860 - static void arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb, \ 1847 + static void arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb, 1861 1848 struct CommandControlBlock *poll_ccb) 1862 1849 { 1863 1850 struct MessageUnit_B *reg = acb->pmuB; ··· 1891 1878 (acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/ 1892 1879 poll_ccb_done = (ccb == poll_ccb) ? 
1:0; 1893 1880 if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) { 1894 - if (ccb->startdone == ARCMSR_CCB_ABORTED) { 1881 + if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) { 1895 1882 printk(KERN_NOTICE "arcmsr%d: \ 1896 1883 scsi id = %d lun = %d ccb = '0x%p' poll command abort successfully \n" 1897 1884 ,acb->host->host_no ··· 1914 1901 } /*drain reply FIFO*/ 1915 1902 } 1916 1903 1917 - static void arcmsr_polling_ccbdone(struct AdapterControlBlock *acb, \ 1904 + static void arcmsr_polling_ccbdone(struct AdapterControlBlock *acb, 1918 1905 struct CommandControlBlock *poll_ccb) 1919 1906 { 1920 1907 switch (acb->adapter_type) { ··· 2039 2026 do { 2040 2027 firmware_state = readl(reg->iop2drv_doorbell_reg); 2041 2028 } while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0); 2029 + writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg); 2042 2030 } 2043 2031 break; 2044 2032 } ··· 2104 2090 } 2105 2091 } 2106 2092 2093 + static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb) 2094 + { 2095 + switch (acb->adapter_type) { 2096 + case ACB_ADAPTER_TYPE_A: 2097 + return; 2098 + case ACB_ADAPTER_TYPE_B: 2099 + { 2100 + struct MessageUnit_B *reg = acb->pmuB; 2101 + writel(ARCMSR_MESSAGE_ACTIVE_EOI_MODE, reg->drv2iop_doorbell_reg); 2102 + if(arcmsr_hbb_wait_msgint_ready(acb)) { 2103 + printk(KERN_NOTICE "ARCMSR IOP enables EOI_MODE TIMEOUT"); 2104 + return; 2105 + } 2106 + } 2107 + break; 2108 + } 2109 + return; 2110 + } 2111 + 2107 2112 static void arcmsr_iop_init(struct AdapterControlBlock *acb) 2108 2113 { 2109 2114 uint32_t intmask_org; 2110 2115 2111 - arcmsr_wait_firmware_ready(acb); 2112 - arcmsr_iop_confirm(acb); 2113 2116 /* disable all outbound interrupt */ 2114 2117 intmask_org = arcmsr_disable_outbound_ints(acb); 2118 + arcmsr_wait_firmware_ready(acb); 2119 + arcmsr_iop_confirm(acb); 2115 2120 arcmsr_get_firmware_spec(acb); 2116 2121 /*start background rebuild*/ 2117 2122 arcmsr_start_adapter_bgrb(acb); 
2118 2123 /* empty doorbell Qbuffer if door bell ringed */ 2119 2124 arcmsr_clear_doorbell_queue_buffer(acb); 2125 + arcmsr_enable_eoi_mode(acb); 2120 2126 /* enable outbound Post Queue,outbound doorbell Interrupt */ 2121 2127 arcmsr_enable_outbound_ints(acb, intmask_org); 2122 2128 acb->acb_flags |= ACB_F_IOP_INITED; ··· 2309 2275 arcmsr_start_adapter_bgrb(acb); 2310 2276 /* empty doorbell Qbuffer if door bell ringed */ 2311 2277 arcmsr_clear_doorbell_queue_buffer(acb); 2278 + arcmsr_enable_eoi_mode(acb); 2312 2279 /* enable outbound Post Queue,outbound doorbell Interrupt */ 2313 2280 arcmsr_enable_outbound_ints(acb, intmask_org); 2314 2281 acb->acb_flags |= ACB_F_IOP_INITED;
+7 -7
drivers/scsi/arm/acornscsi.c
··· 1790 1790 return 0; 1791 1791 } 1792 1792 1793 - residual = host->SCpnt->request_bufflen - host->scsi.SCp.scsi_xferred; 1793 + residual = scsi_bufflen(host->SCpnt) - host->scsi.SCp.scsi_xferred; 1794 1794 1795 1795 sbic_arm_write(host->scsi.io_port, SBIC_SYNCHTRANSFER, host->device[host->SCpnt->device->id].sync_xfer); 1796 1796 sbic_arm_writenext(host->scsi.io_port, residual >> 16); ··· 2270 2270 case 0x4b: /* -> PHASE_STATUSIN */ 2271 2271 case 0x8b: /* -> PHASE_STATUSIN */ 2272 2272 /* DATA IN -> STATUS */ 2273 - host->scsi.SCp.scsi_xferred = host->SCpnt->request_bufflen - 2273 + host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) - 2274 2274 acornscsi_sbic_xfcount(host); 2275 2275 acornscsi_dma_stop(host); 2276 2276 acornscsi_readstatusbyte(host); ··· 2281 2281 case 0x4e: /* -> PHASE_MSGOUT */ 2282 2282 case 0x8e: /* -> PHASE_MSGOUT */ 2283 2283 /* DATA IN -> MESSAGE OUT */ 2284 - host->scsi.SCp.scsi_xferred = host->SCpnt->request_bufflen - 2284 + host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) - 2285 2285 acornscsi_sbic_xfcount(host); 2286 2286 acornscsi_dma_stop(host); 2287 2287 acornscsi_sendmessage(host); ··· 2291 2291 case 0x4f: /* message in */ 2292 2292 case 0x8f: /* message in */ 2293 2293 /* DATA IN -> MESSAGE IN */ 2294 - host->scsi.SCp.scsi_xferred = host->SCpnt->request_bufflen - 2294 + host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) - 2295 2295 acornscsi_sbic_xfcount(host); 2296 2296 acornscsi_dma_stop(host); 2297 2297 acornscsi_message(host); /* -> PHASE_MSGIN, PHASE_DISCONNECT */ ··· 2319 2319 case 0x4b: /* -> PHASE_STATUSIN */ 2320 2320 case 0x8b: /* -> PHASE_STATUSIN */ 2321 2321 /* DATA OUT -> STATUS */ 2322 - host->scsi.SCp.scsi_xferred = host->SCpnt->request_bufflen - 2322 + host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) - 2323 2323 acornscsi_sbic_xfcount(host); 2324 2324 acornscsi_dma_stop(host); 2325 2325 acornscsi_dma_adjust(host); ··· 2331 2331 case 0x4e: /* -> PHASE_MSGOUT */ 2332 2332 case 0x8e: /* -> 
PHASE_MSGOUT */ 2333 2333 /* DATA OUT -> MESSAGE OUT */ 2334 - host->scsi.SCp.scsi_xferred = host->SCpnt->request_bufflen - 2334 + host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) - 2335 2335 acornscsi_sbic_xfcount(host); 2336 2336 acornscsi_dma_stop(host); 2337 2337 acornscsi_dma_adjust(host); ··· 2342 2342 case 0x4f: /* message in */ 2343 2343 case 0x8f: /* message in */ 2344 2344 /* DATA OUT -> MESSAGE IN */ 2345 - host->scsi.SCp.scsi_xferred = host->SCpnt->request_bufflen - 2345 + host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) - 2346 2346 acornscsi_sbic_xfcount(host); 2347 2347 acornscsi_dma_stop(host); 2348 2348 acornscsi_dma_adjust(host);
+50 -35
drivers/scsi/arm/scsi.h
··· 18 18 * The scatter-gather list handling. This contains all 19 19 * the yucky stuff that needs to be fixed properly. 20 20 */ 21 + 22 + /* 23 + * copy_SCp_to_sg() assumes contiguous allocation at @sg of at most @max 24 + * entries of uninitialized memory. SCp is from scsi-ml and has a valid 25 + * (possibly chained) sg-list 26 + */ 21 27 static inline int copy_SCp_to_sg(struct scatterlist *sg, struct scsi_pointer *SCp, int max) 22 28 { 23 29 int bufs = SCp->buffers_residual; 24 30 31 + /* FIXME: It should be easy for drivers to loop on copy_SCp_to_sg() 32 + * and to remove this BUG_ON. Use min() in its place 33 + */ 25 34 BUG_ON(bufs + 1 > max); 26 35 27 36 sg_set_buf(sg, SCp->ptr, SCp->this_residual); 28 37 29 - if (bufs) 30 - memcpy(sg + 1, SCp->buffer + 1, 31 - sizeof(struct scatterlist) * bufs); 38 + if (bufs) { 39 + struct scatterlist *src_sg; 40 + unsigned i; 41 + 42 + for_each_sg(sg_next(SCp->buffer), src_sg, bufs, i) 43 + *(++sg) = *src_sg; 44 + sg_mark_end(sg); 45 + } 46 + 32 47 return bufs + 1; 33 48 } 34 49 ··· 51 36 { 52 37 int ret = SCp->buffers_residual; 53 38 if (ret) { 54 - SCp->buffer++; 39 + SCp->buffer = sg_next(SCp->buffer); 55 40 SCp->buffers_residual--; 56 41 SCp->ptr = sg_virt(SCp->buffer); 57 42 SCp->this_residual = SCp->buffer->length; ··· 83 68 { 84 69 memset(&SCpnt->SCp, 0, sizeof(struct scsi_pointer)); 85 70 86 - if (SCpnt->use_sg) { 71 + if (scsi_bufflen(SCpnt)) { 87 72 unsigned long len = 0; 88 - int buf; 89 73 90 - SCpnt->SCp.buffer = (struct scatterlist *) SCpnt->request_buffer; 91 - SCpnt->SCp.buffers_residual = SCpnt->use_sg - 1; 74 + SCpnt->SCp.buffer = scsi_sglist(SCpnt); 75 + SCpnt->SCp.buffers_residual = scsi_sg_count(SCpnt) - 1; 92 76 SCpnt->SCp.ptr = sg_virt(SCpnt->SCp.buffer); 93 77 SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length; 94 - SCpnt->SCp.phase = SCpnt->request_bufflen; 78 + SCpnt->SCp.phase = scsi_bufflen(SCpnt); 95 79 96 80 #ifdef BELT_AND_BRACES 97 - /* 98 - * Calculate correct buffer length. 
Some commands 99 - * come in with the wrong request_bufflen. 100 - */ 101 - for (buf = 0; buf <= SCpnt->SCp.buffers_residual; buf++) 102 - len += SCpnt->SCp.buffer[buf].length; 81 + { /* 82 + * Calculate correct buffer length. Some commands 83 + * come in with the wrong scsi_bufflen. 84 + */ 85 + struct scatterlist *sg; 86 + unsigned i, sg_count = scsi_sg_count(SCpnt); 103 87 104 - if (SCpnt->request_bufflen != len) 105 - printk(KERN_WARNING "scsi%d.%c: bad request buffer " 106 - "length %d, should be %ld\n", SCpnt->device->host->host_no, 107 - '0' + SCpnt->device->id, SCpnt->request_bufflen, len); 108 - SCpnt->request_bufflen = len; 88 + scsi_for_each_sg(SCpnt, sg, sg_count, i) 89 + len += sg->length; 90 + 91 + if (scsi_bufflen(SCpnt) != len) { 92 + printk(KERN_WARNING 93 + "scsi%d.%c: bad request buffer " 94 + "length %d, should be %ld\n", 95 + SCpnt->device->host->host_no, 96 + '0' + SCpnt->device->id, 97 + scsi_bufflen(SCpnt), len); 98 + /* 99 + * FIXME: Totally naive fixup. We should abort 100 + * with error 101 + */ 102 + SCpnt->SCp.phase = 103 + min_t(unsigned long, len, 104 + scsi_bufflen(SCpnt)); 105 + } 106 + } 109 107 #endif 110 108 } else { 111 - SCpnt->SCp.ptr = (unsigned char *)SCpnt->request_buffer; 112 - SCpnt->SCp.this_residual = SCpnt->request_bufflen; 113 - SCpnt->SCp.phase = SCpnt->request_bufflen; 114 - } 115 - 116 - /* 117 - * If the upper SCSI layers pass a buffer, but zero length, 118 - * we aren't interested in the buffer pointer. 119 - */ 120 - if (SCpnt->SCp.this_residual == 0 && SCpnt->SCp.ptr) { 121 - #if 0 //def BELT_AND_BRACES 122 - printk(KERN_WARNING "scsi%d.%c: zero length buffer passed for " 123 - "command ", SCpnt->host->host_no, '0' + SCpnt->target); 124 - __scsi_print_command(SCpnt->cmnd); 125 - #endif 126 109 SCpnt->SCp.ptr = NULL; 110 + SCpnt->SCp.this_residual = 0; 111 + SCpnt->SCp.phase = 0; 127 112 } 128 113 }
-353
drivers/scsi/blz1230.c
··· 1 - /* blz1230.c: Driver for Blizzard 1230 SCSI IV Controller. 2 - * 3 - * Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk) 4 - * 5 - * This driver is based on the CyberStorm driver, hence the occasional 6 - * reference to CyberStorm. 7 - */ 8 - 9 - /* TODO: 10 - * 11 - * 1) Figure out how to make a cleaner merge with the sparc driver with regard 12 - * to the caches and the Sparc MMU mapping. 13 - * 2) Make as few routines required outside the generic driver. A lot of the 14 - * routines in this file used to be inline! 15 - */ 16 - 17 - #include <linux/module.h> 18 - 19 - #include <linux/init.h> 20 - #include <linux/kernel.h> 21 - #include <linux/delay.h> 22 - #include <linux/types.h> 23 - #include <linux/string.h> 24 - #include <linux/slab.h> 25 - #include <linux/blkdev.h> 26 - #include <linux/proc_fs.h> 27 - #include <linux/stat.h> 28 - #include <linux/interrupt.h> 29 - 30 - #include "scsi.h" 31 - #include <scsi/scsi_host.h> 32 - #include "NCR53C9x.h" 33 - 34 - #include <linux/zorro.h> 35 - #include <asm/irq.h> 36 - #include <asm/amigaints.h> 37 - #include <asm/amigahw.h> 38 - 39 - #include <asm/pgtable.h> 40 - 41 - #define MKIV 1 42 - 43 - /* The controller registers can be found in the Z2 config area at these 44 - * offsets: 45 - */ 46 - #define BLZ1230_ESP_ADDR 0x8000 47 - #define BLZ1230_DMA_ADDR 0x10000 48 - #define BLZ1230II_ESP_ADDR 0x10000 49 - #define BLZ1230II_DMA_ADDR 0x10021 50 - 51 - 52 - /* The Blizzard 1230 DMA interface 53 - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 54 - * Only two things can be programmed in the Blizzard DMA: 55 - * 1) The data direction is controlled by the status of bit 31 (1 = write) 56 - * 2) The source/dest address (word aligned, shifted one right) in bits 30-0 57 - * 58 - * Program DMA by first latching the highest byte of the address/direction 59 - * (i.e. bits 31-24 of the long word constructed as described in steps 1+2 60 - * above). 
Then write each byte of the address/direction (starting with the 61 - * top byte, working down) to the DMA address register. 62 - * 63 - * Figure out interrupt status by reading the ESP status byte. 64 - */ 65 - struct blz1230_dma_registers { 66 - volatile unsigned char dma_addr; /* DMA address [0x0000] */ 67 - unsigned char dmapad2[0x7fff]; 68 - volatile unsigned char dma_latch; /* DMA latch [0x8000] */ 69 - }; 70 - 71 - struct blz1230II_dma_registers { 72 - volatile unsigned char dma_addr; /* DMA address [0x0000] */ 73 - unsigned char dmapad2[0xf]; 74 - volatile unsigned char dma_latch; /* DMA latch [0x0010] */ 75 - }; 76 - 77 - #define BLZ1230_DMA_WRITE 0x80000000 78 - 79 - static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count); 80 - static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp); 81 - static void dma_dump_state(struct NCR_ESP *esp); 82 - static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length); 83 - static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length); 84 - static void dma_ints_off(struct NCR_ESP *esp); 85 - static void dma_ints_on(struct NCR_ESP *esp); 86 - static int dma_irq_p(struct NCR_ESP *esp); 87 - static int dma_ports_p(struct NCR_ESP *esp); 88 - static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write); 89 - 90 - static volatile unsigned char cmd_buffer[16]; 91 - /* This is where all commands are put 92 - * before they are transferred to the ESP chip 93 - * via PIO. 
94 - */ 95 - 96 - /***************************************************************** Detection */ 97 - int __init blz1230_esp_detect(struct scsi_host_template *tpnt) 98 - { 99 - struct NCR_ESP *esp; 100 - struct zorro_dev *z = NULL; 101 - unsigned long address; 102 - struct ESP_regs *eregs; 103 - unsigned long board; 104 - 105 - #if MKIV 106 - #define REAL_BLZ1230_ID ZORRO_PROD_PHASE5_BLIZZARD_1230_IV_1260 107 - #define REAL_BLZ1230_ESP_ADDR BLZ1230_ESP_ADDR 108 - #define REAL_BLZ1230_DMA_ADDR BLZ1230_DMA_ADDR 109 - #else 110 - #define REAL_BLZ1230_ID ZORRO_PROD_PHASE5_BLIZZARD_1230_II_FASTLANE_Z3_CYBERSCSI_CYBERSTORM060 111 - #define REAL_BLZ1230_ESP_ADDR BLZ1230II_ESP_ADDR 112 - #define REAL_BLZ1230_DMA_ADDR BLZ1230II_DMA_ADDR 113 - #endif 114 - 115 - if ((z = zorro_find_device(REAL_BLZ1230_ID, z))) { 116 - board = z->resource.start; 117 - if (request_mem_region(board+REAL_BLZ1230_ESP_ADDR, 118 - sizeof(struct ESP_regs), "NCR53C9x")) { 119 - /* Do some magic to figure out if the blizzard is 120 - * equipped with a SCSI controller 121 - */ 122 - address = ZTWO_VADDR(board); 123 - eregs = (struct ESP_regs *)(address + REAL_BLZ1230_ESP_ADDR); 124 - esp = esp_allocate(tpnt, (void *)board + REAL_BLZ1230_ESP_ADDR, 125 - 0); 126 - 127 - esp_write(eregs->esp_cfg1, (ESP_CONFIG1_PENABLE | 7)); 128 - udelay(5); 129 - if(esp_read(eregs->esp_cfg1) != (ESP_CONFIG1_PENABLE | 7)) 130 - goto err_out; 131 - 132 - /* Do command transfer with programmed I/O */ 133 - esp->do_pio_cmds = 1; 134 - 135 - /* Required functions */ 136 - esp->dma_bytes_sent = &dma_bytes_sent; 137 - esp->dma_can_transfer = &dma_can_transfer; 138 - esp->dma_dump_state = &dma_dump_state; 139 - esp->dma_init_read = &dma_init_read; 140 - esp->dma_init_write = &dma_init_write; 141 - esp->dma_ints_off = &dma_ints_off; 142 - esp->dma_ints_on = &dma_ints_on; 143 - esp->dma_irq_p = &dma_irq_p; 144 - esp->dma_ports_p = &dma_ports_p; 145 - esp->dma_setup = &dma_setup; 146 - 147 - /* Optional functions */ 148 - 
esp->dma_barrier = 0; 149 - esp->dma_drain = 0; 150 - esp->dma_invalidate = 0; 151 - esp->dma_irq_entry = 0; 152 - esp->dma_irq_exit = 0; 153 - esp->dma_led_on = 0; 154 - esp->dma_led_off = 0; 155 - esp->dma_poll = 0; 156 - esp->dma_reset = 0; 157 - 158 - /* SCSI chip speed */ 159 - esp->cfreq = 40000000; 160 - 161 - /* The DMA registers on the Blizzard are mapped 162 - * relative to the device (i.e. in the same Zorro 163 - * I/O block). 164 - */ 165 - esp->dregs = (void *)(address + REAL_BLZ1230_DMA_ADDR); 166 - 167 - /* ESP register base */ 168 - esp->eregs = eregs; 169 - 170 - /* Set the command buffer */ 171 - esp->esp_command = cmd_buffer; 172 - esp->esp_command_dvma = virt_to_bus((void *)cmd_buffer); 173 - 174 - esp->irq = IRQ_AMIGA_PORTS; 175 - esp->slot = board+REAL_BLZ1230_ESP_ADDR; 176 - if (request_irq(IRQ_AMIGA_PORTS, esp_intr, IRQF_SHARED, 177 - "Blizzard 1230 SCSI IV", esp->ehost)) 178 - goto err_out; 179 - 180 - /* Figure out our scsi ID on the bus */ 181 - esp->scsi_id = 7; 182 - 183 - /* We don't have a differential SCSI-bus. */ 184 - esp->diff = 0; 185 - 186 - esp_initialize(esp); 187 - 188 - printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps, esps_in_use); 189 - esps_running = esps_in_use; 190 - return esps_in_use; 191 - } 192 - } 193 - return 0; 194 - 195 - err_out: 196 - scsi_unregister(esp->ehost); 197 - esp_deallocate(esp); 198 - release_mem_region(board+REAL_BLZ1230_ESP_ADDR, 199 - sizeof(struct ESP_regs)); 200 - return 0; 201 - } 202 - 203 - /************************************************************* DMA Functions */ 204 - static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count) 205 - { 206 - /* Since the Blizzard DMA is fully dedicated to the ESP chip, 207 - * the number of bytes sent (to the ESP chip) equals the number 208 - * of bytes in the FIFO - there is no buffering in the DMA controller. 209 - * XXXX Do I read this right? It is from host to ESP, right? 
210 - */ 211 - return fifo_count; 212 - } 213 - 214 - static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp) 215 - { 216 - /* I don't think there's any limit on the Blizzard DMA. So we use what 217 - * the ESP chip can handle (24 bit). 218 - */ 219 - unsigned long sz = sp->SCp.this_residual; 220 - if(sz > 0x1000000) 221 - sz = 0x1000000; 222 - return sz; 223 - } 224 - 225 - static void dma_dump_state(struct NCR_ESP *esp) 226 - { 227 - ESPLOG(("intreq:<%04x>, intena:<%04x>\n", 228 - amiga_custom.intreqr, amiga_custom.intenar)); 229 - } 230 - 231 - void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length) 232 - { 233 - #if MKIV 234 - struct blz1230_dma_registers *dregs = 235 - (struct blz1230_dma_registers *) (esp->dregs); 236 - #else 237 - struct blz1230II_dma_registers *dregs = 238 - (struct blz1230II_dma_registers *) (esp->dregs); 239 - #endif 240 - 241 - cache_clear(addr, length); 242 - 243 - addr >>= 1; 244 - addr &= ~(BLZ1230_DMA_WRITE); 245 - 246 - /* First set latch */ 247 - dregs->dma_latch = (addr >> 24) & 0xff; 248 - 249 - /* Then pump the address to the DMA address register */ 250 - #if MKIV 251 - dregs->dma_addr = (addr >> 24) & 0xff; 252 - #endif 253 - dregs->dma_addr = (addr >> 16) & 0xff; 254 - dregs->dma_addr = (addr >> 8) & 0xff; 255 - dregs->dma_addr = (addr ) & 0xff; 256 - } 257 - 258 - void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length) 259 - { 260 - #if MKIV 261 - struct blz1230_dma_registers *dregs = 262 - (struct blz1230_dma_registers *) (esp->dregs); 263 - #else 264 - struct blz1230II_dma_registers *dregs = 265 - (struct blz1230II_dma_registers *) (esp->dregs); 266 - #endif 267 - 268 - cache_push(addr, length); 269 - 270 - addr >>= 1; 271 - addr |= BLZ1230_DMA_WRITE; 272 - 273 - /* First set latch */ 274 - dregs->dma_latch = (addr >> 24) & 0xff; 275 - 276 - /* Then pump the address to the DMA address register */ 277 - #if MKIV 278 - dregs->dma_addr = (addr >> 24) & 0xff; 279 - #endif 280 - dregs->dma_addr = (addr 
>> 16) & 0xff; 281 - dregs->dma_addr = (addr >> 8) & 0xff; 282 - dregs->dma_addr = (addr ) & 0xff; 283 - } 284 - 285 - static void dma_ints_off(struct NCR_ESP *esp) 286 - { 287 - disable_irq(esp->irq); 288 - } 289 - 290 - static void dma_ints_on(struct NCR_ESP *esp) 291 - { 292 - enable_irq(esp->irq); 293 - } 294 - 295 - static int dma_irq_p(struct NCR_ESP *esp) 296 - { 297 - return (esp_read(esp->eregs->esp_status) & ESP_STAT_INTR); 298 - } 299 - 300 - static int dma_ports_p(struct NCR_ESP *esp) 301 - { 302 - return ((amiga_custom.intenar) & IF_PORTS); 303 - } 304 - 305 - static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write) 306 - { 307 - /* On the Sparc, DMA_ST_WRITE means "move data from device to memory" 308 - * so when (write) is true, it actually means READ! 309 - */ 310 - if(write){ 311 - dma_init_read(esp, addr, count); 312 - } else { 313 - dma_init_write(esp, addr, count); 314 - } 315 - } 316 - 317 - #define HOSTS_C 318 - 319 - int blz1230_esp_release(struct Scsi_Host *instance) 320 - { 321 - #ifdef MODULE 322 - unsigned long address = (unsigned long)((struct NCR_ESP *)instance->hostdata)->edev; 323 - esp_deallocate((struct NCR_ESP *)instance->hostdata); 324 - esp_release(); 325 - release_mem_region(address, sizeof(struct ESP_regs)); 326 - free_irq(IRQ_AMIGA_PORTS, esp_intr); 327 - #endif 328 - return 1; 329 - } 330 - 331 - 332 - static struct scsi_host_template driver_template = { 333 - .proc_name = "esp-blz1230", 334 - .proc_info = esp_proc_info, 335 - .name = "Blizzard1230 SCSI IV", 336 - .detect = blz1230_esp_detect, 337 - .slave_alloc = esp_slave_alloc, 338 - .slave_destroy = esp_slave_destroy, 339 - .release = blz1230_esp_release, 340 - .queuecommand = esp_queue, 341 - .eh_abort_handler = esp_abort, 342 - .eh_bus_reset_handler = esp_reset, 343 - .can_queue = 7, 344 - .this_id = 7, 345 - .sg_tablesize = SG_ALL, 346 - .cmd_per_lun = 1, 347 - .use_clustering = ENABLE_CLUSTERING 348 - }; 349 - 350 - 351 - #include "scsi_module.c" 
352 - 353 - MODULE_LICENSE("GPL");
-306
drivers/scsi/blz2060.c
··· 1 - /* blz2060.c: Driver for Blizzard 2060 SCSI Controller. 2 - * 3 - * Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk) 4 - * 5 - * This driver is based on the CyberStorm driver, hence the occasional 6 - * reference to CyberStorm. 7 - */ 8 - 9 - /* TODO: 10 - * 11 - * 1) Figure out how to make a cleaner merge with the sparc driver with regard 12 - * to the caches and the Sparc MMU mapping. 13 - * 2) Make as few routines required outside the generic driver. A lot of the 14 - * routines in this file used to be inline! 15 - */ 16 - 17 - #include <linux/module.h> 18 - 19 - #include <linux/init.h> 20 - #include <linux/kernel.h> 21 - #include <linux/delay.h> 22 - #include <linux/types.h> 23 - #include <linux/string.h> 24 - #include <linux/slab.h> 25 - #include <linux/blkdev.h> 26 - #include <linux/proc_fs.h> 27 - #include <linux/stat.h> 28 - #include <linux/interrupt.h> 29 - 30 - #include "scsi.h" 31 - #include <scsi/scsi_host.h> 32 - #include "NCR53C9x.h" 33 - 34 - #include <linux/zorro.h> 35 - #include <asm/irq.h> 36 - #include <asm/amigaints.h> 37 - #include <asm/amigahw.h> 38 - 39 - #include <asm/pgtable.h> 40 - 41 - /* The controller registers can be found in the Z2 config area at these 42 - * offsets: 43 - */ 44 - #define BLZ2060_ESP_ADDR 0x1ff00 45 - #define BLZ2060_DMA_ADDR 0x1ffe0 46 - 47 - 48 - /* The Blizzard 2060 DMA interface 49 - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 50 - * Only two things can be programmed in the Blizzard DMA: 51 - * 1) The data direction is controlled by the status of bit 31 (1 = write) 52 - * 2) The source/dest address (word aligned, shifted one right) in bits 30-0 53 - * 54 - * Figure out interrupt status by reading the ESP status byte. 
55 - */ 56 - struct blz2060_dma_registers { 57 - volatile unsigned char dma_led_ctrl; /* DMA led control [0x000] */ 58 - unsigned char dmapad1[0x0f]; 59 - volatile unsigned char dma_addr0; /* DMA address (MSB) [0x010] */ 60 - unsigned char dmapad2[0x03]; 61 - volatile unsigned char dma_addr1; /* DMA address [0x014] */ 62 - unsigned char dmapad3[0x03]; 63 - volatile unsigned char dma_addr2; /* DMA address [0x018] */ 64 - unsigned char dmapad4[0x03]; 65 - volatile unsigned char dma_addr3; /* DMA address (LSB) [0x01c] */ 66 - }; 67 - 68 - #define BLZ2060_DMA_WRITE 0x80000000 69 - 70 - /* DMA control bits */ 71 - #define BLZ2060_DMA_LED 0x02 /* HD led control 1 = off */ 72 - 73 - static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count); 74 - static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp); 75 - static void dma_dump_state(struct NCR_ESP *esp); 76 - static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length); 77 - static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length); 78 - static void dma_ints_off(struct NCR_ESP *esp); 79 - static void dma_ints_on(struct NCR_ESP *esp); 80 - static int dma_irq_p(struct NCR_ESP *esp); 81 - static void dma_led_off(struct NCR_ESP *esp); 82 - static void dma_led_on(struct NCR_ESP *esp); 83 - static int dma_ports_p(struct NCR_ESP *esp); 84 - static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write); 85 - 86 - static volatile unsigned char cmd_buffer[16]; 87 - /* This is where all commands are put 88 - * before they are transferred to the ESP chip 89 - * via PIO. 
90 - */ 91 - 92 - /***************************************************************** Detection */ 93 - int __init blz2060_esp_detect(struct scsi_host_template *tpnt) 94 - { 95 - struct NCR_ESP *esp; 96 - struct zorro_dev *z = NULL; 97 - unsigned long address; 98 - 99 - if ((z = zorro_find_device(ZORRO_PROD_PHASE5_BLIZZARD_2060, z))) { 100 - unsigned long board = z->resource.start; 101 - if (request_mem_region(board+BLZ2060_ESP_ADDR, 102 - sizeof(struct ESP_regs), "NCR53C9x")) { 103 - esp = esp_allocate(tpnt, (void *)board + BLZ2060_ESP_ADDR, 0); 104 - 105 - /* Do command transfer with programmed I/O */ 106 - esp->do_pio_cmds = 1; 107 - 108 - /* Required functions */ 109 - esp->dma_bytes_sent = &dma_bytes_sent; 110 - esp->dma_can_transfer = &dma_can_transfer; 111 - esp->dma_dump_state = &dma_dump_state; 112 - esp->dma_init_read = &dma_init_read; 113 - esp->dma_init_write = &dma_init_write; 114 - esp->dma_ints_off = &dma_ints_off; 115 - esp->dma_ints_on = &dma_ints_on; 116 - esp->dma_irq_p = &dma_irq_p; 117 - esp->dma_ports_p = &dma_ports_p; 118 - esp->dma_setup = &dma_setup; 119 - 120 - /* Optional functions */ 121 - esp->dma_barrier = 0; 122 - esp->dma_drain = 0; 123 - esp->dma_invalidate = 0; 124 - esp->dma_irq_entry = 0; 125 - esp->dma_irq_exit = 0; 126 - esp->dma_led_on = &dma_led_on; 127 - esp->dma_led_off = &dma_led_off; 128 - esp->dma_poll = 0; 129 - esp->dma_reset = 0; 130 - 131 - /* SCSI chip speed */ 132 - esp->cfreq = 40000000; 133 - 134 - /* The DMA registers on the Blizzard are mapped 135 - * relative to the device (i.e. in the same Zorro 136 - * I/O block). 
137 - */ 138 - address = (unsigned long)ZTWO_VADDR(board); 139 - esp->dregs = (void *)(address + BLZ2060_DMA_ADDR); 140 - 141 - /* ESP register base */ 142 - esp->eregs = (struct ESP_regs *)(address + BLZ2060_ESP_ADDR); 143 - 144 - /* Set the command buffer */ 145 - esp->esp_command = cmd_buffer; 146 - esp->esp_command_dvma = virt_to_bus((void *)cmd_buffer); 147 - 148 - esp->irq = IRQ_AMIGA_PORTS; 149 - request_irq(IRQ_AMIGA_PORTS, esp_intr, IRQF_SHARED, 150 - "Blizzard 2060 SCSI", esp->ehost); 151 - 152 - /* Figure out our scsi ID on the bus */ 153 - esp->scsi_id = 7; 154 - 155 - /* We don't have a differential SCSI-bus. */ 156 - esp->diff = 0; 157 - 158 - esp_initialize(esp); 159 - 160 - printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps, esps_in_use); 161 - esps_running = esps_in_use; 162 - return esps_in_use; 163 - } 164 - } 165 - return 0; 166 - } 167 - 168 - /************************************************************* DMA Functions */ 169 - static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count) 170 - { 171 - /* Since the Blizzard DMA is fully dedicated to the ESP chip, 172 - * the number of bytes sent (to the ESP chip) equals the number 173 - * of bytes in the FIFO - there is no buffering in the DMA controller. 174 - * XXXX Do I read this right? It is from host to ESP, right? 175 - */ 176 - return fifo_count; 177 - } 178 - 179 - static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp) 180 - { 181 - /* I don't think there's any limit on the Blizzard DMA. So we use what 182 - * the ESP chip can handle (24 bit). 
183 - */ 184 - unsigned long sz = sp->SCp.this_residual; 185 - if(sz > 0x1000000) 186 - sz = 0x1000000; 187 - return sz; 188 - } 189 - 190 - static void dma_dump_state(struct NCR_ESP *esp) 191 - { 192 - ESPLOG(("intreq:<%04x>, intena:<%04x>\n", 193 - amiga_custom.intreqr, amiga_custom.intenar)); 194 - } 195 - 196 - static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length) 197 - { 198 - struct blz2060_dma_registers *dregs = 199 - (struct blz2060_dma_registers *) (esp->dregs); 200 - 201 - cache_clear(addr, length); 202 - 203 - addr >>= 1; 204 - addr &= ~(BLZ2060_DMA_WRITE); 205 - dregs->dma_addr3 = (addr ) & 0xff; 206 - dregs->dma_addr2 = (addr >> 8) & 0xff; 207 - dregs->dma_addr1 = (addr >> 16) & 0xff; 208 - dregs->dma_addr0 = (addr >> 24) & 0xff; 209 - } 210 - 211 - static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length) 212 - { 213 - struct blz2060_dma_registers *dregs = 214 - (struct blz2060_dma_registers *) (esp->dregs); 215 - 216 - cache_push(addr, length); 217 - 218 - addr >>= 1; 219 - addr |= BLZ2060_DMA_WRITE; 220 - dregs->dma_addr3 = (addr ) & 0xff; 221 - dregs->dma_addr2 = (addr >> 8) & 0xff; 222 - dregs->dma_addr1 = (addr >> 16) & 0xff; 223 - dregs->dma_addr0 = (addr >> 24) & 0xff; 224 - } 225 - 226 - static void dma_ints_off(struct NCR_ESP *esp) 227 - { 228 - disable_irq(esp->irq); 229 - } 230 - 231 - static void dma_ints_on(struct NCR_ESP *esp) 232 - { 233 - enable_irq(esp->irq); 234 - } 235 - 236 - static int dma_irq_p(struct NCR_ESP *esp) 237 - { 238 - return (esp_read(esp->eregs->esp_status) & ESP_STAT_INTR); 239 - } 240 - 241 - static void dma_led_off(struct NCR_ESP *esp) 242 - { 243 - ((struct blz2060_dma_registers *) (esp->dregs))->dma_led_ctrl = 244 - BLZ2060_DMA_LED; 245 - } 246 - 247 - static void dma_led_on(struct NCR_ESP *esp) 248 - { 249 - ((struct blz2060_dma_registers *) (esp->dregs))->dma_led_ctrl = 0; 250 - } 251 - 252 - static int dma_ports_p(struct NCR_ESP *esp) 253 - { 254 - return ((amiga_custom.intenar) 
& IF_PORTS); 255 - } 256 - 257 - static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write) 258 - { 259 - /* On the Sparc, DMA_ST_WRITE means "move data from device to memory" 260 - * so when (write) is true, it actually means READ! 261 - */ 262 - if(write){ 263 - dma_init_read(esp, addr, count); 264 - } else { 265 - dma_init_write(esp, addr, count); 266 - } 267 - } 268 - 269 - #define HOSTS_C 270 - 271 - int blz2060_esp_release(struct Scsi_Host *instance) 272 - { 273 - #ifdef MODULE 274 - unsigned long address = (unsigned long)((struct NCR_ESP *)instance->hostdata)->edev; 275 - 276 - esp_deallocate((struct NCR_ESP *)instance->hostdata); 277 - esp_release(); 278 - release_mem_region(address, sizeof(struct ESP_regs)); 279 - free_irq(IRQ_AMIGA_PORTS, esp_intr); 280 - #endif 281 - return 1; 282 - } 283 - 284 - 285 - static struct scsi_host_template driver_template = { 286 - .proc_name = "esp-blz2060", 287 - .proc_info = esp_proc_info, 288 - .name = "Blizzard2060 SCSI", 289 - .detect = blz2060_esp_detect, 290 - .slave_alloc = esp_slave_alloc, 291 - .slave_destroy = esp_slave_destroy, 292 - .release = blz2060_esp_release, 293 - .queuecommand = esp_queue, 294 - .eh_abort_handler = esp_abort, 295 - .eh_bus_reset_handler = esp_reset, 296 - .can_queue = 7, 297 - .this_id = 7, 298 - .sg_tablesize = SG_ALL, 299 - .cmd_per_lun = 1, 300 - .use_clustering = ENABLE_CLUSTERING 301 - }; 302 - 303 - 304 - #include "scsi_module.c" 305 - 306 - MODULE_LICENSE("GPL");
-377
drivers/scsi/cyberstorm.c
··· 1 - /* cyberstorm.c: Driver for CyberStorm SCSI Controller. 2 - * 3 - * Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk) 4 - * 5 - * The CyberStorm SCSI driver is based on David S. Miller's ESP driver 6 - * for the Sparc computers. 7 - * 8 - * This work was made possible by Phase5 who willingly (and most generously) 9 - * supported me with hardware and all the information I needed. 10 - */ 11 - 12 - /* TODO: 13 - * 14 - * 1) Figure out how to make a cleaner merge with the sparc driver with regard 15 - * to the caches and the Sparc MMU mapping. 16 - * 2) Make as few routines required outside the generic driver. A lot of the 17 - * routines in this file used to be inline! 18 - */ 19 - 20 - #include <linux/module.h> 21 - 22 - #include <linux/init.h> 23 - #include <linux/kernel.h> 24 - #include <linux/delay.h> 25 - #include <linux/types.h> 26 - #include <linux/string.h> 27 - #include <linux/slab.h> 28 - #include <linux/blkdev.h> 29 - #include <linux/proc_fs.h> 30 - #include <linux/stat.h> 31 - #include <linux/interrupt.h> 32 - 33 - #include "scsi.h" 34 - #include <scsi/scsi_host.h> 35 - #include "NCR53C9x.h" 36 - 37 - #include <linux/zorro.h> 38 - #include <asm/irq.h> 39 - #include <asm/amigaints.h> 40 - #include <asm/amigahw.h> 41 - 42 - #include <asm/pgtable.h> 43 - 44 - /* The controller registers can be found in the Z2 config area at these 45 - * offsets: 46 - */ 47 - #define CYBER_ESP_ADDR 0xf400 48 - #define CYBER_DMA_ADDR 0xf800 49 - 50 - 51 - /* The CyberStorm DMA interface */ 52 - struct cyber_dma_registers { 53 - volatile unsigned char dma_addr0; /* DMA address (MSB) [0x000] */ 54 - unsigned char dmapad1[1]; 55 - volatile unsigned char dma_addr1; /* DMA address [0x002] */ 56 - unsigned char dmapad2[1]; 57 - volatile unsigned char dma_addr2; /* DMA address [0x004] */ 58 - unsigned char dmapad3[1]; 59 - volatile unsigned char dma_addr3; /* DMA address (LSB) [0x006] */ 60 - unsigned char dmapad4[0x3fb]; 61 - volatile unsigned char cond_reg; /* DMA cond 
(ro) [0x402] */ 62 - #define ctrl_reg cond_reg /* DMA control (wo) [0x402] */ 63 - }; 64 - 65 - /* DMA control bits */ 66 - #define CYBER_DMA_LED 0x80 /* HD led control 1 = on */ 67 - #define CYBER_DMA_WRITE 0x40 /* DMA direction. 1 = write */ 68 - #define CYBER_DMA_Z3 0x20 /* 16 (Z2) or 32 (CHIP/Z3) bit DMA transfer */ 69 - 70 - /* DMA status bits */ 71 - #define CYBER_DMA_HNDL_INTR 0x80 /* DMA IRQ pending? */ 72 - 73 - /* The bits below appears to be Phase5 Debug bits only; they were not 74 - * described by Phase5 so using them may seem a bit stupid... 75 - */ 76 - #define CYBER_HOST_ID 0x02 /* If set, host ID should be 7, otherwise 77 - * it should be 6. 78 - */ 79 - #define CYBER_SLOW_CABLE 0x08 /* If *not* set, assume SLOW_CABLE */ 80 - 81 - static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count); 82 - static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp); 83 - static void dma_dump_state(struct NCR_ESP *esp); 84 - static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length); 85 - static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length); 86 - static void dma_ints_off(struct NCR_ESP *esp); 87 - static void dma_ints_on(struct NCR_ESP *esp); 88 - static int dma_irq_p(struct NCR_ESP *esp); 89 - static void dma_led_off(struct NCR_ESP *esp); 90 - static void dma_led_on(struct NCR_ESP *esp); 91 - static int dma_ports_p(struct NCR_ESP *esp); 92 - static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write); 93 - 94 - static unsigned char ctrl_data = 0; /* Keep backup of the stuff written 95 - * to ctrl_reg. Always write a copy 96 - * to this register when writing to 97 - * the hardware register! 98 - */ 99 - 100 - static volatile unsigned char cmd_buffer[16]; 101 - /* This is where all commands are put 102 - * before they are transferred to the ESP chip 103 - * via PIO. 
104 - */ 105 - 106 - /***************************************************************** Detection */ 107 - int __init cyber_esp_detect(struct scsi_host_template *tpnt) 108 - { 109 - struct NCR_ESP *esp; 110 - struct zorro_dev *z = NULL; 111 - unsigned long address; 112 - 113 - while ((z = zorro_find_device(ZORRO_WILDCARD, z))) { 114 - unsigned long board = z->resource.start; 115 - if ((z->id == ZORRO_PROD_PHASE5_BLIZZARD_1220_CYBERSTORM || 116 - z->id == ZORRO_PROD_PHASE5_BLIZZARD_1230_II_FASTLANE_Z3_CYBERSCSI_CYBERSTORM060) && 117 - request_mem_region(board+CYBER_ESP_ADDR, 118 - sizeof(struct ESP_regs), "NCR53C9x")) { 119 - /* Figure out if this is a CyberStorm or really a 120 - * Fastlane/Blizzard Mk II by looking at the board size. 121 - * CyberStorm maps 64kB 122 - * (ZORRO_PROD_PHASE5_BLIZZARD_1220_CYBERSTORM does anyway) 123 - */ 124 - if(z->resource.end-board != 0xffff) { 125 - release_mem_region(board+CYBER_ESP_ADDR, 126 - sizeof(struct ESP_regs)); 127 - return 0; 128 - } 129 - esp = esp_allocate(tpnt, (void *)board + CYBER_ESP_ADDR, 0); 130 - 131 - /* Do command transfer with programmed I/O */ 132 - esp->do_pio_cmds = 1; 133 - 134 - /* Required functions */ 135 - esp->dma_bytes_sent = &dma_bytes_sent; 136 - esp->dma_can_transfer = &dma_can_transfer; 137 - esp->dma_dump_state = &dma_dump_state; 138 - esp->dma_init_read = &dma_init_read; 139 - esp->dma_init_write = &dma_init_write; 140 - esp->dma_ints_off = &dma_ints_off; 141 - esp->dma_ints_on = &dma_ints_on; 142 - esp->dma_irq_p = &dma_irq_p; 143 - esp->dma_ports_p = &dma_ports_p; 144 - esp->dma_setup = &dma_setup; 145 - 146 - /* Optional functions */ 147 - esp->dma_barrier = 0; 148 - esp->dma_drain = 0; 149 - esp->dma_invalidate = 0; 150 - esp->dma_irq_entry = 0; 151 - esp->dma_irq_exit = 0; 152 - esp->dma_led_on = &dma_led_on; 153 - esp->dma_led_off = &dma_led_off; 154 - esp->dma_poll = 0; 155 - esp->dma_reset = 0; 156 - 157 - /* SCSI chip speed */ 158 - esp->cfreq = 40000000; 159 - 160 - /* The DMA 
registers on the CyberStorm are mapped 161 - * relative to the device (i.e. in the same Zorro 162 - * I/O block). 163 - */ 164 - address = (unsigned long)ZTWO_VADDR(board); 165 - esp->dregs = (void *)(address + CYBER_DMA_ADDR); 166 - 167 - /* ESP register base */ 168 - esp->eregs = (struct ESP_regs *)(address + CYBER_ESP_ADDR); 169 - 170 - /* Set the command buffer */ 171 - esp->esp_command = cmd_buffer; 172 - esp->esp_command_dvma = virt_to_bus((void *)cmd_buffer); 173 - 174 - esp->irq = IRQ_AMIGA_PORTS; 175 - request_irq(IRQ_AMIGA_PORTS, esp_intr, IRQF_SHARED, 176 - "CyberStorm SCSI", esp->ehost); 177 - /* Figure out our scsi ID on the bus */ 178 - /* The DMA cond flag contains a hardcoded jumper bit 179 - * which can be used to select host number 6 or 7. 180 - * However, even though it may change, we use a hardcoded 181 - * value of 7. 182 - */ 183 - esp->scsi_id = 7; 184 - 185 - /* We don't have a differential SCSI-bus. */ 186 - esp->diff = 0; 187 - 188 - esp_initialize(esp); 189 - 190 - printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps, esps_in_use); 191 - esps_running = esps_in_use; 192 - return esps_in_use; 193 - } 194 - } 195 - return 0; 196 - } 197 - 198 - /************************************************************* DMA Functions */ 199 - static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count) 200 - { 201 - /* Since the CyberStorm DMA is fully dedicated to the ESP chip, 202 - * the number of bytes sent (to the ESP chip) equals the number 203 - * of bytes in the FIFO - there is no buffering in the DMA controller. 204 - * XXXX Do I read this right? It is from host to ESP, right? 205 - */ 206 - return fifo_count; 207 - } 208 - 209 - static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp) 210 - { 211 - /* I don't think there's any limit on the CyberDMA. So we use what 212 - * the ESP chip can handle (24 bit). 
213 - */ 214 - unsigned long sz = sp->SCp.this_residual; 215 - if(sz > 0x1000000) 216 - sz = 0x1000000; 217 - return sz; 218 - } 219 - 220 - static void dma_dump_state(struct NCR_ESP *esp) 221 - { 222 - ESPLOG(("esp%d: dma -- cond_reg<%02x>\n", 223 - esp->esp_id, ((struct cyber_dma_registers *) 224 - (esp->dregs))->cond_reg)); 225 - ESPLOG(("intreq:<%04x>, intena:<%04x>\n", 226 - amiga_custom.intreqr, amiga_custom.intenar)); 227 - } 228 - 229 - static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length) 230 - { 231 - struct cyber_dma_registers *dregs = 232 - (struct cyber_dma_registers *) esp->dregs; 233 - 234 - cache_clear(addr, length); 235 - 236 - addr &= ~(1); 237 - dregs->dma_addr0 = (addr >> 24) & 0xff; 238 - dregs->dma_addr1 = (addr >> 16) & 0xff; 239 - dregs->dma_addr2 = (addr >> 8) & 0xff; 240 - dregs->dma_addr3 = (addr ) & 0xff; 241 - ctrl_data &= ~(CYBER_DMA_WRITE); 242 - 243 - /* Check if physical address is outside Z2 space and of 244 - * block length/block aligned in memory. If this is the 245 - * case, enable 32 bit transfer. In all other cases, fall back 246 - * to 16 bit transfer. 247 - * Obviously 32 bit transfer should be enabled if the DMA address 248 - * and length are 32 bit aligned. However, this leads to some 249 - * strange behavior. Even 64 bit aligned addr/length fails. 250 - * Until I've found a reason for this, 32 bit transfer is only 251 - * used for full-block transfers (1kB). 
252 - * -jskov 253 - */ 254 - #if 0 255 - if((addr & 0x3fc) || length & 0x3ff || ((addr > 0x200000) && 256 - (addr < 0xff0000))) 257 - ctrl_data &= ~(CYBER_DMA_Z3); /* Z2, do 16 bit DMA */ 258 - else 259 - ctrl_data |= CYBER_DMA_Z3; /* CHIP/Z3, do 32 bit DMA */ 260 - #else 261 - ctrl_data &= ~(CYBER_DMA_Z3); /* Z2, do 16 bit DMA */ 262 - #endif 263 - dregs->ctrl_reg = ctrl_data; 264 - } 265 - 266 - static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length) 267 - { 268 - struct cyber_dma_registers *dregs = 269 - (struct cyber_dma_registers *) esp->dregs; 270 - 271 - cache_push(addr, length); 272 - 273 - addr |= 1; 274 - dregs->dma_addr0 = (addr >> 24) & 0xff; 275 - dregs->dma_addr1 = (addr >> 16) & 0xff; 276 - dregs->dma_addr2 = (addr >> 8) & 0xff; 277 - dregs->dma_addr3 = (addr ) & 0xff; 278 - ctrl_data |= CYBER_DMA_WRITE; 279 - 280 - /* See comment above */ 281 - #if 0 282 - if((addr & 0x3fc) || length & 0x3ff || ((addr > 0x200000) && 283 - (addr < 0xff0000))) 284 - ctrl_data &= ~(CYBER_DMA_Z3); /* Z2, do 16 bit DMA */ 285 - else 286 - ctrl_data |= CYBER_DMA_Z3; /* CHIP/Z3, do 32 bit DMA */ 287 - #else 288 - ctrl_data &= ~(CYBER_DMA_Z3); /* Z2, do 16 bit DMA */ 289 - #endif 290 - dregs->ctrl_reg = ctrl_data; 291 - } 292 - 293 - static void dma_ints_off(struct NCR_ESP *esp) 294 - { 295 - disable_irq(esp->irq); 296 - } 297 - 298 - static void dma_ints_on(struct NCR_ESP *esp) 299 - { 300 - enable_irq(esp->irq); 301 - } 302 - 303 - static int dma_irq_p(struct NCR_ESP *esp) 304 - { 305 - /* It's important to check the DMA IRQ bit in the correct way! 
*/ 306 - return ((esp_read(esp->eregs->esp_status) & ESP_STAT_INTR) && 307 - ((((struct cyber_dma_registers *)(esp->dregs))->cond_reg) & 308 - CYBER_DMA_HNDL_INTR)); 309 - } 310 - 311 - static void dma_led_off(struct NCR_ESP *esp) 312 - { 313 - ctrl_data &= ~CYBER_DMA_LED; 314 - ((struct cyber_dma_registers *)(esp->dregs))->ctrl_reg = ctrl_data; 315 - } 316 - 317 - static void dma_led_on(struct NCR_ESP *esp) 318 - { 319 - ctrl_data |= CYBER_DMA_LED; 320 - ((struct cyber_dma_registers *)(esp->dregs))->ctrl_reg = ctrl_data; 321 - } 322 - 323 - static int dma_ports_p(struct NCR_ESP *esp) 324 - { 325 - return ((amiga_custom.intenar) & IF_PORTS); 326 - } 327 - 328 - static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write) 329 - { 330 - /* On the Sparc, DMA_ST_WRITE means "move data from device to memory" 331 - * so when (write) is true, it actually means READ! 332 - */ 333 - if(write){ 334 - dma_init_read(esp, addr, count); 335 - } else { 336 - dma_init_write(esp, addr, count); 337 - } 338 - } 339 - 340 - #define HOSTS_C 341 - 342 - int cyber_esp_release(struct Scsi_Host *instance) 343 - { 344 - #ifdef MODULE 345 - unsigned long address = (unsigned long)((struct NCR_ESP *)instance->hostdata)->edev; 346 - 347 - esp_deallocate((struct NCR_ESP *)instance->hostdata); 348 - esp_release(); 349 - release_mem_region(address, sizeof(struct ESP_regs)); 350 - free_irq(IRQ_AMIGA_PORTS, esp_intr); 351 - #endif 352 - return 1; 353 - } 354 - 355 - 356 - static struct scsi_host_template driver_template = { 357 - .proc_name = "esp-cyberstorm", 358 - .proc_info = esp_proc_info, 359 - .name = "CyberStorm SCSI", 360 - .detect = cyber_esp_detect, 361 - .slave_alloc = esp_slave_alloc, 362 - .slave_destroy = esp_slave_destroy, 363 - .release = cyber_esp_release, 364 - .queuecommand = esp_queue, 365 - .eh_abort_handler = esp_abort, 366 - .eh_bus_reset_handler = esp_reset, 367 - .can_queue = 7, 368 - .this_id = 7, 369 - .sg_tablesize = SG_ALL, 370 - .cmd_per_lun = 1, 371 - 
.use_clustering = ENABLE_CLUSTERING 372 - }; 373 - 374 - 375 - #include "scsi_module.c" 376 - 377 - MODULE_LICENSE("GPL");
-314
drivers/scsi/cyberstormII.c
··· 1 - /* cyberstormII.c: Driver for CyberStorm SCSI Mk II 2 - * 3 - * Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk) 4 - * 5 - * This driver is based on cyberstorm.c 6 - */ 7 - 8 - /* TODO: 9 - * 10 - * 1) Figure out how to make a cleaner merge with the sparc driver with regard 11 - * to the caches and the Sparc MMU mapping. 12 - * 2) Make as few routines required outside the generic driver. A lot of the 13 - * routines in this file used to be inline! 14 - */ 15 - 16 - #include <linux/module.h> 17 - 18 - #include <linux/init.h> 19 - #include <linux/kernel.h> 20 - #include <linux/delay.h> 21 - #include <linux/types.h> 22 - #include <linux/string.h> 23 - #include <linux/slab.h> 24 - #include <linux/blkdev.h> 25 - #include <linux/proc_fs.h> 26 - #include <linux/stat.h> 27 - #include <linux/interrupt.h> 28 - 29 - #include "scsi.h" 30 - #include <scsi/scsi_host.h> 31 - #include "NCR53C9x.h" 32 - 33 - #include <linux/zorro.h> 34 - #include <asm/irq.h> 35 - #include <asm/amigaints.h> 36 - #include <asm/amigahw.h> 37 - 38 - #include <asm/pgtable.h> 39 - 40 - /* The controller registers can be found in the Z2 config area at these 41 - * offsets: 42 - */ 43 - #define CYBERII_ESP_ADDR 0x1ff03 44 - #define CYBERII_DMA_ADDR 0x1ff43 45 - 46 - 47 - /* The CyberStorm II DMA interface */ 48 - struct cyberII_dma_registers { 49 - volatile unsigned char cond_reg; /* DMA cond (ro) [0x000] */ 50 - #define ctrl_reg cond_reg /* DMA control (wo) [0x000] */ 51 - unsigned char dmapad4[0x3f]; 52 - volatile unsigned char dma_addr0; /* DMA address (MSB) [0x040] */ 53 - unsigned char dmapad1[3]; 54 - volatile unsigned char dma_addr1; /* DMA address [0x044] */ 55 - unsigned char dmapad2[3]; 56 - volatile unsigned char dma_addr2; /* DMA address [0x048] */ 57 - unsigned char dmapad3[3]; 58 - volatile unsigned char dma_addr3; /* DMA address (LSB) [0x04c] */ 59 - }; 60 - 61 - /* DMA control bits */ 62 - #define CYBERII_DMA_LED 0x02 /* HD led control 1 = on */ 63 - 64 - static int 
dma_bytes_sent(struct NCR_ESP *esp, int fifo_count); 65 - static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp); 66 - static void dma_dump_state(struct NCR_ESP *esp); 67 - static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length); 68 - static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length); 69 - static void dma_ints_off(struct NCR_ESP *esp); 70 - static void dma_ints_on(struct NCR_ESP *esp); 71 - static int dma_irq_p(struct NCR_ESP *esp); 72 - static void dma_led_off(struct NCR_ESP *esp); 73 - static void dma_led_on(struct NCR_ESP *esp); 74 - static int dma_ports_p(struct NCR_ESP *esp); 75 - static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write); 76 - 77 - static volatile unsigned char cmd_buffer[16]; 78 - /* This is where all commands are put 79 - * before they are transferred to the ESP chip 80 - * via PIO. 81 - */ 82 - 83 - /***************************************************************** Detection */ 84 - int __init cyberII_esp_detect(struct scsi_host_template *tpnt) 85 - { 86 - struct NCR_ESP *esp; 87 - struct zorro_dev *z = NULL; 88 - unsigned long address; 89 - struct ESP_regs *eregs; 90 - 91 - if ((z = zorro_find_device(ZORRO_PROD_PHASE5_CYBERSTORM_MK_II, z))) { 92 - unsigned long board = z->resource.start; 93 - if (request_mem_region(board+CYBERII_ESP_ADDR, 94 - sizeof(struct ESP_regs), "NCR53C9x")) { 95 - /* Do some magic to figure out if the CyberStorm Mk II 96 - * is equipped with a SCSI controller 97 - */ 98 - address = (unsigned long)ZTWO_VADDR(board); 99 - eregs = (struct ESP_regs *)(address + CYBERII_ESP_ADDR); 100 - 101 - esp = esp_allocate(tpnt, (void *)board + CYBERII_ESP_ADDR, 0); 102 - 103 - esp_write(eregs->esp_cfg1, (ESP_CONFIG1_PENABLE | 7)); 104 - udelay(5); 105 - if(esp_read(eregs->esp_cfg1) != (ESP_CONFIG1_PENABLE | 7)) { 106 - esp_deallocate(esp); 107 - scsi_unregister(esp->ehost); 108 - release_mem_region(board+CYBERII_ESP_ADDR, 109 - sizeof(struct ESP_regs)); 110 - return 
0; /* Bail out if address did not hold data */ 111 - } 112 - 113 - /* Do command transfer with programmed I/O */ 114 - esp->do_pio_cmds = 1; 115 - 116 - /* Required functions */ 117 - esp->dma_bytes_sent = &dma_bytes_sent; 118 - esp->dma_can_transfer = &dma_can_transfer; 119 - esp->dma_dump_state = &dma_dump_state; 120 - esp->dma_init_read = &dma_init_read; 121 - esp->dma_init_write = &dma_init_write; 122 - esp->dma_ints_off = &dma_ints_off; 123 - esp->dma_ints_on = &dma_ints_on; 124 - esp->dma_irq_p = &dma_irq_p; 125 - esp->dma_ports_p = &dma_ports_p; 126 - esp->dma_setup = &dma_setup; 127 - 128 - /* Optional functions */ 129 - esp->dma_barrier = 0; 130 - esp->dma_drain = 0; 131 - esp->dma_invalidate = 0; 132 - esp->dma_irq_entry = 0; 133 - esp->dma_irq_exit = 0; 134 - esp->dma_led_on = &dma_led_on; 135 - esp->dma_led_off = &dma_led_off; 136 - esp->dma_poll = 0; 137 - esp->dma_reset = 0; 138 - 139 - /* SCSI chip speed */ 140 - esp->cfreq = 40000000; 141 - 142 - /* The DMA registers on the CyberStorm are mapped 143 - * relative to the device (i.e. in the same Zorro 144 - * I/O block). 145 - */ 146 - esp->dregs = (void *)(address + CYBERII_DMA_ADDR); 147 - 148 - /* ESP register base */ 149 - esp->eregs = eregs; 150 - 151 - /* Set the command buffer */ 152 - esp->esp_command = cmd_buffer; 153 - esp->esp_command_dvma = virt_to_bus((void *)cmd_buffer); 154 - 155 - esp->irq = IRQ_AMIGA_PORTS; 156 - request_irq(IRQ_AMIGA_PORTS, esp_intr, IRQF_SHARED, 157 - "CyberStorm SCSI Mk II", esp->ehost); 158 - 159 - /* Figure out our scsi ID on the bus */ 160 - esp->scsi_id = 7; 161 - 162 - /* We don't have a differential SCSI-bus. 
*/ 163 - esp->diff = 0; 164 - 165 - esp_initialize(esp); 166 - 167 - printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps, esps_in_use); 168 - esps_running = esps_in_use; 169 - return esps_in_use; 170 - } 171 - } 172 - return 0; 173 - } 174 - 175 - /************************************************************* DMA Functions */ 176 - static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count) 177 - { 178 - /* Since the CyberStorm DMA is fully dedicated to the ESP chip, 179 - * the number of bytes sent (to the ESP chip) equals the number 180 - * of bytes in the FIFO - there is no buffering in the DMA controller. 181 - * XXXX Do I read this right? It is from host to ESP, right? 182 - */ 183 - return fifo_count; 184 - } 185 - 186 - static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp) 187 - { 188 - /* I don't think there's any limit on the CyberDMA. So we use what 189 - * the ESP chip can handle (24 bit). 190 - */ 191 - unsigned long sz = sp->SCp.this_residual; 192 - if(sz > 0x1000000) 193 - sz = 0x1000000; 194 - return sz; 195 - } 196 - 197 - static void dma_dump_state(struct NCR_ESP *esp) 198 - { 199 - ESPLOG(("esp%d: dma -- cond_reg<%02x>\n", 200 - esp->esp_id, ((struct cyberII_dma_registers *) 201 - (esp->dregs))->cond_reg)); 202 - ESPLOG(("intreq:<%04x>, intena:<%04x>\n", 203 - amiga_custom.intreqr, amiga_custom.intenar)); 204 - } 205 - 206 - static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length) 207 - { 208 - struct cyberII_dma_registers *dregs = 209 - (struct cyberII_dma_registers *) esp->dregs; 210 - 211 - cache_clear(addr, length); 212 - 213 - addr &= ~(1); 214 - dregs->dma_addr0 = (addr >> 24) & 0xff; 215 - dregs->dma_addr1 = (addr >> 16) & 0xff; 216 - dregs->dma_addr2 = (addr >> 8) & 0xff; 217 - dregs->dma_addr3 = (addr ) & 0xff; 218 - } 219 - 220 - static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length) 221 - { 222 - struct cyberII_dma_registers *dregs = 223 - (struct cyberII_dma_registers *) 
esp->dregs; 224 - 225 - cache_push(addr, length); 226 - 227 - addr |= 1; 228 - dregs->dma_addr0 = (addr >> 24) & 0xff; 229 - dregs->dma_addr1 = (addr >> 16) & 0xff; 230 - dregs->dma_addr2 = (addr >> 8) & 0xff; 231 - dregs->dma_addr3 = (addr ) & 0xff; 232 - } 233 - 234 - static void dma_ints_off(struct NCR_ESP *esp) 235 - { 236 - disable_irq(esp->irq); 237 - } 238 - 239 - static void dma_ints_on(struct NCR_ESP *esp) 240 - { 241 - enable_irq(esp->irq); 242 - } 243 - 244 - static int dma_irq_p(struct NCR_ESP *esp) 245 - { 246 - /* It's important to check the DMA IRQ bit in the correct way! */ 247 - return (esp_read(esp->eregs->esp_status) & ESP_STAT_INTR); 248 - } 249 - 250 - static void dma_led_off(struct NCR_ESP *esp) 251 - { 252 - ((struct cyberII_dma_registers *)(esp->dregs))->ctrl_reg &= ~CYBERII_DMA_LED; 253 - } 254 - 255 - static void dma_led_on(struct NCR_ESP *esp) 256 - { 257 - ((struct cyberII_dma_registers *)(esp->dregs))->ctrl_reg |= CYBERII_DMA_LED; 258 - } 259 - 260 - static int dma_ports_p(struct NCR_ESP *esp) 261 - { 262 - return ((amiga_custom.intenar) & IF_PORTS); 263 - } 264 - 265 - static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write) 266 - { 267 - /* On the Sparc, DMA_ST_WRITE means "move data from device to memory" 268 - * so when (write) is true, it actually means READ! 
269 - */ 270 - if(write){ 271 - dma_init_read(esp, addr, count); 272 - } else { 273 - dma_init_write(esp, addr, count); 274 - } 275 - } 276 - 277 - #define HOSTS_C 278 - 279 - int cyberII_esp_release(struct Scsi_Host *instance) 280 - { 281 - #ifdef MODULE 282 - unsigned long address = (unsigned long)((struct NCR_ESP *)instance->hostdata)->edev; 283 - 284 - esp_deallocate((struct NCR_ESP *)instance->hostdata); 285 - esp_release(); 286 - release_mem_region(address, sizeof(struct ESP_regs)); 287 - free_irq(IRQ_AMIGA_PORTS, esp_intr); 288 - #endif 289 - return 1; 290 - } 291 - 292 - 293 - static struct scsi_host_template driver_template = { 294 - .proc_name = "esp-cyberstormII", 295 - .proc_info = esp_proc_info, 296 - .name = "CyberStorm Mk II SCSI", 297 - .detect = cyberII_esp_detect, 298 - .slave_alloc = esp_slave_alloc, 299 - .slave_destroy = esp_slave_destroy, 300 - .release = cyberII_esp_release, 301 - .queuecommand = esp_queue, 302 - .eh_abort_handler = esp_abort, 303 - .eh_bus_reset_handler = esp_reset, 304 - .can_queue = 7, 305 - .this_id = 7, 306 - .sg_tablesize = SG_ALL, 307 - .cmd_per_lun = 1, 308 - .use_clustering = ENABLE_CLUSTERING 309 - }; 310 - 311 - 312 - #include "scsi_module.c" 313 - 314 - MODULE_LICENSE("GPL");
+1 -1
drivers/scsi/dc395x.c
··· 4267 4267 const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN; 4268 4268 int srb_idx = 0; 4269 4269 unsigned i = 0; 4270 - struct SGentry *ptr; 4270 + struct SGentry *uninitialized_var(ptr); 4271 4271 4272 4272 for (i = 0; i < DC395x_MAX_SRB_CNT; i++) 4273 4273 acb->srb_array[i].segment_x = NULL;
-687
drivers/scsi/dec_esp.c
··· 1 - /* 2 - * dec_esp.c: Driver for SCSI chips on IOASIC based TURBOchannel DECstations 3 - * and TURBOchannel PMAZ-A cards 4 - * 5 - * TURBOchannel changes by Harald Koerfgen 6 - * PMAZ-A support by David Airlie 7 - * 8 - * based on jazz_esp.c: 9 - * Copyright (C) 1997 Thomas Bogendoerfer (tsbogend@alpha.franken.de) 10 - * 11 - * jazz_esp is based on David S. Miller's ESP driver and cyber_esp 12 - * 13 - * 20000819 - Small PMAZ-AA fixes by Florian Lohoff <flo@rfc822.org> 14 - * Be warned the PMAZ-AA works currently as a single card. 15 - * Dont try to put multiple cards in one machine - They are 16 - * both detected but it may crash under high load garbling your 17 - * data. 18 - * 20001005 - Initialization fixes for 2.4.0-test9 19 - * Florian Lohoff <flo@rfc822.org> 20 - * 21 - * Copyright (C) 2002, 2003, 2005, 2006 Maciej W. Rozycki 22 - */ 23 - 24 - #include <linux/kernel.h> 25 - #include <linux/delay.h> 26 - #include <linux/types.h> 27 - #include <linux/string.h> 28 - #include <linux/slab.h> 29 - #include <linux/blkdev.h> 30 - #include <linux/proc_fs.h> 31 - #include <linux/spinlock.h> 32 - #include <linux/stat.h> 33 - #include <linux/tc.h> 34 - 35 - #include <asm/dma.h> 36 - #include <asm/irq.h> 37 - #include <asm/pgtable.h> 38 - #include <asm/system.h> 39 - 40 - #include <asm/dec/interrupts.h> 41 - #include <asm/dec/ioasic.h> 42 - #include <asm/dec/ioasic_addrs.h> 43 - #include <asm/dec/ioasic_ints.h> 44 - #include <asm/dec/machtype.h> 45 - #include <asm/dec/system.h> 46 - 47 - #define DEC_SCSI_SREG 0 48 - #define DEC_SCSI_DMAREG 0x40000 49 - #define DEC_SCSI_SRAM 0x80000 50 - #define DEC_SCSI_DIAG 0xC0000 51 - 52 - #include "scsi.h" 53 - #include <scsi/scsi_host.h> 54 - #include "NCR53C9x.h" 55 - 56 - static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count); 57 - static void dma_drain(struct NCR_ESP *esp); 58 - static int dma_can_transfer(struct NCR_ESP *esp, struct scsi_cmnd *sp); 59 - static void dma_dump_state(struct NCR_ESP *esp); 60 - static 
void dma_init_read(struct NCR_ESP *esp, u32 vaddress, int length); 61 - static void dma_init_write(struct NCR_ESP *esp, u32 vaddress, int length); 62 - static void dma_ints_off(struct NCR_ESP *esp); 63 - static void dma_ints_on(struct NCR_ESP *esp); 64 - static int dma_irq_p(struct NCR_ESP *esp); 65 - static int dma_ports_p(struct NCR_ESP *esp); 66 - static void dma_setup(struct NCR_ESP *esp, u32 addr, int count, int write); 67 - static void dma_mmu_get_scsi_one(struct NCR_ESP *esp, struct scsi_cmnd * sp); 68 - static void dma_mmu_get_scsi_sgl(struct NCR_ESP *esp, struct scsi_cmnd * sp); 69 - static void dma_advance_sg(struct scsi_cmnd * sp); 70 - 71 - static void pmaz_dma_drain(struct NCR_ESP *esp); 72 - static void pmaz_dma_init_read(struct NCR_ESP *esp, u32 vaddress, int length); 73 - static void pmaz_dma_init_write(struct NCR_ESP *esp, u32 vaddress, int length); 74 - static void pmaz_dma_ints_off(struct NCR_ESP *esp); 75 - static void pmaz_dma_ints_on(struct NCR_ESP *esp); 76 - static void pmaz_dma_setup(struct NCR_ESP *esp, u32 addr, int count, int write); 77 - static void pmaz_dma_mmu_get_scsi_one(struct NCR_ESP *esp, struct scsi_cmnd * sp); 78 - 79 - #define TC_ESP_RAM_SIZE 0x20000 80 - #define ESP_TGT_DMA_SIZE ((TC_ESP_RAM_SIZE/7) & ~(sizeof(int)-1)) 81 - #define ESP_NCMD 7 82 - 83 - #define TC_ESP_DMAR_MASK 0x1ffff 84 - #define TC_ESP_DMAR_WRITE 0x80000000 85 - #define TC_ESP_DMA_ADDR(x) ((unsigned)(x) & TC_ESP_DMAR_MASK) 86 - 87 - u32 esp_virt_buffer; 88 - int scsi_current_length; 89 - 90 - volatile unsigned char cmd_buffer[16]; 91 - volatile unsigned char pmaz_cmd_buffer[16]; 92 - /* This is where all commands are put 93 - * before they are trasfered to the ESP chip 94 - * via PIO. 
95 - */ 96 - 97 - static irqreturn_t scsi_dma_merr_int(int, void *); 98 - static irqreturn_t scsi_dma_err_int(int, void *); 99 - static irqreturn_t scsi_dma_int(int, void *); 100 - 101 - static struct scsi_host_template dec_esp_template = { 102 - .module = THIS_MODULE, 103 - .name = "NCR53C94", 104 - .info = esp_info, 105 - .queuecommand = esp_queue, 106 - .eh_abort_handler = esp_abort, 107 - .eh_bus_reset_handler = esp_reset, 108 - .slave_alloc = esp_slave_alloc, 109 - .slave_destroy = esp_slave_destroy, 110 - .proc_info = esp_proc_info, 111 - .proc_name = "dec_esp", 112 - .can_queue = 7, 113 - .sg_tablesize = SG_ALL, 114 - .cmd_per_lun = 1, 115 - .use_clustering = DISABLE_CLUSTERING, 116 - }; 117 - 118 - static struct NCR_ESP *dec_esp_platform; 119 - 120 - /***************************************************************** Detection */ 121 - static int dec_esp_platform_probe(void) 122 - { 123 - struct NCR_ESP *esp; 124 - int err = 0; 125 - 126 - if (IOASIC) { 127 - esp = esp_allocate(&dec_esp_template, NULL, 1); 128 - 129 - /* Do command transfer with programmed I/O */ 130 - esp->do_pio_cmds = 1; 131 - 132 - /* Required functions */ 133 - esp->dma_bytes_sent = &dma_bytes_sent; 134 - esp->dma_can_transfer = &dma_can_transfer; 135 - esp->dma_dump_state = &dma_dump_state; 136 - esp->dma_init_read = &dma_init_read; 137 - esp->dma_init_write = &dma_init_write; 138 - esp->dma_ints_off = &dma_ints_off; 139 - esp->dma_ints_on = &dma_ints_on; 140 - esp->dma_irq_p = &dma_irq_p; 141 - esp->dma_ports_p = &dma_ports_p; 142 - esp->dma_setup = &dma_setup; 143 - 144 - /* Optional functions */ 145 - esp->dma_barrier = 0; 146 - esp->dma_drain = &dma_drain; 147 - esp->dma_invalidate = 0; 148 - esp->dma_irq_entry = 0; 149 - esp->dma_irq_exit = 0; 150 - esp->dma_poll = 0; 151 - esp->dma_reset = 0; 152 - esp->dma_led_off = 0; 153 - esp->dma_led_on = 0; 154 - 155 - /* virtual DMA functions */ 156 - esp->dma_mmu_get_scsi_one = &dma_mmu_get_scsi_one; 157 - esp->dma_mmu_get_scsi_sgl = 
&dma_mmu_get_scsi_sgl; 158 - esp->dma_mmu_release_scsi_one = 0; 159 - esp->dma_mmu_release_scsi_sgl = 0; 160 - esp->dma_advance_sg = &dma_advance_sg; 161 - 162 - 163 - /* SCSI chip speed */ 164 - esp->cfreq = 25000000; 165 - 166 - esp->dregs = 0; 167 - 168 - /* ESP register base */ 169 - esp->eregs = (void *)CKSEG1ADDR(dec_kn_slot_base + 170 - IOASIC_SCSI); 171 - 172 - /* Set the command buffer */ 173 - esp->esp_command = (volatile unsigned char *) cmd_buffer; 174 - 175 - /* get virtual dma address for command buffer */ 176 - esp->esp_command_dvma = virt_to_phys(cmd_buffer); 177 - 178 - esp->irq = dec_interrupt[DEC_IRQ_ASC]; 179 - 180 - esp->scsi_id = 7; 181 - 182 - /* Check for differential SCSI-bus */ 183 - esp->diff = 0; 184 - 185 - err = request_irq(esp->irq, esp_intr, IRQF_DISABLED, 186 - "ncr53c94", esp->ehost); 187 - if (err) 188 - goto err_alloc; 189 - err = request_irq(dec_interrupt[DEC_IRQ_ASC_MERR], 190 - scsi_dma_merr_int, IRQF_DISABLED, 191 - "ncr53c94 error", esp->ehost); 192 - if (err) 193 - goto err_irq; 194 - err = request_irq(dec_interrupt[DEC_IRQ_ASC_ERR], 195 - scsi_dma_err_int, IRQF_DISABLED, 196 - "ncr53c94 overrun", esp->ehost); 197 - if (err) 198 - goto err_irq_merr; 199 - err = request_irq(dec_interrupt[DEC_IRQ_ASC_DMA], scsi_dma_int, 200 - IRQF_DISABLED, "ncr53c94 dma", esp->ehost); 201 - if (err) 202 - goto err_irq_err; 203 - 204 - esp_initialize(esp); 205 - 206 - err = scsi_add_host(esp->ehost, NULL); 207 - if (err) { 208 - printk(KERN_ERR "ESP: Unable to register adapter\n"); 209 - goto err_irq_dma; 210 - } 211 - 212 - scsi_scan_host(esp->ehost); 213 - 214 - dec_esp_platform = esp; 215 - } 216 - 217 - return 0; 218 - 219 - err_irq_dma: 220 - free_irq(dec_interrupt[DEC_IRQ_ASC_DMA], esp->ehost); 221 - err_irq_err: 222 - free_irq(dec_interrupt[DEC_IRQ_ASC_ERR], esp->ehost); 223 - err_irq_merr: 224 - free_irq(dec_interrupt[DEC_IRQ_ASC_MERR], esp->ehost); 225 - err_irq: 226 - free_irq(esp->irq, esp->ehost); 227 - err_alloc: 228 - 
esp_deallocate(esp); 229 - scsi_host_put(esp->ehost); 230 - return err; 231 - } 232 - 233 - static int __init dec_esp_probe(struct device *dev) 234 - { 235 - struct NCR_ESP *esp; 236 - resource_size_t start, len; 237 - int err; 238 - 239 - esp = esp_allocate(&dec_esp_template, NULL, 1); 240 - 241 - dev_set_drvdata(dev, esp); 242 - 243 - start = to_tc_dev(dev)->resource.start; 244 - len = to_tc_dev(dev)->resource.end - start + 1; 245 - 246 - if (!request_mem_region(start, len, dev->bus_id)) { 247 - printk(KERN_ERR "%s: Unable to reserve MMIO resource\n", 248 - dev->bus_id); 249 - err = -EBUSY; 250 - goto err_alloc; 251 - } 252 - 253 - /* Store base addr into esp struct. */ 254 - esp->slot = start; 255 - 256 - esp->dregs = 0; 257 - esp->eregs = (void *)CKSEG1ADDR(start + DEC_SCSI_SREG); 258 - esp->do_pio_cmds = 1; 259 - 260 - /* Set the command buffer. */ 261 - esp->esp_command = (volatile unsigned char *)pmaz_cmd_buffer; 262 - 263 - /* Get virtual dma address for command buffer. */ 264 - esp->esp_command_dvma = virt_to_phys(pmaz_cmd_buffer); 265 - 266 - esp->cfreq = tc_get_speed(to_tc_dev(dev)->bus); 267 - 268 - esp->irq = to_tc_dev(dev)->interrupt; 269 - 270 - /* Required functions. */ 271 - esp->dma_bytes_sent = &dma_bytes_sent; 272 - esp->dma_can_transfer = &dma_can_transfer; 273 - esp->dma_dump_state = &dma_dump_state; 274 - esp->dma_init_read = &pmaz_dma_init_read; 275 - esp->dma_init_write = &pmaz_dma_init_write; 276 - esp->dma_ints_off = &pmaz_dma_ints_off; 277 - esp->dma_ints_on = &pmaz_dma_ints_on; 278 - esp->dma_irq_p = &dma_irq_p; 279 - esp->dma_ports_p = &dma_ports_p; 280 - esp->dma_setup = &pmaz_dma_setup; 281 - 282 - /* Optional functions. 
*/ 283 - esp->dma_barrier = 0; 284 - esp->dma_drain = &pmaz_dma_drain; 285 - esp->dma_invalidate = 0; 286 - esp->dma_irq_entry = 0; 287 - esp->dma_irq_exit = 0; 288 - esp->dma_poll = 0; 289 - esp->dma_reset = 0; 290 - esp->dma_led_off = 0; 291 - esp->dma_led_on = 0; 292 - 293 - esp->dma_mmu_get_scsi_one = pmaz_dma_mmu_get_scsi_one; 294 - esp->dma_mmu_get_scsi_sgl = 0; 295 - esp->dma_mmu_release_scsi_one = 0; 296 - esp->dma_mmu_release_scsi_sgl = 0; 297 - esp->dma_advance_sg = 0; 298 - 299 - err = request_irq(esp->irq, esp_intr, IRQF_DISABLED, "PMAZ_AA", 300 - esp->ehost); 301 - if (err) { 302 - printk(KERN_ERR "%s: Unable to get IRQ %d\n", 303 - dev->bus_id, esp->irq); 304 - goto err_resource; 305 - } 306 - 307 - esp->scsi_id = 7; 308 - esp->diff = 0; 309 - esp_initialize(esp); 310 - 311 - err = scsi_add_host(esp->ehost, dev); 312 - if (err) { 313 - printk(KERN_ERR "%s: Unable to register adapter\n", 314 - dev->bus_id); 315 - goto err_irq; 316 - } 317 - 318 - scsi_scan_host(esp->ehost); 319 - 320 - return 0; 321 - 322 - err_irq: 323 - free_irq(esp->irq, esp->ehost); 324 - 325 - err_resource: 326 - release_mem_region(start, len); 327 - 328 - err_alloc: 329 - esp_deallocate(esp); 330 - scsi_host_put(esp->ehost); 331 - return err; 332 - } 333 - 334 - static void __exit dec_esp_platform_remove(void) 335 - { 336 - struct NCR_ESP *esp = dec_esp_platform; 337 - 338 - free_irq(esp->irq, esp->ehost); 339 - esp_deallocate(esp); 340 - scsi_host_put(esp->ehost); 341 - dec_esp_platform = NULL; 342 - } 343 - 344 - static void __exit dec_esp_remove(struct device *dev) 345 - { 346 - struct NCR_ESP *esp = dev_get_drvdata(dev); 347 - 348 - free_irq(esp->irq, esp->ehost); 349 - esp_deallocate(esp); 350 - scsi_host_put(esp->ehost); 351 - } 352 - 353 - 354 - /************************************************************* DMA Functions */ 355 - static irqreturn_t scsi_dma_merr_int(int irq, void *dev_id) 356 - { 357 - printk("Got unexpected SCSI DMA Interrupt! 
< "); 358 - printk("SCSI_DMA_MEMRDERR "); 359 - printk(">\n"); 360 - 361 - return IRQ_HANDLED; 362 - } 363 - 364 - static irqreturn_t scsi_dma_err_int(int irq, void *dev_id) 365 - { 366 - /* empty */ 367 - 368 - return IRQ_HANDLED; 369 - } 370 - 371 - static irqreturn_t scsi_dma_int(int irq, void *dev_id) 372 - { 373 - u32 scsi_next_ptr; 374 - 375 - scsi_next_ptr = ioasic_read(IO_REG_SCSI_DMA_P); 376 - 377 - /* next page */ 378 - scsi_next_ptr = (((scsi_next_ptr >> 3) + PAGE_SIZE) & PAGE_MASK) << 3; 379 - ioasic_write(IO_REG_SCSI_DMA_BP, scsi_next_ptr); 380 - fast_iob(); 381 - 382 - return IRQ_HANDLED; 383 - } 384 - 385 - static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count) 386 - { 387 - return fifo_count; 388 - } 389 - 390 - static void dma_drain(struct NCR_ESP *esp) 391 - { 392 - u32 nw, data0, data1, scsi_data_ptr; 393 - u16 *p; 394 - 395 - nw = ioasic_read(IO_REG_SCSI_SCR); 396 - 397 - /* 398 - * Is there something in the dma buffers left? 399 - */ 400 - if (nw) { 401 - scsi_data_ptr = ioasic_read(IO_REG_SCSI_DMA_P) >> 3; 402 - p = phys_to_virt(scsi_data_ptr); 403 - switch (nw) { 404 - case 1: 405 - data0 = ioasic_read(IO_REG_SCSI_SDR0); 406 - p[0] = data0 & 0xffff; 407 - break; 408 - case 2: 409 - data0 = ioasic_read(IO_REG_SCSI_SDR0); 410 - p[0] = data0 & 0xffff; 411 - p[1] = (data0 >> 16) & 0xffff; 412 - break; 413 - case 3: 414 - data0 = ioasic_read(IO_REG_SCSI_SDR0); 415 - data1 = ioasic_read(IO_REG_SCSI_SDR1); 416 - p[0] = data0 & 0xffff; 417 - p[1] = (data0 >> 16) & 0xffff; 418 - p[2] = data1 & 0xffff; 419 - break; 420 - default: 421 - printk("Strange: %d words in dma buffer left\n", nw); 422 - break; 423 - } 424 - } 425 - } 426 - 427 - static int dma_can_transfer(struct NCR_ESP *esp, struct scsi_cmnd * sp) 428 - { 429 - return sp->SCp.this_residual; 430 - } 431 - 432 - static void dma_dump_state(struct NCR_ESP *esp) 433 - { 434 - } 435 - 436 - static void dma_init_read(struct NCR_ESP *esp, u32 vaddress, int length) 437 - { 438 - u32 
scsi_next_ptr, ioasic_ssr; 439 - unsigned long flags; 440 - 441 - if (vaddress & 3) 442 - panic("dec_esp.c: unable to handle partial word transfers, yet..."); 443 - 444 - dma_cache_wback_inv((unsigned long) phys_to_virt(vaddress), length); 445 - 446 - spin_lock_irqsave(&ioasic_ssr_lock, flags); 447 - 448 - fast_mb(); 449 - ioasic_ssr = ioasic_read(IO_REG_SSR); 450 - 451 - ioasic_ssr &= ~IO_SSR_SCSI_DMA_EN; 452 - ioasic_write(IO_REG_SSR, ioasic_ssr); 453 - 454 - fast_wmb(); 455 - ioasic_write(IO_REG_SCSI_SCR, 0); 456 - ioasic_write(IO_REG_SCSI_DMA_P, vaddress << 3); 457 - 458 - /* prepare for next page */ 459 - scsi_next_ptr = ((vaddress + PAGE_SIZE) & PAGE_MASK) << 3; 460 - ioasic_write(IO_REG_SCSI_DMA_BP, scsi_next_ptr); 461 - 462 - ioasic_ssr |= (IO_SSR_SCSI_DMA_DIR | IO_SSR_SCSI_DMA_EN); 463 - fast_wmb(); 464 - ioasic_write(IO_REG_SSR, ioasic_ssr); 465 - 466 - fast_iob(); 467 - spin_unlock_irqrestore(&ioasic_ssr_lock, flags); 468 - } 469 - 470 - static void dma_init_write(struct NCR_ESP *esp, u32 vaddress, int length) 471 - { 472 - u32 scsi_next_ptr, ioasic_ssr; 473 - unsigned long flags; 474 - 475 - if (vaddress & 3) 476 - panic("dec_esp.c: unable to handle partial word transfers, yet..."); 477 - 478 - dma_cache_wback_inv((unsigned long) phys_to_virt(vaddress), length); 479 - 480 - spin_lock_irqsave(&ioasic_ssr_lock, flags); 481 - 482 - fast_mb(); 483 - ioasic_ssr = ioasic_read(IO_REG_SSR); 484 - 485 - ioasic_ssr &= ~(IO_SSR_SCSI_DMA_DIR | IO_SSR_SCSI_DMA_EN); 486 - ioasic_write(IO_REG_SSR, ioasic_ssr); 487 - 488 - fast_wmb(); 489 - ioasic_write(IO_REG_SCSI_SCR, 0); 490 - ioasic_write(IO_REG_SCSI_DMA_P, vaddress << 3); 491 - 492 - /* prepare for next page */ 493 - scsi_next_ptr = ((vaddress + PAGE_SIZE) & PAGE_MASK) << 3; 494 - ioasic_write(IO_REG_SCSI_DMA_BP, scsi_next_ptr); 495 - 496 - ioasic_ssr |= IO_SSR_SCSI_DMA_EN; 497 - fast_wmb(); 498 - ioasic_write(IO_REG_SSR, ioasic_ssr); 499 - 500 - fast_iob(); 501 - spin_unlock_irqrestore(&ioasic_ssr_lock, flags); 
502 - } 503 - 504 - static void dma_ints_off(struct NCR_ESP *esp) 505 - { 506 - disable_irq(dec_interrupt[DEC_IRQ_ASC_DMA]); 507 - } 508 - 509 - static void dma_ints_on(struct NCR_ESP *esp) 510 - { 511 - enable_irq(dec_interrupt[DEC_IRQ_ASC_DMA]); 512 - } 513 - 514 - static int dma_irq_p(struct NCR_ESP *esp) 515 - { 516 - return (esp->eregs->esp_status & ESP_STAT_INTR); 517 - } 518 - 519 - static int dma_ports_p(struct NCR_ESP *esp) 520 - { 521 - /* 522 - * FIXME: what's this good for? 523 - */ 524 - return 1; 525 - } 526 - 527 - static void dma_setup(struct NCR_ESP *esp, u32 addr, int count, int write) 528 - { 529 - /* 530 - * DMA_ST_WRITE means "move data from device to memory" 531 - * so when (write) is true, it actually means READ! 532 - */ 533 - if (write) 534 - dma_init_read(esp, addr, count); 535 - else 536 - dma_init_write(esp, addr, count); 537 - } 538 - 539 - static void dma_mmu_get_scsi_one(struct NCR_ESP *esp, struct scsi_cmnd * sp) 540 - { 541 - sp->SCp.ptr = (char *)virt_to_phys(sp->request_buffer); 542 - } 543 - 544 - static void dma_mmu_get_scsi_sgl(struct NCR_ESP *esp, struct scsi_cmnd * sp) 545 - { 546 - int sz = sp->SCp.buffers_residual; 547 - struct scatterlist *sg = sp->SCp.buffer; 548 - 549 - while (sz >= 0) { 550 - sg[sz].dma_address = page_to_phys(sg[sz].page) + sg[sz].offset; 551 - sz--; 552 - } 553 - sp->SCp.ptr = (char *)(sp->SCp.buffer->dma_address); 554 - } 555 - 556 - static void dma_advance_sg(struct scsi_cmnd * sp) 557 - { 558 - sp->SCp.ptr = (char *)(sp->SCp.buffer->dma_address); 559 - } 560 - 561 - static void pmaz_dma_drain(struct NCR_ESP *esp) 562 - { 563 - memcpy(phys_to_virt(esp_virt_buffer), 564 - (void *)CKSEG1ADDR(esp->slot + DEC_SCSI_SRAM + 565 - ESP_TGT_DMA_SIZE), 566 - scsi_current_length); 567 - } 568 - 569 - static void pmaz_dma_init_read(struct NCR_ESP *esp, u32 vaddress, int length) 570 - { 571 - volatile u32 *dmareg = 572 - (volatile u32 *)CKSEG1ADDR(esp->slot + DEC_SCSI_DMAREG); 573 - 574 - if (length > 
ESP_TGT_DMA_SIZE) 575 - length = ESP_TGT_DMA_SIZE; 576 - 577 - *dmareg = TC_ESP_DMA_ADDR(ESP_TGT_DMA_SIZE); 578 - 579 - iob(); 580 - 581 - esp_virt_buffer = vaddress; 582 - scsi_current_length = length; 583 - } 584 - 585 - static void pmaz_dma_init_write(struct NCR_ESP *esp, u32 vaddress, int length) 586 - { 587 - volatile u32 *dmareg = 588 - (volatile u32 *)CKSEG1ADDR(esp->slot + DEC_SCSI_DMAREG); 589 - 590 - memcpy((void *)CKSEG1ADDR(esp->slot + DEC_SCSI_SRAM + 591 - ESP_TGT_DMA_SIZE), 592 - phys_to_virt(vaddress), length); 593 - 594 - wmb(); 595 - *dmareg = TC_ESP_DMAR_WRITE | TC_ESP_DMA_ADDR(ESP_TGT_DMA_SIZE); 596 - 597 - iob(); 598 - } 599 - 600 - static void pmaz_dma_ints_off(struct NCR_ESP *esp) 601 - { 602 - } 603 - 604 - static void pmaz_dma_ints_on(struct NCR_ESP *esp) 605 - { 606 - } 607 - 608 - static void pmaz_dma_setup(struct NCR_ESP *esp, u32 addr, int count, int write) 609 - { 610 - /* 611 - * DMA_ST_WRITE means "move data from device to memory" 612 - * so when (write) is true, it actually means READ! 
613 - */ 614 - if (write) 615 - pmaz_dma_init_read(esp, addr, count); 616 - else 617 - pmaz_dma_init_write(esp, addr, count); 618 - } 619 - 620 - static void pmaz_dma_mmu_get_scsi_one(struct NCR_ESP *esp, struct scsi_cmnd * sp) 621 - { 622 - sp->SCp.ptr = (char *)virt_to_phys(sp->request_buffer); 623 - } 624 - 625 - 626 - #ifdef CONFIG_TC 627 - static int __init dec_esp_tc_probe(struct device *dev); 628 - static int __exit dec_esp_tc_remove(struct device *dev); 629 - 630 - static const struct tc_device_id dec_esp_tc_table[] = { 631 - { "DEC ", "PMAZ-AA " }, 632 - { } 633 - }; 634 - MODULE_DEVICE_TABLE(tc, dec_esp_tc_table); 635 - 636 - static struct tc_driver dec_esp_tc_driver = { 637 - .id_table = dec_esp_tc_table, 638 - .driver = { 639 - .name = "dec_esp", 640 - .bus = &tc_bus_type, 641 - .probe = dec_esp_tc_probe, 642 - .remove = __exit_p(dec_esp_tc_remove), 643 - }, 644 - }; 645 - 646 - static int __init dec_esp_tc_probe(struct device *dev) 647 - { 648 - int status = dec_esp_probe(dev); 649 - if (!status) 650 - get_device(dev); 651 - return status; 652 - } 653 - 654 - static int __exit dec_esp_tc_remove(struct device *dev) 655 - { 656 - put_device(dev); 657 - dec_esp_remove(dev); 658 - return 0; 659 - } 660 - #endif 661 - 662 - static int __init dec_esp_init(void) 663 - { 664 - int status; 665 - 666 - status = tc_register_driver(&dec_esp_tc_driver); 667 - if (!status) 668 - dec_esp_platform_probe(); 669 - 670 - if (nesps) { 671 - pr_info("ESP: Total of %d ESP hosts found, " 672 - "%d actually in use.\n", nesps, esps_in_use); 673 - esps_running = esps_in_use; 674 - } 675 - 676 - return status; 677 - } 678 - 679 - static void __exit dec_esp_exit(void) 680 - { 681 - dec_esp_platform_remove(); 682 - tc_unregister_driver(&dec_esp_tc_driver); 683 - } 684 - 685 - 686 - module_init(dec_esp_init); 687 - module_exit(dec_esp_exit);
-421
drivers/scsi/fastlane.c
··· 1 - /* fastlane.c: Driver for Phase5's Fastlane SCSI Controller. 2 - * 3 - * Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk) 4 - * 5 - * This driver is based on the CyberStorm driver, hence the occasional 6 - * reference to CyberStorm. 7 - * 8 - * Betatesting & crucial adjustments by 9 - * Patrik Rak (prak3264@ss1000.ms.mff.cuni.cz) 10 - * 11 - */ 12 - 13 - /* TODO: 14 - * 15 - * o According to the doc from laire, it is required to reset the DMA when 16 - * the transfer is done. ATM we reset DMA just before every new 17 - * dma_init_(read|write). 18 - * 19 - * 1) Figure out how to make a cleaner merge with the sparc driver with regard 20 - * to the caches and the Sparc MMU mapping. 21 - * 2) Make as few routines required outside the generic driver. A lot of the 22 - * routines in this file used to be inline! 23 - */ 24 - 25 - #include <linux/module.h> 26 - 27 - #include <linux/init.h> 28 - #include <linux/kernel.h> 29 - #include <linux/delay.h> 30 - #include <linux/types.h> 31 - #include <linux/string.h> 32 - #include <linux/slab.h> 33 - #include <linux/blkdev.h> 34 - #include <linux/proc_fs.h> 35 - #include <linux/stat.h> 36 - #include <linux/interrupt.h> 37 - 38 - #include "scsi.h" 39 - #include <scsi/scsi_host.h> 40 - #include "NCR53C9x.h" 41 - 42 - #include <linux/zorro.h> 43 - #include <asm/irq.h> 44 - 45 - #include <asm/amigaints.h> 46 - #include <asm/amigahw.h> 47 - 48 - #include <asm/pgtable.h> 49 - 50 - /* Such day has just come... 
*/ 51 - #if 0 52 - /* Let this defined unless you really need to enable DMA IRQ one day */ 53 - #define NODMAIRQ 54 - #endif 55 - 56 - /* The controller registers can be found in the Z2 config area at these 57 - * offsets: 58 - */ 59 - #define FASTLANE_ESP_ADDR 0x1000001 60 - #define FASTLANE_DMA_ADDR 0x1000041 61 - 62 - 63 - /* The Fastlane DMA interface */ 64 - struct fastlane_dma_registers { 65 - volatile unsigned char cond_reg; /* DMA status (ro) [0x0000] */ 66 - #define ctrl_reg cond_reg /* DMA control (wo) [0x0000] */ 67 - unsigned char dmapad1[0x3f]; 68 - volatile unsigned char clear_strobe; /* DMA clear (wo) [0x0040] */ 69 - }; 70 - 71 - 72 - /* DMA status bits */ 73 - #define FASTLANE_DMA_MINT 0x80 74 - #define FASTLANE_DMA_IACT 0x40 75 - #define FASTLANE_DMA_CREQ 0x20 76 - 77 - /* DMA control bits */ 78 - #define FASTLANE_DMA_FCODE 0xa0 79 - #define FASTLANE_DMA_MASK 0xf3 80 - #define FASTLANE_DMA_LED 0x10 /* HD led control 1 = on */ 81 - #define FASTLANE_DMA_WRITE 0x08 /* 1 = write */ 82 - #define FASTLANE_DMA_ENABLE 0x04 /* Enable DMA */ 83 - #define FASTLANE_DMA_EDI 0x02 /* Enable DMA IRQ ? 
*/ 84 - #define FASTLANE_DMA_ESI 0x01 /* Enable SCSI IRQ */ 85 - 86 - static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count); 87 - static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp); 88 - static void dma_dump_state(struct NCR_ESP *esp); 89 - static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length); 90 - static void dma_init_write(struct NCR_ESP *esp, __u32 vaddr, int length); 91 - static void dma_ints_off(struct NCR_ESP *esp); 92 - static void dma_ints_on(struct NCR_ESP *esp); 93 - static int dma_irq_p(struct NCR_ESP *esp); 94 - static void dma_irq_exit(struct NCR_ESP *esp); 95 - static void dma_led_off(struct NCR_ESP *esp); 96 - static void dma_led_on(struct NCR_ESP *esp); 97 - static int dma_ports_p(struct NCR_ESP *esp); 98 - static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write); 99 - 100 - static unsigned char ctrl_data = 0; /* Keep backup of the stuff written 101 - * to ctrl_reg. Always write a copy 102 - * to this register when writing to 103 - * the hardware register! 104 - */ 105 - 106 - static volatile unsigned char cmd_buffer[16]; 107 - /* This is where all commands are put 108 - * before they are transferred to the ESP chip 109 - * via PIO. 
110 - */ 111 - 112 - static inline void dma_clear(struct NCR_ESP *esp) 113 - { 114 - struct fastlane_dma_registers *dregs = 115 - (struct fastlane_dma_registers *) (esp->dregs); 116 - unsigned long *t; 117 - 118 - ctrl_data = (ctrl_data & FASTLANE_DMA_MASK); 119 - dregs->ctrl_reg = ctrl_data; 120 - 121 - t = (unsigned long *)(esp->edev); 122 - 123 - dregs->clear_strobe = 0; 124 - *t = 0 ; 125 - } 126 - 127 - /***************************************************************** Detection */ 128 - int __init fastlane_esp_detect(struct scsi_host_template *tpnt) 129 - { 130 - struct NCR_ESP *esp; 131 - struct zorro_dev *z = NULL; 132 - unsigned long address; 133 - 134 - if ((z = zorro_find_device(ZORRO_PROD_PHASE5_BLIZZARD_1230_II_FASTLANE_Z3_CYBERSCSI_CYBERSTORM060, z))) { 135 - unsigned long board = z->resource.start; 136 - if (request_mem_region(board+FASTLANE_ESP_ADDR, 137 - sizeof(struct ESP_regs), "NCR53C9x")) { 138 - /* Check if this is really a fastlane controller. The problem 139 - * is that also the cyberstorm and blizzard controllers use 140 - * this ID value. 
Fortunately only Fastlane maps in Z3 space 141 - */ 142 - if (board < 0x1000000) { 143 - goto err_release; 144 - } 145 - esp = esp_allocate(tpnt, (void *)board + FASTLANE_ESP_ADDR, 0); 146 - 147 - /* Do command transfer with programmed I/O */ 148 - esp->do_pio_cmds = 1; 149 - 150 - /* Required functions */ 151 - esp->dma_bytes_sent = &dma_bytes_sent; 152 - esp->dma_can_transfer = &dma_can_transfer; 153 - esp->dma_dump_state = &dma_dump_state; 154 - esp->dma_init_read = &dma_init_read; 155 - esp->dma_init_write = &dma_init_write; 156 - esp->dma_ints_off = &dma_ints_off; 157 - esp->dma_ints_on = &dma_ints_on; 158 - esp->dma_irq_p = &dma_irq_p; 159 - esp->dma_ports_p = &dma_ports_p; 160 - esp->dma_setup = &dma_setup; 161 - 162 - /* Optional functions */ 163 - esp->dma_barrier = 0; 164 - esp->dma_drain = 0; 165 - esp->dma_invalidate = 0; 166 - esp->dma_irq_entry = 0; 167 - esp->dma_irq_exit = &dma_irq_exit; 168 - esp->dma_led_on = &dma_led_on; 169 - esp->dma_led_off = &dma_led_off; 170 - esp->dma_poll = 0; 171 - esp->dma_reset = 0; 172 - 173 - /* Initialize the portBits (enable IRQs) */ 174 - ctrl_data = (FASTLANE_DMA_FCODE | 175 - #ifndef NODMAIRQ 176 - FASTLANE_DMA_EDI | 177 - #endif 178 - FASTLANE_DMA_ESI); 179 - 180 - 181 - /* SCSI chip clock */ 182 - esp->cfreq = 40000000; 183 - 184 - 185 - /* Map the physical address space into virtual kernel space */ 186 - address = (unsigned long) 187 - z_ioremap(board, z->resource.end-board+1); 188 - 189 - if(!address){ 190 - printk("Could not remap Fastlane controller memory!"); 191 - goto err_unregister; 192 - } 193 - 194 - 195 - /* The DMA registers on the Fastlane are mapped 196 - * relative to the device (i.e. in the same Zorro 197 - * I/O block). 
198 - */ 199 - esp->dregs = (void *)(address + FASTLANE_DMA_ADDR); 200 - 201 - /* ESP register base */ 202 - esp->eregs = (struct ESP_regs *)(address + FASTLANE_ESP_ADDR); 203 - 204 - /* Board base */ 205 - esp->edev = (void *) address; 206 - 207 - /* Set the command buffer */ 208 - esp->esp_command = cmd_buffer; 209 - esp->esp_command_dvma = virt_to_bus((void *)cmd_buffer); 210 - 211 - esp->irq = IRQ_AMIGA_PORTS; 212 - esp->slot = board+FASTLANE_ESP_ADDR; 213 - if (request_irq(IRQ_AMIGA_PORTS, esp_intr, IRQF_SHARED, 214 - "Fastlane SCSI", esp->ehost)) { 215 - printk(KERN_WARNING "Fastlane: Could not get IRQ%d, aborting.\n", IRQ_AMIGA_PORTS); 216 - goto err_unmap; 217 - } 218 - 219 - /* Controller ID */ 220 - esp->scsi_id = 7; 221 - 222 - /* We don't have a differential SCSI-bus. */ 223 - esp->diff = 0; 224 - 225 - dma_clear(esp); 226 - esp_initialize(esp); 227 - 228 - printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps, esps_in_use); 229 - esps_running = esps_in_use; 230 - return esps_in_use; 231 - } 232 - } 233 - return 0; 234 - 235 - err_unmap: 236 - z_iounmap((void *)address); 237 - err_unregister: 238 - scsi_unregister (esp->ehost); 239 - err_release: 240 - release_mem_region(z->resource.start+FASTLANE_ESP_ADDR, 241 - sizeof(struct ESP_regs)); 242 - return 0; 243 - } 244 - 245 - 246 - /************************************************************* DMA Functions */ 247 - static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count) 248 - { 249 - /* Since the Fastlane DMA is fully dedicated to the ESP chip, 250 - * the number of bytes sent (to the ESP chip) equals the number 251 - * of bytes in the FIFO - there is no buffering in the DMA controller. 252 - * XXXX Do I read this right? It is from host to ESP, right? 
253 - */ 254 - return fifo_count; 255 - } 256 - 257 - static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp) 258 - { 259 - unsigned long sz = sp->SCp.this_residual; 260 - if(sz > 0xfffc) 261 - sz = 0xfffc; 262 - return sz; 263 - } 264 - 265 - static void dma_dump_state(struct NCR_ESP *esp) 266 - { 267 - ESPLOG(("esp%d: dma -- cond_reg<%02x>\n", 268 - esp->esp_id, ((struct fastlane_dma_registers *) 269 - (esp->dregs))->cond_reg)); 270 - ESPLOG(("intreq:<%04x>, intena:<%04x>\n", 271 - amiga_custom.intreqr, amiga_custom.intenar)); 272 - } 273 - 274 - static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length) 275 - { 276 - struct fastlane_dma_registers *dregs = 277 - (struct fastlane_dma_registers *) (esp->dregs); 278 - unsigned long *t; 279 - 280 - cache_clear(addr, length); 281 - 282 - dma_clear(esp); 283 - 284 - t = (unsigned long *)((addr & 0x00ffffff) + esp->edev); 285 - 286 - dregs->clear_strobe = 0; 287 - *t = addr; 288 - 289 - ctrl_data = (ctrl_data & FASTLANE_DMA_MASK) | FASTLANE_DMA_ENABLE; 290 - dregs->ctrl_reg = ctrl_data; 291 - } 292 - 293 - static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length) 294 - { 295 - struct fastlane_dma_registers *dregs = 296 - (struct fastlane_dma_registers *) (esp->dregs); 297 - unsigned long *t; 298 - 299 - cache_push(addr, length); 300 - 301 - dma_clear(esp); 302 - 303 - t = (unsigned long *)((addr & 0x00ffffff) + (esp->edev)); 304 - 305 - dregs->clear_strobe = 0; 306 - *t = addr; 307 - 308 - ctrl_data = ((ctrl_data & FASTLANE_DMA_MASK) | 309 - FASTLANE_DMA_ENABLE | 310 - FASTLANE_DMA_WRITE); 311 - dregs->ctrl_reg = ctrl_data; 312 - } 313 - 314 - 315 - static void dma_ints_off(struct NCR_ESP *esp) 316 - { 317 - disable_irq(esp->irq); 318 - } 319 - 320 - static void dma_ints_on(struct NCR_ESP *esp) 321 - { 322 - enable_irq(esp->irq); 323 - } 324 - 325 - static void dma_irq_exit(struct NCR_ESP *esp) 326 - { 327 - struct fastlane_dma_registers *dregs = 328 - (struct fastlane_dma_registers *) 
(esp->dregs); 329 - 330 - dregs->ctrl_reg = ctrl_data & ~(FASTLANE_DMA_EDI|FASTLANE_DMA_ESI); 331 - #ifdef __mc68000__ 332 - nop(); 333 - #endif 334 - dregs->ctrl_reg = ctrl_data; 335 - } 336 - 337 - static int dma_irq_p(struct NCR_ESP *esp) 338 - { 339 - struct fastlane_dma_registers *dregs = 340 - (struct fastlane_dma_registers *) (esp->dregs); 341 - unsigned char dma_status; 342 - 343 - dma_status = dregs->cond_reg; 344 - 345 - if(dma_status & FASTLANE_DMA_IACT) 346 - return 0; /* not our IRQ */ 347 - 348 - /* Return non-zero if ESP requested IRQ */ 349 - return ( 350 - #ifndef NODMAIRQ 351 - (dma_status & FASTLANE_DMA_CREQ) && 352 - #endif 353 - (!(dma_status & FASTLANE_DMA_MINT)) && 354 - (esp_read(((struct ESP_regs *) (esp->eregs))->esp_status) & ESP_STAT_INTR)); 355 - } 356 - 357 - static void dma_led_off(struct NCR_ESP *esp) 358 - { 359 - ctrl_data &= ~FASTLANE_DMA_LED; 360 - ((struct fastlane_dma_registers *)(esp->dregs))->ctrl_reg = ctrl_data; 361 - } 362 - 363 - static void dma_led_on(struct NCR_ESP *esp) 364 - { 365 - ctrl_data |= FASTLANE_DMA_LED; 366 - ((struct fastlane_dma_registers *)(esp->dregs))->ctrl_reg = ctrl_data; 367 - } 368 - 369 - static int dma_ports_p(struct NCR_ESP *esp) 370 - { 371 - return ((amiga_custom.intenar) & IF_PORTS); 372 - } 373 - 374 - static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write) 375 - { 376 - /* On the Sparc, DMA_ST_WRITE means "move data from device to memory" 377 - * so when (write) is true, it actually means READ! 
378 - */ 379 - if(write){ 380 - dma_init_read(esp, addr, count); 381 - } else { 382 - dma_init_write(esp, addr, count); 383 - } 384 - } 385 - 386 - #define HOSTS_C 387 - 388 - int fastlane_esp_release(struct Scsi_Host *instance) 389 - { 390 - #ifdef MODULE 391 - unsigned long address = (unsigned long)((struct NCR_ESP *)instance->hostdata)->edev; 392 - esp_deallocate((struct NCR_ESP *)instance->hostdata); 393 - esp_release(); 394 - release_mem_region(address, sizeof(struct ESP_regs)); 395 - free_irq(IRQ_AMIGA_PORTS, esp_intr); 396 - #endif 397 - return 1; 398 - } 399 - 400 - 401 - static struct scsi_host_template driver_template = { 402 - .proc_name = "esp-fastlane", 403 - .proc_info = esp_proc_info, 404 - .name = "Fastlane SCSI", 405 - .detect = fastlane_esp_detect, 406 - .slave_alloc = esp_slave_alloc, 407 - .slave_destroy = esp_slave_destroy, 408 - .release = fastlane_esp_release, 409 - .queuecommand = esp_queue, 410 - .eh_abort_handler = esp_abort, 411 - .eh_bus_reset_handler = esp_reset, 412 - .can_queue = 7, 413 - .this_id = 7, 414 - .sg_tablesize = SG_ALL, 415 - .cmd_per_lun = 1, 416 - .use_clustering = ENABLE_CLUSTERING 417 - }; 418 - 419 - #include "scsi_module.c" 420 - 421 - MODULE_LICENSE("GPL");
+30 -27
drivers/scsi/iscsi_tcp.c
··· 629 629 int rc; 630 630 631 631 if (tcp_conn->in.datalen) { 632 - printk(KERN_ERR "iscsi_tcp: invalid R2t with datalen %d\n", 633 - tcp_conn->in.datalen); 632 + iscsi_conn_printk(KERN_ERR, conn, 633 + "invalid R2t with datalen %d\n", 634 + tcp_conn->in.datalen); 634 635 return ISCSI_ERR_DATALEN; 635 636 } 636 637 ··· 645 644 iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr); 646 645 647 646 if (!ctask->sc || session->state != ISCSI_STATE_LOGGED_IN) { 648 - printk(KERN_INFO "iscsi_tcp: dropping R2T itt %d in " 649 - "recovery...\n", ctask->itt); 647 + iscsi_conn_printk(KERN_INFO, conn, 648 + "dropping R2T itt %d in recovery.\n", 649 + ctask->itt); 650 650 return 0; 651 651 } 652 652 ··· 657 655 r2t->exp_statsn = rhdr->statsn; 658 656 r2t->data_length = be32_to_cpu(rhdr->data_length); 659 657 if (r2t->data_length == 0) { 660 - printk(KERN_ERR "iscsi_tcp: invalid R2T with zero data len\n"); 658 + iscsi_conn_printk(KERN_ERR, conn, 659 + "invalid R2T with zero data len\n"); 661 660 __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t, 662 661 sizeof(void*)); 663 662 return ISCSI_ERR_DATALEN; ··· 671 668 672 669 r2t->data_offset = be32_to_cpu(rhdr->data_offset); 673 670 if (r2t->data_offset + r2t->data_length > scsi_bufflen(ctask->sc)) { 674 - printk(KERN_ERR "iscsi_tcp: invalid R2T with data len %u at " 675 - "offset %u and total length %d\n", r2t->data_length, 676 - r2t->data_offset, scsi_bufflen(ctask->sc)); 671 + iscsi_conn_printk(KERN_ERR, conn, 672 + "invalid R2T with data len %u at offset %u " 673 + "and total length %d\n", r2t->data_length, 674 + r2t->data_offset, scsi_bufflen(ctask->sc)); 677 675 __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t, 678 676 sizeof(void*)); 679 677 return ISCSI_ERR_DATALEN; ··· 740 736 /* verify PDU length */ 741 737 tcp_conn->in.datalen = ntoh24(hdr->dlength); 742 738 if (tcp_conn->in.datalen > conn->max_recv_dlength) { 743 - printk(KERN_ERR "iscsi_tcp: datalen %d > %d\n", 744 - tcp_conn->in.datalen, conn->max_recv_dlength); 
739 + iscsi_conn_printk(KERN_ERR, conn, 740 + "iscsi_tcp: datalen %d > %d\n", 741 + tcp_conn->in.datalen, conn->max_recv_dlength); 745 742 return ISCSI_ERR_DATALEN; 746 743 } 747 744 ··· 824 819 * For now we fail until we find a vendor that needs it 825 820 */ 826 821 if (ISCSI_DEF_MAX_RECV_SEG_LEN < tcp_conn->in.datalen) { 827 - printk(KERN_ERR "iscsi_tcp: received buffer of len %u " 828 - "but conn buffer is only %u (opcode %0x)\n", 829 - tcp_conn->in.datalen, 830 - ISCSI_DEF_MAX_RECV_SEG_LEN, opcode); 822 + iscsi_conn_printk(KERN_ERR, conn, 823 + "iscsi_tcp: received buffer of " 824 + "len %u but conn buffer is only %u " 825 + "(opcode %0x)\n", 826 + tcp_conn->in.datalen, 827 + ISCSI_DEF_MAX_RECV_SEG_LEN, opcode); 831 828 rc = ISCSI_ERR_PROTO; 832 829 break; 833 830 } ··· 1503 1496 tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0, 1504 1497 CRYPTO_ALG_ASYNC); 1505 1498 tcp_conn->tx_hash.flags = 0; 1506 - if (IS_ERR(tcp_conn->tx_hash.tfm)) { 1507 - printk(KERN_ERR "Could not create connection due to crc32c " 1508 - "loading error %ld. Make sure the crc32c module is " 1509 - "built as a module or into the kernel\n", 1510 - PTR_ERR(tcp_conn->tx_hash.tfm)); 1499 + if (IS_ERR(tcp_conn->tx_hash.tfm)) 1511 1500 goto free_tcp_conn; 1512 - } 1513 1501 1514 1502 tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0, 1515 1503 CRYPTO_ALG_ASYNC); 1516 1504 tcp_conn->rx_hash.flags = 0; 1517 - if (IS_ERR(tcp_conn->rx_hash.tfm)) { 1518 - printk(KERN_ERR "Could not create connection due to crc32c " 1519 - "loading error %ld. 
Make sure the crc32c module is " 1520 - "built as a module or into the kernel\n", 1521 - PTR_ERR(tcp_conn->rx_hash.tfm)); 1505 + if (IS_ERR(tcp_conn->rx_hash.tfm)) 1522 1506 goto free_tx_tfm; 1523 - } 1524 1507 1525 1508 return cls_conn; 1526 1509 1527 1510 free_tx_tfm: 1528 1511 crypto_free_hash(tcp_conn->tx_hash.tfm); 1529 1512 free_tcp_conn: 1513 + iscsi_conn_printk(KERN_ERR, conn, 1514 + "Could not create connection due to crc32c " 1515 + "loading error. Make sure the crc32c " 1516 + "module is built as a module or into the " 1517 + "kernel\n"); 1530 1518 kfree(tcp_conn); 1531 1519 tcp_conn_alloc_fail: 1532 1520 iscsi_conn_teardown(cls_conn); ··· 1629 1627 /* lookup for existing socket */ 1630 1628 sock = sockfd_lookup((int)transport_eph, &err); 1631 1629 if (!sock) { 1632 - printk(KERN_ERR "iscsi_tcp: sockfd_lookup failed %d\n", err); 1630 + iscsi_conn_printk(KERN_ERR, conn, 1631 + "sockfd_lookup failed %d\n", err); 1633 1632 return -EEXIST; 1634 1633 } 1635 1634 /*
+74 -63
drivers/scsi/libiscsi.c
··· 160 160 hdr->opcode = ISCSI_OP_SCSI_CMD; 161 161 hdr->flags = ISCSI_ATTR_SIMPLE; 162 162 int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun); 163 - hdr->itt = build_itt(ctask->itt, conn->id, session->age); 163 + hdr->itt = build_itt(ctask->itt, session->age); 164 164 hdr->data_length = cpu_to_be32(scsi_bufflen(sc)); 165 165 hdr->cmdsn = cpu_to_be32(session->cmdsn); 166 166 session->cmdsn++; ··· 416 416 417 417 if (datalen < 2) { 418 418 invalid_datalen: 419 - printk(KERN_ERR "iscsi: Got CHECK_CONDITION but " 420 - "invalid data buffer size of %d\n", datalen); 419 + iscsi_conn_printk(KERN_ERR, conn, 420 + "Got CHECK_CONDITION but invalid data " 421 + "buffer size of %d\n", datalen); 421 422 sc->result = DID_BAD_TARGET << 16; 422 423 goto out; 423 424 } ··· 495 494 496 495 mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0); 497 496 if (!mtask) { 498 - printk(KERN_ERR "Could not send nopout\n"); 497 + iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n"); 499 498 return; 500 499 } 501 500 ··· 523 522 if (ntoh24(reject->dlength) >= sizeof(struct iscsi_hdr)) { 524 523 memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr)); 525 524 itt = get_itt(rejected_pdu.itt); 526 - printk(KERN_ERR "itt 0x%x had pdu (op 0x%x) rejected " 527 - "due to DataDigest error.\n", itt, 528 - rejected_pdu.opcode); 525 + iscsi_conn_printk(KERN_ERR, conn, 526 + "itt 0x%x had pdu (op 0x%x) rejected " 527 + "due to DataDigest error.\n", itt, 528 + rejected_pdu.opcode); 529 529 } 530 530 } 531 531 return 0; ··· 543 541 * queuecommand or send generic. session lock must be held and verify 544 542 * itt must have been called. 
545 543 */ 546 - int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, 547 - char *data, int datalen) 544 + static int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, 545 + char *data, int datalen) 548 546 { 549 547 struct iscsi_session *session = conn->session; 550 548 int opcode = hdr->opcode & ISCSI_OPCODE_MASK, rc = 0; ··· 674 672 675 673 return rc; 676 674 } 677 - EXPORT_SYMBOL_GPL(__iscsi_complete_pdu); 678 675 679 676 int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, 680 677 char *data, int datalen) ··· 698 697 if (hdr->itt != RESERVED_ITT) { 699 698 if (((__force u32)hdr->itt & ISCSI_AGE_MASK) != 700 699 (session->age << ISCSI_AGE_SHIFT)) { 701 - printk(KERN_ERR "iscsi: received itt %x expected " 702 - "session age (%x)\n", (__force u32)hdr->itt, 703 - session->age & ISCSI_AGE_MASK); 700 + iscsi_conn_printk(KERN_ERR, conn, 701 + "received itt %x expected session " 702 + "age (%x)\n", (__force u32)hdr->itt, 703 + session->age & ISCSI_AGE_MASK); 704 704 return ISCSI_ERR_BAD_ITT; 705 705 } 706 706 707 - if (((__force u32)hdr->itt & ISCSI_CID_MASK) != 708 - (conn->id << ISCSI_CID_SHIFT)) { 709 - printk(KERN_ERR "iscsi: received itt %x, expected " 710 - "CID (%x)\n", (__force u32)hdr->itt, conn->id); 711 - return ISCSI_ERR_BAD_ITT; 712 - } 713 707 itt = get_itt(hdr->itt); 714 708 } else 715 709 itt = ~0U; ··· 713 717 ctask = session->cmds[itt]; 714 718 715 719 if (!ctask->sc) { 716 - printk(KERN_INFO "iscsi: dropping ctask with " 717 - "itt 0x%x\n", ctask->itt); 720 + iscsi_conn_printk(KERN_INFO, conn, "dropping ctask " 721 + "with itt 0x%x\n", ctask->itt); 718 722 /* force drop */ 719 723 return ISCSI_ERR_NO_SCSI_CMD; 720 724 } 721 725 722 726 if (ctask->sc->SCp.phase != session->age) { 723 - printk(KERN_ERR "iscsi: ctask's session age %d, " 724 - "expected %d\n", ctask->sc->SCp.phase, 725 - session->age); 727 + iscsi_conn_printk(KERN_ERR, conn, 728 + "iscsi: ctask's session age %d, " 729 + "expected 
%d\n", ctask->sc->SCp.phase, 730 + session->age); 726 731 return ISCSI_ERR_SESSION_FAILED; 727 732 } 728 733 } ··· 768 771 */ 769 772 nop->cmdsn = cpu_to_be32(session->cmdsn); 770 773 if (hdr->itt != RESERVED_ITT) { 771 - hdr->itt = build_itt(mtask->itt, conn->id, session->age); 774 + hdr->itt = build_itt(mtask->itt, session->age); 772 775 /* 773 776 * TODO: We always use immediate, so we never hit this. 774 777 * If we start to send tmfs or nops as non-immediate then ··· 994 997 FAILURE_SESSION_IN_RECOVERY, 995 998 FAILURE_SESSION_RECOVERY_TIMEOUT, 996 999 FAILURE_SESSION_LOGGING_OUT, 1000 + FAILURE_SESSION_NOT_READY, 997 1001 }; 998 1002 999 1003 int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) ··· 1015 1017 session = iscsi_hostdata(host->hostdata); 1016 1018 spin_lock(&session->lock); 1017 1019 1020 + reason = iscsi_session_chkready(session_to_cls(session)); 1021 + if (reason) { 1022 + sc->result = reason; 1023 + goto fault; 1024 + } 1025 + 1018 1026 /* 1019 1027 * ISCSI_STATE_FAILED is a temp. state. 
The recovery 1020 1028 * code will decide what is best to do with command queued ··· 1037 1033 switch (session->state) { 1038 1034 case ISCSI_STATE_IN_RECOVERY: 1039 1035 reason = FAILURE_SESSION_IN_RECOVERY; 1040 - goto reject; 1036 + sc->result = DID_IMM_RETRY << 16; 1037 + break; 1041 1038 case ISCSI_STATE_LOGGING_OUT: 1042 1039 reason = FAILURE_SESSION_LOGGING_OUT; 1043 - goto reject; 1040 + sc->result = DID_IMM_RETRY << 16; 1041 + break; 1044 1042 case ISCSI_STATE_RECOVERY_FAILED: 1045 1043 reason = FAILURE_SESSION_RECOVERY_TIMEOUT; 1044 + sc->result = DID_NO_CONNECT << 16; 1046 1045 break; 1047 1046 case ISCSI_STATE_TERMINATE: 1048 1047 reason = FAILURE_SESSION_TERMINATE; 1048 + sc->result = DID_NO_CONNECT << 16; 1049 1049 break; 1050 1050 default: 1051 1051 reason = FAILURE_SESSION_FREED; 1052 + sc->result = DID_NO_CONNECT << 16; 1052 1053 } 1053 1054 goto fault; 1054 1055 } ··· 1061 1052 conn = session->leadconn; 1062 1053 if (!conn) { 1063 1054 reason = FAILURE_SESSION_FREED; 1055 + sc->result = DID_NO_CONNECT << 16; 1064 1056 goto fault; 1065 1057 } 1066 1058 ··· 1101 1091 1102 1092 fault: 1103 1093 spin_unlock(&session->lock); 1104 - printk(KERN_ERR "iscsi: cmd 0x%x is not queued (%d)\n", 1105 - sc->cmnd[0], reason); 1106 - sc->result = (DID_NO_CONNECT << 16); 1094 + debug_scsi("iscsi: cmd 0x%x is not queued (%d)\n", sc->cmnd[0], reason); 1107 1095 scsi_set_resid(sc, scsi_bufflen(sc)); 1108 1096 sc->scsi_done(sc); 1109 1097 spin_lock(host->host_lock); ··· 1168 1160 mutex_lock(&session->eh_mutex); 1169 1161 spin_lock_bh(&session->lock); 1170 1162 if (session->state == ISCSI_STATE_LOGGED_IN) 1171 - printk(KERN_INFO "iscsi: host reset succeeded\n"); 1163 + iscsi_session_printk(KERN_INFO, session, 1164 + "host reset succeeded\n"); 1172 1165 else 1173 1166 goto failed; 1174 1167 spin_unlock_bh(&session->lock); ··· 1248 1239 * Fail commands. 
session lock held and recv side suspended and xmit 1249 1240 * thread flushed 1250 1241 */ 1251 - static void fail_all_commands(struct iscsi_conn *conn, unsigned lun) 1242 + static void fail_all_commands(struct iscsi_conn *conn, unsigned lun, 1243 + int error) 1252 1244 { 1253 1245 struct iscsi_cmd_task *ctask, *tmp; 1254 1246 ··· 1261 1251 if (lun == ctask->sc->device->lun || lun == -1) { 1262 1252 debug_scsi("failing pending sc %p itt 0x%x\n", 1263 1253 ctask->sc, ctask->itt); 1264 - fail_command(conn, ctask, DID_BUS_BUSY << 16); 1254 + fail_command(conn, ctask, error << 16); 1265 1255 } 1266 1256 } 1267 1257 ··· 1269 1259 if (lun == ctask->sc->device->lun || lun == -1) { 1270 1260 debug_scsi("failing requeued sc %p itt 0x%x\n", 1271 1261 ctask->sc, ctask->itt); 1272 - fail_command(conn, ctask, DID_BUS_BUSY << 16); 1262 + fail_command(conn, ctask, error << 16); 1273 1263 } 1274 1264 } 1275 1265 ··· 1367 1357 last_recv = conn->last_recv; 1368 1358 if (time_before_eq(last_recv + timeout + (conn->ping_timeout * HZ), 1369 1359 jiffies)) { 1370 - printk(KERN_ERR "ping timeout of %d secs expired, " 1371 - "last rx %lu, last ping %lu, now %lu\n", 1372 - conn->ping_timeout, last_recv, 1373 - conn->last_ping, jiffies); 1360 + iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs " 1361 + "expired, last rx %lu, last ping %lu, " 1362 + "now %lu\n", conn->ping_timeout, last_recv, 1363 + conn->last_ping, jiffies); 1374 1364 spin_unlock(&session->lock); 1375 1365 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 1376 1366 return; ··· 1383 1373 iscsi_send_nopout(conn, NULL); 1384 1374 } 1385 1375 next_timeout = last_recv + timeout + (conn->ping_timeout * HZ); 1386 - } else { 1376 + } else 1387 1377 next_timeout = last_recv + timeout; 1388 - } 1389 1378 1390 - if (next_timeout) { 1391 - debug_scsi("Setting next tmo %lu\n", next_timeout); 1392 - mod_timer(&conn->transport_timer, next_timeout); 1393 - } 1379 + debug_scsi("Setting next tmo %lu\n", next_timeout); 1380 + 
mod_timer(&conn->transport_timer, next_timeout); 1394 1381 done: 1395 1382 spin_unlock(&session->lock); 1396 1383 } ··· 1580 1573 /* need to grab the recv lock then session lock */ 1581 1574 write_lock_bh(conn->recv_lock); 1582 1575 spin_lock(&session->lock); 1583 - fail_all_commands(conn, sc->device->lun); 1576 + fail_all_commands(conn, sc->device->lun, DID_ERROR); 1584 1577 conn->tmf_state = TMF_INITIAL; 1585 1578 spin_unlock(&session->lock); 1586 1579 write_unlock_bh(conn->recv_lock); ··· 1951 1944 } 1952 1945 spin_unlock_irqrestore(session->host->host_lock, flags); 1953 1946 msleep_interruptible(500); 1954 - printk(KERN_INFO "iscsi: scsi conn_destroy(): host_busy %d " 1955 - "host_failed %d\n", session->host->host_busy, 1956 - session->host->host_failed); 1947 + iscsi_conn_printk(KERN_INFO, conn, "iscsi conn_destroy(): " 1948 + "host_busy %d host_failed %d\n", 1949 + session->host->host_busy, 1950 + session->host->host_failed); 1957 1951 /* 1958 1952 * force eh_abort() to unblock 1959 1953 */ ··· 1983 1975 struct iscsi_session *session = conn->session; 1984 1976 1985 1977 if (!session) { 1986 - printk(KERN_ERR "iscsi: can't start unbound connection\n"); 1978 + iscsi_conn_printk(KERN_ERR, conn, 1979 + "can't start unbound connection\n"); 1987 1980 return -EPERM; 1988 1981 } 1989 1982 1990 1983 if ((session->imm_data_en || !session->initial_r2t_en) && 1991 1984 session->first_burst > session->max_burst) { 1992 - printk("iscsi: invalid burst lengths: " 1993 - "first_burst %d max_burst %d\n", 1994 - session->first_burst, session->max_burst); 1985 + iscsi_conn_printk(KERN_INFO, conn, "invalid burst lengths: " 1986 + "first_burst %d max_burst %d\n", 1987 + session->first_burst, session->max_burst); 1995 1988 return -EINVAL; 1996 1989 } 1997 1990 1998 1991 if (conn->ping_timeout && !conn->recv_timeout) { 1999 - printk(KERN_ERR "iscsi: invalid recv timeout of zero " 2000 - "Using 5 seconds\n."); 1992 + iscsi_conn_printk(KERN_ERR, conn, "invalid recv timeout of " 1993 + 
"zero. Using 5 seconds\n."); 2001 1994 conn->recv_timeout = 5; 2002 1995 } 2003 1996 2004 1997 if (conn->recv_timeout && !conn->ping_timeout) { 2005 - printk(KERN_ERR "iscsi: invalid ping timeout of zero " 2006 - "Using 5 seconds.\n"); 1998 + iscsi_conn_printk(KERN_ERR, conn, "invalid ping timeout of " 1999 + "zero. Using 5 seconds.\n"); 2007 2000 conn->ping_timeout = 5; 2008 2001 } 2009 2002 ··· 2028 2019 conn->stop_stage = 0; 2029 2020 conn->tmf_state = TMF_INITIAL; 2030 2021 session->age++; 2031 - spin_unlock_bh(&session->lock); 2032 - 2033 - iscsi_unblock_session(session_to_cls(session)); 2034 - wake_up(&conn->ehwait); 2035 - return 0; 2022 + if (session->age == 16) 2023 + session->age = 0; 2024 + break; 2036 2025 case STOP_CONN_TERM: 2037 2026 conn->stop_stage = 0; 2038 2027 break; ··· 2039 2032 } 2040 2033 spin_unlock_bh(&session->lock); 2041 2034 2035 + iscsi_unblock_session(session_to_cls(session)); 2036 + wake_up(&conn->ehwait); 2042 2037 return 0; 2043 2038 } 2044 2039 EXPORT_SYMBOL_GPL(iscsi_conn_start); ··· 2132 2123 * flush queues. 2133 2124 */ 2134 2125 spin_lock_bh(&session->lock); 2135 - fail_all_commands(conn, -1); 2126 + fail_all_commands(conn, -1, 2127 + STOP_CONN_RECOVER ? DID_BUS_BUSY : DID_ERROR); 2136 2128 flush_control_queues(session, conn); 2137 2129 spin_unlock_bh(&session->lock); 2138 2130 mutex_unlock(&session->eh_mutex); ··· 2150 2140 iscsi_start_session_recovery(session, conn, flag); 2151 2141 break; 2152 2142 default: 2153 - printk(KERN_ERR "iscsi: invalid stop flag %d\n", flag); 2143 + iscsi_conn_printk(KERN_ERR, conn, 2144 + "invalid stop flag %d\n", flag); 2154 2145 } 2155 2146 } 2156 2147 EXPORT_SYMBOL_GPL(iscsi_conn_stop);
-751
drivers/scsi/mac_esp.c
··· 1 - /* 2 - * 68k mac 53c9[46] scsi driver 3 - * 4 - * copyright (c) 1998, David Weis weisd3458@uni.edu 5 - * 6 - * debugging on Quadra 800 and 660AV Michael Schmitz, Dave Kilzer 7/98 7 - * 8 - * based loosely on cyber_esp.c 9 - */ 10 - 11 - /* these are unused for now */ 12 - #define myreadl(addr) (*(volatile unsigned int *) (addr)) 13 - #define mywritel(b, addr) ((*(volatile unsigned int *) (addr)) = (b)) 14 - 15 - 16 - #include <linux/kernel.h> 17 - #include <linux/delay.h> 18 - #include <linux/types.h> 19 - #include <linux/ctype.h> 20 - #include <linux/string.h> 21 - #include <linux/slab.h> 22 - #include <linux/blkdev.h> 23 - #include <linux/proc_fs.h> 24 - #include <linux/stat.h> 25 - #include <linux/init.h> 26 - #include <linux/interrupt.h> 27 - 28 - #include "scsi.h" 29 - #include <scsi/scsi_host.h> 30 - #include "NCR53C9x.h" 31 - 32 - #include <asm/io.h> 33 - 34 - #include <asm/setup.h> 35 - #include <asm/irq.h> 36 - #include <asm/macints.h> 37 - #include <asm/machw.h> 38 - #include <asm/mac_via.h> 39 - 40 - #include <asm/pgtable.h> 41 - 42 - #include <asm/macintosh.h> 43 - 44 - /* #define DEBUG_MAC_ESP */ 45 - 46 - extern void esp_handle(struct NCR_ESP *esp); 47 - extern void mac_esp_intr(int irq, void *dev_id); 48 - 49 - static int dma_bytes_sent(struct NCR_ESP * esp, int fifo_count); 50 - static int dma_can_transfer(struct NCR_ESP * esp, Scsi_Cmnd *sp); 51 - static void dma_dump_state(struct NCR_ESP * esp); 52 - static void dma_init_read(struct NCR_ESP * esp, char * vaddress, int length); 53 - static void dma_init_write(struct NCR_ESP * esp, char * vaddress, int length); 54 - static void dma_ints_off(struct NCR_ESP * esp); 55 - static void dma_ints_on(struct NCR_ESP * esp); 56 - static int dma_irq_p(struct NCR_ESP * esp); 57 - static int dma_irq_p_quick(struct NCR_ESP * esp); 58 - static void dma_led_off(struct NCR_ESP * esp); 59 - static void dma_led_on(struct NCR_ESP *esp); 60 - static int dma_ports_p(struct NCR_ESP *esp); 61 - static void 
dma_setup(struct NCR_ESP * esp, __u32 addr, int count, int write); 62 - static void dma_setup_quick(struct NCR_ESP * esp, __u32 addr, int count, int write); 63 - 64 - static int esp_dafb_dma_irq_p(struct NCR_ESP * espdev); 65 - static int esp_iosb_dma_irq_p(struct NCR_ESP * espdev); 66 - 67 - static volatile unsigned char cmd_buffer[16]; 68 - /* This is where all commands are put 69 - * before they are transferred to the ESP chip 70 - * via PIO. 71 - */ 72 - 73 - static int esp_initialized = 0; 74 - 75 - static int setup_num_esps = -1; 76 - static int setup_disconnect = -1; 77 - static int setup_nosync = -1; 78 - static int setup_can_queue = -1; 79 - static int setup_cmd_per_lun = -1; 80 - static int setup_sg_tablesize = -1; 81 - #ifdef SUPPORT_TAGS 82 - static int setup_use_tagged_queuing = -1; 83 - #endif 84 - static int setup_hostid = -1; 85 - 86 - /* 87 - * Experimental ESP inthandler; check macints.c to make sure dev_id is 88 - * set up properly! 89 - */ 90 - 91 - void mac_esp_intr(int irq, void *dev_id) 92 - { 93 - struct NCR_ESP *esp = (struct NCR_ESP *) dev_id; 94 - int irq_p = 0; 95 - 96 - /* Handle the one ESP interrupt showing at this IRQ level. */ 97 - if(((esp)->irq & 0xff) == irq) { 98 - /* 99 - * Debug .. 100 - */ 101 - irq_p = esp->dma_irq_p(esp); 102 - printk("mac_esp: irq_p %x current %p disconnected %p\n", 103 - irq_p, esp->current_SC, esp->disconnected_SC); 104 - 105 - /* 106 - * Mac: if we're here, it's an ESP interrupt for sure! 
107 - */ 108 - if((esp->current_SC || esp->disconnected_SC)) { 109 - esp->dma_ints_off(esp); 110 - 111 - ESPIRQ(("I%d(", esp->esp_id)); 112 - esp_handle(esp); 113 - ESPIRQ((")")); 114 - 115 - esp->dma_ints_on(esp); 116 - } 117 - } 118 - } 119 - 120 - /* 121 - * Debug hooks; use for playing with the interrupt flag testing and interrupt 122 - * acknowledge on the various machines 123 - */ 124 - 125 - void scsi_esp_polled(int irq, void *dev_id) 126 - { 127 - if (esp_initialized == 0) 128 - return; 129 - 130 - mac_esp_intr(irq, dev_id); 131 - } 132 - 133 - void fake_intr(int irq, void *dev_id) 134 - { 135 - #ifdef DEBUG_MAC_ESP 136 - printk("mac_esp: got irq\n"); 137 - #endif 138 - 139 - mac_esp_intr(irq, dev_id); 140 - } 141 - 142 - irqreturn_t fake_drq(int irq, void *dev_id) 143 - { 144 - printk("mac_esp: got drq\n"); 145 - return IRQ_HANDLED; 146 - } 147 - 148 - #define DRIVER_SETUP 149 - 150 - /* 151 - * Function : mac_esp_setup(char *str) 152 - * 153 - * Purpose : booter command line initialization of the overrides array, 154 - * 155 - * Inputs : str - parameters, separated by commas. 156 - * 157 - * Currently unused in the new driver; need to add settable parameters to the 158 - * detect function. 159 - * 160 - */ 161 - 162 - static int __init mac_esp_setup(char *str) { 163 - #ifdef DRIVER_SETUP 164 - /* Format of mac53c9x parameter is: 165 - * mac53c9x=<num_esps>,<disconnect>,<nosync>,<can_queue>,<cmd_per_lun>,<sg_tablesize>,<hostid>,<use_tags> 166 - * Negative values mean don't change. 
167 - */ 168 - 169 - char *this_opt; 170 - long opt; 171 - 172 - this_opt = strsep (&str, ","); 173 - if(this_opt) { 174 - opt = simple_strtol( this_opt, NULL, 0 ); 175 - 176 - if (opt >= 0 && opt <= 2) 177 - setup_num_esps = opt; 178 - else if (opt > 2) 179 - printk( "mac_esp_setup: invalid number of hosts %ld !\n", opt ); 180 - 181 - this_opt = strsep (&str, ","); 182 - } 183 - if(this_opt) { 184 - opt = simple_strtol( this_opt, NULL, 0 ); 185 - 186 - if (opt > 0) 187 - setup_disconnect = opt; 188 - 189 - this_opt = strsep (&str, ","); 190 - } 191 - if(this_opt) { 192 - opt = simple_strtol( this_opt, NULL, 0 ); 193 - 194 - if (opt >= 0) 195 - setup_nosync = opt; 196 - 197 - this_opt = strsep (&str, ","); 198 - } 199 - if(this_opt) { 200 - opt = simple_strtol( this_opt, NULL, 0 ); 201 - 202 - if (opt > 0) 203 - setup_can_queue = opt; 204 - 205 - this_opt = strsep (&str, ","); 206 - } 207 - if(this_opt) { 208 - opt = simple_strtol( this_opt, NULL, 0 ); 209 - 210 - if (opt > 0) 211 - setup_cmd_per_lun = opt; 212 - 213 - this_opt = strsep (&str, ","); 214 - } 215 - if(this_opt) { 216 - opt = simple_strtol( this_opt, NULL, 0 ); 217 - 218 - if (opt >= 0) { 219 - setup_sg_tablesize = opt; 220 - /* Must be <= SG_ALL (255) */ 221 - if (setup_sg_tablesize > SG_ALL) 222 - setup_sg_tablesize = SG_ALL; 223 - } 224 - 225 - this_opt = strsep (&str, ","); 226 - } 227 - if(this_opt) { 228 - opt = simple_strtol( this_opt, NULL, 0 ); 229 - 230 - /* Must be between 0 and 7 */ 231 - if (opt >= 0 && opt <= 7) 232 - setup_hostid = opt; 233 - else if (opt > 7) 234 - printk( "mac_esp_setup: invalid host ID %ld !\n", opt); 235 - 236 - this_opt = strsep (&str, ","); 237 - } 238 - #ifdef SUPPORT_TAGS 239 - if(this_opt) { 240 - opt = simple_strtol( this_opt, NULL, 0 ); 241 - if (opt >= 0) 242 - setup_use_tagged_queuing = !!opt; 243 - } 244 - #endif 245 - #endif 246 - return 1; 247 - } 248 - 249 - __setup("mac53c9x=", mac_esp_setup); 250 - 251 - 252 - /* 253 - * ESP address 'detection' 254 - 
*/ 255 - 256 - unsigned long get_base(int chip_num) 257 - { 258 - /* 259 - * using the chip_num and mac model, figure out where the 260 - * chips are mapped 261 - */ 262 - 263 - unsigned long io_base = 0x50f00000; 264 - unsigned int second_offset = 0x402; 265 - unsigned long scsi_loc = 0; 266 - 267 - switch (macintosh_config->scsi_type) { 268 - 269 - /* 950, 900, 700 */ 270 - case MAC_SCSI_QUADRA2: 271 - scsi_loc = io_base + 0xf000 + ((chip_num == 0) ? 0 : second_offset); 272 - break; 273 - 274 - /* av's */ 275 - case MAC_SCSI_QUADRA3: 276 - scsi_loc = io_base + 0x18000 + ((chip_num == 0) ? 0 : second_offset); 277 - break; 278 - 279 - /* most quadra/centris models are like this */ 280 - case MAC_SCSI_QUADRA: 281 - scsi_loc = io_base + 0x10000; 282 - break; 283 - 284 - default: 285 - printk("mac_esp: get_base: hit default!\n"); 286 - scsi_loc = io_base + 0x10000; 287 - break; 288 - 289 - } /* switch */ 290 - 291 - printk("mac_esp: io base at 0x%lx\n", scsi_loc); 292 - 293 - return scsi_loc; 294 - } 295 - 296 - /* 297 - * Model dependent ESP setup 298 - */ 299 - 300 - int mac_esp_detect(struct scsi_host_template * tpnt) 301 - { 302 - int quick = 0; 303 - int chipnum, chipspresent = 0; 304 - #if 0 305 - unsigned long timeout; 306 - #endif 307 - 308 - if (esp_initialized > 0) 309 - return -ENODEV; 310 - 311 - /* what do we have in this machine... */ 312 - if (MACHW_PRESENT(MAC_SCSI_96)) { 313 - chipspresent ++; 314 - } 315 - 316 - if (MACHW_PRESENT(MAC_SCSI_96_2)) { 317 - chipspresent ++; 318 - } 319 - 320 - /* number of ESPs present ? */ 321 - if (setup_num_esps >= 0) { 322 - if (chipspresent >= setup_num_esps) 323 - chipspresent = setup_num_esps; 324 - else 325 - printk("mac_esp_detect: num_hosts detected %d setup %d \n", 326 - chipspresent, setup_num_esps); 327 - } 328 - 329 - /* TODO: add disconnect / nosync flags */ 330 - 331 - /* setup variables */ 332 - tpnt->can_queue = 333 - (setup_can_queue > 0) ? 
setup_can_queue : 7; 334 - tpnt->cmd_per_lun = 335 - (setup_cmd_per_lun > 0) ? setup_cmd_per_lun : 1; 336 - tpnt->sg_tablesize = 337 - (setup_sg_tablesize >= 0) ? setup_sg_tablesize : SG_ALL; 338 - 339 - if (setup_hostid >= 0) 340 - tpnt->this_id = setup_hostid; 341 - else { 342 - /* use 7 as default */ 343 - tpnt->this_id = 7; 344 - } 345 - 346 - #ifdef SUPPORT_TAGS 347 - if (setup_use_tagged_queuing < 0) 348 - setup_use_tagged_queuing = DEFAULT_USE_TAGGED_QUEUING; 349 - #endif 350 - 351 - for (chipnum = 0; chipnum < chipspresent; chipnum ++) { 352 - struct NCR_ESP * esp; 353 - 354 - esp = esp_allocate(tpnt, NULL, 0); 355 - esp->eregs = (struct ESP_regs *) get_base(chipnum); 356 - 357 - esp->dma_irq_p = &esp_dafb_dma_irq_p; 358 - if (chipnum == 0) { 359 - 360 - if (macintosh_config->scsi_type == MAC_SCSI_QUADRA) { 361 - /* most machines except those below :-) */ 362 - quick = 1; 363 - esp->dma_irq_p = &esp_iosb_dma_irq_p; 364 - } else if (macintosh_config->scsi_type == MAC_SCSI_QUADRA3) { 365 - /* mostly av's */ 366 - quick = 0; 367 - } else { 368 - /* q950, 900, 700 */ 369 - quick = 1; 370 - out_be32(0xf9800024, 0x1d1); 371 - esp->dregs = (void *) 0xf9800024; 372 - } 373 - 374 - } else { /* chipnum */ 375 - 376 - quick = 1; 377 - out_be32(0xf9800028, 0x1d1); 378 - esp->dregs = (void *) 0xf9800028; 379 - 380 - } /* chipnum == 0 */ 381 - 382 - /* use pio for command bytes; pio for message/data: TBI */ 383 - esp->do_pio_cmds = 1; 384 - 385 - /* Set the command buffer */ 386 - esp->esp_command = (volatile unsigned char*) cmd_buffer; 387 - esp->esp_command_dvma = (__u32) cmd_buffer; 388 - 389 - /* various functions */ 390 - esp->dma_bytes_sent = &dma_bytes_sent; 391 - esp->dma_can_transfer = &dma_can_transfer; 392 - esp->dma_dump_state = &dma_dump_state; 393 - esp->dma_init_read = NULL; 394 - esp->dma_init_write = NULL; 395 - esp->dma_ints_off = &dma_ints_off; 396 - esp->dma_ints_on = &dma_ints_on; 397 - 398 - esp->dma_ports_p = &dma_ports_p; 399 - 400 - 401 - /* 
Optional functions */ 402 - esp->dma_barrier = NULL; 403 - esp->dma_drain = NULL; 404 - esp->dma_invalidate = NULL; 405 - esp->dma_irq_entry = NULL; 406 - esp->dma_irq_exit = NULL; 407 - esp->dma_led_on = NULL; 408 - esp->dma_led_off = NULL; 409 - esp->dma_poll = NULL; 410 - esp->dma_reset = NULL; 411 - 412 - /* SCSI chip speed */ 413 - /* below esp->cfreq = 40000000; */ 414 - 415 - 416 - if (quick) { 417 - /* 'quick' means there's handshake glue logic like in the 5380 case */ 418 - esp->dma_setup = &dma_setup_quick; 419 - } else { 420 - esp->dma_setup = &dma_setup; 421 - } 422 - 423 - if (chipnum == 0) { 424 - 425 - esp->irq = IRQ_MAC_SCSI; 426 - 427 - request_irq(IRQ_MAC_SCSI, esp_intr, 0, "Mac ESP SCSI", esp->ehost); 428 - #if 0 /* conflicts with IOP ADB */ 429 - request_irq(IRQ_MAC_SCSIDRQ, fake_drq, 0, "Mac ESP DRQ", esp->ehost); 430 - #endif 431 - 432 - if (macintosh_config->scsi_type == MAC_SCSI_QUADRA) { 433 - esp->cfreq = 16500000; 434 - } else { 435 - esp->cfreq = 25000000; 436 - } 437 - 438 - 439 - } else { /* chipnum == 1 */ 440 - 441 - esp->irq = IRQ_MAC_SCSIDRQ; 442 - #if 0 /* conflicts with IOP ADB */ 443 - request_irq(IRQ_MAC_SCSIDRQ, esp_intr, 0, "Mac ESP SCSI 2", esp->ehost); 444 - #endif 445 - 446 - esp->cfreq = 25000000; 447 - 448 - } 449 - 450 - if (quick) { 451 - printk("esp: using quick version\n"); 452 - } 453 - 454 - printk("esp: addr at 0x%p\n", esp->eregs); 455 - 456 - esp->scsi_id = 7; 457 - esp->diff = 0; 458 - 459 - esp_initialize(esp); 460 - 461 - } /* for chipnum */ 462 - 463 - if (chipspresent) 464 - printk("\nmac_esp: %d esp controllers found\n", chipspresent); 465 - 466 - esp_initialized = chipspresent; 467 - 468 - return chipspresent; 469 - } 470 - 471 - static int mac_esp_release(struct Scsi_Host *shost) 472 - { 473 - if (shost->irq) 474 - free_irq(shost->irq, NULL); 475 - if (shost->io_port && shost->n_io_port) 476 - release_region(shost->io_port, shost->n_io_port); 477 - scsi_unregister(shost); 478 - return 0; 479 - } 480 - 
481 - /* 482 - * I've been wondering what this is supposed to do, for some time. Talking 483 - * to Allen Briggs: These machines have an extra register someplace where the 484 - * DRQ pin of the ESP can be monitored. That isn't useful for determining 485 - * anything else (such as reselect interrupt or other magic) though. 486 - * Maybe make the semantics should be changed like 487 - * if (esp->current_SC) 488 - * ... check DRQ flag ... 489 - * else 490 - * ... disconnected, check pending VIA interrupt ... 491 - * 492 - * There's a problem with using the dabf flag or mac_irq_pending() here: both 493 - * seem to return 1 even though no interrupt is currently pending, resulting 494 - * in esp_exec_cmd() holding off the next command, and possibly infinite loops 495 - * in esp_intr(). 496 - * Short term fix: just use esp_status & ESP_STAT_INTR here, as long as we 497 - * use simple PIO. The DRQ status will be important when implementing pseudo 498 - * DMA mode (set up ESP transfer count, return, do a batch of bytes in PIO or 499 - * 'hardware handshake' mode upon DRQ). 500 - * If you plan on changing this (i.e. to save the esp_status register access in 501 - * favor of a VIA register access or a shadow register for the IFR), make sure 502 - * to try a debug version of this first to monitor what registers would be a good 503 - * indicator of the ESP interrupt. 504 - */ 505 - 506 - static int esp_dafb_dma_irq_p(struct NCR_ESP * esp) 507 - { 508 - unsigned int ret; 509 - int sreg = esp_read(esp->eregs->esp_status); 510 - 511 - #ifdef DEBUG_MAC_ESP 512 - printk("mac_esp: esp_dafb_dma_irq_p dafb %d irq %d\n", 513 - readl(esp->dregs), mac_irq_pending(IRQ_MAC_SCSI)); 514 - #endif 515 - 516 - sreg &= ESP_STAT_INTR; 517 - 518 - /* 519 - * maybe working; this is essentially what's used for iosb_dma_irq_p 520 - */ 521 - if (sreg) 522 - return 1; 523 - else 524 - return 0; 525 - 526 - /* 527 - * didn't work ... 
528 - */ 529 - #if 0 530 - if (esp->current_SC) 531 - ret = readl(esp->dregs) & 0x200; 532 - else if (esp->disconnected_SC) 533 - ret = 1; /* sreg ?? */ 534 - else 535 - ret = mac_irq_pending(IRQ_MAC_SCSI); 536 - 537 - return(ret); 538 - #endif 539 - 540 - } 541 - 542 - /* 543 - * See above: testing mac_irq_pending always returned 8 (SCSI IRQ) regardless 544 - * of the actual ESP status. 545 - */ 546 - 547 - static int esp_iosb_dma_irq_p(struct NCR_ESP * esp) 548 - { 549 - int ret = mac_irq_pending(IRQ_MAC_SCSI) || mac_irq_pending(IRQ_MAC_SCSIDRQ); 550 - int sreg = esp_read(esp->eregs->esp_status); 551 - 552 - #ifdef DEBUG_MAC_ESP 553 - printk("mac_esp: dma_irq_p drq %d irq %d sreg %x curr %p disc %p\n", 554 - mac_irq_pending(IRQ_MAC_SCSIDRQ), mac_irq_pending(IRQ_MAC_SCSI), 555 - sreg, esp->current_SC, esp->disconnected_SC); 556 - #endif 557 - 558 - sreg &= ESP_STAT_INTR; 559 - 560 - if (sreg) 561 - return (sreg); 562 - else 563 - return 0; 564 - } 565 - 566 - /* 567 - * This seems to be OK for PIO at least ... usually 0 after PIO. 568 - */ 569 - 570 - static int dma_bytes_sent(struct NCR_ESP * esp, int fifo_count) 571 - { 572 - 573 - #ifdef DEBUG_MAC_ESP 574 - printk("mac_esp: dma bytes sent = %x\n", fifo_count); 575 - #endif 576 - 577 - return fifo_count; 578 - } 579 - 580 - /* 581 - * dma_can_transfer is used to switch between DMA and PIO, if DMA (pseudo) 582 - * is ever implemented. Returning 0 here will use PIO. 583 - */ 584 - 585 - static int dma_can_transfer(struct NCR_ESP * esp, Scsi_Cmnd * sp) 586 - { 587 - unsigned long sz = sp->SCp.this_residual; 588 - #if 0 /* no DMA yet; make conditional */ 589 - if (sz > 0x10000000) { 590 - sz = 0x10000000; 591 - } 592 - printk("mac_esp: dma can transfer = 0lx%x\n", sz); 593 - #else 594 - 595 - #ifdef DEBUG_MAC_ESP 596 - printk("mac_esp: pio to transfer = %ld\n", sz); 597 - #endif 598 - 599 - sz = 0; 600 - #endif 601 - return sz; 602 - } 603 - 604 - /* 605 - * Not yet ... 
606 - */ 607 - 608 - static void dma_dump_state(struct NCR_ESP * esp) 609 - { 610 - #ifdef DEBUG_MAC_ESP 611 - printk("mac_esp: dma_dump_state: called\n"); 612 - #endif 613 - #if 0 614 - ESPLOG(("esp%d: dma -- cond_reg<%02x>\n", 615 - esp->esp_id, ((struct mac_dma_registers *) 616 - (esp->dregs))->cond_reg)); 617 - #endif 618 - } 619 - 620 - /* 621 - * DMA setup: should be used to set up the ESP transfer count for pseudo 622 - * DMA transfers; need a DRQ transfer function to do the actual transfer 623 - */ 624 - 625 - static void dma_init_read(struct NCR_ESP * esp, char * vaddress, int length) 626 - { 627 - printk("mac_esp: dma_init_read\n"); 628 - } 629 - 630 - 631 - static void dma_init_write(struct NCR_ESP * esp, char * vaddress, int length) 632 - { 633 - printk("mac_esp: dma_init_write\n"); 634 - } 635 - 636 - 637 - static void dma_ints_off(struct NCR_ESP * esp) 638 - { 639 - disable_irq(esp->irq); 640 - } 641 - 642 - 643 - static void dma_ints_on(struct NCR_ESP * esp) 644 - { 645 - enable_irq(esp->irq); 646 - } 647 - 648 - /* 649 - * generic dma_irq_p(), unused 650 - */ 651 - 652 - static int dma_irq_p(struct NCR_ESP * esp) 653 - { 654 - int i = esp_read(esp->eregs->esp_status); 655 - 656 - #ifdef DEBUG_MAC_ESP 657 - printk("mac_esp: dma_irq_p status %d\n", i); 658 - #endif 659 - 660 - return (i & ESP_STAT_INTR); 661 - } 662 - 663 - static int dma_irq_p_quick(struct NCR_ESP * esp) 664 - { 665 - /* 666 - * Copied from iosb_dma_irq_p() 667 - */ 668 - int ret = mac_irq_pending(IRQ_MAC_SCSI) || mac_irq_pending(IRQ_MAC_SCSIDRQ); 669 - int sreg = esp_read(esp->eregs->esp_status); 670 - 671 - #ifdef DEBUG_MAC_ESP 672 - printk("mac_esp: dma_irq_p drq %d irq %d sreg %x curr %p disc %p\n", 673 - mac_irq_pending(IRQ_MAC_SCSIDRQ), mac_irq_pending(IRQ_MAC_SCSI), 674 - sreg, esp->current_SC, esp->disconnected_SC); 675 - #endif 676 - 677 - sreg &= ESP_STAT_INTR; 678 - 679 - if (sreg) 680 - return (sreg); 681 - else 682 - return 0; 683 - 684 - } 685 - 686 - static void 
dma_led_off(struct NCR_ESP * esp) 687 - { 688 - #ifdef DEBUG_MAC_ESP 689 - printk("mac_esp: dma_led_off: called\n"); 690 - #endif 691 - } 692 - 693 - 694 - static void dma_led_on(struct NCR_ESP * esp) 695 - { 696 - #ifdef DEBUG_MAC_ESP 697 - printk("mac_esp: dma_led_on: called\n"); 698 - #endif 699 - } 700 - 701 - 702 - static int dma_ports_p(struct NCR_ESP * esp) 703 - { 704 - return 0; 705 - } 706 - 707 - 708 - static void dma_setup(struct NCR_ESP * esp, __u32 addr, int count, int write) 709 - { 710 - 711 - #ifdef DEBUG_MAC_ESP 712 - printk("mac_esp: dma_setup\n"); 713 - #endif 714 - 715 - if (write) { 716 - dma_init_read(esp, (char *) addr, count); 717 - } else { 718 - dma_init_write(esp, (char *) addr, count); 719 - } 720 - } 721 - 722 - 723 - static void dma_setup_quick(struct NCR_ESP * esp, __u32 addr, int count, int write) 724 - { 725 - #ifdef DEBUG_MAC_ESP 726 - printk("mac_esp: dma_setup_quick\n"); 727 - #endif 728 - } 729 - 730 - static struct scsi_host_template driver_template = { 731 - .proc_name = "mac_esp", 732 - .name = "Mac 53C9x SCSI", 733 - .detect = mac_esp_detect, 734 - .slave_alloc = esp_slave_alloc, 735 - .slave_destroy = esp_slave_destroy, 736 - .release = mac_esp_release, 737 - .info = esp_info, 738 - .queuecommand = esp_queue, 739 - .eh_abort_handler = esp_abort, 740 - .eh_bus_reset_handler = esp_reset, 741 - .can_queue = 7, 742 - .this_id = 7, 743 - .sg_tablesize = SG_ALL, 744 - .cmd_per_lun = 1, 745 - .use_clustering = DISABLE_CLUSTERING 746 - }; 747 - 748 - 749 - #include "scsi_module.c" 750 - 751 - MODULE_LICENSE("GPL");
-520
drivers/scsi/mca_53c9x.c
··· 1 - /* mca_53c9x.c: Driver for the SCSI adapter found on NCR 35xx 2 - * (and maybe some other) Microchannel machines 3 - * 4 - * Code taken mostly from Cyberstorm SCSI drivers 5 - * Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk) 6 - * 7 - * Hacked to work with the NCR MCA stuff by Tymm Twillman (tymm@computer.org) 8 - * 9 - * The CyberStorm SCSI driver (and this driver) is based on David S. Miller's 10 - * ESP driver * for the Sparc computers. 11 - * 12 - * Special thanks to Ken Stewart at Symbios (LSI) for helping with info on 13 - * the 86C01. I was on the brink of going ga-ga... 14 - * 15 - * Also thanks to Jesper Skov for helping me with info on how the Amiga 16 - * does things... 17 - */ 18 - 19 - /* 20 - * This is currently only set up to use one 53c9x card at a time; it could be 21 - * changed fairly easily to detect/use more than one, but I'm not too sure how 22 - * many cards that use the 53c9x on MCA systems there are (if, in fact, there 23 - * are cards that use them, other than the one built into some NCR systems)... 24 - * If anyone requests this, I'll throw it in, otherwise it's not worth the 25 - * effort. 26 - */ 27 - 28 - /* 29 - * Info on the 86C01 MCA interface chip at the bottom, if you care enough to 30 - * look. 
31 - */ 32 - 33 - #include <linux/delay.h> 34 - #include <linux/interrupt.h> 35 - #include <linux/kernel.h> 36 - #include <linux/mca.h> 37 - #include <linux/types.h> 38 - #include <linux/string.h> 39 - #include <linux/slab.h> 40 - #include <linux/blkdev.h> 41 - #include <linux/proc_fs.h> 42 - #include <linux/stat.h> 43 - #include <linux/mca-legacy.h> 44 - 45 - #include "scsi.h" 46 - #include <scsi/scsi_host.h> 47 - #include "NCR53C9x.h" 48 - 49 - #include <asm/dma.h> 50 - #include <asm/irq.h> 51 - #include <asm/mca_dma.h> 52 - #include <asm/pgtable.h> 53 - 54 - /* 55 - * From ibmmca.c (IBM scsi controller card driver) -- used for turning PS2 disk 56 - * activity LED on and off 57 - */ 58 - 59 - #define PS2_SYS_CTR 0x92 60 - 61 - /* Ports the ncr's 53c94 can be put at; indexed by pos register value */ 62 - 63 - #define MCA_53C9X_IO_PORTS { \ 64 - 0x0000, 0x0240, 0x0340, 0x0400, \ 65 - 0x0420, 0x3240, 0x8240, 0xA240, \ 66 - } 67 - 68 - /* 69 - * Supposedly there were some cards put together with the 'c9x and 86c01. If 70 - * they have different ID's from the ones on the 3500 series machines, 71 - * you can add them here and hopefully things will work out. 
72 - */ 73 - 74 - #define MCA_53C9X_IDS { \ 75 - 0x7F4C, \ 76 - 0x0000, \ 77 - } 78 - 79 - static int dma_bytes_sent(struct NCR_ESP *, int); 80 - static int dma_can_transfer(struct NCR_ESP *, Scsi_Cmnd *); 81 - static void dma_dump_state(struct NCR_ESP *); 82 - static void dma_init_read(struct NCR_ESP *, __u32, int); 83 - static void dma_init_write(struct NCR_ESP *, __u32, int); 84 - static void dma_ints_off(struct NCR_ESP *); 85 - static void dma_ints_on(struct NCR_ESP *); 86 - static int dma_irq_p(struct NCR_ESP *); 87 - static int dma_ports_p(struct NCR_ESP *); 88 - static void dma_setup(struct NCR_ESP *, __u32, int, int); 89 - static void dma_led_on(struct NCR_ESP *); 90 - static void dma_led_off(struct NCR_ESP *); 91 - 92 - /* This is where all commands are put before they are trasfered to the 93 - * 53c9x via PIO. 94 - */ 95 - 96 - static volatile unsigned char cmd_buffer[16]; 97 - 98 - /* 99 - * We keep the structure that is used to access the registers on the 53c9x 100 - * here. 
101 - */ 102 - 103 - static struct ESP_regs eregs; 104 - 105 - /***************************************************************** Detection */ 106 - static int mca_esp_detect(struct scsi_host_template *tpnt) 107 - { 108 - struct NCR_ESP *esp; 109 - static int io_port_by_pos[] = MCA_53C9X_IO_PORTS; 110 - int mca_53c9x_ids[] = MCA_53C9X_IDS; 111 - int *id_to_check = mca_53c9x_ids; 112 - int slot; 113 - int pos[3]; 114 - unsigned int tmp_io_addr; 115 - unsigned char tmp_byte; 116 - 117 - 118 - if (!MCA_bus) 119 - return 0; 120 - 121 - while (*id_to_check) { 122 - if ((slot = mca_find_adapter(*id_to_check, 0)) != 123 - MCA_NOTFOUND) 124 - { 125 - esp = esp_allocate(tpnt, NULL, 0); 126 - 127 - pos[0] = mca_read_stored_pos(slot, 2); 128 - pos[1] = mca_read_stored_pos(slot, 3); 129 - pos[2] = mca_read_stored_pos(slot, 4); 130 - 131 - esp->eregs = &eregs; 132 - 133 - /* 134 - * IO port base is given in the first (non-ID) pos 135 - * register, like so: 136 - * 137 - * Bits 3 2 1 IO base 138 - * ---------------------------- 139 - * 0 0 0 <disabled> 140 - * 0 0 1 0x0240 141 - * 0 1 0 0x0340 142 - * 0 1 1 0x0400 143 - * 1 0 0 0x0420 144 - * 1 0 1 0x3240 145 - * 1 1 0 0x8240 146 - * 1 1 1 0xA240 147 - */ 148 - 149 - tmp_io_addr = 150 - io_port_by_pos[(pos[0] & 0x0E) >> 1]; 151 - 152 - esp->eregs->io_addr = tmp_io_addr + 0x10; 153 - 154 - if (esp->eregs->io_addr == 0x0000) { 155 - printk("Adapter is disabled.\n"); 156 - break; 157 - } 158 - 159 - /* 160 - * IRQ is specified in bits 4 and 5: 161 - * 162 - * Bits 4 5 IRQ 163 - * ----------------------- 164 - * 0 0 3 165 - * 0 1 5 166 - * 1 0 7 167 - * 1 1 9 168 - */ 169 - 170 - esp->irq = ((pos[0] & 0x30) >> 3) + 3; 171 - 172 - /* 173 - * DMA channel is in the low 3 bits of the second 174 - * POS register 175 - */ 176 - 177 - esp->dma = pos[1] & 7; 178 - esp->slot = slot; 179 - 180 - if (request_irq(esp->irq, esp_intr, 0, 181 - "NCR 53c9x SCSI", esp->ehost)) 182 - { 183 - printk("Unable to request IRQ %d.\n", esp->irq); 184 - 
esp_deallocate(esp); 185 - scsi_unregister(esp->ehost); 186 - return 0; 187 - } 188 - 189 - if (request_dma(esp->dma, "NCR 53c9x SCSI")) { 190 - printk("Unable to request DMA channel %d.\n", 191 - esp->dma); 192 - free_irq(esp->irq, esp_intr); 193 - esp_deallocate(esp); 194 - scsi_unregister(esp->ehost); 195 - return 0; 196 - } 197 - 198 - request_region(tmp_io_addr, 32, "NCR 53c9x SCSI"); 199 - 200 - /* 201 - * 86C01 handles DMA, IO mode, from address 202 - * (base + 0x0a) 203 - */ 204 - 205 - mca_disable_dma(esp->dma); 206 - mca_set_dma_io(esp->dma, tmp_io_addr + 0x0a); 207 - mca_enable_dma(esp->dma); 208 - 209 - /* Tell the 86C01 to give us interrupts */ 210 - 211 - tmp_byte = inb(tmp_io_addr + 0x02) | 0x40; 212 - outb(tmp_byte, tmp_io_addr + 0x02); 213 - 214 - /* 215 - * Scsi ID -- general purpose register, hi 216 - * 2 bits; add 4 to this number to get the 217 - * ID 218 - */ 219 - 220 - esp->scsi_id = ((pos[2] & 0xC0) >> 6) + 4; 221 - 222 - /* Do command transfer with programmed I/O */ 223 - 224 - esp->do_pio_cmds = 1; 225 - 226 - /* Required functions */ 227 - 228 - esp->dma_bytes_sent = &dma_bytes_sent; 229 - esp->dma_can_transfer = &dma_can_transfer; 230 - esp->dma_dump_state = &dma_dump_state; 231 - esp->dma_init_read = &dma_init_read; 232 - esp->dma_init_write = &dma_init_write; 233 - esp->dma_ints_off = &dma_ints_off; 234 - esp->dma_ints_on = &dma_ints_on; 235 - esp->dma_irq_p = &dma_irq_p; 236 - esp->dma_ports_p = &dma_ports_p; 237 - esp->dma_setup = &dma_setup; 238 - 239 - /* Optional functions */ 240 - 241 - esp->dma_barrier = NULL; 242 - esp->dma_drain = NULL; 243 - esp->dma_invalidate = NULL; 244 - esp->dma_irq_entry = NULL; 245 - esp->dma_irq_exit = NULL; 246 - esp->dma_led_on = dma_led_on; 247 - esp->dma_led_off = dma_led_off; 248 - esp->dma_poll = NULL; 249 - esp->dma_reset = NULL; 250 - 251 - /* Set the command buffer */ 252 - 253 - esp->esp_command = (volatile unsigned char*) 254 - cmd_buffer; 255 - esp->esp_command_dvma = 
isa_virt_to_bus(cmd_buffer); 256 - 257 - /* SCSI chip speed */ 258 - 259 - esp->cfreq = 25000000; 260 - 261 - /* Differential SCSI? I think not. */ 262 - 263 - esp->diff = 0; 264 - 265 - esp_initialize(esp); 266 - 267 - printk(" Adapter found in slot %2d: io port 0x%x " 268 - "irq %d dma channel %d\n", slot + 1, tmp_io_addr, 269 - esp->irq, esp->dma); 270 - 271 - mca_set_adapter_name(slot, "NCR 53C9X SCSI Adapter"); 272 - mca_mark_as_used(slot); 273 - 274 - break; 275 - } 276 - 277 - id_to_check++; 278 - } 279 - 280 - return esps_in_use; 281 - } 282 - 283 - 284 - /******************************************************************* Release */ 285 - 286 - static int mca_esp_release(struct Scsi_Host *host) 287 - { 288 - struct NCR_ESP *esp = (struct NCR_ESP *)host->hostdata; 289 - unsigned char tmp_byte; 290 - 291 - esp_deallocate(esp); 292 - /* 293 - * Tell the 86C01 to stop sending interrupts 294 - */ 295 - 296 - tmp_byte = inb(esp->eregs->io_addr - 0x0E); 297 - tmp_byte &= ~0x40; 298 - outb(tmp_byte, esp->eregs->io_addr - 0x0E); 299 - 300 - free_irq(esp->irq, esp_intr); 301 - free_dma(esp->dma); 302 - 303 - mca_mark_as_unused(esp->slot); 304 - 305 - return 0; 306 - } 307 - 308 - /************************************************************* DMA Functions */ 309 - static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count) 310 - { 311 - /* Ask the 53c9x. It knows. */ 312 - 313 - return fifo_count; 314 - } 315 - 316 - static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp) 317 - { 318 - /* 319 - * The MCA dma channels can only do up to 128K bytes at a time. 320 - * (16 bit mode) 321 - */ 322 - 323 - unsigned long sz = sp->SCp.this_residual; 324 - if(sz > 0x20000) 325 - sz = 0x20000; 326 - return sz; 327 - } 328 - 329 - static void dma_dump_state(struct NCR_ESP *esp) 330 - { 331 - /* 332 - * Doesn't quite match up to the other drivers, but we do what we 333 - * can. 
334 - */ 335 - 336 - ESPLOG(("esp%d: dma channel <%d>\n", esp->esp_id, esp->dma)); 337 - ESPLOG(("bytes left to dma: %d\n", mca_get_dma_residue(esp->dma))); 338 - } 339 - 340 - static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length) 341 - { 342 - unsigned long flags; 343 - 344 - 345 - save_flags(flags); 346 - cli(); 347 - 348 - mca_disable_dma(esp->dma); 349 - mca_set_dma_mode(esp->dma, MCA_DMA_MODE_XFER | MCA_DMA_MODE_16 | 350 - MCA_DMA_MODE_IO); 351 - mca_set_dma_addr(esp->dma, addr); 352 - mca_set_dma_count(esp->dma, length / 2); /* !!! */ 353 - mca_enable_dma(esp->dma); 354 - 355 - restore_flags(flags); 356 - } 357 - 358 - static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length) 359 - { 360 - unsigned long flags; 361 - 362 - 363 - save_flags(flags); 364 - cli(); 365 - 366 - mca_disable_dma(esp->dma); 367 - mca_set_dma_mode(esp->dma, MCA_DMA_MODE_XFER | MCA_DMA_MODE_WRITE | 368 - MCA_DMA_MODE_16 | MCA_DMA_MODE_IO); 369 - mca_set_dma_addr(esp->dma, addr); 370 - mca_set_dma_count(esp->dma, length / 2); /* !!! */ 371 - mca_enable_dma(esp->dma); 372 - 373 - restore_flags(flags); 374 - } 375 - 376 - static void dma_ints_off(struct NCR_ESP *esp) 377 - { 378 - /* 379 - * Tell the 'C01 to shut up. All interrupts are routed through it. 380 - */ 381 - 382 - outb(inb(esp->eregs->io_addr - 0x0E) & ~0x40, 383 - esp->eregs->io_addr - 0x0E); 384 - } 385 - 386 - static void dma_ints_on(struct NCR_ESP *esp) 387 - { 388 - /* 389 - * Ok. You can speak again. 390 - */ 391 - 392 - outb(inb(esp->eregs->io_addr - 0x0E) | 0x40, 393 - esp->eregs->io_addr - 0x0E); 394 - } 395 - 396 - static int dma_irq_p(struct NCR_ESP *esp) 397 - { 398 - /* 399 - * DaveM says that this should return a "yes" if there is an interrupt 400 - * or a DMA error occurred. I copied the Amiga driver's semantics, 401 - * though, because it seems to work and we can't really tell if 402 - * a DMA error happened. 
This gives the "yes" if the scsi chip 403 - * is sending an interrupt and no DMA activity is taking place 404 - */ 405 - 406 - return (!(inb(esp->eregs->io_addr - 0x04) & 1) && 407 - !(inb(esp->eregs->io_addr - 0x04) & 2) ); 408 - } 409 - 410 - static int dma_ports_p(struct NCR_ESP *esp) 411 - { 412 - /* 413 - * Check to see if interrupts are enabled on the 'C01 (in case abort 414 - * is entered multiple times, so we only do the abort once) 415 - */ 416 - 417 - return (inb(esp->eregs->io_addr - 0x0E) & 0x40) ? 1:0; 418 - } 419 - 420 - static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write) 421 - { 422 - if(write){ 423 - dma_init_write(esp, addr, count); 424 - } else { 425 - dma_init_read(esp, addr, count); 426 - } 427 - } 428 - 429 - /* 430 - * These will not play nicely with other disk controllers that try to use the 431 - * disk active LED... but what can you do? Don't answer that. 432 - * 433 - * Stolen shamelessly from ibmmca.c -- IBM Microchannel SCSI adapter driver 434 - * 435 - */ 436 - 437 - static void dma_led_on(struct NCR_ESP *esp) 438 - { 439 - outb(inb(PS2_SYS_CTR) | 0xc0, PS2_SYS_CTR); 440 - } 441 - 442 - static void dma_led_off(struct NCR_ESP *esp) 443 - { 444 - outb(inb(PS2_SYS_CTR) & 0x3f, PS2_SYS_CTR); 445 - } 446 - 447 - static struct scsi_host_template driver_template = { 448 - .proc_name = "mca_53c9x", 449 - .name = "NCR 53c9x SCSI", 450 - .detect = mca_esp_detect, 451 - .slave_alloc = esp_slave_alloc, 452 - .slave_destroy = esp_slave_destroy, 453 - .release = mca_esp_release, 454 - .queuecommand = esp_queue, 455 - .eh_abort_handler = esp_abort, 456 - .eh_bus_reset_handler = esp_reset, 457 - .can_queue = 7, 458 - .sg_tablesize = SG_ALL, 459 - .cmd_per_lun = 1, 460 - .unchecked_isa_dma = 1, 461 - .use_clustering = DISABLE_CLUSTERING 462 - }; 463 - 464 - 465 - #include "scsi_module.c" 466 - 467 - /* 468 - * OK, here's the goods I promised. 
The NCR 86C01 is an MCA interface chip 469 - * that handles enabling/diabling IRQ, dma interfacing, IO port selection 470 - * and other fun stuff. It takes up 16 addresses, and the chip it is 471 - * connnected to gets the following 16. Registers are as follows: 472 - * 473 - * Offsets 0-1 : Card ID 474 - * 475 - * Offset 2 : Mode enable register -- 476 - * Bit 7 : Data Word width (1 = 16, 0 = 8) 477 - * Bit 6 : IRQ enable (1 = enabled) 478 - * Bits 5,4 : IRQ select 479 - * 0 0 : IRQ 3 480 - * 0 1 : IRQ 5 481 - * 1 0 : IRQ 7 482 - * 1 1 : IRQ 9 483 - * Bits 3-1 : Base Address 484 - * 0 0 0 : <disabled> 485 - * 0 0 1 : 0x0240 486 - * 0 1 0 : 0x0340 487 - * 0 1 1 : 0x0400 488 - * 1 0 0 : 0x0420 489 - * 1 0 1 : 0x3240 490 - * 1 1 0 : 0x8240 491 - * 1 1 1 : 0xA240 492 - * Bit 0 : Card enable (1 = enabled) 493 - * 494 - * Offset 3 : DMA control register -- 495 - * Bit 7 : DMA enable (1 = enabled) 496 - * Bits 6,5 : Preemt Count Select (transfers to complete after 497 - * 'C01 has been preempted on MCA bus) 498 - * 0 0 : 0 499 - * 0 1 : 1 500 - * 1 0 : 3 501 - * 1 1 : 7 502 - * (all these wacky numbers; I'm sure there's a reason somewhere) 503 - * Bit 4 : Fairness enable (1 = fair bus priority) 504 - * Bits 3-0 : Arbitration level (0-15 consecutive) 505 - * 506 - * Offset 4 : General purpose register 507 - * Bits 7-3 : User definable (here, 7,6 are SCSI ID) 508 - * Bits 2-0 : reserved 509 - * 510 - * Offset 10 : DMA decode register (used for IO based DMA; also can do 511 - * PIO through this port) 512 - * 513 - * Offset 12 : Status 514 - * Bits 7-2 : reserved 515 - * Bit 1 : DMA pending (1 = pending) 516 - * Bit 0 : IRQ pending (0 = pending) 517 - * 518 - * Exciting, huh? 519 - * 520 - */
-606
drivers/scsi/oktagon_esp.c
··· 1 - /* 2 - * Oktagon_esp.c -- Driver for bsc Oktagon 3 - * 4 - * Written by Carsten Pluntke 1998 5 - * 6 - * Based on cyber_esp.c 7 - */ 8 - 9 - 10 - #if defined(CONFIG_AMIGA) || defined(CONFIG_APUS) 11 - #define USE_BOTTOM_HALF 12 - #endif 13 - 14 - #include <linux/module.h> 15 - 16 - #include <linux/kernel.h> 17 - #include <linux/delay.h> 18 - #include <linux/types.h> 19 - #include <linux/string.h> 20 - #include <linux/slab.h> 21 - #include <linux/blkdev.h> 22 - #include <linux/proc_fs.h> 23 - #include <linux/stat.h> 24 - #include <linux/reboot.h> 25 - #include <asm/system.h> 26 - #include <asm/ptrace.h> 27 - #include <asm/pgtable.h> 28 - 29 - 30 - #include "scsi.h" 31 - #include <scsi/scsi_host.h> 32 - #include "NCR53C9x.h" 33 - 34 - #include <linux/zorro.h> 35 - #include <asm/irq.h> 36 - #include <asm/amigaints.h> 37 - #include <asm/amigahw.h> 38 - 39 - #ifdef USE_BOTTOM_HALF 40 - #include <linux/workqueue.h> 41 - #include <linux/interrupt.h> 42 - #endif 43 - 44 - /* The controller registers can be found in the Z2 config area at these 45 - * offsets: 46 - */ 47 - #define OKTAGON_ESP_ADDR 0x03000 48 - #define OKTAGON_DMA_ADDR 0x01000 49 - 50 - 51 - static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count); 52 - static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp); 53 - static void dma_dump_state(struct NCR_ESP *esp); 54 - static void dma_init_read(struct NCR_ESP *esp, __u32 vaddress, int length); 55 - static void dma_init_write(struct NCR_ESP *esp, __u32 vaddress, int length); 56 - static void dma_ints_off(struct NCR_ESP *esp); 57 - static void dma_ints_on(struct NCR_ESP *esp); 58 - static int dma_irq_p(struct NCR_ESP *esp); 59 - static void dma_led_off(struct NCR_ESP *esp); 60 - static void dma_led_on(struct NCR_ESP *esp); 61 - static int dma_ports_p(struct NCR_ESP *esp); 62 - static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write); 63 - 64 - static void dma_irq_exit(struct NCR_ESP *esp); 65 - static void 
dma_invalidate(struct NCR_ESP *esp); 66 - 67 - static void dma_mmu_get_scsi_one(struct NCR_ESP *,Scsi_Cmnd *); 68 - static void dma_mmu_get_scsi_sgl(struct NCR_ESP *,Scsi_Cmnd *); 69 - static void dma_mmu_release_scsi_one(struct NCR_ESP *,Scsi_Cmnd *); 70 - static void dma_mmu_release_scsi_sgl(struct NCR_ESP *,Scsi_Cmnd *); 71 - static void dma_advance_sg(Scsi_Cmnd *); 72 - static int oktagon_notify_reboot(struct notifier_block *this, unsigned long code, void *x); 73 - 74 - #ifdef USE_BOTTOM_HALF 75 - static void dma_commit(struct work_struct *unused); 76 - 77 - long oktag_to_io(long *paddr, long *addr, long len); 78 - long oktag_from_io(long *addr, long *paddr, long len); 79 - 80 - static DECLARE_WORK(tq_fake_dma, dma_commit); 81 - 82 - #define DMA_MAXTRANSFER 0x8000 83 - 84 - #else 85 - 86 - /* 87 - * No bottom half. Use transfer directly from IRQ. Find a narrow path 88 - * between too much IRQ overhead and clogging the IRQ for too long. 89 - */ 90 - 91 - #define DMA_MAXTRANSFER 0x1000 92 - 93 - #endif 94 - 95 - static struct notifier_block oktagon_notifier = { 96 - oktagon_notify_reboot, 97 - NULL, 98 - 0 99 - }; 100 - 101 - static long *paddress; 102 - static long *address; 103 - static long len; 104 - static long dma_on; 105 - static int direction; 106 - static struct NCR_ESP *current_esp; 107 - 108 - 109 - static volatile unsigned char cmd_buffer[16]; 110 - /* This is where all commands are put 111 - * before they are trasfered to the ESP chip 112 - * via PIO. 
113 - */ 114 - 115 - /***************************************************************** Detection */ 116 - int oktagon_esp_detect(struct scsi_host_template *tpnt) 117 - { 118 - struct NCR_ESP *esp; 119 - struct zorro_dev *z = NULL; 120 - unsigned long address; 121 - struct ESP_regs *eregs; 122 - 123 - while ((z = zorro_find_device(ZORRO_PROD_BSC_OKTAGON_2008, z))) { 124 - unsigned long board = z->resource.start; 125 - if (request_mem_region(board+OKTAGON_ESP_ADDR, 126 - sizeof(struct ESP_regs), "NCR53C9x")) { 127 - /* 128 - * It is a SCSI controller. 129 - * Hardwire Host adapter to SCSI ID 7 130 - */ 131 - 132 - address = (unsigned long)ZTWO_VADDR(board); 133 - eregs = (struct ESP_regs *)(address + OKTAGON_ESP_ADDR); 134 - 135 - /* This line was 5 lines lower */ 136 - esp = esp_allocate(tpnt, (void *)board + OKTAGON_ESP_ADDR, 0); 137 - 138 - /* we have to shift the registers only one bit for oktagon */ 139 - esp->shift = 1; 140 - 141 - esp_write(eregs->esp_cfg1, (ESP_CONFIG1_PENABLE | 7)); 142 - udelay(5); 143 - if (esp_read(eregs->esp_cfg1) != (ESP_CONFIG1_PENABLE | 7)) 144 - return 0; /* Bail out if address did not hold data */ 145 - 146 - /* Do command transfer with programmed I/O */ 147 - esp->do_pio_cmds = 1; 148 - 149 - /* Required functions */ 150 - esp->dma_bytes_sent = &dma_bytes_sent; 151 - esp->dma_can_transfer = &dma_can_transfer; 152 - esp->dma_dump_state = &dma_dump_state; 153 - esp->dma_init_read = &dma_init_read; 154 - esp->dma_init_write = &dma_init_write; 155 - esp->dma_ints_off = &dma_ints_off; 156 - esp->dma_ints_on = &dma_ints_on; 157 - esp->dma_irq_p = &dma_irq_p; 158 - esp->dma_ports_p = &dma_ports_p; 159 - esp->dma_setup = &dma_setup; 160 - 161 - /* Optional functions */ 162 - esp->dma_barrier = 0; 163 - esp->dma_drain = 0; 164 - esp->dma_invalidate = &dma_invalidate; 165 - esp->dma_irq_entry = 0; 166 - esp->dma_irq_exit = &dma_irq_exit; 167 - esp->dma_led_on = &dma_led_on; 168 - esp->dma_led_off = &dma_led_off; 169 - esp->dma_poll = 0; 170 
- esp->dma_reset = 0; 171 - 172 - esp->dma_mmu_get_scsi_one = &dma_mmu_get_scsi_one; 173 - esp->dma_mmu_get_scsi_sgl = &dma_mmu_get_scsi_sgl; 174 - esp->dma_mmu_release_scsi_one = &dma_mmu_release_scsi_one; 175 - esp->dma_mmu_release_scsi_sgl = &dma_mmu_release_scsi_sgl; 176 - esp->dma_advance_sg = &dma_advance_sg; 177 - 178 - /* SCSI chip speed */ 179 - /* Looking at the quartz of the SCSI board... */ 180 - esp->cfreq = 25000000; 181 - 182 - /* The DMA registers on the CyberStorm are mapped 183 - * relative to the device (i.e. in the same Zorro 184 - * I/O block). 185 - */ 186 - esp->dregs = (void *)(address + OKTAGON_DMA_ADDR); 187 - 188 - paddress = (long *) esp->dregs; 189 - 190 - /* ESP register base */ 191 - esp->eregs = eregs; 192 - 193 - /* Set the command buffer */ 194 - esp->esp_command = (volatile unsigned char*) cmd_buffer; 195 - 196 - /* Yes, the virtual address. See below. */ 197 - esp->esp_command_dvma = (__u32) cmd_buffer; 198 - 199 - esp->irq = IRQ_AMIGA_PORTS; 200 - request_irq(IRQ_AMIGA_PORTS, esp_intr, IRQF_SHARED, 201 - "BSC Oktagon SCSI", esp->ehost); 202 - 203 - /* Figure out our scsi ID on the bus */ 204 - esp->scsi_id = 7; 205 - 206 - /* We don't have a differential SCSI-bus. */ 207 - esp->diff = 0; 208 - 209 - esp_initialize(esp); 210 - 211 - printk("ESP_Oktagon Driver 1.1" 212 - #ifdef USE_BOTTOM_HALF 213 - " [BOTTOM_HALF]" 214 - #else 215 - " [IRQ]" 216 - #endif 217 - " registered.\n"); 218 - printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps,esps_in_use); 219 - esps_running = esps_in_use; 220 - current_esp = esp; 221 - register_reboot_notifier(&oktagon_notifier); 222 - return esps_in_use; 223 - } 224 - } 225 - return 0; 226 - } 227 - 228 - 229 - /* 230 - * On certain configurations the SCSI equipment gets confused on reboot, 231 - * so we have to reset it then. 
232 - */ 233 - 234 - static int 235 - oktagon_notify_reboot(struct notifier_block *this, unsigned long code, void *x) 236 - { 237 - struct NCR_ESP *esp; 238 - 239 - if((code == SYS_DOWN || code == SYS_HALT) && (esp = current_esp)) 240 - { 241 - esp_bootup_reset(esp,esp->eregs); 242 - udelay(500); /* Settle time. Maybe unnecessary. */ 243 - } 244 - return NOTIFY_DONE; 245 - } 246 - 247 - 248 - 249 - #ifdef USE_BOTTOM_HALF 250 - 251 - 252 - /* 253 - * The bsc Oktagon controller has no real DMA, so we have to do the 'DMA 254 - * transfer' in the interrupt (Yikes!) or use a bottom half to not to clutter 255 - * IRQ's for longer-than-good. 256 - * 257 - * FIXME 258 - * BIG PROBLEM: 'len' is usually the buffer length, not the expected length 259 - * of the data. So DMA may finish prematurely, further reads lead to 260 - * 'machine check' on APUS systems (don't know about m68k systems, AmigaOS 261 - * deliberately ignores the bus faults) and a normal copy-loop can't 262 - * be exited prematurely just at the right moment by the dma_invalidate IRQ. 263 - * So do it the hard way, write an own copier in assembler and 264 - * catch the exception. 265 - * -- Carsten 266 - */ 267 - 268 - 269 - static void dma_commit(struct work_struct *unused) 270 - { 271 - long wait,len2,pos; 272 - struct NCR_ESP *esp; 273 - 274 - ESPDATA(("Transfer: %ld bytes, Address 0x%08lX, Direction: %d\n", 275 - len,(long) address,direction)); 276 - dma_ints_off(current_esp); 277 - 278 - pos = 0; 279 - wait = 1; 280 - if(direction) /* write? 
(memory to device) */ 281 - { 282 - while(len > 0) 283 - { 284 - len2 = oktag_to_io(paddress, address+pos, len); 285 - if(!len2) 286 - { 287 - if(wait > 1000) 288 - { 289 - printk("Expedited DMA exit (writing) %ld\n",len); 290 - break; 291 - } 292 - mdelay(wait); 293 - wait *= 2; 294 - } 295 - pos += len2; 296 - len -= len2*sizeof(long); 297 - } 298 - } else { 299 - while(len > 0) 300 - { 301 - len2 = oktag_from_io(address+pos, paddress, len); 302 - if(!len2) 303 - { 304 - if(wait > 1000) 305 - { 306 - printk("Expedited DMA exit (reading) %ld\n",len); 307 - break; 308 - } 309 - mdelay(wait); 310 - wait *= 2; 311 - } 312 - pos += len2; 313 - len -= len2*sizeof(long); 314 - } 315 - } 316 - 317 - /* to make esp->shift work */ 318 - esp=current_esp; 319 - 320 - #if 0 321 - len2 = (esp_read(current_esp->eregs->esp_tclow) & 0xff) | 322 - ((esp_read(current_esp->eregs->esp_tcmed) & 0xff) << 8); 323 - 324 - /* 325 - * Uh uh. If you see this, len and transfer count registers were out of 326 - * sync. That means really serious trouble. 327 - */ 328 - 329 - if(len2) 330 - printk("Eeeek!! Transfer count still %ld!\n",len2); 331 - #endif 332 - 333 - /* 334 - * Normally we just need to exit and wait for the interrupt to come. 335 - * But at least one device (my Microtek ScanMaker 630) regularly mis- 336 - * calculates the bytes it should send which is really ugly because 337 - * it locks up the SCSI bus if not accounted for. 338 - */ 339 - 340 - if(!(esp_read(current_esp->eregs->esp_status) & ESP_STAT_INTR)) 341 - { 342 - long len = 100; 343 - long trash[10]; 344 - 345 - /* 346 - * Interrupt bit was not set. Either the device is just plain lazy 347 - * so we give it a 10 ms chance or... 348 - */ 349 - while(len-- && (!(esp_read(current_esp->eregs->esp_status) & ESP_STAT_INTR))) 350 - udelay(100); 351 - 352 - 353 - if(!(esp_read(current_esp->eregs->esp_status) & ESP_STAT_INTR)) 354 - { 355 - /* 356 - * So we think that the transfer count is out of sync. 
Since we 357 - * have all we want we are happy and can ditch the trash. 358 - */ 359 - 360 - len = DMA_MAXTRANSFER; 361 - 362 - while(len-- && (!(esp_read(current_esp->eregs->esp_status) & ESP_STAT_INTR))) 363 - oktag_from_io(trash,paddress,2); 364 - 365 - if(!(esp_read(current_esp->eregs->esp_status) & ESP_STAT_INTR)) 366 - { 367 - /* 368 - * Things really have gone wrong. If we leave the system in that 369 - * state, the SCSI bus is locked forever. I hope that this will 370 - * turn the system in a more or less running state. 371 - */ 372 - printk("Device is bolixed, trying bus reset...\n"); 373 - esp_bootup_reset(current_esp,current_esp->eregs); 374 - } 375 - } 376 - } 377 - 378 - ESPDATA(("Transfer_finale: do_data_finale should come\n")); 379 - 380 - len = 0; 381 - dma_on = 0; 382 - dma_ints_on(current_esp); 383 - } 384 - 385 - #endif 386 - 387 - /************************************************************* DMA Functions */ 388 - static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count) 389 - { 390 - /* Since the CyberStorm DMA is fully dedicated to the ESP chip, 391 - * the number of bytes sent (to the ESP chip) equals the number 392 - * of bytes in the FIFO - there is no buffering in the DMA controller. 393 - * XXXX Do I read this right? It is from host to ESP, right? 394 - */ 395 - return fifo_count; 396 - } 397 - 398 - static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp) 399 - { 400 - unsigned long sz = sp->SCp.this_residual; 401 - if(sz > DMA_MAXTRANSFER) 402 - sz = DMA_MAXTRANSFER; 403 - return sz; 404 - } 405 - 406 - static void dma_dump_state(struct NCR_ESP *esp) 407 - { 408 - } 409 - 410 - /* 411 - * What the f$@& is this? 412 - * 413 - * Some SCSI devices (like my Microtek ScanMaker 630 scanner) want to transfer 414 - * more data than requested. How much? Dunno. So ditch the bogus data into 415 - * the sink, hoping the device will advance to the next phase sooner or later. 
416 - * 417 - * -- Carsten 418 - */ 419 - 420 - static long oktag_eva_buffer[16]; /* The data sink */ 421 - 422 - static void oktag_check_dma(void) 423 - { 424 - struct NCR_ESP *esp; 425 - 426 - esp=current_esp; 427 - if(!len) 428 - { 429 - address = oktag_eva_buffer; 430 - len = 2; 431 - /* esp_do_data sets them to zero like len */ 432 - esp_write(current_esp->eregs->esp_tclow,2); 433 - esp_write(current_esp->eregs->esp_tcmed,0); 434 - } 435 - } 436 - 437 - static void dma_init_read(struct NCR_ESP *esp, __u32 vaddress, int length) 438 - { 439 - /* Zorro is noncached, everything else done using processor. */ 440 - /* cache_clear(addr, length); */ 441 - 442 - if(dma_on) 443 - panic("dma_init_read while dma process is initialized/running!\n"); 444 - direction = 0; 445 - address = (long *) vaddress; 446 - current_esp = esp; 447 - len = length; 448 - oktag_check_dma(); 449 - dma_on = 1; 450 - } 451 - 452 - static void dma_init_write(struct NCR_ESP *esp, __u32 vaddress, int length) 453 - { 454 - /* cache_push(addr, length); */ 455 - 456 - if(dma_on) 457 - panic("dma_init_write while dma process is initialized/running!\n"); 458 - direction = 1; 459 - address = (long *) vaddress; 460 - current_esp = esp; 461 - len = length; 462 - oktag_check_dma(); 463 - dma_on = 1; 464 - } 465 - 466 - static void dma_ints_off(struct NCR_ESP *esp) 467 - { 468 - disable_irq(esp->irq); 469 - } 470 - 471 - static void dma_ints_on(struct NCR_ESP *esp) 472 - { 473 - enable_irq(esp->irq); 474 - } 475 - 476 - static int dma_irq_p(struct NCR_ESP *esp) 477 - { 478 - /* It's important to check the DMA IRQ bit in the correct way! 
*/ 479 - return (esp_read(esp->eregs->esp_status) & ESP_STAT_INTR); 480 - } 481 - 482 - static void dma_led_off(struct NCR_ESP *esp) 483 - { 484 - } 485 - 486 - static void dma_led_on(struct NCR_ESP *esp) 487 - { 488 - } 489 - 490 - static int dma_ports_p(struct NCR_ESP *esp) 491 - { 492 - return ((amiga_custom.intenar) & IF_PORTS); 493 - } 494 - 495 - static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write) 496 - { 497 - /* On the Sparc, DMA_ST_WRITE means "move data from device to memory" 498 - * so when (write) is true, it actually means READ! 499 - */ 500 - if(write){ 501 - dma_init_read(esp, addr, count); 502 - } else { 503 - dma_init_write(esp, addr, count); 504 - } 505 - } 506 - 507 - /* 508 - * IRQ entry when DMA transfer is ready to be started 509 - */ 510 - 511 - static void dma_irq_exit(struct NCR_ESP *esp) 512 - { 513 - #ifdef USE_BOTTOM_HALF 514 - if(dma_on) 515 - { 516 - schedule_work(&tq_fake_dma); 517 - } 518 - #else 519 - while(len && !dma_irq_p(esp)) 520 - { 521 - if(direction) 522 - *paddress = *address++; 523 - else 524 - *address++ = *paddress; 525 - len -= (sizeof(long)); 526 - } 527 - len = 0; 528 - dma_on = 0; 529 - #endif 530 - } 531 - 532 - /* 533 - * IRQ entry when DMA has just finished 534 - */ 535 - 536 - static void dma_invalidate(struct NCR_ESP *esp) 537 - { 538 - } 539 - 540 - /* 541 - * Since the processor does the data transfer we have to use the custom 542 - * mmu interface to pass the virtual address, not the physical. 
543 - */ 544 - 545 - void dma_mmu_get_scsi_one(struct NCR_ESP *esp, Scsi_Cmnd *sp) 546 - { 547 - sp->SCp.ptr = 548 - sp->request_buffer; 549 - } 550 - 551 - void dma_mmu_get_scsi_sgl(struct NCR_ESP *esp, Scsi_Cmnd *sp) 552 - { 553 - sp->SCp.ptr = sg_virt(sp->SCp.buffer); 554 - } 555 - 556 - void dma_mmu_release_scsi_one(struct NCR_ESP *esp, Scsi_Cmnd *sp) 557 - { 558 - } 559 - 560 - void dma_mmu_release_scsi_sgl(struct NCR_ESP *esp, Scsi_Cmnd *sp) 561 - { 562 - } 563 - 564 - void dma_advance_sg(Scsi_Cmnd *sp) 565 - { 566 - sp->SCp.ptr = sg_virt(sp->SCp.buffer); 567 - } 568 - 569 - 570 - #define HOSTS_C 571 - 572 - int oktagon_esp_release(struct Scsi_Host *instance) 573 - { 574 - #ifdef MODULE 575 - unsigned long address = (unsigned long)((struct NCR_ESP *)instance->hostdata)->edev; 576 - esp_release(); 577 - release_mem_region(address, sizeof(struct ESP_regs)); 578 - free_irq(IRQ_AMIGA_PORTS, esp_intr); 579 - unregister_reboot_notifier(&oktagon_notifier); 580 - #endif 581 - return 1; 582 - } 583 - 584 - 585 - static struct scsi_host_template driver_template = { 586 - .proc_name = "esp-oktagon", 587 - .proc_info = &esp_proc_info, 588 - .name = "BSC Oktagon SCSI", 589 - .detect = oktagon_esp_detect, 590 - .slave_alloc = esp_slave_alloc, 591 - .slave_destroy = esp_slave_destroy, 592 - .release = oktagon_esp_release, 593 - .queuecommand = esp_queue, 594 - .eh_abort_handler = esp_abort, 595 - .eh_bus_reset_handler = esp_reset, 596 - .can_queue = 7, 597 - .this_id = 7, 598 - .sg_tablesize = SG_ALL, 599 - .cmd_per_lun = 1, 600 - .use_clustering = ENABLE_CLUSTERING 601 - }; 602 - 603 - 604 - #include "scsi_module.c" 605 - 606 - MODULE_LICENSE("GPL");
-194
drivers/scsi/oktagon_io.S
··· 1 - /* -*- mode: asm -*- 2 - * Due to problems while transferring data I've put these routines as assembly 3 - * code. 4 - * Since I'm no PPC assembler guru, the code is just the assembler version of 5 - 6 - int oktag_to_io(long *paddr,long *addr,long len) 7 - { 8 - long *addr2 = addr; 9 - for(len=(len+sizeof(long)-1)/sizeof(long);len--;) 10 - *paddr = *addr2++; 11 - return addr2 - addr; 12 - } 13 - 14 - int oktag_from_io(long *addr,long *paddr,long len) 15 - { 16 - long *addr2 = addr; 17 - for(len=(len+sizeof(long)-1)/sizeof(long);len--;) 18 - *addr2++ = *paddr; 19 - return addr2 - addr; 20 - } 21 - 22 - * assembled using gcc -O2 -S, with two exception catch points where data 23 - * is moved to/from the IO register. 24 - */ 25 - 26 - 27 - #ifdef CONFIG_APUS 28 - 29 - .file "oktagon_io.c" 30 - 31 - gcc2_compiled.: 32 - /* 33 - .section ".text" 34 - */ 35 - .align 2 36 - .globl oktag_to_io 37 - .type oktag_to_io,@function 38 - oktag_to_io: 39 - addi 5,5,3 40 - srwi 5,5,2 41 - cmpwi 1,5,0 42 - mr 9,3 43 - mr 3,4 44 - addi 5,5,-1 45 - bc 12,6,.L3 46 - .L5: 47 - cmpwi 1,5,0 48 - lwz 0,0(3) 49 - addi 3,3,4 50 - addi 5,5,-1 51 - exp1: stw 0,0(9) 52 - bc 4,6,.L5 53 - .L3: 54 - ret1: subf 3,4,3 55 - srawi 3,3,2 56 - blr 57 - .Lfe1: 58 - .size oktag_to_io,.Lfe1-oktag_to_io 59 - .align 2 60 - .globl oktag_from_io 61 - .type oktag_from_io,@function 62 - oktag_from_io: 63 - addi 5,5,3 64 - srwi 5,5,2 65 - cmpwi 1,5,0 66 - mr 9,3 67 - addi 5,5,-1 68 - bc 12,6,.L9 69 - .L11: 70 - cmpwi 1,5,0 71 - exp2: lwz 0,0(4) 72 - addi 5,5,-1 73 - stw 0,0(3) 74 - addi 3,3,4 75 - bc 4,6,.L11 76 - .L9: 77 - ret2: subf 3,9,3 78 - srawi 3,3,2 79 - blr 80 - .Lfe2: 81 - .size oktag_from_io,.Lfe2-oktag_from_io 82 - .ident "GCC: (GNU) egcs-2.90.29 980515 (egcs-1.0.3 release)" 83 - 84 - /* 85 - * Exception table. 86 - * Second longword shows where to jump when an exception at the addr the first 87 - * longword is pointing to is caught. 
88 - */ 89 - 90 - .section __ex_table,"a" 91 - .align 2 92 - oktagon_except: 93 - .long exp1,ret1 94 - .long exp2,ret2 95 - 96 - #else 97 - 98 - /* 99 - The code which follows is for 680x0 based assembler and is meant for 100 - Linux/m68k. It was created by cross compiling the code using the 101 - instructions given above. I then added the four labels used in the 102 - exception handler table at the bottom of this file. 103 - - Kevin <kcozens@interlog.com> 104 - */ 105 - 106 - #ifdef CONFIG_AMIGA 107 - 108 - .file "oktagon_io.c" 109 - .version "01.01" 110 - gcc2_compiled.: 111 - .text 112 - .align 2 113 - .globl oktag_to_io 114 - .type oktag_to_io,@function 115 - oktag_to_io: 116 - link.w %a6,#0 117 - move.l %d2,-(%sp) 118 - move.l 8(%a6),%a1 119 - move.l 12(%a6),%d1 120 - move.l %d1,%a0 121 - move.l 16(%a6),%d0 122 - addq.l #3,%d0 123 - lsr.l #2,%d0 124 - subq.l #1,%d0 125 - moveq.l #-1,%d2 126 - cmp.l %d0,%d2 127 - jbeq .L3 128 - .L5: 129 - exp1: 130 - move.l (%a0)+,(%a1) 131 - dbra %d0,.L5 132 - clr.w %d0 133 - subq.l #1,%d0 134 - jbcc .L5 135 - .L3: 136 - ret1: 137 - move.l %a0,%d0 138 - sub.l %d1,%d0 139 - asr.l #2,%d0 140 - move.l -4(%a6),%d2 141 - unlk %a6 142 - rts 143 - 144 - .Lfe1: 145 - .size oktag_to_io,.Lfe1-oktag_to_io 146 - .align 2 147 - .globl oktag_from_io 148 - .type oktag_from_io,@function 149 - oktag_from_io: 150 - link.w %a6,#0 151 - move.l %d2,-(%sp) 152 - move.l 8(%a6),%d1 153 - move.l 12(%a6),%a1 154 - move.l %d1,%a0 155 - move.l 16(%a6),%d0 156 - addq.l #3,%d0 157 - lsr.l #2,%d0 158 - subq.l #1,%d0 159 - moveq.l #-1,%d2 160 - cmp.l %d0,%d2 161 - jbeq .L9 162 - .L11: 163 - exp2: 164 - move.l (%a1),(%a0)+ 165 - dbra %d0,.L11 166 - clr.w %d0 167 - subq.l #1,%d0 168 - jbcc .L11 169 - .L9: 170 - ret2: 171 - move.l %a0,%d0 172 - sub.l %d1,%d0 173 - asr.l #2,%d0 174 - move.l -4(%a6),%d2 175 - unlk %a6 176 - rts 177 - .Lfe2: 178 - .size oktag_from_io,.Lfe2-oktag_from_io 179 - .ident "GCC: (GNU) 2.7.2.1" 180 - 181 - /* 182 - * Exception table. 
183 - * Second longword shows where to jump when an exception at the addr the first 184 - * longword is pointing to is caught. 185 - */ 186 - 187 - .section __ex_table,"a" 188 - .align 2 189 - oktagon_except: 190 - .long exp1,ret1 191 - .long exp2,ret2 192 - 193 - #endif 194 - #endif
+1 -1
drivers/scsi/ps3rom.c
··· 35 35 36 36 #define BOUNCE_SIZE (64*1024) 37 37 38 - #define PS3ROM_MAX_SECTORS (BOUNCE_SIZE / CD_FRAMESIZE) 38 + #define PS3ROM_MAX_SECTORS (BOUNCE_SIZE >> 9) 39 39 40 40 41 41 struct ps3rom_private {
+20 -4
drivers/scsi/qla2xxx/qla_attr.c
··· 428 428 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != SFP_DEV_SIZE * 2) 429 429 return 0; 430 430 431 + if (ha->sfp_data) 432 + goto do_read; 433 + 434 + ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 435 + &ha->sfp_data_dma); 436 + if (!ha->sfp_data) { 437 + qla_printk(KERN_WARNING, ha, 438 + "Unable to allocate memory for SFP read-data.\n"); 439 + return 0; 440 + } 441 + 442 + do_read: 443 + memset(ha->sfp_data, 0, SFP_BLOCK_SIZE); 431 444 addr = 0xa0; 432 445 for (iter = 0, offset = 0; iter < (SFP_DEV_SIZE * 2) / SFP_BLOCK_SIZE; 433 446 iter++, offset += SFP_BLOCK_SIZE) { ··· 848 835 static void 849 836 qla2x00_get_host_speed(struct Scsi_Host *shost) 850 837 { 851 - scsi_qla_host_t *ha = shost_priv(shost); 838 + scsi_qla_host_t *ha = to_qla_parent(shost_priv(shost)); 852 839 uint32_t speed = 0; 853 840 854 841 switch (ha->link_data_rate) { ··· 861 848 case PORT_SPEED_4GB: 862 849 speed = 4; 863 850 break; 851 + case PORT_SPEED_8GB: 852 + speed = 8; 853 + break; 864 854 } 865 855 fc_host_speed(shost) = speed; 866 856 } ··· 871 855 static void 872 856 qla2x00_get_host_port_type(struct Scsi_Host *shost) 873 857 { 874 - scsi_qla_host_t *ha = shost_priv(shost); 858 + scsi_qla_host_t *ha = to_qla_parent(shost_priv(shost)); 875 859 uint32_t port_type = FC_PORTTYPE_UNKNOWN; 876 860 877 861 switch (ha->current_topology) { ··· 981 965 static struct fc_host_statistics * 982 966 qla2x00_get_fc_host_stats(struct Scsi_Host *shost) 983 967 { 984 - scsi_qla_host_t *ha = shost_priv(shost); 968 + scsi_qla_host_t *ha = to_qla_parent(shost_priv(shost)); 985 969 int rval; 986 970 struct link_statistics *stats; 987 971 dma_addr_t stats_dma; ··· 1065 1049 static void 1066 1050 qla2x00_get_host_port_state(struct Scsi_Host *shost) 1067 1051 { 1068 - scsi_qla_host_t *ha = shost_priv(shost); 1052 + scsi_qla_host_t *ha = to_qla_parent(shost_priv(shost)); 1069 1053 1070 1054 if (!ha->flags.online) 1071 1055 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
-2
drivers/scsi/qla2xxx/qla_def.h
··· 2041 2041 #define VP_RET_CODE_NO_MEM 5 2042 2042 #define VP_RET_CODE_NOT_FOUND 6 2043 2043 2044 - #define to_qla_parent(x) (((x)->parent) ? (x)->parent : (x)) 2045 - 2046 2044 /* 2047 2045 * ISP operations 2048 2046 */
+1
drivers/scsi/qla2xxx/qla_gbl.h
··· 66 66 extern int num_hosts; 67 67 68 68 extern int qla2x00_loop_reset(scsi_qla_host_t *); 69 + extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); 69 70 70 71 /* 71 72 * Global Functions in qla_mid.c source file.
+26 -61
drivers/scsi/qla2xxx/qla_init.c
··· 925 925 { 926 926 int rval; 927 927 uint32_t srisc_address = 0; 928 + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 929 + unsigned long flags; 930 + 931 + if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) { 932 + /* Disable SRAM, Instruction RAM and GP RAM parity. */ 933 + spin_lock_irqsave(&ha->hardware_lock, flags); 934 + WRT_REG_WORD(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0)); 935 + RD_REG_WORD(&reg->hccr); 936 + spin_unlock_irqrestore(&ha->hardware_lock, flags); 937 + } 928 938 929 939 /* Load firmware sequences */ 930 940 rval = ha->isp_ops->load_risc(ha, &srisc_address); ··· 976 966 "scsi(%ld): ISP Firmware failed checksum.\n", 977 967 ha->host_no)); 978 968 } 969 + } 970 + 971 + if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) { 972 + /* Enable proper parity. */ 973 + spin_lock_irqsave(&ha->hardware_lock, flags); 974 + if (IS_QLA2300(ha)) 975 + /* SRAM parity */ 976 + WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x1); 977 + else 978 + /* SRAM, Instruction RAM and GP RAM parity */ 979 + WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x7); 980 + RD_REG_WORD(&reg->hccr); 981 + spin_unlock_irqrestore(&ha->hardware_lock, flags); 979 982 } 980 983 981 984 if (rval) { ··· 3236 3213 qla2x00_abort_isp(scsi_qla_host_t *ha) 3237 3214 { 3238 3215 int rval; 3239 - unsigned long flags = 0; 3240 - uint16_t cnt; 3241 - srb_t *sp; 3242 3216 uint8_t status = 0; 3243 3217 3244 3218 if (ha->flags.online) { ··· 3256 3236 LOOP_DOWN_TIME); 3257 3237 } 3258 3238 3259 - spin_lock_irqsave(&ha->hardware_lock, flags); 3260 3239 /* Requeue all commands in outstanding command list. 
*/ 3261 - for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { 3262 - sp = ha->outstanding_cmds[cnt]; 3263 - if (sp) { 3264 - ha->outstanding_cmds[cnt] = NULL; 3265 - sp->flags = 0; 3266 - sp->cmd->result = DID_RESET << 16; 3267 - sp->cmd->host_scribble = (unsigned char *)NULL; 3268 - qla2x00_sp_compl(ha, sp); 3269 - } 3270 - } 3271 - spin_unlock_irqrestore(&ha->hardware_lock, flags); 3240 + qla2x00_abort_all_cmds(ha, DID_RESET << 16); 3272 3241 3273 3242 ha->isp_ops->get_flash_version(ha, ha->request_ring); 3274 3243 ··· 3282 3273 clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags); 3283 3274 3284 3275 if (ha->eft) { 3276 + memset(ha->eft, 0, EFT_SIZE); 3285 3277 rval = qla2x00_enable_eft_trace(ha, 3286 3278 ha->eft_dma, EFT_NUM_BUFFERS); 3287 3279 if (rval) { ··· 3367 3357 qla2x00_restart_isp(scsi_qla_host_t *ha) 3368 3358 { 3369 3359 uint8_t status = 0; 3370 - struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 3371 - unsigned long flags = 0; 3372 3360 uint32_t wait_time; 3373 3361 3374 3362 /* If firmware needs to be loaded */ 3375 3363 if (qla2x00_isp_firmware(ha)) { 3376 3364 ha->flags.online = 0; 3377 - if (!(status = ha->isp_ops->chip_diag(ha))) { 3378 - if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 3379 - status = qla2x00_setup_chip(ha); 3380 - goto done; 3381 - } 3382 - 3383 - spin_lock_irqsave(&ha->hardware_lock, flags); 3384 - 3385 - if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha) && 3386 - !IS_QLA25XX(ha)) { 3387 - /* 3388 - * Disable SRAM, Instruction RAM and GP RAM 3389 - * parity. 
3390 - */ 3391 - WRT_REG_WORD(&reg->hccr, 3392 - (HCCR_ENABLE_PARITY + 0x0)); 3393 - RD_REG_WORD(&reg->hccr); 3394 - } 3395 - 3396 - spin_unlock_irqrestore(&ha->hardware_lock, flags); 3397 - 3365 + if (!(status = ha->isp_ops->chip_diag(ha))) 3398 3366 status = qla2x00_setup_chip(ha); 3399 - 3400 - spin_lock_irqsave(&ha->hardware_lock, flags); 3401 - 3402 - if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha) && 3403 - !IS_QLA25XX(ha)) { 3404 - /* Enable proper parity */ 3405 - if (IS_QLA2300(ha)) 3406 - /* SRAM parity */ 3407 - WRT_REG_WORD(&reg->hccr, 3408 - (HCCR_ENABLE_PARITY + 0x1)); 3409 - else 3410 - /* 3411 - * SRAM, Instruction RAM and GP RAM 3412 - * parity. 3413 - */ 3414 - WRT_REG_WORD(&reg->hccr, 3415 - (HCCR_ENABLE_PARITY + 0x7)); 3416 - RD_REG_WORD(&reg->hccr); 3417 - } 3418 - 3419 - spin_unlock_irqrestore(&ha->hardware_lock, flags); 3420 - } 3421 3367 } 3422 3368 3423 - done: 3424 3369 if (!status && !(status = qla2x00_init_rings(ha))) { 3425 3370 clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); 3426 3371 if (!(status = qla2x00_fw_ready(ha))) {
+7
drivers/scsi/qla2xxx/qla_inline.h
··· 119 119 qla2x00_get_firmware_state(ha, &fw_state); 120 120 } 121 121 122 + static __inline__ scsi_qla_host_t * to_qla_parent(scsi_qla_host_t *); 123 + static __inline__ scsi_qla_host_t * 124 + to_qla_parent(scsi_qla_host_t *ha) 125 + { 126 + return ha->parent ? ha->parent : ha; 127 + } 128 + 122 129 /** 123 130 * qla2x00_issue_marker() - Issue a Marker IOCB if necessary. 124 131 * @ha: HA context
+22 -5
drivers/scsi/qla2xxx/qla_isr.c
··· 1815 1815 qla2x00_request_irqs(scsi_qla_host_t *ha) 1816 1816 { 1817 1817 int ret; 1818 + device_reg_t __iomem *reg = ha->iobase; 1819 + unsigned long flags; 1818 1820 1819 1821 /* If possible, enable MSI-X. */ 1820 1822 if (!IS_QLA2432(ha) && !IS_QLA2532(ha)) ··· 1848 1846 DEBUG2(qla_printk(KERN_INFO, ha, 1849 1847 "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision, 1850 1848 ha->fw_attributes)); 1851 - return ret; 1849 + goto clear_risc_ints; 1852 1850 } 1853 1851 qla_printk(KERN_WARNING, ha, 1854 1852 "MSI-X: Falling back-to INTa mode -- %d.\n", ret); ··· 1866 1864 1867 1865 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, 1868 1866 IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, ha); 1869 - if (!ret) { 1870 - ha->flags.inta_enabled = 1; 1871 - ha->host->irq = ha->pdev->irq; 1872 - } else { 1867 + if (ret) { 1873 1868 qla_printk(KERN_WARNING, ha, 1874 1869 "Failed to reserve interrupt %d already in use.\n", 1875 1870 ha->pdev->irq); 1871 + goto fail; 1876 1872 } 1873 + ha->flags.inta_enabled = 1; 1874 + ha->host->irq = ha->pdev->irq; 1875 + clear_risc_ints: 1877 1876 1877 + ha->isp_ops->disable_intrs(ha); 1878 + spin_lock_irqsave(&ha->hardware_lock, flags); 1879 + if (IS_FWI2_CAPABLE(ha)) { 1880 + WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT); 1881 + WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT); 1882 + } else { 1883 + WRT_REG_WORD(&reg->isp.semaphore, 0); 1884 + WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT); 1885 + WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT); 1886 + } 1887 + spin_unlock_irqrestore(&ha->hardware_lock, flags); 1888 + ha->isp_ops->enable_intrs(ha); 1889 + 1890 + fail: 1878 1891 return ret; 1879 1892 } 1880 1893
+1 -1
drivers/scsi/qla2xxx/qla_mbx.c
··· 980 980 DEBUG11(printk("qla2x00_init_firmware(%ld): entered.\n", 981 981 ha->host_no)); 982 982 983 - if (ha->fw_attributes & BIT_2) 983 + if (ha->flags.npiv_supported) 984 984 mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE; 985 985 else 986 986 mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
+134 -258
drivers/scsi/qla2xxx/qla_os.c
··· 204 204 205 205 static void qla2x00_rst_aen(scsi_qla_host_t *); 206 206 207 - static uint8_t qla2x00_mem_alloc(scsi_qla_host_t *); 207 + static int qla2x00_mem_alloc(scsi_qla_host_t *); 208 208 static void qla2x00_mem_free(scsi_qla_host_t *ha); 209 - static int qla2x00_allocate_sp_pool( scsi_qla_host_t *ha); 210 - static void qla2x00_free_sp_pool(scsi_qla_host_t *ha); 211 209 static void qla2x00_sp_free_dma(scsi_qla_host_t *, srb_t *); 212 210 213 211 /* -------------------------------------------------------------------------- */ ··· 1115 1117 return ha->isp_ops->abort_target(reset_fcport); 1116 1118 } 1117 1119 1120 + void 1121 + qla2x00_abort_all_cmds(scsi_qla_host_t *ha, int res) 1122 + { 1123 + int cnt; 1124 + unsigned long flags; 1125 + srb_t *sp; 1126 + 1127 + spin_lock_irqsave(&ha->hardware_lock, flags); 1128 + for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { 1129 + sp = ha->outstanding_cmds[cnt]; 1130 + if (sp) { 1131 + ha->outstanding_cmds[cnt] = NULL; 1132 + sp->flags = 0; 1133 + sp->cmd->result = res; 1134 + sp->cmd->host_scribble = (unsigned char *)NULL; 1135 + qla2x00_sp_compl(ha, sp); 1136 + } 1137 + } 1138 + spin_unlock_irqrestore(&ha->hardware_lock, flags); 1139 + } 1140 + 1118 1141 static int 1119 1142 qla2xxx_slave_alloc(struct scsi_device *sdev) 1120 1143 { ··· 1576 1557 qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) 1577 1558 { 1578 1559 int ret = -ENODEV; 1579 - device_reg_t __iomem *reg; 1580 1560 struct Scsi_Host *host; 1581 1561 scsi_qla_host_t *ha; 1582 - unsigned long flags = 0; 1583 1562 char pci_info[30]; 1584 1563 char fw_str[30]; 1585 1564 struct scsi_host_template *sht; ··· 1625 1608 ha->parent = NULL; 1626 1609 ha->bars = bars; 1627 1610 ha->mem_only = mem_only; 1611 + spin_lock_init(&ha->hardware_lock); 1628 1612 1629 1613 /* Set ISP-type information. 
*/ 1630 1614 qla2x00_set_isp_flags(ha); ··· 1638 1620 qla_printk(KERN_INFO, ha, 1639 1621 "Found an ISP%04X, irq %d, iobase 0x%p\n", pdev->device, pdev->irq, 1640 1622 ha->iobase); 1641 - 1642 - spin_lock_init(&ha->hardware_lock); 1643 1623 1644 1624 ha->prev_topology = 0; 1645 1625 ha->init_cb_size = sizeof(init_cb_t); ··· 1767 1751 DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n", 1768 1752 ha->host_no, ha)); 1769 1753 1770 - ha->isp_ops->disable_intrs(ha); 1771 - 1772 - spin_lock_irqsave(&ha->hardware_lock, flags); 1773 - reg = ha->iobase; 1774 - if (IS_FWI2_CAPABLE(ha)) { 1775 - WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT); 1776 - WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT); 1777 - } else { 1778 - WRT_REG_WORD(&reg->isp.semaphore, 0); 1779 - WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT); 1780 - WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT); 1781 - 1782 - /* Enable proper parity */ 1783 - if (!IS_QLA2100(ha) && !IS_QLA2200(ha)) { 1784 - if (IS_QLA2300(ha)) 1785 - /* SRAM parity */ 1786 - WRT_REG_WORD(&reg->isp.hccr, 1787 - (HCCR_ENABLE_PARITY + 0x1)); 1788 - else 1789 - /* SRAM, Instruction RAM and GP RAM parity */ 1790 - WRT_REG_WORD(&reg->isp.hccr, 1791 - (HCCR_ENABLE_PARITY + 0x7)); 1792 - } 1793 - } 1794 - spin_unlock_irqrestore(&ha->hardware_lock, flags); 1795 - 1796 - ha->isp_ops->enable_intrs(ha); 1797 - 1798 1754 pci_set_drvdata(pdev, ha); 1799 1755 1800 1756 ha->flags.init_done = 1; ··· 1836 1848 static void 1837 1849 qla2x00_free_device(scsi_qla_host_t *ha) 1838 1850 { 1851 + qla2x00_abort_all_cmds(ha, DID_NO_CONNECT << 16); 1852 + 1839 1853 /* Disable timer */ 1840 1854 if (ha->timer_active) 1841 1855 qla2x00_stop_timer(ha); 1856 + 1857 + ha->flags.online = 0; 1842 1858 1843 1859 /* Kill the kernel thread for this host */ 1844 1860 if (ha->dpc_thread) { ··· 1861 1869 1862 1870 if (ha->eft) 1863 1871 qla2x00_disable_eft_trace(ha); 1864 - 1865 - ha->flags.online = 0; 1866 1872 1867 1873 /* Stop currently executing firmware. 
*/ 1868 1874 qla2x00_try_to_stop_firmware(ha); ··· 2000 2010 * 2001 2011 * Returns: 2002 2012 * 0 = success. 2003 - * 1 = failure. 2013 + * !0 = failure. 2004 2014 */ 2005 - static uint8_t 2015 + static int 2006 2016 qla2x00_mem_alloc(scsi_qla_host_t *ha) 2007 2017 { 2008 2018 char name[16]; 2009 - uint8_t status = 1; 2010 - int retry= 10; 2011 2019 2012 - do { 2013 - /* 2014 - * This will loop only once if everything goes well, else some 2015 - * number of retries will be performed to get around a kernel 2016 - * bug where available mem is not allocated until after a 2017 - * little delay and a retry. 2018 - */ 2019 - ha->request_ring = dma_alloc_coherent(&ha->pdev->dev, 2020 - (ha->request_q_length + 1) * sizeof(request_t), 2021 - &ha->request_dma, GFP_KERNEL); 2022 - if (ha->request_ring == NULL) { 2023 - qla_printk(KERN_WARNING, ha, 2024 - "Memory Allocation failed - request_ring\n"); 2020 + ha->request_ring = dma_alloc_coherent(&ha->pdev->dev, 2021 + (ha->request_q_length + 1) * sizeof(request_t), &ha->request_dma, 2022 + GFP_KERNEL); 2023 + if (!ha->request_ring) 2024 + goto fail; 2025 2025 2026 - qla2x00_mem_free(ha); 2027 - msleep(100); 2026 + ha->response_ring = dma_alloc_coherent(&ha->pdev->dev, 2027 + (ha->response_q_length + 1) * sizeof(response_t), 2028 + &ha->response_dma, GFP_KERNEL); 2029 + if (!ha->response_ring) 2030 + goto fail_free_request_ring; 2028 2031 2029 - continue; 2030 - } 2032 + ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE, 2033 + &ha->gid_list_dma, GFP_KERNEL); 2034 + if (!ha->gid_list) 2035 + goto fail_free_response_ring; 2031 2036 2032 - ha->response_ring = dma_alloc_coherent(&ha->pdev->dev, 2033 - (ha->response_q_length + 1) * sizeof(response_t), 2034 - &ha->response_dma, GFP_KERNEL); 2035 - if (ha->response_ring == NULL) { 2036 - qla_printk(KERN_WARNING, ha, 2037 - "Memory Allocation failed - response_ring\n"); 2037 + ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size, 2038 + &ha->init_cb_dma, 
GFP_KERNEL); 2039 + if (!ha->init_cb) 2040 + goto fail_free_gid_list; 2038 2041 2039 - qla2x00_mem_free(ha); 2040 - msleep(100); 2042 + snprintf(name, sizeof(name), "%s_%ld", QLA2XXX_DRIVER_NAME, 2043 + ha->host_no); 2044 + ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev, 2045 + DMA_POOL_SIZE, 8, 0); 2046 + if (!ha->s_dma_pool) 2047 + goto fail_free_init_cb; 2041 2048 2042 - continue; 2043 - } 2049 + ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep); 2050 + if (!ha->srb_mempool) 2051 + goto fail_free_s_dma_pool; 2044 2052 2045 - ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE, 2046 - &ha->gid_list_dma, GFP_KERNEL); 2047 - if (ha->gid_list == NULL) { 2048 - qla_printk(KERN_WARNING, ha, 2049 - "Memory Allocation failed - gid_list\n"); 2053 + /* Get memory for cached NVRAM */ 2054 + ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL); 2055 + if (!ha->nvram) 2056 + goto fail_free_srb_mempool; 2050 2057 2051 - qla2x00_mem_free(ha); 2052 - msleep(100); 2058 + /* Allocate memory for SNS commands */ 2059 + if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 2060 + /* Get consistent memory allocated for SNS commands */ 2061 + ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev, 2062 + sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL); 2063 + if (!ha->sns_cmd) 2064 + goto fail_free_nvram; 2065 + } else { 2066 + /* Get consistent memory allocated for MS IOCB */ 2067 + ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 2068 + &ha->ms_iocb_dma); 2069 + if (!ha->ms_iocb) 2070 + goto fail_free_nvram; 2053 2071 2054 - continue; 2055 - } 2056 - 2057 - /* get consistent memory allocated for init control block */ 2058 - ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, 2059 - ha->init_cb_size, &ha->init_cb_dma, GFP_KERNEL); 2060 - if (ha->init_cb == NULL) { 2061 - qla_printk(KERN_WARNING, ha, 2062 - "Memory Allocation failed - init_cb\n"); 2063 - 2064 - qla2x00_mem_free(ha); 2065 - msleep(100); 2066 - 2067 - continue; 2068 - } 2069 - 
memset(ha->init_cb, 0, ha->init_cb_size); 2070 - 2071 - snprintf(name, sizeof(name), "%s_%ld", QLA2XXX_DRIVER_NAME, 2072 - ha->host_no); 2073 - ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev, 2074 - DMA_POOL_SIZE, 8, 0); 2075 - if (ha->s_dma_pool == NULL) { 2076 - qla_printk(KERN_WARNING, ha, 2077 - "Memory Allocation failed - s_dma_pool\n"); 2078 - 2079 - qla2x00_mem_free(ha); 2080 - msleep(100); 2081 - 2082 - continue; 2083 - } 2084 - 2085 - if (qla2x00_allocate_sp_pool(ha)) { 2086 - qla_printk(KERN_WARNING, ha, 2087 - "Memory Allocation failed - " 2088 - "qla2x00_allocate_sp_pool()\n"); 2089 - 2090 - qla2x00_mem_free(ha); 2091 - msleep(100); 2092 - 2093 - continue; 2094 - } 2095 - 2096 - /* Allocate memory for SNS commands */ 2097 - if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 2098 - /* Get consistent memory allocated for SNS commands */ 2099 - ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev, 2100 - sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, 2101 - GFP_KERNEL); 2102 - if (ha->sns_cmd == NULL) { 2103 - /* error */ 2104 - qla_printk(KERN_WARNING, ha, 2105 - "Memory Allocation failed - sns_cmd\n"); 2106 - 2107 - qla2x00_mem_free(ha); 2108 - msleep(100); 2109 - 2110 - continue; 2111 - } 2112 - memset(ha->sns_cmd, 0, sizeof(struct sns_cmd_pkt)); 2113 - } else { 2114 - /* Get consistent memory allocated for MS IOCB */ 2115 - ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 2116 - &ha->ms_iocb_dma); 2117 - if (ha->ms_iocb == NULL) { 2118 - /* error */ 2119 - qla_printk(KERN_WARNING, ha, 2120 - "Memory Allocation failed - ms_iocb\n"); 2121 - 2122 - qla2x00_mem_free(ha); 2123 - msleep(100); 2124 - 2125 - continue; 2126 - } 2127 - memset(ha->ms_iocb, 0, sizeof(ms_iocb_entry_t)); 2128 - 2129 - /* 2130 - * Get consistent memory allocated for CT SNS 2131 - * commands 2132 - */ 2133 - ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev, 2134 - sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, 2135 - GFP_KERNEL); 2136 - if (ha->ct_sns == NULL) { 2137 - /* error */ 2138 - 
qla_printk(KERN_WARNING, ha, 2139 - "Memory Allocation failed - ct_sns\n"); 2140 - 2141 - qla2x00_mem_free(ha); 2142 - msleep(100); 2143 - 2144 - continue; 2145 - } 2146 - memset(ha->ct_sns, 0, sizeof(struct ct_sns_pkt)); 2147 - 2148 - if (IS_FWI2_CAPABLE(ha)) { 2149 - /* 2150 - * Get consistent memory allocated for SFP 2151 - * block. 2152 - */ 2153 - ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, 2154 - GFP_KERNEL, &ha->sfp_data_dma); 2155 - if (ha->sfp_data == NULL) { 2156 - qla_printk(KERN_WARNING, ha, 2157 - "Memory Allocation failed - " 2158 - "sfp_data\n"); 2159 - 2160 - qla2x00_mem_free(ha); 2161 - msleep(100); 2162 - 2163 - continue; 2164 - } 2165 - memset(ha->sfp_data, 0, SFP_BLOCK_SIZE); 2166 - } 2167 - } 2168 - 2169 - /* Get memory for cached NVRAM */ 2170 - ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL); 2171 - if (ha->nvram == NULL) { 2172 - /* error */ 2173 - qla_printk(KERN_WARNING, ha, 2174 - "Memory Allocation failed - nvram cache\n"); 2175 - 2176 - qla2x00_mem_free(ha); 2177 - msleep(100); 2178 - 2179 - continue; 2180 - } 2181 - 2182 - /* Done all allocations without any error. 
*/ 2183 - status = 0; 2184 - 2185 - } while (retry-- && status != 0); 2186 - 2187 - if (status) { 2188 - printk(KERN_WARNING 2189 - "%s(): **** FAILED ****\n", __func__); 2072 + /* Get consistent memory allocated for CT SNS commands */ 2073 + ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev, 2074 + sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL); 2075 + if (!ha->ct_sns) 2076 + goto fail_free_ms_iocb; 2190 2077 } 2191 2078 2192 - return(status); 2079 + return 0; 2080 + 2081 + fail_free_ms_iocb: 2082 + dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); 2083 + ha->ms_iocb = NULL; 2084 + ha->ms_iocb_dma = 0; 2085 + fail_free_nvram: 2086 + kfree(ha->nvram); 2087 + ha->nvram = NULL; 2088 + fail_free_srb_mempool: 2089 + mempool_destroy(ha->srb_mempool); 2090 + ha->srb_mempool = NULL; 2091 + fail_free_s_dma_pool: 2092 + dma_pool_destroy(ha->s_dma_pool); 2093 + ha->s_dma_pool = NULL; 2094 + fail_free_init_cb: 2095 + dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb, 2096 + ha->init_cb_dma); 2097 + ha->init_cb = NULL; 2098 + ha->init_cb_dma = 0; 2099 + fail_free_gid_list: 2100 + dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list, 2101 + ha->gid_list_dma); 2102 + ha->gid_list = NULL; 2103 + ha->gid_list_dma = 0; 2104 + fail_free_response_ring: 2105 + dma_free_coherent(&ha->pdev->dev, (ha->response_q_length + 1) * 2106 + sizeof(response_t), ha->response_ring, ha->response_dma); 2107 + ha->response_ring = NULL; 2108 + ha->response_dma = 0; 2109 + fail_free_request_ring: 2110 + dma_free_coherent(&ha->pdev->dev, (ha->request_q_length + 1) * 2111 + sizeof(request_t), ha->request_ring, ha->request_dma); 2112 + ha->request_ring = NULL; 2113 + ha->request_dma = 0; 2114 + fail: 2115 + return -ENOMEM; 2193 2116 } 2194 2117 2195 2118 /* ··· 2118 2215 struct list_head *fcpl, *fcptemp; 2119 2216 fc_port_t *fcport; 2120 2217 2121 - if (ha == NULL) { 2122 - /* error */ 2123 - DEBUG2(printk("%s(): ERROR invalid ha pointer.\n", __func__)); 2124 - 
return; 2125 - } 2126 - 2127 - /* free sp pool */ 2128 - qla2x00_free_sp_pool(ha); 2218 + if (ha->srb_mempool) 2219 + mempool_destroy(ha->srb_mempool); 2129 2220 2130 2221 if (ha->fce) 2131 2222 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce, ··· 2167 2270 (ha->request_q_length + 1) * sizeof(request_t), 2168 2271 ha->request_ring, ha->request_dma); 2169 2272 2273 + ha->srb_mempool = NULL; 2170 2274 ha->eft = NULL; 2171 2275 ha->eft_dma = 0; 2172 2276 ha->sns_cmd = NULL; ··· 2206 2308 kfree(ha->nvram); 2207 2309 } 2208 2310 2209 - /* 2210 - * qla2x00_allocate_sp_pool 2211 - * This routine is called during initialization to allocate 2212 - * memory for local srb_t. 2213 - * 2214 - * Input: 2215 - * ha = adapter block pointer. 2216 - * 2217 - * Context: 2218 - * Kernel context. 2219 - */ 2220 - static int 2221 - qla2x00_allocate_sp_pool(scsi_qla_host_t *ha) 2222 - { 2223 - int rval; 2224 - 2225 - rval = QLA_SUCCESS; 2226 - ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep); 2227 - if (ha->srb_mempool == NULL) { 2228 - qla_printk(KERN_INFO, ha, "Unable to allocate SRB mempool.\n"); 2229 - rval = QLA_FUNCTION_FAILED; 2230 - } 2231 - return (rval); 2232 - } 2233 - 2234 - /* 2235 - * This routine frees all adapter allocated memory. 
2236 - * 2237 - */ 2238 - static void 2239 - qla2x00_free_sp_pool( scsi_qla_host_t *ha) 2240 - { 2241 - if (ha->srb_mempool) { 2242 - mempool_destroy(ha->srb_mempool); 2243 - ha->srb_mempool = NULL; 2244 - } 2245 - } 2246 - 2247 2311 /************************************************************************** 2248 2312 * qla2x00_do_dpc 2249 2313 * This kernel thread is a task that is schedule by the interrupt handler ··· 2227 2367 fc_port_t *fcport; 2228 2368 uint8_t status; 2229 2369 uint16_t next_loopid; 2370 + struct scsi_qla_host *vha; 2371 + int i; 2372 + 2230 2373 2231 2374 ha = (scsi_qla_host_t *)data; 2232 2375 ··· 2272 2409 } 2273 2410 clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags); 2274 2411 } 2412 + 2413 + for_each_mapped_vp_idx(ha, i) { 2414 + list_for_each_entry(vha, &ha->vp_list, 2415 + vp_list) { 2416 + if (i == vha->vp_idx) { 2417 + set_bit(ISP_ABORT_NEEDED, 2418 + &vha->dpc_flags); 2419 + break; 2420 + } 2421 + } 2422 + } 2423 + 2275 2424 DEBUG(printk("scsi(%ld): dpc: qla2x00_abort_isp end\n", 2276 2425 ha->host_no)); 2277 2426 } ··· 2904 3029 MODULE_FIRMWARE(FW_FILE_ISP2300); 2905 3030 MODULE_FIRMWARE(FW_FILE_ISP2322); 2906 3031 MODULE_FIRMWARE(FW_FILE_ISP24XX); 3032 + MODULE_FIRMWARE(FW_FILE_ISP25XX);
+15 -21
drivers/scsi/qla2xxx/qla_sup.c
··· 893 893 } 894 894 } 895 895 896 + #define PIO_REG(h, r) ((h)->pio_address + offsetof(struct device_reg_2xxx, r)) 897 + 896 898 void 897 899 qla2x00_beacon_blink(struct scsi_qla_host *ha) 898 900 { ··· 904 902 unsigned long flags; 905 903 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 906 904 907 - if (ha->pio_address) 908 - reg = (struct device_reg_2xxx __iomem *)ha->pio_address; 909 - 910 905 spin_lock_irqsave(&ha->hardware_lock, flags); 911 906 912 907 /* Save the Original GPIOE. */ 913 908 if (ha->pio_address) { 914 - gpio_enable = RD_REG_WORD_PIO(&reg->gpioe); 915 - gpio_data = RD_REG_WORD_PIO(&reg->gpiod); 909 + gpio_enable = RD_REG_WORD_PIO(PIO_REG(ha, gpioe)); 910 + gpio_data = RD_REG_WORD_PIO(PIO_REG(ha, gpiod)); 916 911 } else { 917 912 gpio_enable = RD_REG_WORD(&reg->gpioe); 918 913 gpio_data = RD_REG_WORD(&reg->gpiod); ··· 919 920 gpio_enable |= GPIO_LED_MASK; 920 921 921 922 if (ha->pio_address) { 922 - WRT_REG_WORD_PIO(&reg->gpioe, gpio_enable); 923 + WRT_REG_WORD_PIO(PIO_REG(ha, gpioe), gpio_enable); 923 924 } else { 924 925 WRT_REG_WORD(&reg->gpioe, gpio_enable); 925 926 RD_REG_WORD(&reg->gpioe); ··· 935 936 936 937 /* Set the modified gpio_data values */ 937 938 if (ha->pio_address) { 938 - WRT_REG_WORD_PIO(&reg->gpiod, gpio_data); 939 + WRT_REG_WORD_PIO(PIO_REG(ha, gpiod), gpio_data); 939 940 } else { 940 941 WRT_REG_WORD(&reg->gpiod, gpio_data); 941 942 RD_REG_WORD(&reg->gpiod); ··· 961 962 return QLA_FUNCTION_FAILED; 962 963 } 963 964 964 - if (ha->pio_address) 965 - reg = (struct device_reg_2xxx __iomem *)ha->pio_address; 966 - 967 965 /* Turn off LEDs. 
*/ 968 966 spin_lock_irqsave(&ha->hardware_lock, flags); 969 967 if (ha->pio_address) { 970 - gpio_enable = RD_REG_WORD_PIO(&reg->gpioe); 971 - gpio_data = RD_REG_WORD_PIO(&reg->gpiod); 968 + gpio_enable = RD_REG_WORD_PIO(PIO_REG(ha, gpioe)); 969 + gpio_data = RD_REG_WORD_PIO(PIO_REG(ha, gpiod)); 972 970 } else { 973 971 gpio_enable = RD_REG_WORD(&reg->gpioe); 974 972 gpio_data = RD_REG_WORD(&reg->gpiod); ··· 974 978 975 979 /* Set the modified gpio_enable values. */ 976 980 if (ha->pio_address) { 977 - WRT_REG_WORD_PIO(&reg->gpioe, gpio_enable); 981 + WRT_REG_WORD_PIO(PIO_REG(ha, gpioe), gpio_enable); 978 982 } else { 979 983 WRT_REG_WORD(&reg->gpioe, gpio_enable); 980 984 RD_REG_WORD(&reg->gpioe); ··· 983 987 /* Clear out previously set LED colour. */ 984 988 gpio_data &= ~GPIO_LED_MASK; 985 989 if (ha->pio_address) { 986 - WRT_REG_WORD_PIO(&reg->gpiod, gpio_data); 990 + WRT_REG_WORD_PIO(PIO_REG(ha, gpiod), gpio_data); 987 991 } else { 988 992 WRT_REG_WORD(&reg->gpiod, gpio_data); 989 993 RD_REG_WORD(&reg->gpiod); ··· 1240 1244 if (ha->pio_address) { 1241 1245 uint16_t data2; 1242 1246 1243 - reg = (struct device_reg_2xxx __iomem *)ha->pio_address; 1244 - WRT_REG_WORD_PIO(&reg->flash_address, (uint16_t)addr); 1247 + WRT_REG_WORD_PIO(PIO_REG(ha, flash_address), (uint16_t)addr); 1245 1248 do { 1246 - data = RD_REG_WORD_PIO(&reg->flash_data); 1249 + data = RD_REG_WORD_PIO(PIO_REG(ha, flash_data)); 1247 1250 barrier(); 1248 1251 cpu_relax(); 1249 - data2 = RD_REG_WORD_PIO(&reg->flash_data); 1252 + data2 = RD_REG_WORD_PIO(PIO_REG(ha, flash_data)); 1250 1253 } while (data != data2); 1251 1254 } else { 1252 1255 WRT_REG_WORD(&reg->flash_address, (uint16_t)addr); ··· 1299 1304 1300 1305 /* Always perform IO mapped accesses to the FLASH registers. 
*/ 1301 1306 if (ha->pio_address) { 1302 - reg = (struct device_reg_2xxx __iomem *)ha->pio_address; 1303 - WRT_REG_WORD_PIO(&reg->flash_address, (uint16_t)addr); 1304 - WRT_REG_WORD_PIO(&reg->flash_data, (uint16_t)data); 1307 + WRT_REG_WORD_PIO(PIO_REG(ha, flash_address), (uint16_t)addr); 1308 + WRT_REG_WORD_PIO(PIO_REG(ha, flash_data), (uint16_t)data); 1305 1309 } else { 1306 1310 WRT_REG_WORD(&reg->flash_address, (uint16_t)addr); 1307 1311 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+1 -1
drivers/scsi/qla2xxx/qla_version.h
··· 7 7 /* 8 8 * Driver version 9 9 */ 10 - #define QLA2XXX_VERSION "8.02.00-k7" 10 + #define QLA2XXX_VERSION "8.02.00-k8" 11 11 12 12 #define QLA_DRIVER_MAJOR_VER 8 13 13 #define QLA_DRIVER_MINOR_VER 2
+1
drivers/scsi/qla4xxx/ql4_init.c
··· 1306 1306 atomic_set(&ddb_entry->relogin_timer, 0); 1307 1307 clear_bit(DF_RELOGIN, &ddb_entry->flags); 1308 1308 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags); 1309 + iscsi_unblock_session(ddb_entry->sess); 1309 1310 iscsi_session_event(ddb_entry->sess, 1310 1311 ISCSI_KEVENT_CREATE_SESSION); 1311 1312 /*
+30 -45
drivers/scsi/qla4xxx/ql4_os.c
··· 63 63 enum iscsi_param param, char *buf); 64 64 static int qla4xxx_host_get_param(struct Scsi_Host *shost, 65 65 enum iscsi_host_param param, char *buf); 66 - static void qla4xxx_conn_stop(struct iscsi_cls_conn *conn, int flag); 67 - static int qla4xxx_conn_start(struct iscsi_cls_conn *conn); 68 66 static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session); 69 67 70 68 /* ··· 89 91 .slave_alloc = qla4xxx_slave_alloc, 90 92 .slave_destroy = qla4xxx_slave_destroy, 91 93 94 + .scan_finished = iscsi_scan_finished, 95 + 92 96 .this_id = -1, 93 97 .cmd_per_lun = 3, 94 98 .use_clustering = ENABLE_CLUSTERING, ··· 116 116 .get_conn_param = qla4xxx_conn_get_param, 117 117 .get_session_param = qla4xxx_sess_get_param, 118 118 .get_host_param = qla4xxx_host_get_param, 119 - .start_conn = qla4xxx_conn_start, 120 - .stop_conn = qla4xxx_conn_stop, 121 119 .session_recovery_timedout = qla4xxx_recovery_timedout, 122 120 }; 123 121 ··· 126 128 struct ddb_entry *ddb_entry = session->dd_data; 127 129 struct scsi_qla_host *ha = ddb_entry->ha; 128 130 129 - DEBUG2(printk("scsi%ld: %s: index [%d] port down retry count of (%d) " 130 - "secs exhausted, marking device DEAD.\n", ha->host_no, 131 - __func__, ddb_entry->fw_ddb_index, 132 - ha->port_down_retry_count)); 131 + if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) { 132 + atomic_set(&ddb_entry->state, DDB_STATE_DEAD); 133 133 134 - atomic_set(&ddb_entry->state, DDB_STATE_DEAD); 134 + DEBUG2(printk("scsi%ld: %s: index [%d] port down retry count " 135 + "of (%d) secs exhausted, marking device DEAD.\n", 136 + ha->host_no, __func__, ddb_entry->fw_ddb_index, 137 + ha->port_down_retry_count)); 135 138 136 - DEBUG2(printk("scsi%ld: %s: scheduling dpc routine - dpc flags = " 137 - "0x%lx\n", ha->host_no, __func__, ha->dpc_flags)); 138 - queue_work(ha->dpc_thread, &ha->dpc_work); 139 - } 140 - 141 - static int qla4xxx_conn_start(struct iscsi_cls_conn *conn) 142 - { 143 - struct iscsi_cls_session *session; 144 - struct 
ddb_entry *ddb_entry; 145 - 146 - session = iscsi_dev_to_session(conn->dev.parent); 147 - ddb_entry = session->dd_data; 148 - 149 - DEBUG2(printk("scsi%ld: %s: index [%d] starting conn\n", 150 - ddb_entry->ha->host_no, __func__, 151 - ddb_entry->fw_ddb_index)); 152 - iscsi_unblock_session(session); 153 - return 0; 154 - } 155 - 156 - static void qla4xxx_conn_stop(struct iscsi_cls_conn *conn, int flag) 157 - { 158 - struct iscsi_cls_session *session; 159 - struct ddb_entry *ddb_entry; 160 - 161 - session = iscsi_dev_to_session(conn->dev.parent); 162 - ddb_entry = session->dd_data; 163 - 164 - DEBUG2(printk("scsi%ld: %s: index [%d] stopping conn\n", 165 - ddb_entry->ha->host_no, __func__, 166 - ddb_entry->fw_ddb_index)); 167 - if (flag == STOP_CONN_RECOVER) 168 - iscsi_block_session(session); 169 - else 170 - printk(KERN_ERR "iscsi: invalid stop flag %d\n", flag); 139 + DEBUG2(printk("scsi%ld: %s: scheduling dpc routine - dpc " 140 + "flags = 0x%lx\n", 141 + ha->host_no, __func__, ha->dpc_flags)); 142 + queue_work(ha->dpc_thread, &ha->dpc_work); 143 + } 171 144 } 172 145 173 146 static int qla4xxx_host_get_param(struct Scsi_Host *shost, ··· 277 308 DEBUG2(printk(KERN_ERR "Could not add connection.\n")); 278 309 return -ENOMEM; 279 310 } 311 + 312 + /* finally ready to go */ 313 + iscsi_unblock_session(ddb_entry->sess); 280 314 return 0; 281 315 } 282 316 ··· 336 364 DEBUG3(printk("scsi%d:%d:%d: index [%d] marked MISSING\n", 337 365 ha->host_no, ddb_entry->bus, ddb_entry->target, 338 366 ddb_entry->fw_ddb_index)); 367 + iscsi_block_session(ddb_entry->sess); 339 368 iscsi_conn_error(ddb_entry->conn, ISCSI_ERR_CONN_FAILED); 340 369 } 341 370 ··· 403 430 { 404 431 struct scsi_qla_host *ha = to_qla_host(cmd->device->host); 405 432 struct ddb_entry *ddb_entry = cmd->device->hostdata; 433 + struct iscsi_cls_session *sess = ddb_entry->sess; 406 434 struct srb *srb; 407 435 int rval; 436 + 437 + if (!sess) { 438 + cmd->result = DID_IMM_RETRY << 16; 439 + goto qc_fail_command; 
440 + } 441 + 442 + rval = iscsi_session_chkready(sess); 443 + if (rval) { 444 + cmd->result = rval; 445 + goto qc_fail_command; 446 + } 408 447 409 448 if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) { 410 449 if (atomic_read(&ddb_entry->state) == DDB_STATE_DEAD) { ··· 1308 1323 qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev), 1309 1324 ha->host_no, ha->firmware_version[0], ha->firmware_version[1], 1310 1325 ha->patch_number, ha->build_number); 1311 - 1326 + scsi_scan_host(host); 1312 1327 return 0; 1313 1328 1314 1329 remove_host:
+3 -2
drivers/scsi/scsi.c
··· 969 969 EXPORT_SYMBOL(starget_for_each_device); 970 970 971 971 /** 972 - * __starget_for_each_device - helper to walk all devices of a target 973 - * (UNLOCKED) 972 + * __starget_for_each_device - helper to walk all devices of a target (UNLOCKED) 974 973 * @starget: target whose devices we want to iterate over. 974 + * @data: parameter for callback @fn() 975 + * @fn: callback function that is invoked for each device 975 976 * 976 977 * This traverses over each device of @starget. It does _not_ 977 978 * take a reference on the scsi_device, so the whole loop must be
-1
drivers/scsi/scsi_lib.c
··· 301 301 page = sg_page(sg); 302 302 off = sg->offset; 303 303 len = sg->length; 304 - data_len += len; 305 304 306 305 while (len > 0 && data_len > 0) { 307 306 /*
+199 -39
drivers/scsi/scsi_transport_iscsi.c
··· 30 30 #include <scsi/scsi_transport_iscsi.h> 31 31 #include <scsi/iscsi_if.h> 32 32 33 - #define ISCSI_SESSION_ATTRS 18 34 - #define ISCSI_CONN_ATTRS 11 33 + #define ISCSI_SESSION_ATTRS 19 34 + #define ISCSI_CONN_ATTRS 13 35 35 #define ISCSI_HOST_ATTRS 4 36 - #define ISCSI_TRANSPORT_VERSION "2.0-867" 36 + #define ISCSI_TRANSPORT_VERSION "2.0-868" 37 37 38 38 struct iscsi_internal { 39 39 int daemon_pid; ··· 127 127 memset(ihost, 0, sizeof(*ihost)); 128 128 INIT_LIST_HEAD(&ihost->sessions); 129 129 mutex_init(&ihost->mutex); 130 + atomic_set(&ihost->nr_scans, 0); 130 131 131 - snprintf(ihost->unbind_workq_name, KOBJ_NAME_LEN, "iscsi_unbind_%d", 132 + snprintf(ihost->scan_workq_name, KOBJ_NAME_LEN, "iscsi_scan_%d", 132 133 shost->host_no); 133 - ihost->unbind_workq = create_singlethread_workqueue( 134 - ihost->unbind_workq_name); 135 - if (!ihost->unbind_workq) 134 + ihost->scan_workq = create_singlethread_workqueue( 135 + ihost->scan_workq_name); 136 + if (!ihost->scan_workq) 136 137 return -ENOMEM; 137 138 return 0; 138 139 } ··· 144 143 struct Scsi_Host *shost = dev_to_shost(dev); 145 144 struct iscsi_host *ihost = shost->shost_data; 146 145 147 - destroy_workqueue(ihost->unbind_workq); 146 + destroy_workqueue(ihost->scan_workq); 148 147 return 0; 149 148 } 150 149 ··· 222 221 * The following functions can be used by LLDs that allocate 223 222 * their own scsi_hosts or by software iscsi LLDs 224 223 */ 224 + static struct { 225 + int value; 226 + char *name; 227 + } iscsi_session_state_names[] = { 228 + { ISCSI_SESSION_LOGGED_IN, "LOGGED_IN" }, 229 + { ISCSI_SESSION_FAILED, "FAILED" }, 230 + { ISCSI_SESSION_FREE, "FREE" }, 231 + }; 232 + 233 + const char *iscsi_session_state_name(int state) 234 + { 235 + int i; 236 + char *name = NULL; 237 + 238 + for (i = 0; i < ARRAY_SIZE(iscsi_session_state_names); i++) { 239 + if (iscsi_session_state_names[i].value == state) { 240 + name = iscsi_session_state_names[i].name; 241 + break; 242 + } 243 + } 244 + return name; 
245 + } 246 + 247 + int iscsi_session_chkready(struct iscsi_cls_session *session) 248 + { 249 + unsigned long flags; 250 + int err; 251 + 252 + spin_lock_irqsave(&session->lock, flags); 253 + switch (session->state) { 254 + case ISCSI_SESSION_LOGGED_IN: 255 + err = 0; 256 + break; 257 + case ISCSI_SESSION_FAILED: 258 + err = DID_IMM_RETRY << 16; 259 + break; 260 + case ISCSI_SESSION_FREE: 261 + err = DID_NO_CONNECT << 16; 262 + break; 263 + default: 264 + err = DID_NO_CONNECT << 16; 265 + break; 266 + } 267 + spin_unlock_irqrestore(&session->lock, flags); 268 + return err; 269 + } 270 + EXPORT_SYMBOL_GPL(iscsi_session_chkready); 271 + 225 272 static void iscsi_session_release(struct device *dev) 226 273 { 227 274 struct iscsi_cls_session *session = iscsi_dev_to_session(dev); ··· 284 235 { 285 236 return dev->release == iscsi_session_release; 286 237 } 238 + 239 + /** 240 + * iscsi_scan_finished - helper to report when running scans are done 241 + * @shost: scsi host 242 + * @time: scan run time 243 + * 244 + * This function can be used by drives like qla4xxx to report to the scsi 245 + * layer when the scans it kicked off at module load time are done. 246 + */ 247 + int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time) 248 + { 249 + struct iscsi_host *ihost = shost->shost_data; 250 + /* 251 + * qla4xxx will have kicked off some session unblocks before calling 252 + * scsi_scan_host, so just wait for them to complete. 
253 + */ 254 + return !atomic_read(&ihost->nr_scans); 255 + } 256 + EXPORT_SYMBOL_GPL(iscsi_scan_finished); 287 257 288 258 static int iscsi_user_scan(struct Scsi_Host *shost, uint channel, 289 259 uint id, uint lun) ··· 322 254 return 0; 323 255 } 324 256 257 + static void iscsi_scan_session(struct work_struct *work) 258 + { 259 + struct iscsi_cls_session *session = 260 + container_of(work, struct iscsi_cls_session, scan_work); 261 + struct Scsi_Host *shost = iscsi_session_to_shost(session); 262 + struct iscsi_host *ihost = shost->shost_data; 263 + unsigned long flags; 264 + 265 + spin_lock_irqsave(&session->lock, flags); 266 + if (session->state != ISCSI_SESSION_LOGGED_IN) { 267 + spin_unlock_irqrestore(&session->lock, flags); 268 + goto done; 269 + } 270 + spin_unlock_irqrestore(&session->lock, flags); 271 + 272 + scsi_scan_target(&session->dev, 0, session->target_id, 273 + SCAN_WILD_CARD, 1); 274 + done: 275 + atomic_dec(&ihost->nr_scans); 276 + } 277 + 325 278 static void session_recovery_timedout(struct work_struct *work) 326 279 { 327 280 struct iscsi_cls_session *session = 328 281 container_of(work, struct iscsi_cls_session, 329 282 recovery_work.work); 283 + unsigned long flags; 330 284 331 - dev_printk(KERN_INFO, &session->dev, "iscsi: session recovery timed " 332 - "out after %d secs\n", session->recovery_tmo); 285 + iscsi_cls_session_printk(KERN_INFO, session, 286 + "session recovery timed out after %d secs\n", 287 + session->recovery_tmo); 288 + 289 + spin_lock_irqsave(&session->lock, flags); 290 + switch (session->state) { 291 + case ISCSI_SESSION_FAILED: 292 + session->state = ISCSI_SESSION_FREE; 293 + break; 294 + case ISCSI_SESSION_LOGGED_IN: 295 + case ISCSI_SESSION_FREE: 296 + /* we raced with the unblock's flush */ 297 + spin_unlock_irqrestore(&session->lock, flags); 298 + return; 299 + } 300 + spin_unlock_irqrestore(&session->lock, flags); 333 301 334 302 if (session->transport->session_recovery_timedout) 335 303 
session->transport->session_recovery_timedout(session); ··· 373 269 scsi_target_unblock(&session->dev); 374 270 } 375 271 376 - void iscsi_unblock_session(struct iscsi_cls_session *session) 272 + void __iscsi_unblock_session(struct iscsi_cls_session *session) 377 273 { 378 274 if (!cancel_delayed_work(&session->recovery_work)) 379 275 flush_workqueue(iscsi_eh_timer_workq); 380 276 scsi_target_unblock(&session->dev); 381 277 } 278 + 279 + void iscsi_unblock_session(struct iscsi_cls_session *session) 280 + { 281 + struct Scsi_Host *shost = iscsi_session_to_shost(session); 282 + struct iscsi_host *ihost = shost->shost_data; 283 + unsigned long flags; 284 + 285 + spin_lock_irqsave(&session->lock, flags); 286 + session->state = ISCSI_SESSION_LOGGED_IN; 287 + spin_unlock_irqrestore(&session->lock, flags); 288 + 289 + __iscsi_unblock_session(session); 290 + /* 291 + * Only do kernel scanning if the driver is properly hooked into 292 + * the async scanning code (drivers like iscsi_tcp do login and 293 + * scanning from userspace). 
294 + */ 295 + if (shost->hostt->scan_finished) { 296 + if (queue_work(ihost->scan_workq, &session->scan_work)) 297 + atomic_inc(&ihost->nr_scans); 298 + } 299 + } 382 300 EXPORT_SYMBOL_GPL(iscsi_unblock_session); 383 301 384 302 void iscsi_block_session(struct iscsi_cls_session *session) 385 303 { 304 + unsigned long flags; 305 + 306 + spin_lock_irqsave(&session->lock, flags); 307 + session->state = ISCSI_SESSION_FAILED; 308 + spin_unlock_irqrestore(&session->lock, flags); 309 + 386 310 scsi_target_block(&session->dev); 387 311 queue_delayed_work(iscsi_eh_timer_workq, &session->recovery_work, 388 312 session->recovery_tmo * HZ); ··· 443 311 struct Scsi_Host *shost = iscsi_session_to_shost(session); 444 312 struct iscsi_host *ihost = shost->shost_data; 445 313 446 - return queue_work(ihost->unbind_workq, &session->unbind_work); 314 + return queue_work(ihost->scan_workq, &session->unbind_work); 447 315 } 448 316 449 317 struct iscsi_cls_session * ··· 459 327 460 328 session->transport = transport; 461 329 session->recovery_tmo = 120; 330 + session->state = ISCSI_SESSION_FREE; 462 331 INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout); 463 332 INIT_LIST_HEAD(&session->host_list); 464 333 INIT_LIST_HEAD(&session->sess_list); 465 334 INIT_WORK(&session->unbind_work, __iscsi_unbind_session); 335 + INIT_WORK(&session->scan_work, iscsi_scan_session); 336 + spin_lock_init(&session->lock); 466 337 467 338 /* this is released in the dev's release function */ 468 339 scsi_host_get(shost); ··· 493 358 session->sid); 494 359 err = device_add(&session->dev); 495 360 if (err) { 496 - dev_printk(KERN_ERR, &session->dev, "iscsi: could not " 497 - "register session's dev\n"); 361 + iscsi_cls_session_printk(KERN_ERR, session, 362 + "could not register session's dev\n"); 498 363 goto release_host; 499 364 } 500 365 transport_register_device(&session->dev); ··· 579 444 * If we are blocked let commands flow again. 
The lld or iscsi 580 445 * layer should set up the queuecommand to fail commands. 581 446 */ 582 - iscsi_unblock_session(session); 583 - iscsi_unbind_session(session); 447 + spin_lock_irqsave(&session->lock, flags); 448 + session->state = ISCSI_SESSION_FREE; 449 + spin_unlock_irqrestore(&session->lock, flags); 450 + __iscsi_unblock_session(session); 451 + __iscsi_unbind_session(&session->unbind_work); 452 + 453 + /* flush running scans */ 454 + flush_workqueue(ihost->scan_workq); 584 455 /* 585 456 * If the session dropped while removing devices then we need to make 586 457 * sure it is not blocked 587 458 */ 588 459 if (!cancel_delayed_work(&session->recovery_work)) 589 460 flush_workqueue(iscsi_eh_timer_workq); 590 - flush_workqueue(ihost->unbind_workq); 591 461 592 462 /* hw iscsi may not have removed all connections from session */ 593 463 err = device_for_each_child(&session->dev, NULL, 594 464 iscsi_iter_destroy_conn_fn); 595 465 if (err) 596 - dev_printk(KERN_ERR, &session->dev, "iscsi: Could not delete " 597 - "all connections for session. Error %d.\n", err); 466 + iscsi_cls_session_printk(KERN_ERR, session, 467 + "Could not delete all connections " 468 + "for session. 
Error %d.\n", err); 598 469 599 470 transport_unregister_device(&session->dev); 600 471 device_del(&session->dev); ··· 672 531 conn->dev.release = iscsi_conn_release; 673 532 err = device_register(&conn->dev); 674 533 if (err) { 675 - dev_printk(KERN_ERR, &conn->dev, "iscsi: could not register " 676 - "connection's dev\n"); 534 + iscsi_cls_session_printk(KERN_ERR, session, "could not " 535 + "register connection's dev\n"); 677 536 goto release_parent_ref; 678 537 } 679 538 transport_register_device(&conn->dev); ··· 780 639 skb = alloc_skb(len, GFP_ATOMIC); 781 640 if (!skb) { 782 641 iscsi_conn_error(conn, ISCSI_ERR_CONN_FAILED); 783 - dev_printk(KERN_ERR, &conn->dev, "iscsi: can not deliver " 784 - "control PDU: OOM\n"); 642 + iscsi_cls_conn_printk(KERN_ERR, conn, "can not deliver " 643 + "control PDU: OOM\n"); 785 644 return -ENOMEM; 786 645 } 787 646 ··· 802 661 803 662 void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error) 804 663 { 664 + struct iscsi_cls_session *session = iscsi_conn_to_session(conn); 805 665 struct nlmsghdr *nlh; 806 666 struct sk_buff *skb; 807 667 struct iscsi_uevent *ev; 808 668 struct iscsi_internal *priv; 809 669 int len = NLMSG_SPACE(sizeof(*ev)); 670 + unsigned long flags; 810 671 811 672 priv = iscsi_if_transport_lookup(conn->transport); 812 673 if (!priv) 813 674 return; 814 675 676 + spin_lock_irqsave(&session->lock, flags); 677 + if (session->state == ISCSI_SESSION_LOGGED_IN) 678 + session->state = ISCSI_SESSION_FAILED; 679 + spin_unlock_irqrestore(&session->lock, flags); 680 + 815 681 skb = alloc_skb(len, GFP_ATOMIC); 816 682 if (!skb) { 817 - dev_printk(KERN_ERR, &conn->dev, "iscsi: gracefully ignored " 818 - "conn error (%d)\n", error); 683 + iscsi_cls_conn_printk(KERN_ERR, conn, "gracefully ignored " 684 + "conn error (%d)\n", error); 819 685 return; 820 686 } 821 687 ··· 836 688 837 689 iscsi_broadcast_skb(skb, GFP_ATOMIC); 838 690 839 - dev_printk(KERN_INFO, &conn->dev, "iscsi: detected conn error (%d)\n", 
840 - error); 691 + iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn error (%d)\n", 692 + error); 841 693 } 842 694 EXPORT_SYMBOL_GPL(iscsi_conn_error); 843 695 ··· 892 744 893 745 skbstat = alloc_skb(len, GFP_ATOMIC); 894 746 if (!skbstat) { 895 - dev_printk(KERN_ERR, &conn->dev, "iscsi: can not " 896 - "deliver stats: OOM\n"); 747 + iscsi_cls_conn_printk(KERN_ERR, conn, "can not " 748 + "deliver stats: OOM\n"); 897 749 return -ENOMEM; 898 750 } 899 751 ··· 949 801 950 802 skb = alloc_skb(len, GFP_KERNEL); 951 803 if (!skb) { 952 - dev_printk(KERN_ERR, &session->dev, "Cannot notify userspace " 953 - "of session event %u\n", event); 804 + iscsi_cls_session_printk(KERN_ERR, session, 805 + "Cannot notify userspace of session " 806 + "event %u\n", event); 954 807 return -ENOMEM; 955 808 } 956 809 ··· 974 825 ev->r.unbind_session.sid = session->sid; 975 826 break; 976 827 default: 977 - dev_printk(KERN_ERR, &session->dev, "Invalid event %u.\n", 978 - event); 828 + iscsi_cls_session_printk(KERN_ERR, session, "Invalid event " 829 + "%u.\n", event); 979 830 kfree_skb(skb); 980 831 return -EINVAL; 981 832 } ··· 986 837 */ 987 838 rc = iscsi_broadcast_skb(skb, GFP_KERNEL); 988 839 if (rc < 0) 989 - dev_printk(KERN_ERR, &session->dev, "Cannot notify userspace " 990 - "of session event %u. Check iscsi daemon\n", event); 840 + iscsi_cls_session_printk(KERN_ERR, session, 841 + "Cannot notify userspace of session " 842 + "event %u. 
Check iscsi daemon\n", 843 + event); 991 844 return rc; 992 845 } 993 846 EXPORT_SYMBOL_GPL(iscsi_session_event); ··· 1022 871 1023 872 session = iscsi_session_lookup(ev->u.c_conn.sid); 1024 873 if (!session) { 1025 - printk(KERN_ERR "iscsi: invalid session %d\n", 874 + printk(KERN_ERR "iscsi: invalid session %d.\n", 1026 875 ev->u.c_conn.sid); 1027 876 return -EINVAL; 1028 877 } 1029 878 1030 879 conn = transport->create_conn(session, ev->u.c_conn.cid); 1031 880 if (!conn) { 1032 - printk(KERN_ERR "iscsi: couldn't create a new " 1033 - "connection for session %d\n", 1034 - session->sid); 881 + iscsi_cls_session_printk(KERN_ERR, session, 882 + "couldn't create a new connection."); 1035 883 return -ENOMEM; 1036 884 } 1037 885 ··· 1396 1246 iscsi_session_attr(abort_tmo, ISCSI_PARAM_ABORT_TMO, 0); 1397 1247 iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0); 1398 1248 1249 + static ssize_t 1250 + show_priv_session_state(struct class_device *cdev, char *buf) 1251 + { 1252 + struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev); 1253 + return sprintf(buf, "%s\n", iscsi_session_state_name(session->state)); 1254 + } 1255 + static ISCSI_CLASS_ATTR(priv_sess, state, S_IRUGO, show_priv_session_state, 1256 + NULL); 1257 + 1399 1258 #define iscsi_priv_session_attr_show(field, format) \ 1400 1259 static ssize_t \ 1401 1260 show_priv_session_##field(struct class_device *cdev, char *buf) \ ··· 1631 1472 SETUP_SESSION_RD_ATTR(abort_tmo, ISCSI_ABORT_TMO); 1632 1473 SETUP_SESSION_RD_ATTR(lu_reset_tmo,ISCSI_LU_RESET_TMO); 1633 1474 SETUP_PRIV_SESSION_RD_ATTR(recovery_tmo); 1475 + SETUP_PRIV_SESSION_RD_ATTR(state); 1634 1476 1635 1477 BUG_ON(count > ISCSI_SESSION_ATTRS); 1636 1478 priv->session_attrs[count] = NULL;
+16 -18
drivers/scsi/sd.c
··· 929 929 unsigned int xfer_size = scsi_bufflen(SCpnt); 930 930 unsigned int good_bytes = result ? 0 : xfer_size; 931 931 u64 start_lba = SCpnt->request->sector; 932 + u64 end_lba = SCpnt->request->sector + (xfer_size / 512); 932 933 u64 bad_lba; 933 934 struct scsi_sense_hdr sshdr; 934 935 int sense_valid = 0; ··· 968 967 goto out; 969 968 if (xfer_size <= SCpnt->device->sector_size) 970 969 goto out; 971 - switch (SCpnt->device->sector_size) { 972 - case 256: 970 + if (SCpnt->device->sector_size < 512) { 971 + /* only legitimate sector_size here is 256 */ 973 972 start_lba <<= 1; 974 - break; 975 - case 512: 976 - break; 977 - case 1024: 978 - start_lba >>= 1; 979 - break; 980 - case 2048: 981 - start_lba >>= 2; 982 - break; 983 - case 4096: 984 - start_lba >>= 3; 985 - break; 986 - default: 987 - /* Print something here with limiting frequency. */ 988 - goto out; 989 - break; 973 + end_lba <<= 1; 974 + } else { 975 + /* be careful ... don't want any overflows */ 976 + u64 factor = SCpnt->device->sector_size / 512; 977 + do_div(start_lba, factor); 978 + do_div(end_lba, factor); 990 979 } 980 + 981 + if (bad_lba < start_lba || bad_lba >= end_lba) 982 + /* the bad lba was reported incorrectly, we have 983 + * no idea where the error is 984 + */ 985 + goto out; 986 + 991 987 /* This computation should always be done in terms of 992 988 * the resolution of the device's medium. 993 989 */
+689
drivers/scsi/ses.c
··· 1 + /* 2 + * SCSI Enclosure Services 3 + * 4 + * Copyright (C) 2008 James Bottomley <James.Bottomley@HansenPartnership.com> 5 + * 6 + **----------------------------------------------------------------------------- 7 + ** 8 + ** This program is free software; you can redistribute it and/or 9 + ** modify it under the terms of the GNU General Public License 10 + ** version 2 as published by the Free Software Foundation. 11 + ** 12 + ** This program is distributed in the hope that it will be useful, 13 + ** but WITHOUT ANY WARRANTY; without even the implied warranty of 14 + ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 + ** GNU General Public License for more details. 16 + ** 17 + ** You should have received a copy of the GNU General Public License 18 + ** along with this program; if not, write to the Free Software 19 + ** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 20 + ** 21 + **----------------------------------------------------------------------------- 22 + */ 23 + 24 + #include <linux/module.h> 25 + #include <linux/kernel.h> 26 + #include <linux/enclosure.h> 27 + 28 + #include <scsi/scsi.h> 29 + #include <scsi/scsi_cmnd.h> 30 + #include <scsi/scsi_dbg.h> 31 + #include <scsi/scsi_device.h> 32 + #include <scsi/scsi_driver.h> 33 + #include <scsi/scsi_host.h> 34 + 35 + struct ses_device { 36 + char *page1; 37 + char *page2; 38 + char *page10; 39 + short page1_len; 40 + short page2_len; 41 + short page10_len; 42 + }; 43 + 44 + struct ses_component { 45 + u64 addr; 46 + unsigned char *desc; 47 + }; 48 + 49 + static int ses_probe(struct device *dev) 50 + { 51 + struct scsi_device *sdev = to_scsi_device(dev); 52 + int err = -ENODEV; 53 + 54 + if (sdev->type != TYPE_ENCLOSURE) 55 + goto out; 56 + 57 + err = 0; 58 + sdev_printk(KERN_NOTICE, sdev, "Attached Enclosure device\n"); 59 + 60 + out: 61 + return err; 62 + } 63 + 64 + #define SES_TIMEOUT 30 65 + #define SES_RETRIES 3 66 + 67 + static int ses_recv_diag(struct scsi_device *sdev, 
int page_code, 68 + void *buf, int bufflen) 69 + { 70 + char cmd[] = { 71 + RECEIVE_DIAGNOSTIC, 72 + 1, /* Set PCV bit */ 73 + page_code, 74 + bufflen >> 8, 75 + bufflen & 0xff, 76 + 0 77 + }; 78 + 79 + return scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen, 80 + NULL, SES_TIMEOUT, SES_RETRIES); 81 + } 82 + 83 + static int ses_send_diag(struct scsi_device *sdev, int page_code, 84 + void *buf, int bufflen) 85 + { 86 + u32 result; 87 + 88 + char cmd[] = { 89 + SEND_DIAGNOSTIC, 90 + 0x10, /* Set PF bit */ 91 + 0, 92 + bufflen >> 8, 93 + bufflen & 0xff, 94 + 0 95 + }; 96 + 97 + result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, buf, bufflen, 98 + NULL, SES_TIMEOUT, SES_RETRIES); 99 + if (result) 100 + sdev_printk(KERN_ERR, sdev, "SEND DIAGNOSTIC result: %8x\n", 101 + result); 102 + return result; 103 + } 104 + 105 + static int ses_set_page2_descriptor(struct enclosure_device *edev, 106 + struct enclosure_component *ecomp, 107 + char *desc) 108 + { 109 + int i, j, count = 0, descriptor = ecomp->number; 110 + struct scsi_device *sdev = to_scsi_device(edev->cdev.dev); 111 + struct ses_device *ses_dev = edev->scratch; 112 + char *type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; 113 + char *desc_ptr = ses_dev->page2 + 8; 114 + 115 + /* Clear everything */ 116 + memset(desc_ptr, 0, ses_dev->page2_len - 8); 117 + for (i = 0; i < ses_dev->page1[10]; i++, type_ptr += 4) { 118 + for (j = 0; j < type_ptr[1]; j++) { 119 + desc_ptr += 4; 120 + if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE && 121 + type_ptr[0] != ENCLOSURE_COMPONENT_ARRAY_DEVICE) 122 + continue; 123 + if (count++ == descriptor) { 124 + memcpy(desc_ptr, desc, 4); 125 + /* set select */ 126 + desc_ptr[0] |= 0x80; 127 + /* clear reserved, just in case */ 128 + desc_ptr[0] &= 0xf0; 129 + } 130 + } 131 + } 132 + 133 + return ses_send_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len); 134 + } 135 + 136 + static char *ses_get_page2_descriptor(struct enclosure_device *edev, 137 + struct enclosure_component 
*ecomp) 138 + { 139 + int i, j, count = 0, descriptor = ecomp->number; 140 + struct scsi_device *sdev = to_scsi_device(edev->cdev.dev); 141 + struct ses_device *ses_dev = edev->scratch; 142 + char *type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; 143 + char *desc_ptr = ses_dev->page2 + 8; 144 + 145 + ses_recv_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len); 146 + 147 + for (i = 0; i < ses_dev->page1[10]; i++, type_ptr += 4) { 148 + for (j = 0; j < type_ptr[1]; j++) { 149 + desc_ptr += 4; 150 + if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE && 151 + type_ptr[0] != ENCLOSURE_COMPONENT_ARRAY_DEVICE) 152 + continue; 153 + if (count++ == descriptor) 154 + return desc_ptr; 155 + } 156 + } 157 + return NULL; 158 + } 159 + 160 + static void ses_get_fault(struct enclosure_device *edev, 161 + struct enclosure_component *ecomp) 162 + { 163 + char *desc; 164 + 165 + desc = ses_get_page2_descriptor(edev, ecomp); 166 + ecomp->fault = (desc[3] & 0x60) >> 4; 167 + } 168 + 169 + static int ses_set_fault(struct enclosure_device *edev, 170 + struct enclosure_component *ecomp, 171 + enum enclosure_component_setting val) 172 + { 173 + char desc[4] = {0 }; 174 + 175 + switch (val) { 176 + case ENCLOSURE_SETTING_DISABLED: 177 + /* zero is disabled */ 178 + break; 179 + case ENCLOSURE_SETTING_ENABLED: 180 + desc[2] = 0x02; 181 + break; 182 + default: 183 + /* SES doesn't do the SGPIO blink settings */ 184 + return -EINVAL; 185 + } 186 + 187 + return ses_set_page2_descriptor(edev, ecomp, desc); 188 + } 189 + 190 + static void ses_get_status(struct enclosure_device *edev, 191 + struct enclosure_component *ecomp) 192 + { 193 + char *desc; 194 + 195 + desc = ses_get_page2_descriptor(edev, ecomp); 196 + ecomp->status = (desc[0] & 0x0f); 197 + } 198 + 199 + static void ses_get_locate(struct enclosure_device *edev, 200 + struct enclosure_component *ecomp) 201 + { 202 + char *desc; 203 + 204 + desc = ses_get_page2_descriptor(edev, ecomp); 205 + ecomp->locate = (desc[2] & 0x02) ? 
1 : 0; 206 + } 207 + 208 + static int ses_set_locate(struct enclosure_device *edev, 209 + struct enclosure_component *ecomp, 210 + enum enclosure_component_setting val) 211 + { 212 + char desc[4] = {0 }; 213 + 214 + switch (val) { 215 + case ENCLOSURE_SETTING_DISABLED: 216 + /* zero is disabled */ 217 + break; 218 + case ENCLOSURE_SETTING_ENABLED: 219 + desc[2] = 0x02; 220 + break; 221 + default: 222 + /* SES doesn't do the SGPIO blink settings */ 223 + return -EINVAL; 224 + } 225 + return ses_set_page2_descriptor(edev, ecomp, desc); 226 + } 227 + 228 + static int ses_set_active(struct enclosure_device *edev, 229 + struct enclosure_component *ecomp, 230 + enum enclosure_component_setting val) 231 + { 232 + char desc[4] = {0 }; 233 + 234 + switch (val) { 235 + case ENCLOSURE_SETTING_DISABLED: 236 + /* zero is disabled */ 237 + ecomp->active = 0; 238 + break; 239 + case ENCLOSURE_SETTING_ENABLED: 240 + desc[2] = 0x80; 241 + ecomp->active = 1; 242 + break; 243 + default: 244 + /* SES doesn't do the SGPIO blink settings */ 245 + return -EINVAL; 246 + } 247 + return ses_set_page2_descriptor(edev, ecomp, desc); 248 + } 249 + 250 + static struct enclosure_component_callbacks ses_enclosure_callbacks = { 251 + .get_fault = ses_get_fault, 252 + .set_fault = ses_set_fault, 253 + .get_status = ses_get_status, 254 + .get_locate = ses_get_locate, 255 + .set_locate = ses_set_locate, 256 + .set_active = ses_set_active, 257 + }; 258 + 259 + struct ses_host_edev { 260 + struct Scsi_Host *shost; 261 + struct enclosure_device *edev; 262 + }; 263 + 264 + int ses_match_host(struct enclosure_device *edev, void *data) 265 + { 266 + struct ses_host_edev *sed = data; 267 + struct scsi_device *sdev; 268 + 269 + if (!scsi_is_sdev_device(edev->cdev.dev)) 270 + return 0; 271 + 272 + sdev = to_scsi_device(edev->cdev.dev); 273 + 274 + if (sdev->host != sed->shost) 275 + return 0; 276 + 277 + sed->edev = edev; 278 + return 1; 279 + } 280 + 281 + static void ses_process_descriptor(struct 
enclosure_component *ecomp, 282 + unsigned char *desc) 283 + { 284 + int eip = desc[0] & 0x10; 285 + int invalid = desc[0] & 0x80; 286 + enum scsi_protocol proto = desc[0] & 0x0f; 287 + u64 addr = 0; 288 + struct ses_component *scomp = ecomp->scratch; 289 + unsigned char *d; 290 + 291 + scomp->desc = desc; 292 + 293 + if (invalid) 294 + return; 295 + 296 + switch (proto) { 297 + case SCSI_PROTOCOL_SAS: 298 + if (eip) 299 + d = desc + 8; 300 + else 301 + d = desc + 4; 302 + /* only take the phy0 addr */ 303 + addr = (u64)d[12] << 56 | 304 + (u64)d[13] << 48 | 305 + (u64)d[14] << 40 | 306 + (u64)d[15] << 32 | 307 + (u64)d[16] << 24 | 308 + (u64)d[17] << 16 | 309 + (u64)d[18] << 8 | 310 + (u64)d[19]; 311 + break; 312 + default: 313 + /* FIXME: Need to add more protocols than just SAS */ 314 + break; 315 + } 316 + scomp->addr = addr; 317 + } 318 + 319 + struct efd { 320 + u64 addr; 321 + struct device *dev; 322 + }; 323 + 324 + static int ses_enclosure_find_by_addr(struct enclosure_device *edev, 325 + void *data) 326 + { 327 + struct efd *efd = data; 328 + int i; 329 + struct ses_component *scomp; 330 + 331 + if (!edev->component[0].scratch) 332 + return 0; 333 + 334 + for (i = 0; i < edev->components; i++) { 335 + scomp = edev->component[i].scratch; 336 + if (scomp->addr != efd->addr) 337 + continue; 338 + 339 + enclosure_add_device(edev, i, efd->dev); 340 + return 1; 341 + } 342 + return 0; 343 + } 344 + 345 + #define VPD_INQUIRY_SIZE 512 346 + 347 + static void ses_match_to_enclosure(struct enclosure_device *edev, 348 + struct scsi_device *sdev) 349 + { 350 + unsigned char *buf = kmalloc(VPD_INQUIRY_SIZE, GFP_KERNEL); 351 + unsigned char *desc; 352 + int len; 353 + struct efd efd = { 354 + .addr = 0, 355 + }; 356 + unsigned char cmd[] = { 357 + INQUIRY, 358 + 1, 359 + 0x83, 360 + VPD_INQUIRY_SIZE >> 8, 361 + VPD_INQUIRY_SIZE & 0xff, 362 + 0 363 + }; 364 + 365 + if (!buf) 366 + return; 367 + 368 + if (scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, 369 + 
VPD_INQUIRY_SIZE, NULL, SES_TIMEOUT, SES_RETRIES)) 370 + goto free; 371 + 372 + len = (buf[2] << 8) + buf[3]; 373 + desc = buf + 4; 374 + while (desc < buf + len) { 375 + enum scsi_protocol proto = desc[0] >> 4; 376 + u8 code_set = desc[0] & 0x0f; 377 + u8 piv = desc[1] & 0x80; 378 + u8 assoc = (desc[1] & 0x30) >> 4; 379 + u8 type = desc[1] & 0x0f; 380 + u8 len = desc[3]; 381 + 382 + if (piv && code_set == 1 && assoc == 1 && code_set == 1 383 + && proto == SCSI_PROTOCOL_SAS && type == 3 && len == 8) 384 + efd.addr = (u64)desc[4] << 56 | 385 + (u64)desc[5] << 48 | 386 + (u64)desc[6] << 40 | 387 + (u64)desc[7] << 32 | 388 + (u64)desc[8] << 24 | 389 + (u64)desc[9] << 16 | 390 + (u64)desc[10] << 8 | 391 + (u64)desc[11]; 392 + 393 + desc += len + 4; 394 + } 395 + if (!efd.addr) 396 + goto free; 397 + 398 + efd.dev = &sdev->sdev_gendev; 399 + 400 + enclosure_for_each_device(ses_enclosure_find_by_addr, &efd); 401 + free: 402 + kfree(buf); 403 + } 404 + 405 + #define INIT_ALLOC_SIZE 32 406 + 407 + static int ses_intf_add(struct class_device *cdev, 408 + struct class_interface *intf) 409 + { 410 + struct scsi_device *sdev = to_scsi_device(cdev->dev); 411 + struct scsi_device *tmp_sdev; 412 + unsigned char *buf = NULL, *hdr_buf, *type_ptr, *desc_ptr, 413 + *addl_desc_ptr; 414 + struct ses_device *ses_dev; 415 + u32 result; 416 + int i, j, types, len, components = 0; 417 + int err = -ENOMEM; 418 + struct enclosure_device *edev; 419 + struct ses_component *scomp; 420 + 421 + if (!scsi_device_enclosure(sdev)) { 422 + /* not an enclosure, but might be in one */ 423 + edev = enclosure_find(&sdev->host->shost_gendev); 424 + if (edev) { 425 + ses_match_to_enclosure(edev, sdev); 426 + class_device_put(&edev->cdev); 427 + } 428 + return -ENODEV; 429 + } 430 + 431 + /* TYPE_ENCLOSURE prints a message in probe */ 432 + if (sdev->type != TYPE_ENCLOSURE) 433 + sdev_printk(KERN_NOTICE, sdev, "Embedded Enclosure Device\n"); 434 + 435 + ses_dev = kzalloc(sizeof(*ses_dev), GFP_KERNEL); 436 + 
hdr_buf = kzalloc(INIT_ALLOC_SIZE, GFP_KERNEL); 437 + if (!hdr_buf || !ses_dev) 438 + goto err_init_free; 439 + 440 + result = ses_recv_diag(sdev, 1, hdr_buf, INIT_ALLOC_SIZE); 441 + if (result) 442 + goto recv_failed; 443 + 444 + if (hdr_buf[1] != 0) { 445 + /* FIXME: need subenclosure support; I've just never 446 + * seen a device with subenclosures and it makes the 447 + * traversal routines more complex */ 448 + sdev_printk(KERN_ERR, sdev, 449 + "FIXME driver has no support for subenclosures (%d)\n", 450 + buf[1]); 451 + goto err_free; 452 + } 453 + 454 + len = (hdr_buf[2] << 8) + hdr_buf[3] + 4; 455 + buf = kzalloc(len, GFP_KERNEL); 456 + if (!buf) 457 + goto err_free; 458 + 459 + ses_dev->page1 = buf; 460 + ses_dev->page1_len = len; 461 + 462 + result = ses_recv_diag(sdev, 1, buf, len); 463 + if (result) 464 + goto recv_failed; 465 + 466 + types = buf[10]; 467 + len = buf[11]; 468 + 469 + type_ptr = buf + 12 + len; 470 + 471 + for (i = 0; i < types; i++, type_ptr += 4) { 472 + if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE || 473 + type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE) 474 + components += type_ptr[1]; 475 + } 476 + 477 + result = ses_recv_diag(sdev, 2, hdr_buf, INIT_ALLOC_SIZE); 478 + if (result) 479 + goto recv_failed; 480 + 481 + len = (hdr_buf[2] << 8) + hdr_buf[3] + 4; 482 + buf = kzalloc(len, GFP_KERNEL); 483 + if (!buf) 484 + goto err_free; 485 + 486 + /* make sure getting page 2 actually works */ 487 + result = ses_recv_diag(sdev, 2, buf, len); 488 + if (result) 489 + goto recv_failed; 490 + ses_dev->page2 = buf; 491 + ses_dev->page2_len = len; 492 + 493 + /* The additional information page --- allows us 494 + * to match up the devices */ 495 + result = ses_recv_diag(sdev, 10, hdr_buf, INIT_ALLOC_SIZE); 496 + if (result) 497 + goto no_page10; 498 + 499 + len = (hdr_buf[2] << 8) + hdr_buf[3] + 4; 500 + buf = kzalloc(len, GFP_KERNEL); 501 + if (!buf) 502 + goto err_free; 503 + 504 + result = ses_recv_diag(sdev, 10, buf, len); 505 + if (result) 
506 + goto recv_failed; 507 + ses_dev->page10 = buf; 508 + ses_dev->page10_len = len; 509 + 510 + no_page10: 511 + scomp = kmalloc(sizeof(struct ses_component) * components, GFP_KERNEL); 512 + if (!scomp) 513 + goto err_free; 514 + 515 + edev = enclosure_register(cdev->dev, sdev->sdev_gendev.bus_id, 516 + components, &ses_enclosure_callbacks); 517 + if (IS_ERR(edev)) { 518 + err = PTR_ERR(edev); 519 + goto err_free; 520 + } 521 + 522 + edev->scratch = ses_dev; 523 + for (i = 0; i < components; i++) 524 + edev->component[i].scratch = scomp++; 525 + 526 + /* Page 7 for the descriptors is optional */ 527 + buf = NULL; 528 + result = ses_recv_diag(sdev, 7, hdr_buf, INIT_ALLOC_SIZE); 529 + if (result) 530 + goto simple_populate; 531 + 532 + len = (hdr_buf[2] << 8) + hdr_buf[3] + 4; 533 + /* add 1 for trailing '\0' we'll use */ 534 + buf = kzalloc(len + 1, GFP_KERNEL); 535 + result = ses_recv_diag(sdev, 7, buf, len); 536 + if (result) { 537 + simple_populate: 538 + kfree(buf); 539 + buf = NULL; 540 + desc_ptr = NULL; 541 + addl_desc_ptr = NULL; 542 + } else { 543 + desc_ptr = buf + 8; 544 + len = (desc_ptr[2] << 8) + desc_ptr[3]; 545 + /* skip past overall descriptor */ 546 + desc_ptr += len + 4; 547 + addl_desc_ptr = ses_dev->page10 + 8; 548 + } 549 + type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; 550 + components = 0; 551 + for (i = 0; i < types; i++, type_ptr += 4) { 552 + for (j = 0; j < type_ptr[1]; j++) { 553 + char *name = NULL; 554 + struct enclosure_component *ecomp; 555 + 556 + if (desc_ptr) { 557 + len = (desc_ptr[2] << 8) + desc_ptr[3]; 558 + desc_ptr += 4; 559 + /* Add trailing zero - pushes into 560 + * reserved space */ 561 + desc_ptr[len] = '\0'; 562 + name = desc_ptr; 563 + } 564 + if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE && 565 + type_ptr[0] != ENCLOSURE_COMPONENT_ARRAY_DEVICE) 566 + continue; 567 + ecomp = enclosure_component_register(edev, 568 + components++, 569 + type_ptr[0], 570 + name); 571 + if (desc_ptr) { 572 + desc_ptr += len; 573 + 
if (!IS_ERR(ecomp)) 574 + ses_process_descriptor(ecomp, 575 + addl_desc_ptr); 576 + 577 + if (addl_desc_ptr) 578 + addl_desc_ptr += addl_desc_ptr[1] + 2; 579 + } 580 + } 581 + } 582 + kfree(buf); 583 + kfree(hdr_buf); 584 + 585 + /* see if there are any devices matching before 586 + * we found the enclosure */ 587 + shost_for_each_device(tmp_sdev, sdev->host) { 588 + if (tmp_sdev->lun != 0 || scsi_device_enclosure(tmp_sdev)) 589 + continue; 590 + ses_match_to_enclosure(edev, tmp_sdev); 591 + } 592 + 593 + return 0; 594 + 595 + recv_failed: 596 + sdev_printk(KERN_ERR, sdev, "Failed to get diagnostic page 0x%x\n", 597 + result); 598 + err = -ENODEV; 599 + err_free: 600 + kfree(buf); 601 + kfree(ses_dev->page10); 602 + kfree(ses_dev->page2); 603 + kfree(ses_dev->page1); 604 + err_init_free: 605 + kfree(ses_dev); 606 + kfree(hdr_buf); 607 + sdev_printk(KERN_ERR, sdev, "Failed to bind enclosure %d\n", err); 608 + return err; 609 + } 610 + 611 + static int ses_remove(struct device *dev) 612 + { 613 + return 0; 614 + } 615 + 616 + static void ses_intf_remove(struct class_device *cdev, 617 + struct class_interface *intf) 618 + { 619 + struct scsi_device *sdev = to_scsi_device(cdev->dev); 620 + struct enclosure_device *edev; 621 + struct ses_device *ses_dev; 622 + 623 + if (!scsi_device_enclosure(sdev)) 624 + return; 625 + 626 + edev = enclosure_find(cdev->dev); 627 + if (!edev) 628 + return; 629 + 630 + ses_dev = edev->scratch; 631 + edev->scratch = NULL; 632 + 633 + kfree(ses_dev->page1); 634 + kfree(ses_dev->page2); 635 + kfree(ses_dev); 636 + 637 + kfree(edev->component[0].scratch); 638 + 639 + class_device_put(&edev->cdev); 640 + enclosure_unregister(edev); 641 + } 642 + 643 + static struct class_interface ses_interface = { 644 + .add = ses_intf_add, 645 + .remove = ses_intf_remove, 646 + }; 647 + 648 + static struct scsi_driver ses_template = { 649 + .owner = THIS_MODULE, 650 + .gendrv = { 651 + .name = "ses", 652 + .probe = ses_probe, 653 + .remove = ses_remove, 654 
+ }, 655 + }; 656 + 657 + static int __init ses_init(void) 658 + { 659 + int err; 660 + 661 + err = scsi_register_interface(&ses_interface); 662 + if (err) 663 + return err; 664 + 665 + err = scsi_register_driver(&ses_template.gendrv); 666 + if (err) 667 + goto out_unreg; 668 + 669 + return 0; 670 + 671 + out_unreg: 672 + scsi_unregister_interface(&ses_interface); 673 + return err; 674 + } 675 + 676 + static void __exit ses_exit(void) 677 + { 678 + scsi_unregister_driver(&ses_template.gendrv); 679 + scsi_unregister_interface(&ses_interface); 680 + } 681 + 682 + module_init(ses_init); 683 + module_exit(ses_exit); 684 + 685 + MODULE_ALIAS_SCSI_DEVICE(TYPE_ENCLOSURE); 686 + 687 + MODULE_AUTHOR("James Bottomley"); 688 + MODULE_DESCRIPTION("SCSI Enclosure Services (ses) driver"); 689 + MODULE_LICENSE("GPL v2");
+27 -22
drivers/scsi/sr.c
··· 163 163 mutex_unlock(&sr_ref_mutex); 164 164 } 165 165 166 + /* identical to scsi_test_unit_ready except that it doesn't 167 + * eat the NOT_READY returns for removable media */ 168 + int sr_test_unit_ready(struct scsi_device *sdev, struct scsi_sense_hdr *sshdr) 169 + { 170 + int retries = MAX_RETRIES; 171 + int the_result; 172 + u8 cmd[] = {TEST_UNIT_READY, 0, 0, 0, 0, 0 }; 173 + 174 + /* issue TEST_UNIT_READY until the initial startup UNIT_ATTENTION 175 + * conditions are gone, or a timeout happens 176 + */ 177 + do { 178 + the_result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 179 + 0, sshdr, SR_TIMEOUT, 180 + retries--); 181 + 182 + } while (retries > 0 && 183 + (!scsi_status_is_good(the_result) || 184 + (scsi_sense_valid(sshdr) && 185 + sshdr->sense_key == UNIT_ATTENTION))); 186 + return the_result; 187 + } 188 + 166 189 /* 167 190 * This function checks to see if the media has been changed in the 168 191 * CDROM drive. It is possible that we have already sensed a change, ··· 208 185 } 209 186 210 187 sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL); 211 - retval = scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, 212 - sshdr); 188 + retval = sr_test_unit_ready(cd->device, sshdr); 213 189 if (retval || (scsi_sense_valid(sshdr) && 214 190 /* 0x3a is medium not present */ 215 191 sshdr->asc == 0x3a)) { ··· 755 733 { 756 734 unsigned char *buffer; 757 735 struct scsi_mode_data data; 758 - unsigned char cmd[MAX_COMMAND_SIZE]; 759 736 struct scsi_sense_hdr sshdr; 760 - unsigned int the_result; 761 - int retries, rc, n; 737 + int rc, n; 762 738 763 739 static const char *loadmech[] = 764 740 { ··· 778 758 return; 779 759 } 780 760 781 - /* issue TEST_UNIT_READY until the initial startup UNIT_ATTENTION 782 - * conditions are gone, or a timeout happens 783 - */ 784 - retries = 0; 785 - do { 786 - memset((void *)cmd, 0, MAX_COMMAND_SIZE); 787 - cmd[0] = TEST_UNIT_READY; 788 - 789 - the_result = scsi_execute_req (cd->device, cmd, DMA_NONE, NULL, 790 - 0, 
&sshdr, SR_TIMEOUT, 791 - MAX_RETRIES); 792 - 793 - retries++; 794 - } while (retries < 5 && 795 - (!scsi_status_is_good(the_result) || 796 - (scsi_sense_valid(&sshdr) && 797 - sshdr.sense_key == UNIT_ATTENTION))); 761 + /* eat unit attentions */ 762 + sr_test_unit_ready(cd->device, &sshdr); 798 763 799 764 /* ask for mode page 0x2a */ 800 765 rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, 128,
+1
drivers/scsi/sr.h
··· 61 61 int sr_audio_ioctl(struct cdrom_device_info *, unsigned int, void *); 62 62 63 63 int sr_is_xa(Scsi_CD *); 64 + int sr_test_unit_ready(struct scsi_device *sdev, struct scsi_sense_hdr *sshdr); 64 65 65 66 /* sr_vendor.c */ 66 67 void sr_vendor_init(Scsi_CD *);
+1 -2
drivers/scsi/sr_ioctl.c
··· 306 306 /* we have no changer support */ 307 307 return -EINVAL; 308 308 } 309 - if (0 == scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, 310 - &sshdr)) 309 + if (0 == sr_test_unit_ready(cd->device, &sshdr)) 311 310 return CDS_DISC_OK; 312 311 313 312 if (!cdrom_get_media_event(cdi, &med)) {
+263 -339
drivers/scsi/sun3x_esp.c
··· 1 - /* sun3x_esp.c: EnhancedScsiProcessor Sun3x SCSI driver code. 1 + /* sun3x_esp.c: ESP front-end for Sun3x systems. 2 2 * 3 - * (C) 1999 Thomas Bogendoerfer (tsbogend@alpha.franken.de) 4 - * 5 - * Based on David S. Miller's esp driver 3 + * Copyright (C) 2007,2008 Thomas Bogendoerfer (tsbogend@alpha.franken.de) 6 4 */ 7 5 8 6 #include <linux/kernel.h> 9 7 #include <linux/types.h> 10 - #include <linux/string.h> 11 - #include <linux/slab.h> 12 - #include <linux/blkdev.h> 13 - #include <linux/proc_fs.h> 14 - #include <linux/stat.h> 15 8 #include <linux/delay.h> 9 + #include <linux/module.h> 10 + #include <linux/init.h> 11 + #include <linux/platform_device.h> 12 + #include <linux/dma-mapping.h> 16 13 #include <linux/interrupt.h> 17 14 18 - #include "scsi.h" 19 - #include <scsi/scsi_host.h> 20 - #include "NCR53C9x.h" 21 - 22 15 #include <asm/sun3x.h> 16 + #include <asm/io.h> 17 + #include <asm/dma.h> 23 18 #include <asm/dvma.h> 24 - #include <asm/irq.h> 25 19 26 - static void dma_barrier(struct NCR_ESP *esp); 27 - static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count); 28 - static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp); 29 - static void dma_drain(struct NCR_ESP *esp); 30 - static void dma_invalidate(struct NCR_ESP *esp); 31 - static void dma_dump_state(struct NCR_ESP *esp); 32 - static void dma_init_read(struct NCR_ESP *esp, __u32 vaddress, int length); 33 - static void dma_init_write(struct NCR_ESP *esp, __u32 vaddress, int length); 34 - static void dma_ints_off(struct NCR_ESP *esp); 35 - static void dma_ints_on(struct NCR_ESP *esp); 36 - static int dma_irq_p(struct NCR_ESP *esp); 37 - static void dma_poll(struct NCR_ESP *esp, unsigned char *vaddr); 38 - static int dma_ports_p(struct NCR_ESP *esp); 39 - static void dma_reset(struct NCR_ESP *esp); 40 - static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write); 41 - static void dma_mmu_get_scsi_one (struct NCR_ESP *esp, Scsi_Cmnd *sp); 42 - static void 
dma_mmu_get_scsi_sgl (struct NCR_ESP *esp, Scsi_Cmnd *sp); 43 - static void dma_mmu_release_scsi_one (struct NCR_ESP *esp, Scsi_Cmnd *sp); 44 - static void dma_mmu_release_scsi_sgl (struct NCR_ESP *esp, Scsi_Cmnd *sp); 45 - static void dma_advance_sg (Scsi_Cmnd *sp); 20 + /* DMA controller reg offsets */ 21 + #define DMA_CSR 0x00UL /* rw DMA control/status register 0x00 */ 22 + #define DMA_ADDR 0x04UL /* rw DMA transfer address register 0x04 */ 23 + #define DMA_COUNT 0x08UL /* rw DMA transfer count register 0x08 */ 24 + #define DMA_TEST 0x0cUL /* rw DMA test/debug register 0x0c */ 46 25 47 - /* Detecting ESP chips on the machine. This is the simple and easy 48 - * version. 26 + #include <scsi/scsi_host.h> 27 + 28 + #include "esp_scsi.h" 29 + 30 + #define DRV_MODULE_NAME "sun3x_esp" 31 + #define PFX DRV_MODULE_NAME ": " 32 + #define DRV_VERSION "1.000" 33 + #define DRV_MODULE_RELDATE "Nov 1, 2007" 34 + 35 + /* 36 + * m68k always assumes readl/writel operate on little endian 37 + * mmio space; this is wrong at least for Sun3x, so we 38 + * need to workaround this until a proper way is found 49 39 */ 50 - int sun3x_esp_detect(struct scsi_host_template *tpnt) 40 + #if 0 41 + #define dma_read32(REG) \ 42 + readl(esp->dma_regs + (REG)) 43 + #define dma_write32(VAL, REG) \ 44 + writel((VAL), esp->dma_regs + (REG)) 45 + #else 46 + #define dma_read32(REG) \ 47 + *(volatile u32 *)(esp->dma_regs + (REG)) 48 + #define dma_write32(VAL, REG) \ 49 + do { *(volatile u32 *)(esp->dma_regs + (REG)) = (VAL); } while (0) 50 + #endif 51 + 52 + static void sun3x_esp_write8(struct esp *esp, u8 val, unsigned long reg) 51 53 { 52 - struct NCR_ESP *esp; 53 - struct ConfigDev *esp_dev; 54 - 55 - esp_dev = 0; 56 - esp = esp_allocate(tpnt, esp_dev, 0); 57 - 58 - /* Do command transfer with DMA */ 59 - esp->do_pio_cmds = 0; 60 - 61 - /* Required functions */ 62 - esp->dma_bytes_sent = &dma_bytes_sent; 63 - esp->dma_can_transfer = &dma_can_transfer; 64 - esp->dma_dump_state = &dma_dump_state; 65 
- esp->dma_init_read = &dma_init_read; 66 - esp->dma_init_write = &dma_init_write; 67 - esp->dma_ints_off = &dma_ints_off; 68 - esp->dma_ints_on = &dma_ints_on; 69 - esp->dma_irq_p = &dma_irq_p; 70 - esp->dma_ports_p = &dma_ports_p; 71 - esp->dma_setup = &dma_setup; 72 - 73 - /* Optional functions */ 74 - esp->dma_barrier = &dma_barrier; 75 - esp->dma_invalidate = &dma_invalidate; 76 - esp->dma_drain = &dma_drain; 77 - esp->dma_irq_entry = 0; 78 - esp->dma_irq_exit = 0; 79 - esp->dma_led_on = 0; 80 - esp->dma_led_off = 0; 81 - esp->dma_poll = &dma_poll; 82 - esp->dma_reset = &dma_reset; 83 - 84 - /* virtual DMA functions */ 85 - esp->dma_mmu_get_scsi_one = &dma_mmu_get_scsi_one; 86 - esp->dma_mmu_get_scsi_sgl = &dma_mmu_get_scsi_sgl; 87 - esp->dma_mmu_release_scsi_one = &dma_mmu_release_scsi_one; 88 - esp->dma_mmu_release_scsi_sgl = &dma_mmu_release_scsi_sgl; 89 - esp->dma_advance_sg = &dma_advance_sg; 90 - 91 - /* SCSI chip speed */ 92 - esp->cfreq = 20000000; 93 - esp->eregs = (struct ESP_regs *)(SUN3X_ESP_BASE); 94 - esp->dregs = (void *)SUN3X_ESP_DMA; 95 - 96 - esp->esp_command = (volatile unsigned char *)dvma_malloc(DVMA_PAGE_SIZE); 97 - esp->esp_command_dvma = dvma_vtob((unsigned long)esp->esp_command); 98 - 99 - esp->irq = 2; 100 - if (request_irq(esp->irq, esp_intr, IRQF_DISABLED, 101 - "SUN3X SCSI", esp->ehost)) { 102 - esp_deallocate(esp); 103 - return 0; 104 - } 105 - 106 - esp->scsi_id = 7; 107 - esp->diff = 0; 108 - 109 - esp_initialize(esp); 110 - 111 - /* for reasons beyond my knowledge (and which should likely be fixed) 112 - sync mode doesn't work on a 3/80 at 5mhz. but it does at 4. 
*/ 113 - esp->sync_defp = 0x3f; 114 - 115 - printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps, 116 - esps_in_use); 117 - esps_running = esps_in_use; 118 - return esps_in_use; 54 + writeb(val, esp->regs + (reg * 4UL)); 119 55 } 120 56 121 - static void dma_do_drain(struct NCR_ESP *esp) 57 + static u8 sun3x_esp_read8(struct esp *esp, unsigned long reg) 122 58 { 123 - struct sparc_dma_registers *dregs = 124 - (struct sparc_dma_registers *) esp->dregs; 125 - 126 - int count = 500000; 127 - 128 - while((dregs->cond_reg & DMA_PEND_READ) && (--count > 0)) 129 - udelay(1); 130 - 131 - if(!count) { 132 - printk("%s:%d timeout CSR %08lx\n", __FILE__, __LINE__, dregs->cond_reg); 133 - } 134 - 135 - dregs->cond_reg |= DMA_FIFO_STDRAIN; 136 - 137 - count = 500000; 138 - 139 - while((dregs->cond_reg & DMA_FIFO_ISDRAIN) && (--count > 0)) 140 - udelay(1); 141 - 142 - if(!count) { 143 - printk("%s:%d timeout CSR %08lx\n", __FILE__, __LINE__, dregs->cond_reg); 144 - } 145 - 146 - } 147 - 148 - static void dma_barrier(struct NCR_ESP *esp) 149 - { 150 - struct sparc_dma_registers *dregs = 151 - (struct sparc_dma_registers *) esp->dregs; 152 - int count = 500000; 153 - 154 - while((dregs->cond_reg & DMA_PEND_READ) && (--count > 0)) 155 - udelay(1); 156 - 157 - if(!count) { 158 - printk("%s:%d timeout CSR %08lx\n", __FILE__, __LINE__, dregs->cond_reg); 159 - } 160 - 161 - dregs->cond_reg &= ~(DMA_ENABLE); 59 + return readb(esp->regs + (reg * 4UL)); 162 60 } 163 61 164 - /* This uses various DMA csr fields and the fifo flags count value to 165 - * determine how many bytes were successfully sent/received by the ESP. 
166 - */ 167 - static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count) 62 + static dma_addr_t sun3x_esp_map_single(struct esp *esp, void *buf, 63 + size_t sz, int dir) 168 64 { 169 - struct sparc_dma_registers *dregs = 170 - (struct sparc_dma_registers *) esp->dregs; 171 - 172 - int rval = dregs->st_addr - esp->esp_command_dvma; 173 - 174 - return rval - fifo_count; 65 + return dma_map_single(esp->dev, buf, sz, dir); 175 66 } 176 67 177 - static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp) 68 + static int sun3x_esp_map_sg(struct esp *esp, struct scatterlist *sg, 69 + int num_sg, int dir) 178 70 { 179 - return sp->SCp.this_residual; 71 + return dma_map_sg(esp->dev, sg, num_sg, dir); 180 72 } 181 73 182 - static void dma_drain(struct NCR_ESP *esp) 74 + static void sun3x_esp_unmap_single(struct esp *esp, dma_addr_t addr, 75 + size_t sz, int dir) 183 76 { 184 - struct sparc_dma_registers *dregs = 185 - (struct sparc_dma_registers *) esp->dregs; 186 - int count = 500000; 77 + dma_unmap_single(esp->dev, addr, sz, dir); 78 + } 187 79 188 - if(dregs->cond_reg & DMA_FIFO_ISDRAIN) { 189 - dregs->cond_reg |= DMA_FIFO_STDRAIN; 190 - while((dregs->cond_reg & DMA_FIFO_ISDRAIN) && (--count > 0)) 191 - udelay(1); 192 - if(!count) { 193 - printk("%s:%d timeout CSR %08lx\n", __FILE__, __LINE__, dregs->cond_reg); 80 + static void sun3x_esp_unmap_sg(struct esp *esp, struct scatterlist *sg, 81 + int num_sg, int dir) 82 + { 83 + dma_unmap_sg(esp->dev, sg, num_sg, dir); 84 + } 85 + 86 + static int sun3x_esp_irq_pending(struct esp *esp) 87 + { 88 + if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR)) 89 + return 1; 90 + return 0; 91 + } 92 + 93 + static void sun3x_esp_reset_dma(struct esp *esp) 94 + { 95 + u32 val; 96 + 97 + val = dma_read32(DMA_CSR); 98 + dma_write32(val | DMA_RST_SCSI, DMA_CSR); 99 + dma_write32(val & ~DMA_RST_SCSI, DMA_CSR); 100 + 101 + /* Enable interrupts. 
*/ 102 + val = dma_read32(DMA_CSR); 103 + dma_write32(val | DMA_INT_ENAB, DMA_CSR); 104 + } 105 + 106 + static void sun3x_esp_dma_drain(struct esp *esp) 107 + { 108 + u32 csr; 109 + int lim; 110 + 111 + csr = dma_read32(DMA_CSR); 112 + if (!(csr & DMA_FIFO_ISDRAIN)) 113 + return; 114 + 115 + dma_write32(csr | DMA_FIFO_STDRAIN, DMA_CSR); 116 + 117 + lim = 1000; 118 + while (dma_read32(DMA_CSR) & DMA_FIFO_ISDRAIN) { 119 + if (--lim == 0) { 120 + printk(KERN_ALERT PFX "esp%d: DMA will not drain!\n", 121 + esp->host->unique_id); 122 + break; 194 123 } 195 - 196 - } 197 - } 198 - 199 - static void dma_invalidate(struct NCR_ESP *esp) 200 - { 201 - struct sparc_dma_registers *dregs = 202 - (struct sparc_dma_registers *) esp->dregs; 203 - 204 - __u32 tmp; 205 - int count = 500000; 206 - 207 - while(((tmp = dregs->cond_reg) & DMA_PEND_READ) && (--count > 0)) 208 124 udelay(1); 209 - 210 - if(!count) { 211 - printk("%s:%d timeout CSR %08lx\n", __FILE__, __LINE__, dregs->cond_reg); 212 125 } 213 - 214 - dregs->cond_reg = tmp | DMA_FIFO_INV; 215 - dregs->cond_reg &= ~DMA_FIFO_INV; 216 - 217 126 } 218 127 219 - static void dma_dump_state(struct NCR_ESP *esp) 128 + static void sun3x_esp_dma_invalidate(struct esp *esp) 220 129 { 221 - struct sparc_dma_registers *dregs = 222 - (struct sparc_dma_registers *) esp->dregs; 130 + u32 val; 131 + int lim; 223 132 224 - ESPLOG(("esp%d: dma -- cond_reg<%08lx> addr<%08lx>\n", 225 - esp->esp_id, dregs->cond_reg, dregs->st_addr)); 226 - } 227 - 228 - static void dma_init_read(struct NCR_ESP *esp, __u32 vaddress, int length) 229 - { 230 - struct sparc_dma_registers *dregs = 231 - (struct sparc_dma_registers *) esp->dregs; 232 - 233 - dregs->st_addr = vaddress; 234 - dregs->cond_reg |= (DMA_ST_WRITE | DMA_ENABLE); 235 - } 236 - 237 - static void dma_init_write(struct NCR_ESP *esp, __u32 vaddress, int length) 238 - { 239 - struct sparc_dma_registers *dregs = 240 - (struct sparc_dma_registers *) esp->dregs; 241 - 242 - /* Set up the DMA counters 
*/ 243 - 244 - dregs->st_addr = vaddress; 245 - dregs->cond_reg = ((dregs->cond_reg & ~(DMA_ST_WRITE)) | DMA_ENABLE); 246 - } 247 - 248 - static void dma_ints_off(struct NCR_ESP *esp) 249 - { 250 - DMA_INTSOFF((struct sparc_dma_registers *) esp->dregs); 251 - } 252 - 253 - static void dma_ints_on(struct NCR_ESP *esp) 254 - { 255 - DMA_INTSON((struct sparc_dma_registers *) esp->dregs); 256 - } 257 - 258 - static int dma_irq_p(struct NCR_ESP *esp) 259 - { 260 - return DMA_IRQ_P((struct sparc_dma_registers *) esp->dregs); 261 - } 262 - 263 - static void dma_poll(struct NCR_ESP *esp, unsigned char *vaddr) 264 - { 265 - int count = 50; 266 - dma_do_drain(esp); 267 - 268 - /* Wait till the first bits settle. */ 269 - while((*(volatile unsigned char *)vaddr == 0xff) && (--count > 0)) 133 + lim = 1000; 134 + while ((val = dma_read32(DMA_CSR)) & DMA_PEND_READ) { 135 + if (--lim == 0) { 136 + printk(KERN_ALERT PFX "esp%d: DMA will not " 137 + "invalidate!\n", esp->host->unique_id); 138 + break; 139 + } 270 140 udelay(1); 271 - 272 - if(!count) { 273 - // printk("%s:%d timeout expire (data %02x)\n", __FILE__, __LINE__, 274 - // esp_read(esp->eregs->esp_fdata)); 275 - //mach_halt(); 276 - vaddr[0] = esp_read(esp->eregs->esp_fdata); 277 - vaddr[1] = esp_read(esp->eregs->esp_fdata); 278 141 } 279 142 280 - } 281 - 282 - static int dma_ports_p(struct NCR_ESP *esp) 283 - { 284 - return (((struct sparc_dma_registers *) esp->dregs)->cond_reg 285 - & DMA_INT_ENAB); 143 + val &= ~(DMA_ENABLE | DMA_ST_WRITE | DMA_BCNT_ENAB); 144 + val |= DMA_FIFO_INV; 145 + dma_write32(val, DMA_CSR); 146 + val &= ~DMA_FIFO_INV; 147 + dma_write32(val, DMA_CSR); 286 148 } 287 149 288 - /* Resetting various pieces of the ESP scsi driver chipset/buses. 
*/ 289 - static void dma_reset(struct NCR_ESP *esp) 150 + static void sun3x_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count, 151 + u32 dma_count, int write, u8 cmd) 290 152 { 291 - struct sparc_dma_registers *dregs = 292 - (struct sparc_dma_registers *)esp->dregs; 153 + u32 csr; 293 154 294 - /* Punt the DVMA into a known state. */ 295 - dregs->cond_reg |= DMA_RST_SCSI; 296 - dregs->cond_reg &= ~(DMA_RST_SCSI); 297 - DMA_INTSON(dregs); 155 + BUG_ON(!(cmd & ESP_CMD_DMA)); 156 + 157 + sun3x_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW); 158 + sun3x_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED); 159 + csr = dma_read32(DMA_CSR); 160 + csr |= DMA_ENABLE; 161 + if (write) 162 + csr |= DMA_ST_WRITE; 163 + else 164 + csr &= ~DMA_ST_WRITE; 165 + dma_write32(csr, DMA_CSR); 166 + dma_write32(addr, DMA_ADDR); 167 + 168 + scsi_esp_cmd(esp, cmd); 298 169 } 299 170 300 - static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write) 171 + static int sun3x_esp_dma_error(struct esp *esp) 301 172 { 302 - struct sparc_dma_registers *dregs = 303 - (struct sparc_dma_registers *) esp->dregs; 304 - unsigned long nreg = dregs->cond_reg; 173 + u32 csr = dma_read32(DMA_CSR); 305 174 306 - // printk("dma_setup %c addr %08x cnt %08x\n", 307 - // write ? 
'W' : 'R', addr, count); 175 + if (csr & DMA_HNDL_ERROR) 176 + return 1; 308 177 309 - dma_do_drain(esp); 310 - 311 - if(write) 312 - nreg |= DMA_ST_WRITE; 313 - else { 314 - nreg &= ~(DMA_ST_WRITE); 315 - } 316 - 317 - nreg |= DMA_ENABLE; 318 - dregs->cond_reg = nreg; 319 - dregs->st_addr = addr; 178 + return 0; 320 179 } 321 180 322 - static void dma_mmu_get_scsi_one (struct NCR_ESP *esp, Scsi_Cmnd *sp) 323 - { 324 - sp->SCp.have_data_in = dvma_map((unsigned long)sp->SCp.buffer, 325 - sp->SCp.this_residual); 326 - sp->SCp.ptr = (char *)((unsigned long)sp->SCp.have_data_in); 327 - } 328 - 329 - static void dma_mmu_get_scsi_sgl (struct NCR_ESP *esp, Scsi_Cmnd *sp) 330 - { 331 - int sz = sp->SCp.buffers_residual; 332 - struct scatterlist *sg = sp->SCp.buffer; 333 - 334 - while (sz >= 0) { 335 - sg[sz].dma_address = dvma_map((unsigned long)sg_virt(&sg[sz]), 336 - sg[sz].length); 337 - sz--; 338 - } 339 - sp->SCp.ptr=(char *)((unsigned long)sp->SCp.buffer->dma_address); 340 - } 341 - 342 - static void dma_mmu_release_scsi_one (struct NCR_ESP *esp, Scsi_Cmnd *sp) 343 - { 344 - dvma_unmap((char *)sp->SCp.have_data_in); 345 - } 346 - 347 - static void dma_mmu_release_scsi_sgl (struct NCR_ESP *esp, Scsi_Cmnd *sp) 348 - { 349 - int sz = sp->use_sg - 1; 350 - struct scatterlist *sg = (struct scatterlist *)sp->request_buffer; 351 - 352 - while(sz >= 0) { 353 - dvma_unmap((char *)sg[sz].dma_address); 354 - sz--; 355 - } 356 - } 357 - 358 - static void dma_advance_sg (Scsi_Cmnd *sp) 359 - { 360 - sp->SCp.ptr = (char *)((unsigned long)sp->SCp.buffer->dma_address); 361 - } 362 - 363 - static int sun3x_esp_release(struct Scsi_Host *instance) 364 - { 365 - /* this code does not support being compiled as a module */ 366 - return 1; 367 - 368 - } 369 - 370 - static struct scsi_host_template driver_template = { 371 - .proc_name = "sun3x_esp", 372 - .proc_info = &esp_proc_info, 373 - .name = "Sun ESP 100/100a/200", 374 - .detect = sun3x_esp_detect, 375 - .release = sun3x_esp_release, 
376 - .slave_alloc = esp_slave_alloc, 377 - .slave_destroy = esp_slave_destroy, 378 - .info = esp_info, 379 - .queuecommand = esp_queue, 380 - .eh_abort_handler = esp_abort, 381 - .eh_bus_reset_handler = esp_reset, 382 - .can_queue = 7, 383 - .this_id = 7, 384 - .sg_tablesize = SG_ALL, 385 - .cmd_per_lun = 1, 386 - .use_clustering = DISABLE_CLUSTERING, 181 + static const struct esp_driver_ops sun3x_esp_ops = { 182 + .esp_write8 = sun3x_esp_write8, 183 + .esp_read8 = sun3x_esp_read8, 184 + .map_single = sun3x_esp_map_single, 185 + .map_sg = sun3x_esp_map_sg, 186 + .unmap_single = sun3x_esp_unmap_single, 187 + .unmap_sg = sun3x_esp_unmap_sg, 188 + .irq_pending = sun3x_esp_irq_pending, 189 + .reset_dma = sun3x_esp_reset_dma, 190 + .dma_drain = sun3x_esp_dma_drain, 191 + .dma_invalidate = sun3x_esp_dma_invalidate, 192 + .send_dma_cmd = sun3x_esp_send_dma_cmd, 193 + .dma_error = sun3x_esp_dma_error, 387 194 }; 388 195 196 + static int __devinit esp_sun3x_probe(struct platform_device *dev) 197 + { 198 + struct scsi_host_template *tpnt = &scsi_esp_template; 199 + struct Scsi_Host *host; 200 + struct esp *esp; 201 + struct resource *res; 202 + int err = -ENOMEM; 389 203 390 - #include "scsi_module.c" 204 + host = scsi_host_alloc(tpnt, sizeof(struct esp)); 205 + if (!host) 206 + goto fail; 391 207 208 + host->max_id = 8; 209 + esp = shost_priv(host); 210 + 211 + esp->host = host; 212 + esp->dev = dev; 213 + esp->ops = &sun3x_esp_ops; 214 + 215 + res = platform_get_resource(dev, IORESOURCE_MEM, 0); 216 + if (!res && !res->start) 217 + goto fail_unlink; 218 + 219 + esp->regs = ioremap_nocache(res->start, 0x20); 220 + if (!esp->regs) 221 + goto fail_unmap_regs; 222 + 223 + res = platform_get_resource(dev, IORESOURCE_MEM, 1); 224 + if (!res && !res->start) 225 + goto fail_unmap_regs; 226 + 227 + esp->dma_regs = ioremap_nocache(res->start, 0x10); 228 + 229 + esp->command_block = dma_alloc_coherent(esp->dev, 16, 230 + &esp->command_block_dma, 231 + GFP_KERNEL); 232 + if 
(!esp->command_block) 233 + goto fail_unmap_regs_dma; 234 + 235 + host->irq = platform_get_irq(dev, 0); 236 + err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, 237 + "SUN3X ESP", esp); 238 + if (err < 0) 239 + goto fail_unmap_command_block; 240 + 241 + esp->scsi_id = 7; 242 + esp->host->this_id = esp->scsi_id; 243 + esp->scsi_id_mask = (1 << esp->scsi_id); 244 + esp->cfreq = 20000000; 245 + 246 + dev_set_drvdata(&dev->dev, esp); 247 + 248 + err = scsi_esp_register(esp, &dev->dev); 249 + if (err) 250 + goto fail_free_irq; 251 + 252 + return 0; 253 + 254 + fail_free_irq: 255 + free_irq(host->irq, esp); 256 + fail_unmap_command_block: 257 + dma_free_coherent(esp->dev, 16, 258 + esp->command_block, 259 + esp->command_block_dma); 260 + fail_unmap_regs_dma: 261 + iounmap(esp->dma_regs); 262 + fail_unmap_regs: 263 + iounmap(esp->regs); 264 + fail_unlink: 265 + scsi_host_put(host); 266 + fail: 267 + return err; 268 + } 269 + 270 + static int __devexit esp_sun3x_remove(struct platform_device *dev) 271 + { 272 + struct esp *esp = dev_get_drvdata(&dev->dev); 273 + unsigned int irq = esp->host->irq; 274 + u32 val; 275 + 276 + scsi_esp_unregister(esp); 277 + 278 + /* Disable interrupts. 
*/ 279 + val = dma_read32(DMA_CSR); 280 + dma_write32(val & ~DMA_INT_ENAB, DMA_CSR); 281 + 282 + free_irq(irq, esp); 283 + dma_free_coherent(esp->dev, 16, 284 + esp->command_block, 285 + esp->command_block_dma); 286 + 287 + scsi_host_put(esp->host); 288 + 289 + return 0; 290 + } 291 + 292 + static struct platform_driver esp_sun3x_driver = { 293 + .probe = esp_sun3x_probe, 294 + .remove = __devexit_p(esp_sun3x_remove), 295 + .driver = { 296 + .name = "sun3x_esp", 297 + }, 298 + }; 299 + 300 + static int __init sun3x_esp_init(void) 301 + { 302 + return platform_driver_register(&esp_sun3x_driver); 303 + } 304 + 305 + static void __exit sun3x_esp_exit(void) 306 + { 307 + platform_driver_unregister(&esp_sun3x_driver); 308 + } 309 + 310 + MODULE_DESCRIPTION("Sun3x ESP SCSI driver"); 311 + MODULE_AUTHOR("Thomas Bogendoerfer (tsbogend@alpha.franken.de)"); 392 312 MODULE_LICENSE("GPL"); 313 + MODULE_VERSION(DRV_VERSION); 314 + 315 + module_init(sun3x_esp_init); 316 + module_exit(sun3x_esp_exit);
+1 -1
drivers/scsi/sym53c8xx_2/sym_hipd.c
··· 3842 3842 if (cp->startp == cp->phys.head.lastp || 3843 3843 sym_evaluate_dp(np, cp, scr_to_cpu(cp->phys.head.lastp), 3844 3844 &dp_ofs) < 0) { 3845 - return cp->data_len; 3845 + return cp->data_len - cp->odd_byte_adjustment; 3846 3846 } 3847 3847 3848 3848 /*
+1 -1
drivers/scsi/u14-34f.c
··· 1216 1216 cpp->xdir = DTD_IN; 1217 1217 return; 1218 1218 } 1219 - else if (SCpnt->sc_data_direction == DMA_FROM_DEVICE) { 1219 + else if (SCpnt->sc_data_direction == DMA_TO_DEVICE) { 1220 1220 cpp->xdir = DTD_OUT; 1221 1221 return; 1222 1222 }
+129
include/linux/enclosure.h
··· 1 + /* 2 + * Enclosure Services 3 + * 4 + * Copyright (C) 2008 James Bottomley <James.Bottomley@HansenPartnership.com> 5 + * 6 + **----------------------------------------------------------------------------- 7 + ** 8 + ** This program is free software; you can redistribute it and/or 9 + ** modify it under the terms of the GNU General Public License 10 + ** version 2 as published by the Free Software Foundation. 11 + ** 12 + ** This program is distributed in the hope that it will be useful, 13 + ** but WITHOUT ANY WARRANTY; without even the implied warranty of 14 + ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 + ** GNU General Public License for more details. 16 + ** 17 + ** You should have received a copy of the GNU General Public License 18 + ** along with this program; if not, write to the Free Software 19 + ** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 20 + ** 21 + **----------------------------------------------------------------------------- 22 + */ 23 + #ifndef _LINUX_ENCLOSURE_H_ 24 + #define _LINUX_ENCLOSURE_H_ 25 + 26 + #include <linux/device.h> 27 + #include <linux/list.h> 28 + 29 + /* A few generic types ... 
taken from ses-2 */ 30 + enum enclosure_component_type { 31 + ENCLOSURE_COMPONENT_DEVICE = 0x01, 32 + ENCLOSURE_COMPONENT_ARRAY_DEVICE = 0x17, 33 + }; 34 + 35 + /* ses-2 common element status */ 36 + enum enclosure_status { 37 + ENCLOSURE_STATUS_UNSUPPORTED = 0, 38 + ENCLOSURE_STATUS_OK, 39 + ENCLOSURE_STATUS_CRITICAL, 40 + ENCLOSURE_STATUS_NON_CRITICAL, 41 + ENCLOSURE_STATUS_UNRECOVERABLE, 42 + ENCLOSURE_STATUS_NOT_INSTALLED, 43 + ENCLOSURE_STATUS_UNKNOWN, 44 + ENCLOSURE_STATUS_UNAVAILABLE, 45 + }; 46 + 47 + /* SFF-8485 activity light settings */ 48 + enum enclosure_component_setting { 49 + ENCLOSURE_SETTING_DISABLED = 0, 50 + ENCLOSURE_SETTING_ENABLED = 1, 51 + ENCLOSURE_SETTING_BLINK_A_ON_OFF = 2, 52 + ENCLOSURE_SETTING_BLINK_A_OFF_ON = 3, 53 + ENCLOSURE_SETTING_BLINK_B_ON_OFF = 6, 54 + ENCLOSURE_SETTING_BLINK_B_OFF_ON = 7, 55 + }; 56 + 57 + struct enclosure_device; 58 + struct enclosure_component; 59 + struct enclosure_component_callbacks { 60 + void (*get_status)(struct enclosure_device *, 61 + struct enclosure_component *); 62 + int (*set_status)(struct enclosure_device *, 63 + struct enclosure_component *, 64 + enum enclosure_status); 65 + void (*get_fault)(struct enclosure_device *, 66 + struct enclosure_component *); 67 + int (*set_fault)(struct enclosure_device *, 68 + struct enclosure_component *, 69 + enum enclosure_component_setting); 70 + void (*get_active)(struct enclosure_device *, 71 + struct enclosure_component *); 72 + int (*set_active)(struct enclosure_device *, 73 + struct enclosure_component *, 74 + enum enclosure_component_setting); 75 + void (*get_locate)(struct enclosure_device *, 76 + struct enclosure_component *); 77 + int (*set_locate)(struct enclosure_device *, 78 + struct enclosure_component *, 79 + enum enclosure_component_setting); 80 + }; 81 + 82 + 83 + struct enclosure_component { 84 + void *scratch; 85 + struct class_device cdev; 86 + enum enclosure_component_type type; 87 + int number; 88 + int fault; 89 + int active; 90 + int 
locate; 91 + enum enclosure_status status; 92 + }; 93 + 94 + struct enclosure_device { 95 + void *scratch; 96 + struct list_head node; 97 + struct class_device cdev; 98 + struct enclosure_component_callbacks *cb; 99 + int components; 100 + struct enclosure_component component[0]; 101 + }; 102 + 103 + static inline struct enclosure_device * 104 + to_enclosure_device(struct class_device *dev) 105 + { 106 + return container_of(dev, struct enclosure_device, cdev); 107 + } 108 + 109 + static inline struct enclosure_component * 110 + to_enclosure_component(struct class_device *dev) 111 + { 112 + return container_of(dev, struct enclosure_component, cdev); 113 + } 114 + 115 + struct enclosure_device * 116 + enclosure_register(struct device *, const char *, int, 117 + struct enclosure_component_callbacks *); 118 + void enclosure_unregister(struct enclosure_device *); 119 + struct enclosure_component * 120 + enclosure_component_register(struct enclosure_device *, unsigned int, 121 + enum enclosure_component_type, const char *); 122 + int enclosure_add_device(struct enclosure_device *enclosure, int component, 123 + struct device *dev); 124 + int enclosure_remove_device(struct enclosure_device *enclosure, int component); 125 + struct enclosure_device *enclosure_find(struct device *dev); 126 + int enclosure_for_each_device(int (*fn)(struct enclosure_device *, void *), 127 + void *data); 128 + 129 + #endif /* _LINUX_ENCLOSURE_H_ */
+2 -2
include/scsi/iscsi_proto.h
··· 45 45 /* initiator tags; opaque for target */ 46 46 typedef uint32_t __bitwise__ itt_t; 47 47 /* below makes sense only for initiator that created this tag */ 48 - #define build_itt(itt, id, age) ((__force itt_t)\ 49 - ((itt) | ((id) << ISCSI_CID_SHIFT) | ((age) << ISCSI_AGE_SHIFT))) 48 + #define build_itt(itt, age) ((__force itt_t)\ 49 + ((itt) | ((age) << ISCSI_AGE_SHIFT))) 50 50 #define get_itt(itt) ((__force uint32_t)(itt_t)(itt) & ISCSI_ITT_MASK) 51 51 #define RESERVED_ITT ((__force itt_t)0xffffffff) 52 52
+26 -4
include/scsi/libiscsi.h
··· 70 70 #define ISCSI_SUSPEND_BIT 1 71 71 72 72 #define ISCSI_ITT_MASK (0xfff) 73 - #define ISCSI_CID_SHIFT 12 74 - #define ISCSI_CID_MASK (0xffff << ISCSI_CID_SHIFT) 75 73 #define ISCSI_AGE_SHIFT 28 76 74 #define ISCSI_AGE_MASK (0xf << ISCSI_AGE_SHIFT) 77 75 ··· 132 134 { 133 135 return (void*)ctask->hdr + ctask->hdr_len; 134 136 } 137 + 138 + /* Connection's states */ 139 + enum { 140 + ISCSI_CONN_INITIAL_STAGE, 141 + ISCSI_CONN_STARTED, 142 + ISCSI_CONN_STOPPED, 143 + ISCSI_CONN_CLEANUP_WAIT, 144 + }; 135 145 136 146 struct iscsi_conn { 137 147 struct iscsi_cls_conn *cls_conn; /* ptr to class connection */ ··· 231 225 struct kfifo *queue; /* FIFO Queue */ 232 226 void **pool; /* Pool of elements */ 233 227 int max; /* Max number of elements */ 228 + }; 229 + 230 + /* Session's states */ 231 + enum { 232 + ISCSI_STATE_FREE = 1, 233 + ISCSI_STATE_LOGGED_IN, 234 + ISCSI_STATE_FAILED, 235 + ISCSI_STATE_TERMINATE, 236 + ISCSI_STATE_IN_RECOVERY, 237 + ISCSI_STATE_RECOVERY_FAILED, 238 + ISCSI_STATE_LOGGING_OUT, 234 239 }; 235 240 236 241 struct iscsi_session { ··· 342 325 #define session_to_cls(_sess) \ 343 326 hostdata_session(_sess->host->hostdata) 344 327 328 + #define iscsi_session_printk(prefix, _sess, fmt, a...) \ 329 + iscsi_cls_session_printk(prefix, \ 330 + (struct iscsi_cls_session *)session_to_cls(_sess), fmt, ##a) 331 + 345 332 /* 346 333 * connection management 347 334 */ ··· 360 339 extern int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn, 361 340 enum iscsi_param param, char *buf); 362 341 342 + #define iscsi_conn_printk(prefix, _c, fmt, a...) 
\ 343 + iscsi_cls_conn_printk(prefix, _c->cls_conn, fmt, ##a) 344 + 363 345 /* 364 346 * pdu and task processing 365 347 */ ··· 373 349 char *, uint32_t); 374 350 extern int iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *, 375 351 char *, int); 376 - extern int __iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *, 377 - char *, int); 378 352 extern int iscsi_verify_itt(struct iscsi_conn *, struct iscsi_hdr *, 379 353 uint32_t *); 380 354 extern void iscsi_requeue_ctask(struct iscsi_cmd_task *ctask);
+14
include/scsi/scsi.h
··· 235 235 #define TYPE_RBC 0x0e 236 236 #define TYPE_NO_LUN 0x7f 237 237 238 + /* SCSI protocols; these are taken from SPC-3 section 7.5 */ 239 + enum scsi_protocol { 240 + SCSI_PROTOCOL_FCP = 0, /* Fibre Channel */ 241 + SCSI_PROTOCOL_SPI = 1, /* parallel SCSI */ 242 + SCSI_PROTOCOL_SSA = 2, /* Serial Storage Architecture - Obsolete */ 243 + SCSI_PROTOCOL_SBP = 3, /* firewire */ 244 + SCSI_PROTOCOL_SRP = 4, /* Infiniband RDMA */ 245 + SCSI_PROTOCOL_ISCSI = 5, 246 + SCSI_PROTOCOL_SAS = 6, 247 + SCSI_PROTOCOL_ADT = 7, /* Media Changers */ 248 + SCSI_PROTOCOL_ATA = 8, 249 + SCSI_PROTOCOL_UNSPEC = 0xf, /* No specific protocol */ 250 + }; 251 + 238 252 /* Returns a human-readable name for the device */ 239 253 extern const char * scsi_device_type(unsigned type); 240 254
+25 -19
include/scsi/scsi_host.h
··· 280 280 * If the host wants to be called before the scan starts, but 281 281 * after the midlayer has set up ready for the scan, it can fill 282 282 * in this function. 283 + * 284 + * Status: OPTIONAL 283 285 */ 284 286 void (* scan_start)(struct Scsi_Host *); 285 287 286 288 /* 287 - * fill in this function to allow the queue depth of this host 288 - * to be changeable (on a per device basis). returns either 289 + * Fill in this function to allow the queue depth of this host 290 + * to be changeable (on a per device basis). Returns either 289 291 * the current queue depth setting (may be different from what 290 292 * was passed in) or an error. An error should only be 291 293 * returned if the requested depth is legal but the driver was 292 294 * unable to set it. If the requested depth is illegal, the 293 295 * driver should set and return the closest legal queue depth. 294 296 * 297 + * Status: OPTIONAL 295 298 */ 296 299 int (* change_queue_depth)(struct scsi_device *, int); 297 300 298 301 /* 299 - * fill in this function to allow the changing of tag types 302 + * Fill in this function to allow the changing of tag types 300 303 * (this also allows the enabling/disabling of tag command 301 304 * queueing). An error should only be returned if something 302 305 * went wrong in the driver while trying to set the tag type. 303 306 * If the driver doesn't support the requested tag type, then 304 307 * it should set the closest type it does support without 305 308 * returning an error. Returns the actual tag type set. 309 + * 310 + * Status: OPTIONAL 306 311 */ 307 312 int (* change_queue_type)(struct scsi_device *, int); 308 313 309 314 /* 310 - * This function determines the bios parameters for a given 315 + * This function determines the BIOS parameters for a given 311 316 * harddisk. These tend to be numbers that are made up by 312 317 * the host adapter. 
Parameters: 313 318 * size, device, list (heads, sectors, cylinders) 314 319 * 315 - * Status: OPTIONAL */ 320 + * Status: OPTIONAL 321 + */ 316 322 int (* bios_param)(struct scsi_device *, struct block_device *, 317 323 sector_t, int []); 318 324 ··· 357 351 358 352 /* 359 353 * This determines if we will use a non-interrupt driven 360 - * or an interrupt driven scheme, It is set to the maximum number 354 + * or an interrupt driven scheme. It is set to the maximum number 361 355 * of simultaneous commands a given host adapter will accept. 362 356 */ 363 357 int can_queue; ··· 378 372 unsigned short sg_tablesize; 379 373 380 374 /* 381 - * If the host adapter has limitations beside segment count 375 + * Set this if the host adapter has limitations beside segment count. 382 376 */ 383 377 unsigned short max_sectors; 384 378 385 379 /* 386 - * dma scatter gather segment boundary limit. a segment crossing this 380 + * DMA scatter gather segment boundary limit. A segment crossing this 387 381 * boundary will be split in two. 388 382 */ 389 383 unsigned long dma_boundary; ··· 392 386 * This specifies "machine infinity" for host templates which don't 393 387 * limit the transfer size. Note this limit represents an absolute 394 388 * maximum, and may be over the transfer limits allowed for 395 - * individual devices (e.g. 256 for SCSI-1) 389 + * individual devices (e.g. 256 for SCSI-1). 396 390 */ 397 391 #define SCSI_DEFAULT_MAX_SECTORS 1024 398 392 ··· 419 413 unsigned supported_mode:2; 420 414 421 415 /* 422 - * true if this host adapter uses unchecked DMA onto an ISA bus. 416 + * True if this host adapter uses unchecked DMA onto an ISA bus. 423 417 */ 424 418 unsigned unchecked_isa_dma:1; 425 419 426 420 /* 427 - * true if this host adapter can make good use of clustering. 421 + * True if this host adapter can make good use of clustering. 
428 422 * I originally thought that if the tablesize was large that it 429 423 * was a waste of CPU cycles to prepare a cluster list, but 430 424 * it works out that the Buslogic is faster if you use a smaller ··· 434 428 unsigned use_clustering:1; 435 429 436 430 /* 437 - * True for emulated SCSI host adapters (e.g. ATAPI) 431 + * True for emulated SCSI host adapters (e.g. ATAPI). 438 432 */ 439 433 unsigned emulated:1; 440 434 ··· 444 438 unsigned skip_settle_delay:1; 445 439 446 440 /* 447 - * ordered write support 441 + * True if we are using ordered write support. 448 442 */ 449 443 unsigned ordered_tag:1; 450 444 451 445 /* 452 - * Countdown for host blocking with no commands outstanding 446 + * Countdown for host blocking with no commands outstanding. 453 447 */ 454 448 unsigned int max_host_blocked; 455 449 ··· 528 522 struct scsi_transport_template *transportt; 529 523 530 524 /* 531 - * area to keep a shared tag map (if needed, will be 532 - * NULL if not) 525 + * Area to keep a shared tag map (if needed, will be 526 + * NULL if not). 533 527 */ 534 528 struct blk_queue_tag *bqt; 535 529 ··· 602 596 /* 603 597 * Host uses correct SCSI ordering not PC ordering. The bit is 604 598 * set for the minority of drivers whose authors actually read 605 - * the spec ;) 599 + * the spec ;). 606 600 */ 607 601 unsigned reverse_ordering:1; 608 602 609 603 /* 610 - * ordered write support 604 + * Ordered write support 611 605 */ 612 606 unsigned ordered_tag:1; 613 607 614 - /* task mgmt function in progress */ 608 + /* Task mgmt function in progress */ 615 609 unsigned tmf_in_progress:1; 616 610 617 611 /* Asynchronous scan in progress */
+24 -19
include/scsi/scsi_transport_iscsi.h
··· 149 149 extern int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr, 150 150 char *data, uint32_t data_size); 151 151 152 - 153 - /* Connection's states */ 154 - #define ISCSI_CONN_INITIAL_STAGE 0 155 - #define ISCSI_CONN_STARTED 1 156 - #define ISCSI_CONN_STOPPED 2 157 - #define ISCSI_CONN_CLEANUP_WAIT 3 158 - 159 152 struct iscsi_cls_conn { 160 153 struct list_head conn_list; /* item in connlist */ 161 154 void *dd_data; /* LLD private data */ ··· 162 169 #define iscsi_dev_to_conn(_dev) \ 163 170 container_of(_dev, struct iscsi_cls_conn, dev) 164 171 165 - /* Session's states */ 166 - #define ISCSI_STATE_FREE 1 167 - #define ISCSI_STATE_LOGGED_IN 2 168 - #define ISCSI_STATE_FAILED 3 169 - #define ISCSI_STATE_TERMINATE 4 170 - #define ISCSI_STATE_IN_RECOVERY 5 171 - #define ISCSI_STATE_RECOVERY_FAILED 6 172 - #define ISCSI_STATE_LOGGING_OUT 7 172 + #define iscsi_conn_to_session(_conn) \ 173 + iscsi_dev_to_session(_conn->dev.parent) 174 + 175 + /* iscsi class session state */ 176 + enum { 177 + ISCSI_SESSION_LOGGED_IN, 178 + ISCSI_SESSION_FAILED, 179 + ISCSI_SESSION_FREE, 180 + }; 173 181 174 182 struct iscsi_cls_session { 175 183 struct list_head sess_list; /* item in session_list */ 176 184 struct list_head host_list; 177 185 struct iscsi_transport *transport; 186 + spinlock_t lock; 187 + struct work_struct scan_work; 188 + struct work_struct unbind_work; 178 189 179 190 /* recovery fields */ 180 191 int recovery_tmo; 181 192 struct delayed_work recovery_work; 182 - struct work_struct unbind_work; 183 193 184 194 int target_id; 185 195 196 + int state; 186 197 int sid; /* session id */ 187 198 void *dd_data; /* LLD private data */ 188 199 struct device dev; /* sysfs transport/container device */ ··· 203 206 204 207 struct iscsi_host { 205 208 struct list_head sessions; 209 + atomic_t nr_scans; 206 210 struct mutex mutex; 207 - struct workqueue_struct *unbind_workq; 208 - char unbind_workq_name[KOBJ_NAME_LEN]; 211 + struct workqueue_struct 
*scan_workq; 212 + char scan_workq_name[KOBJ_NAME_LEN]; 209 213 }; 210 214 211 215 /* 212 216 * session and connection functions that can be used by HW iSCSI LLDs 213 217 */ 218 + #define iscsi_cls_session_printk(prefix, _cls_session, fmt, a...) \ 219 + dev_printk(prefix, &(_cls_session)->dev, fmt, ##a) 220 + 221 + #define iscsi_cls_conn_printk(prefix, _cls_conn, fmt, a...) \ 222 + dev_printk(prefix, &(_cls_conn)->dev, fmt, ##a) 223 + 224 + extern int iscsi_session_chkready(struct iscsi_cls_session *session); 214 225 extern struct iscsi_cls_session *iscsi_alloc_session(struct Scsi_Host *shost, 215 226 struct iscsi_transport *transport); 216 227 extern int iscsi_add_session(struct iscsi_cls_session *session, ··· 236 231 extern int iscsi_destroy_conn(struct iscsi_cls_conn *conn); 237 232 extern void iscsi_unblock_session(struct iscsi_cls_session *session); 238 233 extern void iscsi_block_session(struct iscsi_cls_session *session); 239 - 234 + extern int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time); 240 235 241 236 #endif