Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull second round of SCSI updates from James Bottomley:
"There's one late arriving patch here (added today), fixing a build
issue which the scsi_dh patch set in here uncovered. Other than that,
everything has been incubated in -next and the checkers for a week.

The major pieces of this patch are a set of patches facilitating better
integration between scsi and scsi_dh (the device handling layer used
by multi-path; all the dm parts are acked by Mike Snitzer).

This also includes driver updates for mpt2sas, scsi_debug and an
assortment of bug fixes"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (50 commits)
scsi_dh: fix randconfig build error
scsi: fix scsi_error_handler vs. scsi_host_dev_release race
fcoe: Convert use of __constant_htons to htons
mpt2sas: setpci reset kernel oops fix
pm80xx: Don't override ts->stat on IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY
lpfc: Fix possible use-after-free and double free in lpfc_mbx_cmpl_rdp_page_a2()
bfa: Fix incorrect de-reference of pointer
bfa: Fix indentation
scsi_transport_sas: Remove check for SAS expander when querying bay/enclosure IDs.
scsi_debug: resp_request: remove unused variable
scsi_debug: fix REPORT LUNS Well Known LU
scsi_debug: schedule_resp fix input variable check
scsi_debug: make dump_sector static
scsi_debug: vfree is null safe so drop the check
scsi_debug: use SCSI_W_LUN_REPORT_LUNS instead of SAM2_WLUN_REPORT_LUNS;
scsi_debug: define pr_fmt() for consistent logging
mpt2sas: Refcount fw_events and fix unsafe list usage
mpt2sas: Refcount sas_device objects and fix unsafe list usage
scsi_dh: return SCSI_DH_NOTCONN in scsi_dh_activate()
scsi_dh: don't allow to detach device handlers at runtime
...

+1860 -1271
+1 -1
drivers/md/Kconfig
··· 393 393 # of SCSI_DH if the latter isn't defined but if 394 394 # it is, DM_MULTIPATH must depend on it. We get a build 395 395 # error if SCSI_DH=m and DM_MULTIPATH=y 396 - depends on SCSI_DH || !SCSI_DH 396 + depends on !SCSI_DH || SCSI 397 397 ---help--- 398 398 Allow volume managers to support multipath hardware. 399 399
+6 -21
drivers/md/dm-mpath.c
··· 159 159 static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti) 160 160 { 161 161 struct pgpath *pgpath, *tmp; 162 - struct multipath *m = ti->private; 163 162 164 163 list_for_each_entry_safe(pgpath, tmp, pgpaths, list) { 165 164 list_del(&pgpath->list); 166 - if (m->hw_handler_name) 167 - scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev)); 168 165 dm_put_device(ti, pgpath->path.dev); 169 166 free_pgpath(pgpath); 170 167 } ··· 577 580 q = bdev_get_queue(p->path.dev->bdev); 578 581 579 582 if (m->retain_attached_hw_handler) { 583 + retain: 580 584 attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL); 581 585 if (attached_handler_name) { 582 586 /* ··· 597 599 } 598 600 599 601 if (m->hw_handler_name) { 600 - /* 601 - * Increments scsi_dh reference, even when using an 602 - * already-attached handler. 603 - */ 604 602 r = scsi_dh_attach(q, m->hw_handler_name); 605 603 if (r == -EBUSY) { 606 - /* 607 - * Already attached to different hw_handler: 608 - * try to reattach with correct one. 609 - */ 610 - scsi_dh_detach(q); 611 - r = scsi_dh_attach(q, m->hw_handler_name); 612 - } 604 + char b[BDEVNAME_SIZE]; 613 605 606 + printk(KERN_INFO "dm-mpath: retaining handler on device %s\n", 607 + bdevname(p->path.dev->bdev, b)); 608 + goto retain; 609 + } 614 610 if (r < 0) { 615 611 ti->error = "error attaching hardware handler"; 616 612 dm_put_device(ti, p->path.dev); ··· 616 624 if (r < 0) { 617 625 ti->error = "unable to set hardware " 618 626 "handler parameters"; 619 - scsi_dh_detach(q); 620 627 dm_put_device(ti, p->path.dev); 621 628 goto bad; 622 629 } ··· 725 734 return 0; 726 735 727 736 m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL); 728 - if (!try_then_request_module(scsi_dh_handler_exist(m->hw_handler_name), 729 - "scsi_dh_%s", m->hw_handler_name)) { 730 - ti->error = "unknown hardware handler type"; 731 - ret = -EINVAL; 732 - goto fail; 733 - } 734 737 735 738 if (hw_argc > 1) { 736 739 char *p;
+1
drivers/scsi/Makefile
··· 172 172 scsi_mod-$(CONFIG_SCSI_PROC_FS) += scsi_proc.o 173 173 scsi_mod-y += scsi_trace.o scsi_logging.o 174 174 scsi_mod-$(CONFIG_PM) += scsi_pm.o 175 + scsi_mod-$(CONFIG_SCSI_DH) += scsi_dh.o 175 176 176 177 hv_storvsc-y := storvsc_drv.o 177 178
+3 -2
drivers/scsi/aic94xx/aic94xx_sds.c
··· 983 983 { 984 984 int err, i; 985 985 u32 offs, size; 986 - struct asd_ll_el *el; 986 + struct asd_ll_el *el = NULL; 987 987 struct asd_ctrla_phy_settings *ps; 988 988 struct asd_ctrla_phy_settings dflt_ps; 989 989 ··· 1004 1004 1005 1005 size = sizeof(struct asd_ctrla_phy_settings); 1006 1006 ps = &dflt_ps; 1007 + goto out_process; 1007 1008 } 1008 1009 1009 1010 if (size == 0) ··· 1029 1028 ASD_DPRINTK("couldn't find ctrla phy settings struct\n"); 1030 1029 goto out2; 1031 1030 } 1032 - 1031 + out_process: 1033 1032 err = asd_process_ctrla_phy_settings(asd_ha, ps); 1034 1033 if (err) { 1035 1034 ASD_DPRINTK("couldn't process ctrla phy settings\n");
+12 -12
drivers/scsi/bfa/bfa_ioc.c
··· 3665 3665 if (sfp->state_query_cbfn) 3666 3666 sfp->state_query_cbfn(sfp->state_query_cbarg, 3667 3667 sfp->status); 3668 - sfp->media = NULL; 3669 - } 3668 + sfp->media = NULL; 3669 + } 3670 3670 3671 - if (sfp->portspeed) { 3672 - sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed); 3673 - if (sfp->state_query_cbfn) 3674 - sfp->state_query_cbfn(sfp->state_query_cbarg, 3675 - sfp->status); 3676 - sfp->portspeed = BFA_PORT_SPEED_UNKNOWN; 3677 - } 3671 + if (sfp->portspeed) { 3672 + sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed); 3673 + if (sfp->state_query_cbfn) 3674 + sfp->state_query_cbfn(sfp->state_query_cbarg, 3675 + sfp->status); 3676 + sfp->portspeed = BFA_PORT_SPEED_UNKNOWN; 3677 + } 3678 3678 3679 - sfp->state_query_lock = 0; 3680 - sfp->state_query_cbfn = NULL; 3679 + sfp->state_query_lock = 0; 3680 + sfp->state_query_cbfn = NULL; 3681 3681 } 3682 3682 3683 3683 /* ··· 3878 3878 bfa_trc(sfp, sfp->data_valid); 3879 3879 if (sfp->data_valid) { 3880 3880 u32 size = sizeof(struct sfp_mem_s); 3881 - u8 *des = (u8 *) &(sfp->sfpmem); 3881 + u8 *des = (u8 *)(sfp->sfpmem); 3882 3882 memcpy(des, sfp->dbuf_kva, size); 3883 3883 } 3884 3884 /*
+1 -1
drivers/scsi/device_handler/Kconfig
··· 3 3 # 4 4 5 5 menuconfig SCSI_DH 6 - tristate "SCSI Device Handlers" 6 + bool "SCSI Device Handlers" 7 7 depends on SCSI 8 8 default n 9 9 help
-1
drivers/scsi/device_handler/Makefile
··· 1 1 # 2 2 # SCSI Device Handler 3 3 # 4 - obj-$(CONFIG_SCSI_DH) += scsi_dh.o 5 4 obj-$(CONFIG_SCSI_DH_RDAC) += scsi_dh_rdac.o 6 5 obj-$(CONFIG_SCSI_DH_HP_SW) += scsi_dh_hp_sw.o 7 6 obj-$(CONFIG_SCSI_DH_EMC) += scsi_dh_emc.o
-621
drivers/scsi/device_handler/scsi_dh.c
··· 1 - /* 2 - * SCSI device handler infrastruture. 3 - * 4 - * This program is free software; you can redistribute it and/or modify it 5 - * under the terms of the GNU General Public License as published by the 6 - * Free Software Foundation; either version 2 of the License, or (at your 7 - * option) any later version. 8 - * 9 - * This program is distributed in the hope that it will be useful, but 10 - * WITHOUT ANY WARRANTY; without even the implied warranty of 11 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 12 - * General Public License for more details. 13 - * 14 - * You should have received a copy of the GNU General Public License along 15 - * with this program; if not, write to the Free Software Foundation, Inc., 16 - * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 17 - * 18 - * Copyright IBM Corporation, 2007 19 - * Authors: 20 - * Chandra Seetharaman <sekharan@us.ibm.com> 21 - * Mike Anderson <andmike@linux.vnet.ibm.com> 22 - */ 23 - 24 - #include <linux/slab.h> 25 - #include <linux/module.h> 26 - #include <scsi/scsi_dh.h> 27 - #include "../scsi_priv.h" 28 - 29 - static DEFINE_SPINLOCK(list_lock); 30 - static LIST_HEAD(scsi_dh_list); 31 - 32 - static struct scsi_device_handler *get_device_handler(const char *name) 33 - { 34 - struct scsi_device_handler *tmp, *found = NULL; 35 - 36 - spin_lock(&list_lock); 37 - list_for_each_entry(tmp, &scsi_dh_list, list) { 38 - if (!strncmp(tmp->name, name, strlen(tmp->name))) { 39 - found = tmp; 40 - break; 41 - } 42 - } 43 - spin_unlock(&list_lock); 44 - return found; 45 - } 46 - 47 - /* 48 - * device_handler_match_function - Match a device handler to a device 49 - * @sdev - SCSI device to be tested 50 - * 51 - * Tests @sdev against the match function of all registered device_handler. 52 - * Returns the found device handler or NULL if not found. 
53 - */ 54 - static struct scsi_device_handler * 55 - device_handler_match_function(struct scsi_device *sdev) 56 - { 57 - struct scsi_device_handler *tmp_dh, *found_dh = NULL; 58 - 59 - spin_lock(&list_lock); 60 - list_for_each_entry(tmp_dh, &scsi_dh_list, list) { 61 - if (tmp_dh->match && tmp_dh->match(sdev)) { 62 - found_dh = tmp_dh; 63 - break; 64 - } 65 - } 66 - spin_unlock(&list_lock); 67 - return found_dh; 68 - } 69 - 70 - /* 71 - * device_handler_match - Attach a device handler to a device 72 - * @scsi_dh - The device handler to match against or NULL 73 - * @sdev - SCSI device to be tested against @scsi_dh 74 - * 75 - * Tests @sdev against the device handler @scsi_dh or against 76 - * all registered device_handler if @scsi_dh == NULL. 77 - * Returns the found device handler or NULL if not found. 78 - */ 79 - static struct scsi_device_handler * 80 - device_handler_match(struct scsi_device_handler *scsi_dh, 81 - struct scsi_device *sdev) 82 - { 83 - struct scsi_device_handler *found_dh; 84 - 85 - found_dh = device_handler_match_function(sdev); 86 - 87 - if (scsi_dh && found_dh != scsi_dh) 88 - found_dh = NULL; 89 - 90 - return found_dh; 91 - } 92 - 93 - /* 94 - * scsi_dh_handler_attach - Attach a device handler to a device 95 - * @sdev - SCSI device the device handler should attach to 96 - * @scsi_dh - The device handler to attach 97 - */ 98 - static int scsi_dh_handler_attach(struct scsi_device *sdev, 99 - struct scsi_device_handler *scsi_dh) 100 - { 101 - struct scsi_dh_data *d; 102 - 103 - if (sdev->scsi_dh_data) { 104 - if (sdev->scsi_dh_data->scsi_dh != scsi_dh) 105 - return -EBUSY; 106 - 107 - kref_get(&sdev->scsi_dh_data->kref); 108 - return 0; 109 - } 110 - 111 - if (!try_module_get(scsi_dh->module)) 112 - return -EINVAL; 113 - 114 - d = scsi_dh->attach(sdev); 115 - if (IS_ERR(d)) { 116 - sdev_printk(KERN_ERR, sdev, "%s: Attach failed (%ld)\n", 117 - scsi_dh->name, PTR_ERR(d)); 118 - module_put(scsi_dh->module); 119 - return PTR_ERR(d); 120 - } 121 - 
122 - d->scsi_dh = scsi_dh; 123 - kref_init(&d->kref); 124 - d->sdev = sdev; 125 - 126 - spin_lock_irq(sdev->request_queue->queue_lock); 127 - sdev->scsi_dh_data = d; 128 - spin_unlock_irq(sdev->request_queue->queue_lock); 129 - return 0; 130 - } 131 - 132 - static void __detach_handler (struct kref *kref) 133 - { 134 - struct scsi_dh_data *scsi_dh_data = 135 - container_of(kref, struct scsi_dh_data, kref); 136 - struct scsi_device_handler *scsi_dh = scsi_dh_data->scsi_dh; 137 - struct scsi_device *sdev = scsi_dh_data->sdev; 138 - 139 - scsi_dh->detach(sdev); 140 - 141 - spin_lock_irq(sdev->request_queue->queue_lock); 142 - sdev->scsi_dh_data = NULL; 143 - spin_unlock_irq(sdev->request_queue->queue_lock); 144 - 145 - sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", scsi_dh->name); 146 - module_put(scsi_dh->module); 147 - } 148 - 149 - /* 150 - * scsi_dh_handler_detach - Detach a device handler from a device 151 - * @sdev - SCSI device the device handler should be detached from 152 - * @scsi_dh - Device handler to be detached 153 - * 154 - * Detach from a device handler. If a device handler is specified, 155 - * only detach if the currently attached handler matches @scsi_dh. 
156 - */ 157 - static void scsi_dh_handler_detach(struct scsi_device *sdev, 158 - struct scsi_device_handler *scsi_dh) 159 - { 160 - if (!sdev->scsi_dh_data) 161 - return; 162 - 163 - if (scsi_dh && scsi_dh != sdev->scsi_dh_data->scsi_dh) 164 - return; 165 - 166 - if (!scsi_dh) 167 - scsi_dh = sdev->scsi_dh_data->scsi_dh; 168 - 169 - if (scsi_dh) 170 - kref_put(&sdev->scsi_dh_data->kref, __detach_handler); 171 - } 172 - 173 - /* 174 - * Functions for sysfs attribute 'dh_state' 175 - */ 176 - static ssize_t 177 - store_dh_state(struct device *dev, struct device_attribute *attr, 178 - const char *buf, size_t count) 179 - { 180 - struct scsi_device *sdev = to_scsi_device(dev); 181 - struct scsi_device_handler *scsi_dh; 182 - int err = -EINVAL; 183 - 184 - if (sdev->sdev_state == SDEV_CANCEL || 185 - sdev->sdev_state == SDEV_DEL) 186 - return -ENODEV; 187 - 188 - if (!sdev->scsi_dh_data) { 189 - /* 190 - * Attach to a device handler 191 - */ 192 - if (!(scsi_dh = get_device_handler(buf))) 193 - return err; 194 - err = scsi_dh_handler_attach(sdev, scsi_dh); 195 - } else { 196 - scsi_dh = sdev->scsi_dh_data->scsi_dh; 197 - if (!strncmp(buf, "detach", 6)) { 198 - /* 199 - * Detach from a device handler 200 - */ 201 - scsi_dh_handler_detach(sdev, scsi_dh); 202 - err = 0; 203 - } else if (!strncmp(buf, "activate", 8)) { 204 - /* 205 - * Activate a device handler 206 - */ 207 - if (scsi_dh->activate) 208 - err = scsi_dh->activate(sdev, NULL, NULL); 209 - else 210 - err = 0; 211 - } 212 - } 213 - 214 - return err<0?err:count; 215 - } 216 - 217 - static ssize_t 218 - show_dh_state(struct device *dev, struct device_attribute *attr, char *buf) 219 - { 220 - struct scsi_device *sdev = to_scsi_device(dev); 221 - 222 - if (!sdev->scsi_dh_data) 223 - return snprintf(buf, 20, "detached\n"); 224 - 225 - return snprintf(buf, 20, "%s\n", sdev->scsi_dh_data->scsi_dh->name); 226 - } 227 - 228 - static struct device_attribute scsi_dh_state_attr = 229 - __ATTR(dh_state, S_IRUGO | S_IWUSR, 
show_dh_state, 230 - store_dh_state); 231 - 232 - /* 233 - * scsi_dh_sysfs_attr_add - Callback for scsi_init_dh 234 - */ 235 - static int scsi_dh_sysfs_attr_add(struct device *dev, void *data) 236 - { 237 - struct scsi_device *sdev; 238 - int err; 239 - 240 - if (!scsi_is_sdev_device(dev)) 241 - return 0; 242 - 243 - sdev = to_scsi_device(dev); 244 - 245 - err = device_create_file(&sdev->sdev_gendev, 246 - &scsi_dh_state_attr); 247 - 248 - return 0; 249 - } 250 - 251 - /* 252 - * scsi_dh_sysfs_attr_remove - Callback for scsi_exit_dh 253 - */ 254 - static int scsi_dh_sysfs_attr_remove(struct device *dev, void *data) 255 - { 256 - struct scsi_device *sdev; 257 - 258 - if (!scsi_is_sdev_device(dev)) 259 - return 0; 260 - 261 - sdev = to_scsi_device(dev); 262 - 263 - device_remove_file(&sdev->sdev_gendev, 264 - &scsi_dh_state_attr); 265 - 266 - return 0; 267 - } 268 - 269 - /* 270 - * scsi_dh_notifier - notifier chain callback 271 - */ 272 - static int scsi_dh_notifier(struct notifier_block *nb, 273 - unsigned long action, void *data) 274 - { 275 - struct device *dev = data; 276 - struct scsi_device *sdev; 277 - int err = 0; 278 - struct scsi_device_handler *devinfo = NULL; 279 - 280 - if (!scsi_is_sdev_device(dev)) 281 - return 0; 282 - 283 - sdev = to_scsi_device(dev); 284 - 285 - if (action == BUS_NOTIFY_ADD_DEVICE) { 286 - err = device_create_file(dev, &scsi_dh_state_attr); 287 - /* don't care about err */ 288 - devinfo = device_handler_match(NULL, sdev); 289 - if (devinfo) 290 - err = scsi_dh_handler_attach(sdev, devinfo); 291 - } else if (action == BUS_NOTIFY_DEL_DEVICE) { 292 - device_remove_file(dev, &scsi_dh_state_attr); 293 - scsi_dh_handler_detach(sdev, NULL); 294 - } 295 - return err; 296 - } 297 - 298 - /* 299 - * scsi_dh_notifier_add - Callback for scsi_register_device_handler 300 - */ 301 - static int scsi_dh_notifier_add(struct device *dev, void *data) 302 - { 303 - struct scsi_device_handler *scsi_dh = data; 304 - struct scsi_device *sdev; 305 - 306 - 
if (!scsi_is_sdev_device(dev)) 307 - return 0; 308 - 309 - if (!get_device(dev)) 310 - return 0; 311 - 312 - sdev = to_scsi_device(dev); 313 - 314 - if (device_handler_match(scsi_dh, sdev)) 315 - scsi_dh_handler_attach(sdev, scsi_dh); 316 - 317 - put_device(dev); 318 - 319 - return 0; 320 - } 321 - 322 - /* 323 - * scsi_dh_notifier_remove - Callback for scsi_unregister_device_handler 324 - */ 325 - static int scsi_dh_notifier_remove(struct device *dev, void *data) 326 - { 327 - struct scsi_device_handler *scsi_dh = data; 328 - struct scsi_device *sdev; 329 - 330 - if (!scsi_is_sdev_device(dev)) 331 - return 0; 332 - 333 - if (!get_device(dev)) 334 - return 0; 335 - 336 - sdev = to_scsi_device(dev); 337 - 338 - scsi_dh_handler_detach(sdev, scsi_dh); 339 - 340 - put_device(dev); 341 - 342 - return 0; 343 - } 344 - 345 - /* 346 - * scsi_register_device_handler - register a device handler personality 347 - * module. 348 - * @scsi_dh - device handler to be registered. 349 - * 350 - * Returns 0 on success, -EBUSY if handler already registered. 351 - */ 352 - int scsi_register_device_handler(struct scsi_device_handler *scsi_dh) 353 - { 354 - 355 - if (get_device_handler(scsi_dh->name)) 356 - return -EBUSY; 357 - 358 - if (!scsi_dh->attach || !scsi_dh->detach) 359 - return -EINVAL; 360 - 361 - spin_lock(&list_lock); 362 - list_add(&scsi_dh->list, &scsi_dh_list); 363 - spin_unlock(&list_lock); 364 - 365 - bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, scsi_dh_notifier_add); 366 - printk(KERN_INFO "%s: device handler registered\n", scsi_dh->name); 367 - 368 - return SCSI_DH_OK; 369 - } 370 - EXPORT_SYMBOL_GPL(scsi_register_device_handler); 371 - 372 - /* 373 - * scsi_unregister_device_handler - register a device handler personality 374 - * module. 375 - * @scsi_dh - device handler to be unregistered. 376 - * 377 - * Returns 0 on success, -ENODEV if handler not registered. 
378 - */ 379 - int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh) 380 - { 381 - 382 - if (!get_device_handler(scsi_dh->name)) 383 - return -ENODEV; 384 - 385 - bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, 386 - scsi_dh_notifier_remove); 387 - 388 - spin_lock(&list_lock); 389 - list_del(&scsi_dh->list); 390 - spin_unlock(&list_lock); 391 - printk(KERN_INFO "%s: device handler unregistered\n", scsi_dh->name); 392 - 393 - return SCSI_DH_OK; 394 - } 395 - EXPORT_SYMBOL_GPL(scsi_unregister_device_handler); 396 - 397 - /* 398 - * scsi_dh_activate - activate the path associated with the scsi_device 399 - * corresponding to the given request queue. 400 - * Returns immediately without waiting for activation to be completed. 401 - * @q - Request queue that is associated with the scsi_device to be 402 - * activated. 403 - * @fn - Function to be called upon completion of the activation. 404 - * Function fn is called with data (below) and the error code. 405 - * Function fn may be called from the same calling context. So, 406 - * do not hold the lock in the caller which may be needed in fn. 407 - * @data - data passed to the function fn upon completion. 
408 - * 409 - */ 410 - int scsi_dh_activate(struct request_queue *q, activate_complete fn, void *data) 411 - { 412 - int err = 0; 413 - unsigned long flags; 414 - struct scsi_device *sdev; 415 - struct scsi_device_handler *scsi_dh = NULL; 416 - struct device *dev = NULL; 417 - 418 - spin_lock_irqsave(q->queue_lock, flags); 419 - sdev = q->queuedata; 420 - if (!sdev) { 421 - spin_unlock_irqrestore(q->queue_lock, flags); 422 - err = SCSI_DH_NOSYS; 423 - if (fn) 424 - fn(data, err); 425 - return err; 426 - } 427 - 428 - if (sdev->scsi_dh_data) 429 - scsi_dh = sdev->scsi_dh_data->scsi_dh; 430 - dev = get_device(&sdev->sdev_gendev); 431 - if (!scsi_dh || !dev || 432 - sdev->sdev_state == SDEV_CANCEL || 433 - sdev->sdev_state == SDEV_DEL) 434 - err = SCSI_DH_NOSYS; 435 - if (sdev->sdev_state == SDEV_OFFLINE) 436 - err = SCSI_DH_DEV_OFFLINED; 437 - spin_unlock_irqrestore(q->queue_lock, flags); 438 - 439 - if (err) { 440 - if (fn) 441 - fn(data, err); 442 - goto out; 443 - } 444 - 445 - if (scsi_dh->activate) 446 - err = scsi_dh->activate(sdev, fn, data); 447 - out: 448 - put_device(dev); 449 - return err; 450 - } 451 - EXPORT_SYMBOL_GPL(scsi_dh_activate); 452 - 453 - /* 454 - * scsi_dh_set_params - set the parameters for the device as per the 455 - * string specified in params. 456 - * @q - Request queue that is associated with the scsi_device for 457 - * which the parameters to be set. 458 - * @params - parameters in the following format 459 - * "no_of_params\0param1\0param2\0param3\0...\0" 460 - * for example, string for 2 parameters with value 10 and 21 461 - * is specified as "2\010\021\0". 
462 - */ 463 - int scsi_dh_set_params(struct request_queue *q, const char *params) 464 - { 465 - int err = -SCSI_DH_NOSYS; 466 - unsigned long flags; 467 - struct scsi_device *sdev; 468 - struct scsi_device_handler *scsi_dh = NULL; 469 - 470 - spin_lock_irqsave(q->queue_lock, flags); 471 - sdev = q->queuedata; 472 - if (sdev && sdev->scsi_dh_data) 473 - scsi_dh = sdev->scsi_dh_data->scsi_dh; 474 - if (scsi_dh && scsi_dh->set_params && get_device(&sdev->sdev_gendev)) 475 - err = 0; 476 - spin_unlock_irqrestore(q->queue_lock, flags); 477 - 478 - if (err) 479 - return err; 480 - err = scsi_dh->set_params(sdev, params); 481 - put_device(&sdev->sdev_gendev); 482 - return err; 483 - } 484 - EXPORT_SYMBOL_GPL(scsi_dh_set_params); 485 - 486 - /* 487 - * scsi_dh_handler_exist - Return TRUE(1) if a device handler exists for 488 - * the given name. FALSE(0) otherwise. 489 - * @name - name of the device handler. 490 - */ 491 - int scsi_dh_handler_exist(const char *name) 492 - { 493 - return (get_device_handler(name) != NULL); 494 - } 495 - EXPORT_SYMBOL_GPL(scsi_dh_handler_exist); 496 - 497 - /* 498 - * scsi_dh_attach - Attach device handler 499 - * @q - Request queue that is associated with the scsi_device 500 - * the handler should be attached to 501 - * @name - name of the handler to attach 502 - */ 503 - int scsi_dh_attach(struct request_queue *q, const char *name) 504 - { 505 - unsigned long flags; 506 - struct scsi_device *sdev; 507 - struct scsi_device_handler *scsi_dh; 508 - int err = 0; 509 - 510 - scsi_dh = get_device_handler(name); 511 - if (!scsi_dh) 512 - return -EINVAL; 513 - 514 - spin_lock_irqsave(q->queue_lock, flags); 515 - sdev = q->queuedata; 516 - if (!sdev || !get_device(&sdev->sdev_gendev)) 517 - err = -ENODEV; 518 - spin_unlock_irqrestore(q->queue_lock, flags); 519 - 520 - if (!err) { 521 - err = scsi_dh_handler_attach(sdev, scsi_dh); 522 - put_device(&sdev->sdev_gendev); 523 - } 524 - return err; 525 - } 526 - EXPORT_SYMBOL_GPL(scsi_dh_attach); 527 - 
528 - /* 529 - * scsi_dh_detach - Detach device handler 530 - * @q - Request queue that is associated with the scsi_device 531 - * the handler should be detached from 532 - * 533 - * This function will detach the device handler only 534 - * if the sdev is not part of the internal list, ie 535 - * if it has been attached manually. 536 - */ 537 - void scsi_dh_detach(struct request_queue *q) 538 - { 539 - unsigned long flags; 540 - struct scsi_device *sdev; 541 - struct scsi_device_handler *scsi_dh = NULL; 542 - 543 - spin_lock_irqsave(q->queue_lock, flags); 544 - sdev = q->queuedata; 545 - if (!sdev || !get_device(&sdev->sdev_gendev)) 546 - sdev = NULL; 547 - spin_unlock_irqrestore(q->queue_lock, flags); 548 - 549 - if (!sdev) 550 - return; 551 - 552 - if (sdev->scsi_dh_data) { 553 - scsi_dh = sdev->scsi_dh_data->scsi_dh; 554 - scsi_dh_handler_detach(sdev, scsi_dh); 555 - } 556 - put_device(&sdev->sdev_gendev); 557 - } 558 - EXPORT_SYMBOL_GPL(scsi_dh_detach); 559 - 560 - /* 561 - * scsi_dh_attached_handler_name - Get attached device handler's name 562 - * @q - Request queue that is associated with the scsi_device 563 - * that may have a device handler attached 564 - * @gfp - the GFP mask used in the kmalloc() call when allocating memory 565 - * 566 - * Returns name of attached handler, NULL if no handler is attached. 567 - * Caller must take care to free the returned string. 
568 - */ 569 - const char *scsi_dh_attached_handler_name(struct request_queue *q, gfp_t gfp) 570 - { 571 - unsigned long flags; 572 - struct scsi_device *sdev; 573 - const char *handler_name = NULL; 574 - 575 - spin_lock_irqsave(q->queue_lock, flags); 576 - sdev = q->queuedata; 577 - if (!sdev || !get_device(&sdev->sdev_gendev)) 578 - sdev = NULL; 579 - spin_unlock_irqrestore(q->queue_lock, flags); 580 - 581 - if (!sdev) 582 - return NULL; 583 - 584 - if (sdev->scsi_dh_data) 585 - handler_name = kstrdup(sdev->scsi_dh_data->scsi_dh->name, gfp); 586 - 587 - put_device(&sdev->sdev_gendev); 588 - return handler_name; 589 - } 590 - EXPORT_SYMBOL_GPL(scsi_dh_attached_handler_name); 591 - 592 - static struct notifier_block scsi_dh_nb = { 593 - .notifier_call = scsi_dh_notifier 594 - }; 595 - 596 - static int __init scsi_dh_init(void) 597 - { 598 - int r; 599 - 600 - r = bus_register_notifier(&scsi_bus_type, &scsi_dh_nb); 601 - 602 - if (!r) 603 - bus_for_each_dev(&scsi_bus_type, NULL, NULL, 604 - scsi_dh_sysfs_attr_add); 605 - 606 - return r; 607 - } 608 - 609 - static void __exit scsi_dh_exit(void) 610 - { 611 - bus_for_each_dev(&scsi_bus_type, NULL, NULL, 612 - scsi_dh_sysfs_attr_remove); 613 - bus_unregister_notifier(&scsi_bus_type, &scsi_dh_nb); 614 - } 615 - 616 - module_init(scsi_dh_init); 617 - module_exit(scsi_dh_exit); 618 - 619 - MODULE_DESCRIPTION("SCSI device handler"); 620 - MODULE_AUTHOR("Chandra Seetharaman <sekharan@us.ibm.com>"); 621 - MODULE_LICENSE("GPL");
+10 -21
drivers/scsi/device_handler/scsi_dh_alua.c
··· 62 62 #define ALUA_OPTIMIZE_STPG 1 63 63 64 64 struct alua_dh_data { 65 - struct scsi_dh_data dh_data; 66 65 int group_id; 67 66 int rel_port; 68 67 int tpgs; ··· 84 85 85 86 static char print_alua_state(int); 86 87 static int alua_check_sense(struct scsi_device *, struct scsi_sense_hdr *); 87 - 88 - static inline struct alua_dh_data *get_alua_data(struct scsi_device *sdev) 89 - { 90 - return container_of(sdev->scsi_dh_data, struct alua_dh_data, dh_data); 91 - } 92 88 93 89 static int realloc_buffer(struct alua_dh_data *h, unsigned len) 94 90 { ··· 702 708 */ 703 709 static int alua_set_params(struct scsi_device *sdev, const char *params) 704 710 { 705 - struct alua_dh_data *h = get_alua_data(sdev); 711 + struct alua_dh_data *h = sdev->handler_data; 706 712 unsigned int optimize = 0, argc; 707 713 const char *p = params; 708 714 int result = SCSI_DH_OK; ··· 740 746 static int alua_activate(struct scsi_device *sdev, 741 747 activate_complete fn, void *data) 742 748 { 743 - struct alua_dh_data *h = get_alua_data(sdev); 749 + struct alua_dh_data *h = sdev->handler_data; 744 750 int err = SCSI_DH_OK; 745 751 int stpg = 0; 746 752 ··· 798 804 */ 799 805 static int alua_prep_fn(struct scsi_device *sdev, struct request *req) 800 806 { 801 - struct alua_dh_data *h = get_alua_data(sdev); 807 + struct alua_dh_data *h = sdev->handler_data; 802 808 int ret = BLKPREP_OK; 803 809 804 810 if (h->state == TPGS_STATE_TRANSITIONING) ··· 813 819 814 820 } 815 821 816 - static bool alua_match(struct scsi_device *sdev) 817 - { 818 - return (scsi_device_tpgs(sdev) != 0); 819 - } 820 - 821 822 /* 822 823 * alua_bus_attach - Attach device handler 823 824 * @sdev: device to be attached to 824 825 */ 825 - static struct scsi_dh_data *alua_bus_attach(struct scsi_device *sdev) 826 + static int alua_bus_attach(struct scsi_device *sdev) 826 827 { 827 828 struct alua_dh_data *h; 828 829 int err; 829 830 830 831 h = kzalloc(sizeof(*h) , GFP_KERNEL); 831 832 if (!h) 832 - return 
ERR_PTR(-ENOMEM); 833 + return -ENOMEM; 833 834 h->tpgs = TPGS_MODE_UNINITIALIZED; 834 835 h->state = TPGS_STATE_OPTIMIZED; 835 836 h->group_id = -1; ··· 837 848 if (err != SCSI_DH_OK && err != SCSI_DH_DEV_OFFLINED) 838 849 goto failed; 839 850 840 - sdev_printk(KERN_NOTICE, sdev, "%s: Attached\n", ALUA_DH_NAME); 841 - return &h->dh_data; 851 + sdev->handler_data = h; 852 + return 0; 842 853 failed: 843 854 kfree(h); 844 - return ERR_PTR(-EINVAL); 855 + return -EINVAL; 845 856 } 846 857 847 858 /* ··· 850 861 */ 851 862 static void alua_bus_detach(struct scsi_device *sdev) 852 863 { 853 - struct alua_dh_data *h = get_alua_data(sdev); 864 + struct alua_dh_data *h = sdev->handler_data; 854 865 855 866 if (h->buff && h->inq != h->buff) 856 867 kfree(h->buff); 868 + sdev->handler_data = NULL; 857 869 kfree(h); 858 870 } 859 871 ··· 867 877 .check_sense = alua_check_sense, 868 878 .activate = alua_activate, 869 879 .set_params = alua_set_params, 870 - .match = alua_match, 871 880 }; 872 881 873 882 static int __init alua_init(void)
+11 -47
drivers/scsi/device_handler/scsi_dh_emc.c
··· 72 72 }; 73 73 74 74 struct clariion_dh_data { 75 - struct scsi_dh_data dh_data; 76 75 /* 77 76 * Flags: 78 77 * CLARIION_SHORT_TRESPASS ··· 112 113 */ 113 114 int current_sp; 114 115 }; 115 - 116 - static inline struct clariion_dh_data 117 - *get_clariion_data(struct scsi_device *sdev) 118 - { 119 - return container_of(sdev->scsi_dh_data, struct clariion_dh_data, 120 - dh_data); 121 - } 122 116 123 117 /* 124 118 * Parse MODE_SELECT cmd reply. ··· 442 450 443 451 static int clariion_prep_fn(struct scsi_device *sdev, struct request *req) 444 452 { 445 - struct clariion_dh_data *h = get_clariion_data(sdev); 453 + struct clariion_dh_data *h = sdev->handler_data; 446 454 int ret = BLKPREP_OK; 447 455 448 456 if (h->lun_state != CLARIION_LUN_OWNED) { ··· 525 533 static int clariion_activate(struct scsi_device *sdev, 526 534 activate_complete fn, void *data) 527 535 { 528 - struct clariion_dh_data *csdev = get_clariion_data(sdev); 536 + struct clariion_dh_data *csdev = sdev->handler_data; 529 537 int result; 530 538 531 539 result = clariion_send_inquiry(sdev, csdev); ··· 566 574 */ 567 575 static int clariion_set_params(struct scsi_device *sdev, const char *params) 568 576 { 569 - struct clariion_dh_data *csdev = get_clariion_data(sdev); 577 + struct clariion_dh_data *csdev = sdev->handler_data; 570 578 unsigned int hr = 0, st = 0, argc; 571 579 const char *p = params; 572 580 int result = SCSI_DH_OK; ··· 614 622 return result; 615 623 } 616 624 617 - static const struct { 618 - char *vendor; 619 - char *model; 620 - } clariion_dev_list[] = { 621 - {"DGC", "RAID"}, 622 - {"DGC", "DISK"}, 623 - {"DGC", "VRAID"}, 624 - {NULL, NULL}, 625 - }; 626 - 627 - static bool clariion_match(struct scsi_device *sdev) 628 - { 629 - int i; 630 - 631 - if (scsi_device_tpgs(sdev)) 632 - return false; 633 - 634 - for (i = 0; clariion_dev_list[i].vendor; i++) { 635 - if (!strncmp(sdev->vendor, clariion_dev_list[i].vendor, 636 - strlen(clariion_dev_list[i].vendor)) && 637 - 
!strncmp(sdev->model, clariion_dev_list[i].model, 638 - strlen(clariion_dev_list[i].model))) { 639 - return true; 640 - } 641 - } 642 - return false; 643 - } 644 - 645 - static struct scsi_dh_data *clariion_bus_attach(struct scsi_device *sdev) 625 + static int clariion_bus_attach(struct scsi_device *sdev) 646 626 { 647 627 struct clariion_dh_data *h; 648 628 int err; 649 629 650 630 h = kzalloc(sizeof(*h) , GFP_KERNEL); 651 631 if (!h) 652 - return ERR_PTR(-ENOMEM); 632 + return -ENOMEM; 653 633 h->lun_state = CLARIION_LUN_UNINITIALIZED; 654 634 h->default_sp = CLARIION_UNBOUND_LU; 655 635 h->current_sp = CLARIION_UNBOUND_LU; ··· 639 675 CLARIION_NAME, h->current_sp + 'A', 640 676 h->port, lun_state[h->lun_state], 641 677 h->default_sp + 'A'); 642 - return &h->dh_data; 678 + 679 + sdev->handler_data = h; 680 + return 0; 643 681 644 682 failed: 645 683 kfree(h); 646 - return ERR_PTR(-EINVAL); 684 + return -EINVAL; 647 685 } 648 686 649 687 static void clariion_bus_detach(struct scsi_device *sdev) 650 688 { 651 - struct clariion_dh_data *h = get_clariion_data(sdev); 652 - 653 - kfree(h); 689 + kfree(sdev->handler_data); 690 + sdev->handler_data = NULL; 654 691 } 655 692 656 693 static struct scsi_device_handler clariion_dh = { ··· 663 698 .activate = clariion_activate, 664 699 .prep_fn = clariion_prep_fn, 665 700 .set_params = clariion_set_params, 666 - .match = clariion_match, 667 701 }; 668 702 669 703 static int __init clariion_init(void)
+10 -45
drivers/scsi/device_handler/scsi_dh_hp_sw.c
··· 38 38 #define HP_SW_PATH_PASSIVE 1 39 39 40 40 struct hp_sw_dh_data { 41 - struct scsi_dh_data dh_data; 42 41 unsigned char sense[SCSI_SENSE_BUFFERSIZE]; 43 42 int path_state; 44 43 int retries; ··· 48 49 }; 49 50 50 51 static int hp_sw_start_stop(struct hp_sw_dh_data *); 51 - 52 - static inline struct hp_sw_dh_data *get_hp_sw_data(struct scsi_device *sdev) 53 - { 54 - return container_of(sdev->scsi_dh_data, struct hp_sw_dh_data, dh_data); 55 - } 56 52 57 53 /* 58 54 * tur_done - Handle TEST UNIT READY return status ··· 261 267 262 268 static int hp_sw_prep_fn(struct scsi_device *sdev, struct request *req) 263 269 { 264 - struct hp_sw_dh_data *h = get_hp_sw_data(sdev); 270 + struct hp_sw_dh_data *h = sdev->handler_data; 265 271 int ret = BLKPREP_OK; 266 272 267 273 if (h->path_state != HP_SW_PATH_ACTIVE) { ··· 286 292 activate_complete fn, void *data) 287 293 { 288 294 int ret = SCSI_DH_OK; 289 - struct hp_sw_dh_data *h = get_hp_sw_data(sdev); 295 + struct hp_sw_dh_data *h = sdev->handler_data; 290 296 291 297 ret = hp_sw_tur(sdev, h); 292 298 ··· 305 311 return 0; 306 312 } 307 313 308 - static const struct { 309 - char *vendor; 310 - char *model; 311 - } hp_sw_dh_data_list[] = { 312 - {"COMPAQ", "MSA1000 VOLUME"}, 313 - {"COMPAQ", "HSV110"}, 314 - {"HP", "HSV100"}, 315 - {"DEC", "HSG80"}, 316 - {NULL, NULL}, 317 - }; 318 - 319 - static bool hp_sw_match(struct scsi_device *sdev) 320 - { 321 - int i; 322 - 323 - if (scsi_device_tpgs(sdev)) 324 - return false; 325 - 326 - for (i = 0; hp_sw_dh_data_list[i].vendor; i++) { 327 - if (!strncmp(sdev->vendor, hp_sw_dh_data_list[i].vendor, 328 - strlen(hp_sw_dh_data_list[i].vendor)) && 329 - !strncmp(sdev->model, hp_sw_dh_data_list[i].model, 330 - strlen(hp_sw_dh_data_list[i].model))) { 331 - return true; 332 - } 333 - } 334 - return false; 335 - } 336 - 337 - static struct scsi_dh_data *hp_sw_bus_attach(struct scsi_device *sdev) 314 + static int hp_sw_bus_attach(struct scsi_device *sdev) 338 315 { 339 316 struct 
hp_sw_dh_data *h; 340 317 int ret; 341 318 342 319 h = kzalloc(sizeof(*h), GFP_KERNEL); 343 320 if (!h) 344 - return ERR_PTR(-ENOMEM); 321 + return -ENOMEM; 345 322 h->path_state = HP_SW_PATH_UNINITIALIZED; 346 323 h->retries = HP_SW_RETRIES; 347 324 h->sdev = sdev; ··· 324 359 sdev_printk(KERN_INFO, sdev, "%s: attached to %s path\n", 325 360 HP_SW_NAME, h->path_state == HP_SW_PATH_ACTIVE? 326 361 "active":"passive"); 327 - return &h->dh_data; 362 + 363 + sdev->handler_data = h; 364 + return 0; 328 365 failed: 329 366 kfree(h); 330 - return ERR_PTR(-EINVAL); 367 + return -EINVAL; 331 368 } 332 369 333 370 static void hp_sw_bus_detach( struct scsi_device *sdev ) 334 371 { 335 - struct hp_sw_dh_data *h = get_hp_sw_data(sdev); 336 - 337 - kfree(h); 372 + kfree(sdev->handler_data); 373 + sdev->handler_data = NULL; 338 374 } 339 375 340 376 static struct scsi_device_handler hp_sw_dh = { ··· 345 379 .detach = hp_sw_bus_detach, 346 380 .activate = hp_sw_activate, 347 381 .prep_fn = hp_sw_prep_fn, 348 - .match = hp_sw_match, 349 382 }; 350 383 351 384 static int __init hp_sw_init(void)
+13 -67
drivers/scsi/device_handler/scsi_dh_rdac.c
··· 181 181 }; 182 182 183 183 struct rdac_dh_data { 184 - struct scsi_dh_data dh_data; 185 184 struct rdac_controller *ctlr; 186 185 #define UNINITIALIZED_LUN (1 << 8) 187 186 unsigned lun; ··· 258 259 if (unlikely(RDAC_LOG_LEVEL(SHIFT))) \ 259 260 sdev_printk(KERN_INFO, sdev, RDAC_NAME ": " f "\n", ## arg); \ 260 261 } while (0); 261 - 262 - static inline struct rdac_dh_data *get_rdac_data(struct scsi_device *sdev) 263 - { 264 - return container_of(sdev->scsi_dh_data, struct rdac_dh_data, dh_data); 265 - } 266 262 267 263 static struct request *get_rdac_req(struct scsi_device *sdev, 268 264 void *buffer, unsigned buflen, int rw) ··· 538 544 { 539 545 struct scsi_sense_hdr sense_hdr; 540 546 int err = SCSI_DH_IO, ret; 541 - struct rdac_dh_data *h = get_rdac_data(sdev); 547 + struct rdac_dh_data *h = sdev->handler_data; 542 548 543 549 ret = scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, &sense_hdr); 544 550 if (!ret) ··· 583 589 container_of(work, struct rdac_controller, ms_work); 584 590 struct request *rq; 585 591 struct scsi_device *sdev = ctlr->ms_sdev; 586 - struct rdac_dh_data *h = get_rdac_data(sdev); 592 + struct rdac_dh_data *h = sdev->handler_data; 587 593 struct request_queue *q = sdev->request_queue; 588 594 int err, retry_cnt = RDAC_RETRY_COUNT; 589 595 struct rdac_queue_data *tmp, *qdata; ··· 642 648 if (!qdata) 643 649 return SCSI_DH_RETRY; 644 650 645 - qdata->h = get_rdac_data(sdev); 651 + qdata->h = sdev->handler_data; 646 652 qdata->callback_fn = fn; 647 653 qdata->callback_data = data; 648 654 ··· 661 667 static int rdac_activate(struct scsi_device *sdev, 662 668 activate_complete fn, void *data) 663 669 { 664 - struct rdac_dh_data *h = get_rdac_data(sdev); 670 + struct rdac_dh_data *h = sdev->handler_data; 665 671 int err = SCSI_DH_OK; 666 672 int act = 0; 667 673 ··· 696 702 697 703 static int rdac_prep_fn(struct scsi_device *sdev, struct request *req) 698 704 { 699 - struct rdac_dh_data *h = get_rdac_data(sdev); 705 + struct 
rdac_dh_data *h = sdev->handler_data; 700 706 int ret = BLKPREP_OK; 701 707 702 708 if (h->state != RDAC_STATE_ACTIVE) { ··· 710 716 static int rdac_check_sense(struct scsi_device *sdev, 711 717 struct scsi_sense_hdr *sense_hdr) 712 718 { 713 - struct rdac_dh_data *h = get_rdac_data(sdev); 719 + struct rdac_dh_data *h = sdev->handler_data; 714 720 715 721 RDAC_LOG(RDAC_LOG_SENSE, sdev, "array %s, ctlr %d, " 716 722 "I/O returned with sense %02x/%02x/%02x", ··· 772 778 return SCSI_RETURN_NOT_HANDLED; 773 779 } 774 780 775 - static const struct { 776 - char *vendor; 777 - char *model; 778 - } rdac_dev_list[] = { 779 - {"IBM", "1722"}, 780 - {"IBM", "1724"}, 781 - {"IBM", "1726"}, 782 - {"IBM", "1742"}, 783 - {"IBM", "1745"}, 784 - {"IBM", "1746"}, 785 - {"IBM", "1813"}, 786 - {"IBM", "1814"}, 787 - {"IBM", "1815"}, 788 - {"IBM", "1818"}, 789 - {"IBM", "3526"}, 790 - {"SGI", "TP9"}, 791 - {"SGI", "IS"}, 792 - {"STK", "OPENstorage D280"}, 793 - {"STK", "FLEXLINE 380"}, 794 - {"SUN", "CSM"}, 795 - {"SUN", "LCSM100"}, 796 - {"SUN", "STK6580_6780"}, 797 - {"SUN", "SUN_6180"}, 798 - {"SUN", "ArrayStorage"}, 799 - {"DELL", "MD3"}, 800 - {"NETAPP", "INF-01-00"}, 801 - {"LSI", "INF-01-00"}, 802 - {"ENGENIO", "INF-01-00"}, 803 - {NULL, NULL}, 804 - }; 805 - 806 - static bool rdac_match(struct scsi_device *sdev) 807 - { 808 - int i; 809 - 810 - if (scsi_device_tpgs(sdev)) 811 - return false; 812 - 813 - for (i = 0; rdac_dev_list[i].vendor; i++) { 814 - if (!strncmp(sdev->vendor, rdac_dev_list[i].vendor, 815 - strlen(rdac_dev_list[i].vendor)) && 816 - !strncmp(sdev->model, rdac_dev_list[i].model, 817 - strlen(rdac_dev_list[i].model))) { 818 - return true; 819 - } 820 - } 821 - return false; 822 - } 823 - 824 - static struct scsi_dh_data *rdac_bus_attach(struct scsi_device *sdev) 781 + static int rdac_bus_attach(struct scsi_device *sdev) 825 782 { 826 783 struct rdac_dh_data *h; 827 784 int err; ··· 781 836 782 837 h = kzalloc(sizeof(*h) , GFP_KERNEL); 783 838 if (!h) 784 - 
return ERR_PTR(-ENOMEM); 839 + return -ENOMEM; 785 840 h->lun = UNINITIALIZED_LUN; 786 841 h->state = RDAC_STATE_ACTIVE; 787 842 ··· 806 861 RDAC_NAME, h->lun, mode[(int)h->mode], 807 862 lun_state[(int)h->lun_state]); 808 863 809 - return &h->dh_data; 864 + sdev->handler_data = h; 865 + return 0; 810 866 811 867 clean_ctlr: 812 868 spin_lock(&list_lock); ··· 816 870 817 871 failed: 818 872 kfree(h); 819 - return ERR_PTR(-EINVAL); 873 + return -EINVAL; 820 874 } 821 875 822 876 static void rdac_bus_detach( struct scsi_device *sdev ) 823 877 { 824 - struct rdac_dh_data *h = get_rdac_data(sdev); 878 + struct rdac_dh_data *h = sdev->handler_data; 825 879 826 880 if (h->ctlr && h->ctlr->ms_queued) 827 881 flush_workqueue(kmpath_rdacd); ··· 830 884 if (h->ctlr) 831 885 kref_put(&h->ctlr->kref, release_controller); 832 886 spin_unlock(&list_lock); 887 + sdev->handler_data = NULL; 833 888 kfree(h); 834 889 } 835 890 ··· 842 895 .attach = rdac_bus_attach, 843 896 .detach = rdac_bus_detach, 844 897 .activate = rdac_activate, 845 - .match = rdac_match, 846 898 }; 847 899 848 900 static int __init rdac_init(void)
+1 -1
drivers/scsi/fcoe/fcoe.c
··· 364 364 * on the ethertype for the given device 365 365 */ 366 366 fcoe->fcoe_packet_type.func = fcoe_rcv; 367 - fcoe->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE); 367 + fcoe->fcoe_packet_type.type = htons(ETH_P_FCOE); 368 368 fcoe->fcoe_packet_type.dev = netdev; 369 369 dev_add_pack(&fcoe->fcoe_packet_type); 370 370
+6 -2
drivers/scsi/ipr.c
··· 4555 4555 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4556 4556 res = (struct ipr_resource_entry *)sdev->hostdata; 4557 4557 if (res) { 4558 - if (ioa_cfg->sis64 && ipr_is_af_dasd_device(res)) { 4558 + if (ipr_is_af_dasd_device(res)) { 4559 4559 res->raw_mode = simple_strtoul(buf, NULL, 10); 4560 4560 len = strlen(buf); 4561 4561 if (res->sdev) ··· 6383 6383 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) { 6384 6384 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 6385 6385 } 6386 - if (res->raw_mode && ipr_is_af_dasd_device(res)) 6386 + if (res->raw_mode && ipr_is_af_dasd_device(res)) { 6387 6387 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE; 6388 + 6389 + if (scsi_cmd->underflow == 0) 6390 + ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; 6391 + } 6388 6392 6389 6393 if (ioa_cfg->sis64) 6390 6394 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
+4 -3
drivers/scsi/lpfc/lpfc_mbox.c
··· 2284 2284 (struct lpfc_rdp_context *)(mbox->context2); 2285 2285 2286 2286 if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) 2287 - goto error; 2287 + goto error_mbuf_free; 2288 2288 2289 2289 lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a2, 2290 2290 DMP_SFF_PAGE_A2_SIZE); ··· 2299 2299 mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_link_stat; 2300 2300 mbox->context2 = (struct lpfc_rdp_context *) rdp_context; 2301 2301 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED) 2302 - goto error; 2302 + goto error_cmd_free; 2303 2303 2304 2304 return; 2305 2305 2306 - error: 2306 + error_mbuf_free: 2307 2307 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2308 2308 kfree(mp); 2309 + error_cmd_free: 2309 2310 lpfc_sli4_mbox_cmd_free(phba, mbox); 2310 2311 rdp_context->cmpl(phba, rdp_context, FAILURE); 2311 2312 }
+6
drivers/scsi/mpt2sas/mpt2sas_base.c
··· 112 112 if (ret) 113 113 return ret; 114 114 115 + /* global ioc spinlock to protect controller list on list operations */ 115 116 printk(KERN_INFO "setting fwfault_debug(%d)\n", mpt2sas_fwfault_debug); 117 + spin_lock(&gioc_lock); 116 118 list_for_each_entry(ioc, &mpt2sas_ioc_list, list) 117 119 ioc->fwfault_debug = mpt2sas_fwfault_debug; 120 + spin_unlock(&gioc_lock); 118 121 return 0; 119 122 } 120 123 ··· 4440 4437 dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, 4441 4438 __func__)); 4442 4439 4440 + /* synchronizing freeing resource with pci_access_mutex lock */ 4441 + mutex_lock(&ioc->pci_access_mutex); 4443 4442 if (ioc->chip_phys && ioc->chip) { 4444 4443 _base_mask_interrupts(ioc); 4445 4444 ioc->shost_recovery = 1; ··· 4461 4456 pci_disable_pcie_error_reporting(pdev); 4462 4457 pci_disable_device(pdev); 4463 4458 } 4459 + mutex_unlock(&ioc->pci_access_mutex); 4464 4460 return; 4465 4461 } 4466 4462
+39 -2
drivers/scsi/mpt2sas/mpt2sas_base.h
··· 238 238 * @flags: MPT_TARGET_FLAGS_XXX flags 239 239 * @deleted: target flaged for deletion 240 240 * @tm_busy: target is busy with TM request. 241 + * @sdev: The sas_device associated with this target 241 242 */ 242 243 struct MPT2SAS_TARGET { 243 244 struct scsi_target *starget; ··· 249 248 u32 flags; 250 249 u8 deleted; 251 250 u8 tm_busy; 251 + struct _sas_device *sdev; 252 252 }; 253 253 254 254 ··· 378 376 u8 phy; 379 377 u8 responding; 380 378 u8 pfa_led_on; 379 + struct kref refcount; 381 380 }; 381 + 382 + static inline void sas_device_get(struct _sas_device *s) 383 + { 384 + kref_get(&s->refcount); 385 + } 386 + 387 + static inline void sas_device_free(struct kref *r) 388 + { 389 + kfree(container_of(r, struct _sas_device, refcount)); 390 + } 391 + 392 + static inline void sas_device_put(struct _sas_device *s) 393 + { 394 + kref_put(&s->refcount, sas_device_free); 395 + } 382 396 383 397 /** 384 398 * struct _raid_device - raid volume link list ··· 817 799 * @delayed_tr_list: target reset link list 818 800 * @delayed_tr_volume_list: volume target reset link list 819 801 * @@temp_sensors_count: flag to carry the number of temperature sensors 802 + * @pci_access_mutex: Mutex to synchronize ioctl,sysfs show path and 803 + * pci resource handling. PCI resource freeing will lead to free 804 + * vital hardware/memory resource, which might be in use by cli/sysfs 805 + * path functions resulting in Null pointer reference followed by kernel 806 + * crash. 
To avoid the above race condition we use mutex syncrhonization 807 + * which ensures the syncrhonization between cli/sysfs_show path 820 808 */ 821 809 struct MPT2SAS_ADAPTER { 822 810 struct list_head list; ··· 1039 1015 u8 mfg_pg10_hide_flag; 1040 1016 u8 hide_drives; 1041 1017 1018 + struct mutex pci_access_mutex; 1042 1019 }; 1043 1020 1044 1021 typedef u8 (*MPT_CALLBACK)(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, ··· 1048 1023 1049 1024 /* base shared API */ 1050 1025 extern struct list_head mpt2sas_ioc_list; 1026 + /* spinlock on list operations over IOCs 1027 + * Case: when multiple warpdrive cards(IOCs) are in use 1028 + * Each IOC will added to the ioc list stucture on initialization. 1029 + * Watchdog threads run at regular intervals to check IOC for any 1030 + * fault conditions which will trigger the dead_ioc thread to 1031 + * deallocate pci resource, resulting deleting the IOC netry from list, 1032 + * this deletion need to protected by spinlock to enusre that 1033 + * ioc removal is syncrhonized, if not synchronized it might lead to 1034 + * list_del corruption as the ioc list is traversed in cli path 1035 + */ 1036 + extern spinlock_t gioc_lock; 1051 1037 void mpt2sas_base_start_watchdog(struct MPT2SAS_ADAPTER *ioc); 1052 1038 void mpt2sas_base_stop_watchdog(struct MPT2SAS_ADAPTER *ioc); 1053 1039 ··· 1131 1095 u16 handle); 1132 1096 struct _sas_node *mpt2sas_scsih_expander_find_by_sas_address(struct MPT2SAS_ADAPTER 1133 1097 *ioc, u64 sas_address); 1134 - struct _sas_device *mpt2sas_scsih_sas_device_find_by_sas_address( 1098 + struct _sas_device *mpt2sas_get_sdev_by_addr( 1099 + struct MPT2SAS_ADAPTER *ioc, u64 sas_address); 1100 + struct _sas_device *__mpt2sas_get_sdev_by_addr( 1135 1101 struct MPT2SAS_ADAPTER *ioc, u64 sas_address); 1136 1102 1137 1103 void mpt2sas_port_enable_complete(struct MPT2SAS_ADAPTER *ioc); 1138 - 1139 1104 void mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase); 1140 1105 1141 1106 /* 
config shared API */
+31 -7
drivers/scsi/mpt2sas/mpt2sas_ctl.c
··· 427 427 _ctl_verify_adapter(int ioc_number, struct MPT2SAS_ADAPTER **iocpp) 428 428 { 429 429 struct MPT2SAS_ADAPTER *ioc; 430 - 430 + /* global ioc lock to protect controller on list operations */ 431 + spin_lock(&gioc_lock); 431 432 list_for_each_entry(ioc, &mpt2sas_ioc_list, list) { 432 433 if (ioc->id != ioc_number) 433 434 continue; 435 + spin_unlock(&gioc_lock); 434 436 *iocpp = ioc; 435 437 return ioc_number; 436 438 } 439 + spin_unlock(&gioc_lock); 437 440 *iocpp = NULL; 438 441 return -1; 439 442 } ··· 525 522 526 523 poll_wait(filep, &ctl_poll_wait, wait); 527 524 525 + /* global ioc lock to protect controller on list operations */ 526 + spin_lock(&gioc_lock); 528 527 list_for_each_entry(ioc, &mpt2sas_ioc_list, list) { 529 - if (ioc->aen_event_read_flag) 528 + if (ioc->aen_event_read_flag) { 529 + spin_unlock(&gioc_lock); 530 530 return POLLIN | POLLRDNORM; 531 + } 531 532 } 533 + spin_unlock(&gioc_lock); 532 534 return 0; 533 535 } 534 536 ··· 2176 2168 2177 2169 if (_ctl_verify_adapter(ioctl_header.ioc_number, &ioc) == -1 || !ioc) 2178 2170 return -ENODEV; 2171 + /* pci_access_mutex lock acquired by ioctl path */ 2172 + mutex_lock(&ioc->pci_access_mutex); 2179 2173 if (ioc->shost_recovery || ioc->pci_error_recovery || 2180 - ioc->is_driver_loading) 2181 - return -EAGAIN; 2174 + ioc->is_driver_loading || ioc->remove_host) { 2175 + ret = -EAGAIN; 2176 + goto out_unlock_pciaccess; 2177 + } 2182 2178 2183 2179 state = (file->f_flags & O_NONBLOCK) ? 
NON_BLOCKING : BLOCKING; 2184 2180 if (state == NON_BLOCKING) { 2185 - if (!mutex_trylock(&ioc->ctl_cmds.mutex)) 2186 - return -EAGAIN; 2181 + if (!mutex_trylock(&ioc->ctl_cmds.mutex)) { 2182 + ret = -EAGAIN; 2183 + goto out_unlock_pciaccess; 2184 + } 2187 2185 } else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) { 2188 - return -ERESTARTSYS; 2186 + ret = -ERESTARTSYS; 2187 + goto out_unlock_pciaccess; 2189 2188 } 2190 2189 2191 2190 switch (cmd) { ··· 2273 2258 } 2274 2259 2275 2260 mutex_unlock(&ioc->ctl_cmds.mutex); 2261 + out_unlock_pciaccess: 2262 + mutex_unlock(&ioc->pci_access_mutex); 2276 2263 return ret; 2277 2264 } 2278 2265 ··· 2728 2711 "warpdrive\n", ioc->name, __func__); 2729 2712 goto out; 2730 2713 } 2714 + /* pci_access_mutex lock acquired by sysfs show path */ 2715 + mutex_lock(&ioc->pci_access_mutex); 2716 + if (ioc->pci_error_recovery || ioc->remove_host) { 2717 + mutex_unlock(&ioc->pci_access_mutex); 2718 + return 0; 2719 + } 2731 2720 2732 2721 /* allocate upto GPIOVal 36 entries */ 2733 2722 sz = offsetof(Mpi2IOUnitPage3_t, GPIOVal) + (sizeof(u16) * 36); ··· 2772 2749 2773 2750 out: 2774 2751 kfree(io_unit_pg3); 2752 + mutex_unlock(&ioc->pci_access_mutex); 2775 2753 return rc; 2776 2754 } 2777 2755 static DEVICE_ATTR(BRM_status, S_IRUGO, _ctl_BRM_status_show, NULL);
+434 -171
drivers/scsi/mpt2sas/mpt2sas_scsih.c
··· 79 79 80 80 /* global parameters */ 81 81 LIST_HEAD(mpt2sas_ioc_list); 82 - 82 + /* global ioc lock for list operations */ 83 + DEFINE_SPINLOCK(gioc_lock); 83 84 /* local parameters */ 84 85 static u8 scsi_io_cb_idx = -1; 85 86 static u8 tm_cb_idx = -1; ··· 177 176 u8 VP_ID; 178 177 u8 ignore; 179 178 u16 event; 179 + struct kref refcount; 180 180 char event_data[0] __aligned(4); 181 181 }; 182 + 183 + static void fw_event_work_free(struct kref *r) 184 + { 185 + kfree(container_of(r, struct fw_event_work, refcount)); 186 + } 187 + 188 + static void fw_event_work_get(struct fw_event_work *fw_work) 189 + { 190 + kref_get(&fw_work->refcount); 191 + } 192 + 193 + static void fw_event_work_put(struct fw_event_work *fw_work) 194 + { 195 + kref_put(&fw_work->refcount, fw_event_work_free); 196 + } 197 + 198 + static struct fw_event_work *alloc_fw_event_work(int len) 199 + { 200 + struct fw_event_work *fw_event; 201 + 202 + fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC); 203 + if (!fw_event) 204 + return NULL; 205 + 206 + kref_init(&fw_event->refcount); 207 + return fw_event; 208 + } 182 209 183 210 /* raid transport support */ 184 211 static struct raid_template *mpt2sas_raid_template; ··· 322 293 return ret; 323 294 324 295 printk(KERN_INFO "setting logging_level(0x%08x)\n", logging_level); 296 + spin_lock(&gioc_lock); 325 297 list_for_each_entry(ioc, &mpt2sas_ioc_list, list) 326 298 ioc->logging_level = logging_level; 299 + spin_unlock(&gioc_lock); 327 300 return 0; 328 301 } 329 302 module_param_call(logging_level, _scsih_set_debug_level, param_get_int, ··· 557 526 } 558 527 } 559 528 529 + static struct _sas_device * 530 + __mpt2sas_get_sdev_from_target(struct MPT2SAS_ADAPTER *ioc, 531 + struct MPT2SAS_TARGET *tgt_priv) 532 + { 533 + struct _sas_device *ret; 534 + 535 + assert_spin_locked(&ioc->sas_device_lock); 536 + 537 + ret = tgt_priv->sdev; 538 + if (ret) 539 + sas_device_get(ret); 540 + 541 + return ret; 542 + } 543 + 544 + static struct _sas_device 
* 545 + mpt2sas_get_sdev_from_target(struct MPT2SAS_ADAPTER *ioc, 546 + struct MPT2SAS_TARGET *tgt_priv) 547 + { 548 + struct _sas_device *ret; 549 + unsigned long flags; 550 + 551 + spin_lock_irqsave(&ioc->sas_device_lock, flags); 552 + ret = __mpt2sas_get_sdev_from_target(ioc, tgt_priv); 553 + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 554 + 555 + return ret; 556 + } 557 + 558 + 559 + struct _sas_device * 560 + __mpt2sas_get_sdev_by_addr(struct MPT2SAS_ADAPTER *ioc, 561 + u64 sas_address) 562 + { 563 + struct _sas_device *sas_device; 564 + 565 + assert_spin_locked(&ioc->sas_device_lock); 566 + 567 + list_for_each_entry(sas_device, &ioc->sas_device_list, list) 568 + if (sas_device->sas_address == sas_address) 569 + goto found_device; 570 + 571 + list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) 572 + if (sas_device->sas_address == sas_address) 573 + goto found_device; 574 + 575 + return NULL; 576 + 577 + found_device: 578 + sas_device_get(sas_device); 579 + return sas_device; 580 + } 581 + 560 582 /** 561 - * mpt2sas_scsih_sas_device_find_by_sas_address - sas device search 583 + * mpt2sas_get_sdev_by_addr - sas device search 562 584 * @ioc: per adapter object 563 585 * @sas_address: sas address 564 586 * Context: Calling function should acquire ioc->sas_device_lock ··· 620 536 * object. 
621 537 */ 622 538 struct _sas_device * 623 - mpt2sas_scsih_sas_device_find_by_sas_address(struct MPT2SAS_ADAPTER *ioc, 539 + mpt2sas_get_sdev_by_addr(struct MPT2SAS_ADAPTER *ioc, 624 540 u64 sas_address) 625 541 { 626 542 struct _sas_device *sas_device; 543 + unsigned long flags; 544 + 545 + spin_lock_irqsave(&ioc->sas_device_lock, flags); 546 + sas_device = __mpt2sas_get_sdev_by_addr(ioc, 547 + sas_address); 548 + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 549 + 550 + return sas_device; 551 + } 552 + 553 + static struct _sas_device * 554 + __mpt2sas_get_sdev_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle) 555 + { 556 + struct _sas_device *sas_device; 557 + 558 + assert_spin_locked(&ioc->sas_device_lock); 627 559 628 560 list_for_each_entry(sas_device, &ioc->sas_device_list, list) 629 - if (sas_device->sas_address == sas_address) 630 - return sas_device; 561 + if (sas_device->handle == handle) 562 + goto found_device; 631 563 632 564 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) 633 - if (sas_device->sas_address == sas_address) 634 - return sas_device; 565 + if (sas_device->handle == handle) 566 + goto found_device; 635 567 636 568 return NULL; 569 + 570 + found_device: 571 + sas_device_get(sas_device); 572 + return sas_device; 637 573 } 638 574 639 575 /** 640 - * _scsih_sas_device_find_by_handle - sas device search 576 + * mpt2sas_get_sdev_by_handle - sas device search 641 577 * @ioc: per adapter object 642 578 * @handle: sas device handle (assigned by firmware) 643 579 * Context: Calling function should acquire ioc->sas_device_lock ··· 666 562 * object. 
667 563 */ 668 564 static struct _sas_device * 669 - _scsih_sas_device_find_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle) 565 + mpt2sas_get_sdev_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle) 670 566 { 671 567 struct _sas_device *sas_device; 568 + unsigned long flags; 672 569 673 - list_for_each_entry(sas_device, &ioc->sas_device_list, list) 674 - if (sas_device->handle == handle) 675 - return sas_device; 570 + spin_lock_irqsave(&ioc->sas_device_lock, flags); 571 + sas_device = __mpt2sas_get_sdev_by_handle(ioc, handle); 572 + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 676 573 677 - list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) 678 - if (sas_device->handle == handle) 679 - return sas_device; 680 - 681 - return NULL; 574 + return sas_device; 682 575 } 683 576 684 577 /** ··· 684 583 * @sas_device: the sas_device object 685 584 * Context: This function will acquire ioc->sas_device_lock. 686 585 * 687 - * Removing object and freeing associated memory from the ioc->sas_device_list. 586 + * If sas_device is on the list, remove it and decrement its reference count. 688 587 */ 689 588 static void 690 589 _scsih_sas_device_remove(struct MPT2SAS_ADAPTER *ioc, ··· 695 594 if (!sas_device) 696 595 return; 697 596 597 + /* 598 + * The lock serializes access to the list, but we still need to verify 599 + * that nobody removed the entry while we were waiting on the lock. 
600 + */ 698 601 spin_lock_irqsave(&ioc->sas_device_lock, flags); 699 - list_del(&sas_device->list); 700 - kfree(sas_device); 602 + if (!list_empty(&sas_device->list)) { 603 + list_del_init(&sas_device->list); 604 + sas_device_put(sas_device); 605 + } 701 606 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 702 607 } 703 608 ··· 727 620 sas_device->handle, (unsigned long long)sas_device->sas_address)); 728 621 729 622 spin_lock_irqsave(&ioc->sas_device_lock, flags); 623 + sas_device_get(sas_device); 730 624 list_add_tail(&sas_device->list, &ioc->sas_device_list); 731 625 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 732 626 ··· 767 659 sas_device->handle, (unsigned long long)sas_device->sas_address)); 768 660 769 661 spin_lock_irqsave(&ioc->sas_device_lock, flags); 662 + sas_device_get(sas_device); 770 663 list_add_tail(&sas_device->list, &ioc->sas_device_init_list); 771 664 _scsih_determine_boot_device(ioc, sas_device, 0); 772 665 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); ··· 1317 1208 goto not_sata; 1318 1209 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) 1319 1210 goto not_sata; 1211 + 1320 1212 spin_lock_irqsave(&ioc->sas_device_lock, flags); 1321 - sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 1322 - sas_device_priv_data->sas_target->sas_address); 1323 - if (sas_device && sas_device->device_info & 1324 - MPI2_SAS_DEVICE_INFO_SATA_DEVICE) 1325 - max_depth = MPT2SAS_SATA_QUEUE_DEPTH; 1213 + sas_device = __mpt2sas_get_sdev_from_target(ioc, sas_target_priv_data); 1214 + if (sas_device) { 1215 + if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) 1216 + max_depth = MPT2SAS_SATA_QUEUE_DEPTH; 1217 + 1218 + sas_device_put(sas_device); 1219 + } 1326 1220 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 1327 1221 1328 1222 not_sata: ··· 1383 1271 /* sas/sata devices */ 1384 1272 spin_lock_irqsave(&ioc->sas_device_lock, flags); 1385 1273 rphy = dev_to_rphy(starget->dev.parent); 1386 - sas_device = 
mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 1274 + sas_device = __mpt2sas_get_sdev_by_addr(ioc, 1387 1275 rphy->identify.sas_address); 1388 1276 1389 1277 if (sas_device) { 1390 1278 sas_target_priv_data->handle = sas_device->handle; 1391 1279 sas_target_priv_data->sas_address = sas_device->sas_address; 1280 + sas_target_priv_data->sdev = sas_device; 1392 1281 sas_device->starget = starget; 1393 1282 sas_device->id = starget->id; 1394 1283 sas_device->channel = starget->channel; 1395 1284 if (test_bit(sas_device->handle, ioc->pd_handles)) 1396 1285 sas_target_priv_data->flags |= 1397 1286 MPT_TARGET_FLAGS_RAID_COMPONENT; 1287 + 1398 1288 } 1399 1289 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 1400 1290 ··· 1438 1324 1439 1325 spin_lock_irqsave(&ioc->sas_device_lock, flags); 1440 1326 rphy = dev_to_rphy(starget->dev.parent); 1441 - sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 1442 - rphy->identify.sas_address); 1327 + sas_device = __mpt2sas_get_sdev_from_target(ioc, sas_target_priv_data); 1443 1328 if (sas_device && (sas_device->starget == starget) && 1444 1329 (sas_device->id == starget->id) && 1445 1330 (sas_device->channel == starget->channel)) 1446 1331 sas_device->starget = NULL; 1447 1332 1333 + if (sas_device) { 1334 + /* 1335 + * Corresponding get() is in _scsih_target_alloc() 1336 + */ 1337 + sas_target_priv_data->sdev = NULL; 1338 + sas_device_put(sas_device); 1339 + 1340 + sas_device_put(sas_device); 1341 + } 1448 1342 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 1449 1343 1450 1344 out: ··· 1508 1386 1509 1387 if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) { 1510 1388 spin_lock_irqsave(&ioc->sas_device_lock, flags); 1511 - sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 1389 + sas_device = __mpt2sas_get_sdev_by_addr(ioc, 1512 1390 sas_target_priv_data->sas_address); 1513 1391 if (sas_device && (sas_device->starget == NULL)) { 1514 1392 sdev_printk(KERN_INFO, sdev, ··· 1516 1394 
__func__, __LINE__); 1517 1395 sas_device->starget = starget; 1518 1396 } 1397 + 1398 + if (sas_device) 1399 + sas_device_put(sas_device); 1400 + 1519 1401 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 1520 1402 } 1521 1403 ··· 1554 1428 1555 1429 if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) { 1556 1430 spin_lock_irqsave(&ioc->sas_device_lock, flags); 1557 - sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 1558 - sas_target_priv_data->sas_address); 1431 + sas_device = __mpt2sas_get_sdev_from_target(ioc, 1432 + sas_target_priv_data); 1559 1433 if (sas_device && !sas_target_priv_data->num_luns) 1560 1434 sas_device->starget = NULL; 1435 + 1436 + if (sas_device) 1437 + sas_device_put(sas_device); 1561 1438 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 1562 1439 } 1563 1440 ··· 2207 2078 } 2208 2079 2209 2080 spin_lock_irqsave(&ioc->sas_device_lock, flags); 2210 - sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 2081 + sas_device = __mpt2sas_get_sdev_by_addr(ioc, 2211 2082 sas_device_priv_data->sas_target->sas_address); 2212 2083 if (!sas_device) { 2213 2084 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); ··· 2241 2112 (unsigned long long) sas_device->enclosure_logical_id, 2242 2113 sas_device->slot); 2243 2114 2115 + sas_device_put(sas_device); 2244 2116 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 2245 2117 if (!ssp_target) 2246 2118 _scsih_display_sata_capabilities(ioc, handle, sdev); 2247 - 2248 2119 2249 2120 _scsih_change_queue_depth(sdev, qdepth); 2250 2121 ··· 2252 2123 sas_read_port_mode_page(sdev); 2253 2124 _scsih_enable_tlr(ioc, sdev); 2254 2125 } 2126 + 2255 2127 return 0; 2256 2128 } 2257 2129 ··· 2639 2509 device_str, (unsigned long long)priv_target->sas_address); 2640 2510 } else { 2641 2511 spin_lock_irqsave(&ioc->sas_device_lock, flags); 2642 - sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 2643 - priv_target->sas_address); 2512 + sas_device = 
__mpt2sas_get_sdev_from_target(ioc, priv_target); 2644 2513 if (sas_device) { 2645 2514 if (priv_target->flags & 2646 2515 MPT_TARGET_FLAGS_RAID_COMPONENT) { ··· 2658 2529 "enclosure_logical_id(0x%016llx), slot(%d)\n", 2659 2530 (unsigned long long)sas_device->enclosure_logical_id, 2660 2531 sas_device->slot); 2532 + 2533 + sas_device_put(sas_device); 2661 2534 } 2662 2535 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 2663 2536 } ··· 2735 2604 { 2736 2605 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); 2737 2606 struct MPT2SAS_DEVICE *sas_device_priv_data; 2738 - struct _sas_device *sas_device; 2739 - unsigned long flags; 2607 + struct _sas_device *sas_device = NULL; 2740 2608 u16 handle; 2741 2609 int r; 2742 2610 2743 2611 struct scsi_target *starget = scmd->device->sdev_target; 2612 + struct MPT2SAS_TARGET *target_priv_data = starget->hostdata; 2744 2613 2745 2614 starget_printk(KERN_INFO, starget, "attempting device reset! " 2746 2615 "scmd(%p)\n", scmd); ··· 2760 2629 handle = 0; 2761 2630 if (sas_device_priv_data->sas_target->flags & 2762 2631 MPT_TARGET_FLAGS_RAID_COMPONENT) { 2763 - spin_lock_irqsave(&ioc->sas_device_lock, flags); 2764 - sas_device = _scsih_sas_device_find_by_handle(ioc, 2765 - sas_device_priv_data->sas_target->handle); 2632 + sas_device = mpt2sas_get_sdev_from_target(ioc, 2633 + target_priv_data); 2766 2634 if (sas_device) 2767 2635 handle = sas_device->volume_handle; 2768 - spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 2769 2636 } else 2770 2637 handle = sas_device_priv_data->sas_target->handle; 2771 2638 ··· 2780 2651 out: 2781 2652 sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n", 2782 2653 ((r == SUCCESS) ? 
"SUCCESS" : "FAILED"), scmd); 2654 + 2655 + if (sas_device) 2656 + sas_device_put(sas_device); 2657 + 2783 2658 return r; 2784 2659 } 2785 2660 ··· 2798 2665 { 2799 2666 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); 2800 2667 struct MPT2SAS_DEVICE *sas_device_priv_data; 2801 - struct _sas_device *sas_device; 2802 - unsigned long flags; 2668 + struct _sas_device *sas_device = NULL; 2803 2669 u16 handle; 2804 2670 int r; 2805 2671 struct scsi_target *starget = scmd->device->sdev_target; 2672 + struct MPT2SAS_TARGET *target_priv_data = starget->hostdata; 2806 2673 2807 2674 starget_printk(KERN_INFO, starget, "attempting target reset! " 2808 2675 "scmd(%p)\n", scmd); ··· 2822 2689 handle = 0; 2823 2690 if (sas_device_priv_data->sas_target->flags & 2824 2691 MPT_TARGET_FLAGS_RAID_COMPONENT) { 2825 - spin_lock_irqsave(&ioc->sas_device_lock, flags); 2826 - sas_device = _scsih_sas_device_find_by_handle(ioc, 2827 - sas_device_priv_data->sas_target->handle); 2692 + sas_device = mpt2sas_get_sdev_from_target(ioc, 2693 + target_priv_data); 2828 2694 if (sas_device) 2829 2695 handle = sas_device->volume_handle; 2830 - spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 2831 2696 } else 2832 2697 handle = sas_device_priv_data->sas_target->handle; 2833 2698 ··· 2842 2711 out: 2843 2712 starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n", 2844 2713 ((r == SUCCESS) ? 
"SUCCESS" : "FAILED"), scmd); 2714 + 2715 + if (sas_device) 2716 + sas_device_put(sas_device); 2717 + 2845 2718 return r; 2846 2719 } 2847 2720 ··· 2903 2768 return; 2904 2769 2905 2770 spin_lock_irqsave(&ioc->fw_event_lock, flags); 2771 + fw_event_work_get(fw_event); 2906 2772 list_add_tail(&fw_event->list, &ioc->fw_event_list); 2907 2773 INIT_DELAYED_WORK(&fw_event->delayed_work, _firmware_event_work); 2774 + fw_event_work_get(fw_event); 2908 2775 queue_delayed_work(ioc->firmware_event_thread, 2909 2776 &fw_event->delayed_work, 0); 2910 2777 spin_unlock_irqrestore(&ioc->fw_event_lock, flags); 2911 2778 } 2912 2779 2913 2780 /** 2914 - * _scsih_fw_event_free - delete fw_event 2781 + * _scsih_fw_event_del_from_list - delete fw_event from the list 2915 2782 * @ioc: per adapter object 2916 2783 * @fw_event: object describing the event 2917 2784 * Context: This function will acquire ioc->fw_event_lock. 2918 2785 * 2919 - * This removes firmware event object from link list, frees associated memory. 2786 + * If the fw_event is on the fw_event_list, remove it and do a put. 2920 2787 * 2921 2788 * Return nothing. 
2922 2789 */ 2923 2790 static void 2924 - _scsih_fw_event_free(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work 2791 + _scsih_fw_event_del_from_list(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work 2925 2792 *fw_event) 2926 2793 { 2927 2794 unsigned long flags; 2928 2795 2929 2796 spin_lock_irqsave(&ioc->fw_event_lock, flags); 2930 - list_del(&fw_event->list); 2931 - kfree(fw_event); 2797 + if (!list_empty(&fw_event->list)) { 2798 + list_del_init(&fw_event->list); 2799 + fw_event_work_put(fw_event); 2800 + } 2932 2801 spin_unlock_irqrestore(&ioc->fw_event_lock, flags); 2933 2802 } 2934 - 2935 2803 2936 2804 /** 2937 2805 * _scsih_error_recovery_delete_devices - remove devices not responding ··· 2950 2812 if (ioc->is_driver_loading) 2951 2813 return; 2952 2814 2953 - fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC); 2815 + fw_event = alloc_fw_event_work(0); 2954 2816 if (!fw_event) 2955 2817 return; 2956 2818 2957 2819 fw_event->event = MPT2SAS_REMOVE_UNRESPONDING_DEVICES; 2958 2820 fw_event->ioc = ioc; 2959 2821 _scsih_fw_event_add(ioc, fw_event); 2822 + fw_event_work_put(fw_event); 2960 2823 } 2961 2824 2962 2825 /** ··· 2971 2832 { 2972 2833 struct fw_event_work *fw_event; 2973 2834 2974 - fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC); 2835 + fw_event = alloc_fw_event_work(0); 2975 2836 if (!fw_event) 2976 2837 return; 2977 2838 fw_event->event = MPT2SAS_PORT_ENABLE_COMPLETE; 2978 2839 fw_event->ioc = ioc; 2979 2840 _scsih_fw_event_add(ioc, fw_event); 2841 + fw_event_work_put(fw_event); 2842 + } 2843 + 2844 + static struct fw_event_work *dequeue_next_fw_event(struct MPT2SAS_ADAPTER *ioc) 2845 + { 2846 + unsigned long flags; 2847 + struct fw_event_work *fw_event = NULL; 2848 + 2849 + spin_lock_irqsave(&ioc->fw_event_lock, flags); 2850 + if (!list_empty(&ioc->fw_event_list)) { 2851 + fw_event = list_first_entry(&ioc->fw_event_list, 2852 + struct fw_event_work, list); 2853 + list_del_init(&fw_event->list); 2854 + } 2855 + 
spin_unlock_irqrestore(&ioc->fw_event_lock, flags); 2856 + 2857 + return fw_event; 2980 2858 } 2981 2859 2982 2860 /** ··· 3008 2852 static void 3009 2853 _scsih_fw_event_cleanup_queue(struct MPT2SAS_ADAPTER *ioc) 3010 2854 { 3011 - struct fw_event_work *fw_event, *next; 2855 + struct fw_event_work *fw_event; 3012 2856 3013 2857 if (list_empty(&ioc->fw_event_list) || 3014 2858 !ioc->firmware_event_thread || in_interrupt()) 3015 2859 return; 3016 2860 3017 - list_for_each_entry_safe(fw_event, next, &ioc->fw_event_list, list) { 3018 - if (cancel_delayed_work_sync(&fw_event->delayed_work)) { 3019 - _scsih_fw_event_free(ioc, fw_event); 3020 - continue; 3021 - } 2861 + while ((fw_event = dequeue_next_fw_event(ioc))) { 2862 + /* 2863 + * Wait on the fw_event to complete. If this returns 1, then 2864 + * the event was never executed, and we need a put for the 2865 + * reference the delayed_work had on the fw_event. 2866 + * 2867 + * If it did execute, we wait for it to finish, and the put will 2868 + * happen from _firmware_event_work() 2869 + */ 2870 + if (cancel_delayed_work_sync(&fw_event->delayed_work)) 2871 + fw_event_work_put(fw_event); 2872 + 2873 + fw_event_work_put(fw_event); 3022 2874 } 3023 2875 } 3024 2876 ··· 3166 3002 3167 3003 list_for_each_entry(mpt2sas_port, 3168 3004 &sas_expander->sas_port_list, port_list) { 3169 - if (mpt2sas_port->remote_identify.device_type == 3170 - SAS_END_DEVICE) { 3005 + if (mpt2sas_port->remote_identify.device_type == SAS_END_DEVICE) { 3171 3006 spin_lock_irqsave(&ioc->sas_device_lock, flags); 3172 - sas_device = 3173 - mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 3174 - mpt2sas_port->remote_identify.sas_address); 3175 - if (sas_device) 3007 + sas_device = __mpt2sas_get_sdev_by_addr(ioc, 3008 + mpt2sas_port->remote_identify.sas_address); 3009 + if (sas_device) { 3176 3010 set_bit(sas_device->handle, 3177 - ioc->blocking_handles); 3011 + ioc->blocking_handles); 3012 + sas_device_put(sas_device); 3013 + } 3178 3014 
spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 3179 3015 } 3180 3016 } ··· 3244 3080 { 3245 3081 Mpi2SCSITaskManagementRequest_t *mpi_request; 3246 3082 u16 smid; 3247 - struct _sas_device *sas_device; 3083 + struct _sas_device *sas_device = NULL; 3248 3084 struct MPT2SAS_TARGET *sas_target_priv_data = NULL; 3249 3085 u64 sas_address = 0; 3250 3086 unsigned long flags; ··· 3274 3110 return; 3275 3111 3276 3112 spin_lock_irqsave(&ioc->sas_device_lock, flags); 3277 - sas_device = _scsih_sas_device_find_by_handle(ioc, handle); 3113 + sas_device = __mpt2sas_get_sdev_by_handle(ioc, handle); 3278 3114 if (sas_device && sas_device->starget && 3279 3115 sas_device->starget->hostdata) { 3280 3116 sas_target_priv_data = sas_device->starget->hostdata; ··· 3295 3131 if (!smid) { 3296 3132 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC); 3297 3133 if (!delayed_tr) 3298 - return; 3134 + goto out; 3299 3135 INIT_LIST_HEAD(&delayed_tr->list); 3300 3136 delayed_tr->handle = handle; 3301 3137 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list); 3302 3138 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT 3303 3139 "DELAYED:tr:handle(0x%04x), (open)\n", 3304 3140 ioc->name, handle)); 3305 - return; 3141 + goto out; 3306 3142 } 3307 3143 3308 3144 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "tr_send:handle(0x%04x), " ··· 3314 3150 mpi_request->DevHandle = cpu_to_le16(handle); 3315 3151 mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; 3316 3152 mpt2sas_base_put_smid_hi_priority(ioc, smid); 3153 + out: 3154 + if (sas_device) 3155 + sas_device_put(sas_device); 3317 3156 } 3318 3157 3319 3158 ··· 4235 4068 char *desc_scsi_state = ioc->tmp_string; 4236 4069 u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo); 4237 4070 struct _sas_device *sas_device = NULL; 4238 - unsigned long flags; 4239 4071 struct scsi_target *starget = scmd->device->sdev_target; 4240 4072 struct MPT2SAS_TARGET *priv_target = starget->hostdata; 4241 4073 char *device_str = NULL; ··· 4366 4200 
printk(MPT2SAS_WARN_FMT "\t%s wwid(0x%016llx)\n", ioc->name, 4367 4201 device_str, (unsigned long long)priv_target->sas_address); 4368 4202 } else { 4369 - spin_lock_irqsave(&ioc->sas_device_lock, flags); 4370 - sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 4371 - priv_target->sas_address); 4203 + sas_device = mpt2sas_get_sdev_from_target(ioc, priv_target); 4372 4204 if (sas_device) { 4373 4205 printk(MPT2SAS_WARN_FMT "\tsas_address(0x%016llx), " 4374 4206 "phy(%d)\n", ioc->name, sas_device->sas_address, ··· 4375 4211 "\tenclosure_logical_id(0x%016llx), slot(%d)\n", 4376 4212 ioc->name, sas_device->enclosure_logical_id, 4377 4213 sas_device->slot); 4214 + 4215 + sas_device_put(sas_device); 4378 4216 } 4379 - spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 4380 4217 } 4381 4218 4382 4219 printk(MPT2SAS_WARN_FMT "\thandle(0x%04x), ioc_status(%s)(0x%04x), " ··· 4424 4259 Mpi2SepRequest_t mpi_request; 4425 4260 struct _sas_device *sas_device; 4426 4261 4427 - sas_device = _scsih_sas_device_find_by_handle(ioc, handle); 4262 + sas_device = mpt2sas_get_sdev_by_handle(ioc, handle); 4428 4263 if (!sas_device) 4429 4264 return; 4430 4265 ··· 4439 4274 &mpi_request)) != 0) { 4440 4275 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, 4441 4276 __FILE__, __LINE__, __func__); 4442 - return; 4277 + goto out; 4443 4278 } 4444 4279 sas_device->pfa_led_on = 1; 4445 4280 ··· 4449 4284 "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n", 4450 4285 ioc->name, le16_to_cpu(mpi_reply.IOCStatus), 4451 4286 le32_to_cpu(mpi_reply.IOCLogInfo))); 4452 - return; 4287 + goto out; 4453 4288 } 4289 + out: 4290 + sas_device_put(sas_device); 4454 4291 } 4455 4292 4456 4293 /** ··· 4507 4340 { 4508 4341 struct fw_event_work *fw_event; 4509 4342 4510 - fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC); 4343 + fw_event = alloc_fw_event_work(0); 4511 4344 if (!fw_event) 4512 4345 return; 4513 4346 fw_event->event = MPT2SAS_TURN_ON_PFA_LED; 4514 
4347 fw_event->device_handle = handle; 4515 4348 fw_event->ioc = ioc; 4516 4349 _scsih_fw_event_add(ioc, fw_event); 4350 + fw_event_work_put(fw_event); 4517 4351 } 4518 4352 4519 4353 /** ··· 4538 4370 4539 4371 /* only handle non-raid devices */ 4540 4372 spin_lock_irqsave(&ioc->sas_device_lock, flags); 4541 - sas_device = _scsih_sas_device_find_by_handle(ioc, handle); 4373 + sas_device = __mpt2sas_get_sdev_by_handle(ioc, handle); 4542 4374 if (!sas_device) { 4543 - spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 4544 - return; 4375 + goto out_unlock; 4545 4376 } 4546 4377 starget = sas_device->starget; 4547 4378 sas_target_priv_data = starget->hostdata; 4548 4379 4549 4380 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) || 4550 - ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))) { 4551 - spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 4552 - return; 4553 - } 4381 + ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))) 4382 + goto out_unlock; 4383 + 4554 4384 starget_printk(KERN_WARNING, starget, "predicted fault\n"); 4555 4385 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 4556 4386 ··· 4562 4396 if (!event_reply) { 4563 4397 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", 4564 4398 ioc->name, __FILE__, __LINE__, __func__); 4565 - return; 4399 + goto out; 4566 4400 } 4567 4401 4568 4402 event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; ··· 4579 4413 event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address); 4580 4414 mpt2sas_ctl_add_to_event_log(ioc, event_reply); 4581 4415 kfree(event_reply); 4416 + out: 4417 + if (sas_device) 4418 + sas_device_put(sas_device); 4419 + return; 4420 + 4421 + out_unlock: 4422 + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 4423 + goto out; 4582 4424 } 4583 4425 4584 4426 /** ··· 5322 5148 5323 5149 spin_lock_irqsave(&ioc->sas_device_lock, flags); 5324 5150 sas_address = le64_to_cpu(sas_device_pg0.SASAddress); 5325 - sas_device = 
mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 5151 + sas_device = __mpt2sas_get_sdev_by_addr(ioc, 5326 5152 sas_address); 5327 5153 5328 5154 if (!sas_device) { 5329 5155 printk(MPT2SAS_ERR_FMT "device is not present " 5330 5156 "handle(0x%04x), no sas_device!!!\n", ioc->name, handle); 5331 - spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 5332 - return; 5157 + goto out_unlock; 5333 5158 } 5334 5159 5335 5160 if (unlikely(sas_device->handle != handle)) { ··· 5345 5172 MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) { 5346 5173 printk(MPT2SAS_ERR_FMT "device is not present " 5347 5174 "handle(0x%04x), flags!!!\n", ioc->name, handle); 5348 - spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 5349 - return; 5175 + goto out_unlock; 5350 5176 } 5351 5177 5352 5178 /* check if there were any issues with discovery */ 5353 5179 if (_scsih_check_access_status(ioc, sas_address, handle, 5354 - sas_device_pg0.AccessStatus)) { 5355 - spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 5356 - return; 5357 - } 5180 + sas_device_pg0.AccessStatus)) 5181 + goto out_unlock; 5182 + 5358 5183 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 5359 5184 _scsih_ublock_io_device(ioc, sas_address); 5185 + if (sas_device) 5186 + sas_device_put(sas_device); 5187 + return; 5360 5188 5189 + out_unlock: 5190 + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 5191 + if (sas_device) 5192 + sas_device_put(sas_device); 5361 5193 } 5362 5194 5363 5195 /** ··· 5386 5208 u32 ioc_status; 5387 5209 __le64 sas_address; 5388 5210 u32 device_info; 5389 - unsigned long flags; 5390 5211 5391 5212 if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, 5392 5213 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { ··· 5427 5250 return -1; 5428 5251 } 5429 5252 5430 - 5431 - spin_lock_irqsave(&ioc->sas_device_lock, flags); 5432 - sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 5253 + sas_device = mpt2sas_get_sdev_by_addr(ioc, 5433 5254 sas_address); 5434 - 
spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 5435 5255 5436 - if (sas_device) 5256 + if (sas_device) { 5257 + sas_device_put(sas_device); 5437 5258 return 0; 5259 + } 5438 5260 5439 5261 sas_device = kzalloc(sizeof(struct _sas_device), 5440 5262 GFP_KERNEL); ··· 5443 5267 return -1; 5444 5268 } 5445 5269 5270 + kref_init(&sas_device->refcount); 5446 5271 sas_device->handle = handle; 5447 5272 if (_scsih_get_sas_address(ioc, le16_to_cpu 5448 5273 (sas_device_pg0.ParentDevHandle), ··· 5473 5296 else 5474 5297 _scsih_sas_device_add(ioc, sas_device); 5475 5298 5299 + sas_device_put(sas_device); 5476 5300 return 0; 5477 5301 } 5478 5302 ··· 5522 5344 "handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__, 5523 5345 sas_device->handle, (unsigned long long) 5524 5346 sas_device->sas_address)); 5525 - kfree(sas_device); 5526 5347 } 5527 5348 /** 5528 5349 * _scsih_device_remove_by_handle - removing device object by handle ··· 5540 5363 return; 5541 5364 5542 5365 spin_lock_irqsave(&ioc->sas_device_lock, flags); 5543 - sas_device = _scsih_sas_device_find_by_handle(ioc, handle); 5544 - if (sas_device) 5545 - list_del(&sas_device->list); 5366 + sas_device = __mpt2sas_get_sdev_by_handle(ioc, handle); 5367 + if (sas_device) { 5368 + list_del_init(&sas_device->list); 5369 + sas_device_put(sas_device); 5370 + } 5546 5371 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 5547 - if (sas_device) 5372 + 5373 + if (sas_device) { 5548 5374 _scsih_remove_device(ioc, sas_device); 5375 + sas_device_put(sas_device); 5376 + } 5549 5377 } 5550 5378 5551 5379 /** ··· 5571 5389 return; 5572 5390 5573 5391 spin_lock_irqsave(&ioc->sas_device_lock, flags); 5574 - sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 5575 - sas_address); 5576 - if (sas_device) 5577 - list_del(&sas_device->list); 5392 + sas_device = __mpt2sas_get_sdev_by_addr(ioc, sas_address); 5393 + if (sas_device) { 5394 + list_del_init(&sas_device->list); 5395 + sas_device_put(sas_device); 5396 + } 
5578 5397 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 5579 - if (sas_device) 5398 + 5399 + if (sas_device) { 5580 5400 _scsih_remove_device(ioc, sas_device); 5401 + sas_device_put(sas_device); 5402 + } 5581 5403 } 5582 5404 #ifdef CONFIG_SCSI_MPT2SAS_LOGGING 5583 5405 /** ··· 5902 5716 5903 5717 spin_lock_irqsave(&ioc->sas_device_lock, flags); 5904 5718 sas_address = le64_to_cpu(event_data->SASAddress); 5905 - sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 5719 + sas_device = __mpt2sas_get_sdev_by_addr(ioc, 5906 5720 sas_address); 5907 5721 5908 - if (!sas_device || !sas_device->starget) { 5909 - spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 5910 - return; 5911 - } 5722 + if (!sas_device || !sas_device->starget) 5723 + goto out; 5912 5724 5913 5725 target_priv_data = sas_device->starget->hostdata; 5914 - if (!target_priv_data) { 5915 - spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 5916 - return; 5917 - } 5726 + if (!target_priv_data) 5727 + goto out; 5918 5728 5919 5729 if (event_data->ReasonCode == 5920 5730 MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET) 5921 5731 target_priv_data->tm_busy = 1; 5922 5732 else 5923 5733 target_priv_data->tm_busy = 0; 5734 + 5735 + out: 5736 + if (sas_device) 5737 + sas_device_put(sas_device); 5738 + 5924 5739 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 5740 + 5925 5741 } 5926 5742 5927 5743 #ifdef CONFIG_SCSI_MPT2SAS_LOGGING ··· 6311 6123 u16 handle = le16_to_cpu(element->PhysDiskDevHandle); 6312 6124 6313 6125 spin_lock_irqsave(&ioc->sas_device_lock, flags); 6314 - sas_device = _scsih_sas_device_find_by_handle(ioc, handle); 6126 + sas_device = __mpt2sas_get_sdev_by_handle(ioc, handle); 6315 6127 if (sas_device) { 6316 6128 sas_device->volume_handle = 0; 6317 6129 sas_device->volume_wwid = 0; ··· 6330 6142 /* exposing raid component */ 6331 6143 if (starget) 6332 6144 starget_for_each_device(starget, NULL, _scsih_reprobe_lun); 6145 + 6146 + sas_device_put(sas_device); 6333 6147 
} 6334 6148 6335 6149 /** ··· 6360 6170 &volume_wwid); 6361 6171 6362 6172 spin_lock_irqsave(&ioc->sas_device_lock, flags); 6363 - sas_device = _scsih_sas_device_find_by_handle(ioc, handle); 6173 + sas_device = __mpt2sas_get_sdev_by_handle(ioc, handle); 6364 6174 if (sas_device) { 6365 6175 set_bit(handle, ioc->pd_handles); 6366 6176 if (sas_device->starget && sas_device->starget->hostdata) { ··· 6379 6189 /* hiding raid component */ 6380 6190 if (starget) 6381 6191 starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun); 6192 + 6193 + sas_device_put(sas_device); 6382 6194 } 6383 6195 6384 6196 /** ··· 6413 6221 Mpi2EventIrConfigElement_t *element) 6414 6222 { 6415 6223 struct _sas_device *sas_device; 6416 - unsigned long flags; 6417 6224 u16 handle = le16_to_cpu(element->PhysDiskDevHandle); 6418 6225 Mpi2ConfigReply_t mpi_reply; 6419 6226 Mpi2SasDevicePage0_t sas_device_pg0; ··· 6422 6231 6423 6232 set_bit(handle, ioc->pd_handles); 6424 6233 6425 - spin_lock_irqsave(&ioc->sas_device_lock, flags); 6426 - sas_device = _scsih_sas_device_find_by_handle(ioc, handle); 6427 - spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 6428 - if (sas_device) 6234 + sas_device = mpt2sas_get_sdev_by_handle(ioc, handle); 6235 + if (sas_device) { 6236 + sas_device_put(sas_device); 6429 6237 return; 6238 + } 6430 6239 6431 6240 if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, 6432 6241 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { ··· 6700 6509 u16 handle, parent_handle; 6701 6510 u32 state; 6702 6511 struct _sas_device *sas_device; 6703 - unsigned long flags; 6704 6512 Mpi2ConfigReply_t mpi_reply; 6705 6513 Mpi2SasDevicePage0_t sas_device_pg0; 6706 6514 u32 ioc_status; ··· 6732 6542 if (!ioc->is_warpdrive) 6733 6543 set_bit(handle, ioc->pd_handles); 6734 6544 6735 - spin_lock_irqsave(&ioc->sas_device_lock, flags); 6736 - sas_device = _scsih_sas_device_find_by_handle(ioc, handle); 6737 - spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 6738 - 
6739 - if (sas_device) 6545 + sas_device = mpt2sas_get_sdev_by_handle(ioc, handle); 6546 + if (sas_device) { 6547 + sas_device_put(sas_device); 6740 6548 return; 6549 + } 6741 6550 6742 6551 if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, 6743 6552 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ··· 7204 7015 struct _raid_device *raid_device, *raid_device_next; 7205 7016 struct list_head tmp_list; 7206 7017 unsigned long flags; 7018 + LIST_HEAD(head); 7207 7019 7208 7020 printk(MPT2SAS_INFO_FMT "removing unresponding devices: start\n", 7209 7021 ioc->name); ··· 7212 7022 /* removing unresponding end devices */ 7213 7023 printk(MPT2SAS_INFO_FMT "removing unresponding devices: end-devices\n", 7214 7024 ioc->name); 7025 + 7026 + /* 7027 + * Iterate, pulling off devices marked as non-responding. We become the 7028 + * owner for the reference the list had on any object we prune. 7029 + */ 7030 + spin_lock_irqsave(&ioc->sas_device_lock, flags); 7215 7031 list_for_each_entry_safe(sas_device, sas_device_next, 7216 - &ioc->sas_device_list, list) { 7032 + &ioc->sas_device_list, list) { 7217 7033 if (!sas_device->responding) 7218 - mpt2sas_device_remove_by_sas_address(ioc, 7219 - sas_device->sas_address); 7034 + list_move_tail(&sas_device->list, &head); 7220 7035 else 7221 7036 sas_device->responding = 0; 7037 + } 7038 + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 7039 + 7040 + /* 7041 + * Now, uninitialize and remove the unresponding devices we pruned. 
7042 + */ 7043 + list_for_each_entry_safe(sas_device, sas_device_next, &head, list) { 7044 + _scsih_remove_device(ioc, sas_device); 7045 + list_del_init(&sas_device->list); 7046 + sas_device_put(sas_device); 7222 7047 } 7223 7048 7224 7049 /* removing unresponding volumes */ ··· 7384 7179 } 7385 7180 phys_disk_num = pd_pg0.PhysDiskNum; 7386 7181 handle = le16_to_cpu(pd_pg0.DevHandle); 7387 - spin_lock_irqsave(&ioc->sas_device_lock, flags); 7388 - sas_device = _scsih_sas_device_find_by_handle(ioc, handle); 7389 - spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 7390 - if (sas_device) 7182 + sas_device = mpt2sas_get_sdev_by_handle(ioc, handle); 7183 + if (sas_device) { 7184 + sas_device_put(sas_device); 7391 7185 continue; 7186 + } 7392 7187 if (mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, 7393 7188 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, 7394 7189 handle) != 0) ··· 7507 7302 if (!(_scsih_is_end_device( 7508 7303 le32_to_cpu(sas_device_pg0.DeviceInfo)))) 7509 7304 continue; 7510 - spin_lock_irqsave(&ioc->sas_device_lock, flags); 7511 - sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 7305 + sas_device = mpt2sas_get_sdev_by_addr(ioc, 7512 7306 le64_to_cpu(sas_device_pg0.SASAddress)); 7513 - spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 7514 - if (sas_device) 7307 + if (sas_device) { 7308 + sas_device_put(sas_device); 7515 7309 continue; 7310 + } 7516 7311 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); 7517 7312 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) { 7518 7313 printk(MPT2SAS_INFO_FMT "\tBEFORE adding end device: " ··· 7615 7410 struct fw_event_work, delayed_work.work); 7616 7411 struct MPT2SAS_ADAPTER *ioc = fw_event->ioc; 7617 7412 7413 + _scsih_fw_event_del_from_list(ioc, fw_event); 7414 + 7618 7415 /* the queue is being flushed so ignore this event */ 7619 - if (ioc->remove_host || 7620 - ioc->pci_error_recovery) { 7621 - _scsih_fw_event_free(ioc, fw_event); 7416 + if 
(ioc->remove_host || ioc->pci_error_recovery) { 7417 + fw_event_work_put(fw_event); 7622 7418 return; 7623 7419 } 7624 7420 7625 7421 switch (fw_event->event) { 7626 7422 case MPT2SAS_REMOVE_UNRESPONDING_DEVICES: 7627 - while (scsi_host_in_recovery(ioc->shost) || ioc->shost_recovery) 7423 + while (scsi_host_in_recovery(ioc->shost) || 7424 + ioc->shost_recovery) { 7425 + /* 7426 + * If we're unloading, bail. Otherwise, this can become 7427 + * an infinite loop. 7428 + */ 7429 + if (ioc->remove_host) 7430 + goto out; 7431 + 7628 7432 ssleep(1); 7433 + } 7629 7434 _scsih_remove_unresponding_sas_devices(ioc); 7630 7435 _scsih_scan_for_devices_after_reset(ioc); 7631 7436 break; ··· 7684 7469 _scsih_sas_ir_operation_status_event(ioc, fw_event); 7685 7470 break; 7686 7471 } 7687 - _scsih_fw_event_free(ioc, fw_event); 7472 + out: 7473 + fw_event_work_put(fw_event); 7688 7474 } 7689 7475 7690 7476 /** ··· 7823 7607 } 7824 7608 7825 7609 sz = le16_to_cpu(mpi_reply->EventDataLength) * 4; 7826 - fw_event = kzalloc(sizeof(*fw_event) + sz, GFP_ATOMIC); 7610 + fw_event = alloc_fw_event_work(sz); 7827 7611 if (!fw_event) { 7828 7612 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", 7829 7613 ioc->name, __FILE__, __LINE__, __func__); ··· 7836 7620 fw_event->VP_ID = mpi_reply->VP_ID; 7837 7621 fw_event->event = event; 7838 7622 _scsih_fw_event_add(ioc, fw_event); 7623 + fw_event_work_put(fw_event); 7839 7624 return; 7840 7625 } 7841 7626 ··· 8084 7867 sas_remove_host(shost); 8085 7868 scsi_remove_host(shost); 8086 7869 mpt2sas_base_detach(ioc); 7870 + spin_lock(&gioc_lock); 8087 7871 list_del(&ioc->list); 7872 + spin_unlock(&gioc_lock); 8088 7873 scsi_host_put(shost); 8089 7874 } 8090 7875 ··· 8185 7966 } 8186 7967 } 8187 7968 7969 + static struct _sas_device *get_next_sas_device(struct MPT2SAS_ADAPTER *ioc) 7970 + { 7971 + struct _sas_device *sas_device = NULL; 7972 + unsigned long flags; 7973 + 7974 + spin_lock_irqsave(&ioc->sas_device_lock, flags); 7975 + if 
(!list_empty(&ioc->sas_device_init_list)) { 7976 + sas_device = list_first_entry(&ioc->sas_device_init_list, 7977 + struct _sas_device, list); 7978 + sas_device_get(sas_device); 7979 + } 7980 + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 7981 + 7982 + return sas_device; 7983 + } 7984 + 7985 + static void sas_device_make_active(struct MPT2SAS_ADAPTER *ioc, 7986 + struct _sas_device *sas_device) 7987 + { 7988 + unsigned long flags; 7989 + 7990 + spin_lock_irqsave(&ioc->sas_device_lock, flags); 7991 + 7992 + /* 7993 + * Since we dropped the lock during the call to port_add(), we need to 7994 + * be careful here that somebody else didn't move or delete this item 7995 + * while we were busy with other things. 7996 + * 7997 + * If it was on the list, we need a put() for the reference the list 7998 + * had. Either way, we need a get() for the destination list. 7999 + */ 8000 + if (!list_empty(&sas_device->list)) { 8001 + list_del_init(&sas_device->list); 8002 + sas_device_put(sas_device); 8003 + } 8004 + 8005 + sas_device_get(sas_device); 8006 + list_add_tail(&sas_device->list, &ioc->sas_device_list); 8007 + 8008 + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 8009 + } 8010 + 8188 8011 /** 8189 8012 * _scsih_probe_sas - reporting sas devices to sas transport 8190 8013 * @ioc: per adapter object ··· 8236 7975 static void 8237 7976 _scsih_probe_sas(struct MPT2SAS_ADAPTER *ioc) 8238 7977 { 8239 - struct _sas_device *sas_device, *next; 8240 - unsigned long flags; 7978 + struct _sas_device *sas_device; 8241 7979 8242 - /* SAS Device List */ 8243 - list_for_each_entry_safe(sas_device, next, &ioc->sas_device_init_list, 8244 - list) { 7980 + if (ioc->hide_drives) 7981 + return; 8245 7982 8246 - if (ioc->hide_drives) 8247 - continue; 8248 - 7983 + while ((sas_device = get_next_sas_device(ioc))) { 8249 7984 if (!mpt2sas_transport_port_add(ioc, sas_device->handle, 8250 - sas_device->sas_address_parent)) { 8251 - list_del(&sas_device->list); 8252 - 
kfree(sas_device); 7985 + sas_device->sas_address_parent)) { 7986 + _scsih_sas_device_remove(ioc, sas_device); 7987 + sas_device_put(sas_device); 8253 7988 continue; 8254 7989 } else if (!sas_device->starget) { 8255 7990 if (!ioc->is_driver_loading) { 8256 7991 mpt2sas_transport_port_remove(ioc, 8257 - sas_device->sas_address, 8258 - sas_device->sas_address_parent); 8259 - list_del(&sas_device->list); 8260 - kfree(sas_device); 7992 + sas_device->sas_address, 7993 + sas_device->sas_address_parent); 7994 + _scsih_sas_device_remove(ioc, sas_device); 7995 + sas_device_put(sas_device); 8261 7996 continue; 8262 7997 } 8263 7998 } 8264 - spin_lock_irqsave(&ioc->sas_device_lock, flags); 8265 - list_move_tail(&sas_device->list, &ioc->sas_device_list); 8266 - spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 7999 + 8000 + sas_device_make_active(ioc, sas_device); 8001 + sas_device_put(sas_device); 8267 8002 } 8268 8003 } 8269 8004 ··· 8399 8142 ioc = shost_priv(shost); 8400 8143 memset(ioc, 0, sizeof(struct MPT2SAS_ADAPTER)); 8401 8144 INIT_LIST_HEAD(&ioc->list); 8145 + spin_lock(&gioc_lock); 8402 8146 list_add_tail(&ioc->list, &mpt2sas_ioc_list); 8147 + spin_unlock(&gioc_lock); 8403 8148 ioc->shost = shost; 8404 8149 ioc->id = mpt_ids++; 8405 8150 sprintf(ioc->name, "%s%d", MPT2SAS_DRIVER_NAME, ioc->id); ··· 8426 8167 ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds; 8427 8168 /* misc semaphores and spin locks */ 8428 8169 mutex_init(&ioc->reset_in_progress_mutex); 8170 + /* initializing pci_access_mutex lock */ 8171 + mutex_init(&ioc->pci_access_mutex); 8429 8172 spin_lock_init(&ioc->ioc_reset_in_progress_lock); 8430 8173 spin_lock_init(&ioc->scsi_lookup_lock); 8431 8174 spin_lock_init(&ioc->sas_device_lock); ··· 8530 8269 out_attach_fail: 8531 8270 destroy_workqueue(ioc->firmware_event_thread); 8532 8271 out_thread_fail: 8272 + spin_lock(&gioc_lock); 8533 8273 list_del(&ioc->list); 8274 + spin_unlock(&gioc_lock); 8534 8275 scsi_host_put(shost); 
8535 8276 return rv; 8536 8277 }
+8 -4
drivers/scsi/mpt2sas/mpt2sas_transport.c
··· 1323 1323 int rc; 1324 1324 1325 1325 spin_lock_irqsave(&ioc->sas_device_lock, flags); 1326 - sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 1326 + sas_device = __mpt2sas_get_sdev_by_addr(ioc, 1327 1327 rphy->identify.sas_address); 1328 1328 if (sas_device) { 1329 1329 *identifier = sas_device->enclosure_logical_id; 1330 1330 rc = 0; 1331 + sas_device_put(sas_device); 1331 1332 } else { 1332 1333 *identifier = 0; 1333 1334 rc = -ENXIO; 1334 1335 } 1336 + 1335 1337 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 1336 1338 return rc; 1337 1339 } ··· 1353 1351 int rc; 1354 1352 1355 1353 spin_lock_irqsave(&ioc->sas_device_lock, flags); 1356 - sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 1354 + sas_device = __mpt2sas_get_sdev_by_addr(ioc, 1357 1355 rphy->identify.sas_address); 1358 - if (sas_device) 1356 + if (sas_device) { 1359 1357 rc = sas_device->slot; 1360 - else 1358 + sas_device_put(sas_device); 1359 + } else { 1361 1360 rc = -ENXIO; 1361 + } 1362 1362 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 1363 1363 return rc; 1364 1364 }
+6 -2
drivers/scsi/mpt3sas/mpi/mpi2.h
··· 8 8 * scatter/gather formats. 9 9 * Creation Date: June 21, 2006 10 10 * 11 - * mpi2.h Version: 02.00.31 11 + * mpi2.h Version: 02.00.35 12 12 * 13 13 * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 14 14 * prefix are for use only on MPI v2.5 products, and must not be used ··· 88 88 * Added MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET. 89 89 * 04-09-13 02.00.30 Bumped MPI2_HEADER_VERSION_UNIT. 90 90 * 04-17-13 02.00.31 Bumped MPI2_HEADER_VERSION_UNIT. 91 + * 08-19-13 02.00.32 Bumped MPI2_HEADER_VERSION_UNIT. 92 + * 12-05-13 02.00.33 Bumped MPI2_HEADER_VERSION_UNIT. 93 + * 01-08-14 02.00.34 Bumped MPI2_HEADER_VERSION_UNIT 94 + * 06-13-14 02.00.35 Bumped MPI2_HEADER_VERSION_UNIT. 91 95 * -------------------------------------------------------------------------- 92 96 */ 93 97 ··· 125 121 #define MPI2_VERSION_02_05 (0x0205) 126 122 127 123 /*Unit and Dev versioning for this MPI header set */ 128 - #define MPI2_HEADER_VERSION_UNIT (0x1F) 124 + #define MPI2_HEADER_VERSION_UNIT (0x23) 129 125 #define MPI2_HEADER_VERSION_DEV (0x00) 130 126 #define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) 131 127 #define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
+44 -8
drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
··· 6 6 * Title: MPI Configuration messages and pages 7 7 * Creation Date: November 10, 2006 8 8 * 9 - * mpi2_cnfg.h Version: 02.00.26 9 + * mpi2_cnfg.h Version: 02.00.29 10 10 * 11 11 * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 12 12 * prefix are for use only on MPI v2.5 products, and must not be used ··· 165 165 * match the specification. 166 166 * 08-19-13 02.00.26 Added reserved words to MPI2_CONFIG_PAGE_IO_UNIT_7 for 167 167 * future use. 168 + * 12-05-13 02.00.27 Added MPI2_MANPAGE7_FLAG_BASE_ENCLOSURE_LEVEL for 169 + * MPI2_CONFIG_PAGE_MAN_7. 170 + * Added EnclosureLevel and ConnectorName fields to 171 + * MPI2_CONFIG_PAGE_SAS_DEV_0. 172 + * Added MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID for 173 + * MPI2_CONFIG_PAGE_SAS_DEV_0. 174 + * Added EnclosureLevel field to 175 + * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0. 176 + * Added MPI2_SAS_ENCLS0_FLAGS_ENCL_LEVEL_VALID for 177 + * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0. 178 + * 01-08-14 02.00.28 Added more defines for the BiosOptions field of 179 + * MPI2_CONFIG_PAGE_BIOS_1. 180 + * 06-13-14 02.00.29 Added SSUTimeout field to MPI2_CONFIG_PAGE_BIOS_1, and 181 + * more defines for the BiosOptions field.. 
168 182 * -------------------------------------------------------------------------- 169 183 */ 170 184 ··· 738 724 #define MPI2_MANUFACTURING7_PAGEVERSION (0x01) 739 725 740 726 /*defines for the Flags field */ 727 + #define MPI2_MANPAGE7_FLAG_BASE_ENCLOSURE_LEVEL (0x00000008) 741 728 #define MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER (0x00000002) 742 729 #define MPI2_MANPAGE7_FLAG_USE_SLOT_INFO (0x00000001) 743 730 ··· 1326 1311 MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ 1327 1312 U32 BiosOptions; /*0x04 */ 1328 1313 U32 IOCSettings; /*0x08 */ 1329 - U32 Reserved1; /*0x0C */ 1314 + U8 SSUTimeout; /*0x0C */ 1315 + U8 Reserved1; /*0x0D */ 1316 + U16 Reserved2; /*0x0E */ 1330 1317 U32 DeviceSettings; /*0x10 */ 1331 1318 U16 NumberOfDevices; /*0x14 */ 1332 1319 U16 UEFIVersion; /*0x16 */ ··· 1340 1323 *PTR_MPI2_CONFIG_PAGE_BIOS_1, 1341 1324 Mpi2BiosPage1_t, *pMpi2BiosPage1_t; 1342 1325 1343 - #define MPI2_BIOSPAGE1_PAGEVERSION (0x05) 1326 + #define MPI2_BIOSPAGE1_PAGEVERSION (0x07) 1344 1327 1345 1328 /*values for BIOS Page 1 BiosOptions field */ 1329 + #define MPI2_BIOSPAGE1_OPTIONS_PNS_MASK (0x00003800) 1330 + #define MPI2_BIOSPAGE1_OPTIONS_PNS_PBDHL (0x00000000) 1331 + #define MPI2_BIOSPAGE1_OPTIONS_PNS_ENCSLOSURE (0x00000800) 1332 + #define MPI2_BIOSPAGE1_OPTIONS_PNS_LWWID (0x00001000) 1333 + #define MPI2_BIOSPAGE1_OPTIONS_PNS_PSENS (0x00001800) 1334 + #define MPI2_BIOSPAGE1_OPTIONS_PNS_ESPHY (0x00002000) 1335 + 1336 + #define MPI2_BIOSPAGE1_OPTIONS_X86_DISABLE_BIOS (0x00000400) 1337 + 1338 + #define MPI2_BIOSPAGE1_OPTIONS_MASK_REGISTRATION_UEFI_BSD (0x00000300) 1339 + #define MPI2_BIOSPAGE1_OPTIONS_USE_BIT0_REGISTRATION_UEFI_BSD (0x00000000) 1340 + #define MPI2_BIOSPAGE1_OPTIONS_FULL_REGISTRATION_UEFI_BSD (0x00000100) 1341 + #define MPI2_BIOSPAGE1_OPTIONS_ADAPTER_REGISTRATION_UEFI_BSD (0x00000200) 1342 + #define MPI2_BIOSPAGE1_OPTIONS_DISABLE_REGISTRATION_UEFI_BSD (0x00000300) 1343 + 1346 1344 #define MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID (0x000000F0) 1347 1345 
#define MPI2_BIOSPAGE1_OPTIONS_LSI_OEM_ID (0x00000000) 1348 1346 ··· 2665 2633 U8 2666 2634 ControlGroup; /*0x2E */ 2667 2635 U8 2668 - Reserved1; /*0x2F */ 2636 + EnclosureLevel; /*0x2F */ 2669 2637 U32 2670 - Reserved2; /*0x30 */ 2638 + ConnectorName[4]; /*0x30 */ 2671 2639 U32 2672 2640 Reserved3; /*0x34 */ 2673 2641 } MPI2_CONFIG_PAGE_SAS_DEV_0, ··· 2675 2643 Mpi2SasDevicePage0_t, 2676 2644 *pMpi2SasDevicePage0_t; 2677 2645 2678 - #define MPI2_SASDEVICE0_PAGEVERSION (0x08) 2646 + #define MPI2_SASDEVICE0_PAGEVERSION (0x09) 2679 2647 2680 2648 /*values for SAS Device Page 0 AccessStatus field */ 2681 2649 #define MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS (0x00) ··· 2715 2683 #define MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED (0x0020) 2716 2684 #define MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED (0x0010) 2717 2685 #define MPI2_SAS_DEVICE0_FLAGS_PORT_SELECTOR_ATTACH (0x0008) 2686 + #define MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID (0x0002) 2718 2687 #define MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT (0x0001) 2719 2688 2720 2689 ··· 3052 3019 NumSlots; /*0x18 */ 3053 3020 U16 3054 3021 StartSlot; /*0x1A */ 3055 - U16 3022 + U8 3056 3023 Reserved2; /*0x1C */ 3024 + U8 3025 + EnclosureLevel; /*0x1D */ 3057 3026 U16 3058 3027 SEPDevHandle; /*0x1E */ 3059 3028 U32 ··· 3066 3031 *PTR_MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0, 3067 3032 Mpi2SasEnclosurePage0_t, *pMpi2SasEnclosurePage0_t; 3068 3033 3069 - #define MPI2_SASENCLOSURE0_PAGEVERSION (0x03) 3034 + #define MPI2_SASENCLOSURE0_PAGEVERSION (0x04) 3070 3035 3071 3036 /*values for SAS Enclosure Page 0 Flags field */ 3037 + #define MPI2_SAS_ENCLS0_FLAGS_ENCL_LEVEL_VALID (0x0010) 3072 3038 #define MPI2_SAS_ENCLS0_FLAGS_MNG_MASK (0x000F) 3073 3039 #define MPI2_SAS_ENCLS0_FLAGS_MNG_UNKNOWN (0x0000) 3074 3040 #define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SES (0x0001)
+3 -1
drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
··· 6 6 * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages 7 7 * Creation Date: October 11, 2006 8 8 * 9 - * mpi2_ioc.h Version: 02.00.23 9 + * mpi2_ioc.h Version: 02.00.24 10 10 * 11 11 * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 12 12 * prefix are for use only on MPI v2.5 products, and must not be used ··· 132 132 * Added MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE. 133 133 * Added MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY. 134 134 * Added Encrypted Hash Extended Image. 135 + * 12-05-13 02.00.24 Added MPI25_HASH_IMAGE_TYPE_BIOS. 135 136 * -------------------------------------------------------------------------- 136 137 */ 137 138 ··· 1599 1598 /* values for HashImageType */ 1600 1599 #define MPI25_HASH_IMAGE_TYPE_UNUSED (0x00) 1601 1600 #define MPI25_HASH_IMAGE_TYPE_FIRMWARE (0x01) 1601 + #define MPI25_HASH_IMAGE_TYPE_BIOS (0x02) 1602 1602 1603 1603 /* values for HashAlgorithm */ 1604 1604 #define MPI25_HASH_ALGORITHM_UNUSED (0x00)
+3 -1
drivers/scsi/mpt3sas/mpi/mpi2_tool.h
··· 6 6 * Title: MPI diagnostic tool structures and definitions 7 7 * Creation Date: March 26, 2007 8 8 * 9 - * mpi2_tool.h Version: 02.00.11 9 + * mpi2_tool.h Version: 02.00.12 10 10 * 11 11 * Version History 12 12 * --------------- ··· 33 33 * 07-26-12 02.00.10 Modified MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST so that 34 34 * it uses MPI Chain SGE as well as MPI Simple SGE. 35 35 * 08-19-13 02.00.11 Added MPI2_TOOLBOX_TEXT_DISPLAY_TOOL and related info. 36 + * 01-08-14 02.00.12 Added MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC. 36 37 * -------------------------------------------------------------------------- 37 38 */ 38 39 ··· 101 100 #define MPI2_TOOLBOX_CLEAN_OTHER_PERSIST_PAGES (0x20000000) 102 101 #define MPI2_TOOLBOX_CLEAN_FW_CURRENT (0x10000000) 103 102 #define MPI2_TOOLBOX_CLEAN_FW_BACKUP (0x08000000) 103 + #define MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC (0x04000000) 104 104 #define MPI2_TOOLBOX_CLEAN_MEGARAID (0x02000000) 105 105 #define MPI2_TOOLBOX_CLEAN_INITIALIZATION (0x01000000) 106 106 #define MPI2_TOOLBOX_CLEAN_FLASH (0x00000004)
+278 -32
drivers/scsi/mpt3sas/mpt3sas_base.c
··· 83 83 module_param(msix_disable, int, 0); 84 84 MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)"); 85 85 86 - static int max_msix_vectors = 8; 86 + static int max_msix_vectors = -1; 87 87 module_param(max_msix_vectors, int, 0); 88 88 MODULE_PARM_DESC(max_msix_vectors, 89 - " max msix vectors - (default=8)"); 89 + " max msix vectors"); 90 90 91 91 static int mpt3sas_fwfault_debug; 92 92 MODULE_PARM_DESC(mpt3sas_fwfault_debug, ··· 1009 1009 } 1010 1010 1011 1011 wmb(); 1012 - writel(reply_q->reply_post_host_index | (msix_index << 1013 - MPI2_RPHI_MSIX_INDEX_SHIFT), &ioc->chip->ReplyPostHostIndex); 1012 + 1013 + /* Update Reply Post Host Index. 1014 + * For those HBA's which support combined reply queue feature 1015 + * 1. Get the correct Supplemental Reply Post Host Index Register. 1016 + * i.e. (msix_index / 8)th entry from Supplemental Reply Post Host 1017 + * Index Register address bank i.e replyPostRegisterIndex[], 1018 + * 2. Then update this register with new reply host index value 1019 + * in ReplyPostIndex field and the MSIxIndex field with 1020 + * msix_index value reduced to a value between 0 and 7, 1021 + * using a modulo 8 operation. Since each Supplemental Reply Post 1022 + * Host Index Register supports 8 MSI-X vectors. 1023 + * 1024 + * For other HBA's just update the Reply Post Host Index register with 1025 + * new reply host index value in ReplyPostIndex Field and msix_index 1026 + * value in MSIxIndex field. 
1027 + */ 1028 + if (ioc->msix96_vector) 1029 + writel(reply_q->reply_post_host_index | ((msix_index & 7) << 1030 + MPI2_RPHI_MSIX_INDEX_SHIFT), 1031 + ioc->replyPostRegisterIndex[msix_index/8]); 1032 + else 1033 + writel(reply_q->reply_post_host_index | (msix_index << 1034 + MPI2_RPHI_MSIX_INDEX_SHIFT), 1035 + &ioc->chip->ReplyPostHostIndex); 1014 1036 atomic_dec(&reply_q->busy); 1015 1037 return IRQ_HANDLED; 1016 1038 } ··· 1360 1338 1361 1339 sg_scmd = scsi_sglist(scmd); 1362 1340 sges_left = scsi_dma_map(scmd); 1363 - if (!sges_left) { 1341 + if (sges_left < 0) { 1364 1342 sdev_printk(KERN_ERR, scmd->device, 1365 1343 "pci_map_sg failed: request for %d bytes!\n", 1366 1344 scsi_bufflen(scmd)); ··· 1429 1407 fill_in_last_segment: 1430 1408 1431 1409 /* fill the last segment */ 1432 - while (sges_left) { 1410 + while (sges_left > 0) { 1433 1411 if (sges_left == 1) 1434 1412 _base_add_sg_single_ieee(sg_local, 1435 1413 simple_sgl_flags_last, 0, sg_dma_len(sg_scmd), ··· 1582 1560 1583 1561 pci_read_config_word(ioc->pdev, base + 2, &message_control); 1584 1562 ioc->msix_vector_count = (message_control & 0x3FF) + 1; 1585 - if (ioc->msix_vector_count > 8) 1586 - ioc->msix_vector_count = 8; 1587 1563 dinitprintk(ioc, pr_info(MPT3SAS_FMT 1588 1564 "msix is supported, vector_count(%d)\n", 1589 1565 ioc->name, ioc->msix_vector_count)); ··· 1813 1793 } 1814 1794 1815 1795 /** 1796 + * mpt3sas_base_unmap_resources - free controller resources 1797 + * @ioc: per adapter object 1798 + */ 1799 + void 1800 + mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc) 1801 + { 1802 + struct pci_dev *pdev = ioc->pdev; 1803 + 1804 + dexitprintk(ioc, printk(MPT3SAS_FMT "%s\n", 1805 + ioc->name, __func__)); 1806 + 1807 + _base_free_irq(ioc); 1808 + _base_disable_msix(ioc); 1809 + 1810 + if (ioc->msix96_vector) 1811 + kfree(ioc->replyPostRegisterIndex); 1812 + 1813 + if (ioc->chip_phys) { 1814 + iounmap(ioc->chip); 1815 + ioc->chip_phys = 0; 1816 + } 1817 + 1818 + if 
(pci_is_enabled(pdev)) { 1819 + pci_release_selected_regions(ioc->pdev, ioc->bars); 1820 + pci_disable_pcie_error_reporting(pdev); 1821 + pci_disable_device(pdev); 1822 + } 1823 + } 1824 + 1825 + /** 1816 1826 * mpt3sas_base_map_resources - map in controller resources (io/irq/memap) 1817 1827 * @ioc: per adapter object 1818 1828 * ··· 1932 1882 if (r) 1933 1883 goto out_fail; 1934 1884 1885 + /* Use the Combined reply queue feature only for SAS3 C0 & higher 1886 + * revision HBAs and also only when reply queue count is greater than 8 1887 + */ 1888 + if (ioc->msix96_vector && ioc->reply_queue_count > 8) { 1889 + /* Determine the Supplemental Reply Post Host Index Registers 1890 + * Addresse. Supplemental Reply Post Host Index Registers 1891 + * starts at offset MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET and 1892 + * each register is at offset bytes of 1893 + * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET from previous one. 1894 + */ 1895 + ioc->replyPostRegisterIndex = kcalloc( 1896 + MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT, 1897 + sizeof(resource_size_t *), GFP_KERNEL); 1898 + if (!ioc->replyPostRegisterIndex) { 1899 + dfailprintk(ioc, printk(MPT3SAS_FMT 1900 + "allocation for reply Post Register Index failed!!!\n", 1901 + ioc->name)); 1902 + r = -ENOMEM; 1903 + goto out_fail; 1904 + } 1905 + 1906 + for (i = 0; i < MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT; i++) { 1907 + ioc->replyPostRegisterIndex[i] = (resource_size_t *) 1908 + ((u8 *)&ioc->chip->Doorbell + 1909 + MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET + 1910 + (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET)); 1911 + } 1912 + } else 1913 + ioc->msix96_vector = 0; 1914 + 1935 1915 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) 1936 1916 pr_info(MPT3SAS_FMT "%s: IRQ %d\n", 1937 1917 reply_q->name, ((ioc->msix_enable) ? 
"PCI-MSI-X enabled" : ··· 1977 1897 return 0; 1978 1898 1979 1899 out_fail: 1980 - if (ioc->chip_phys) 1981 - iounmap(ioc->chip); 1982 - ioc->chip_phys = 0; 1983 - pci_release_selected_regions(ioc->pdev, ioc->bars); 1984 - pci_disable_pcie_error_reporting(pdev); 1985 - pci_disable_device(pdev); 1900 + mpt3sas_base_unmap_resources(ioc); 1986 1901 return r; 1987 1902 } 1988 1903 ··· 2367 2292 2368 2293 2369 2294 /** 2295 + * _base_display_dell_branding - Display branding string 2296 + * @ioc: per adapter object 2297 + * 2298 + * Return nothing. 2299 + */ 2300 + static void 2301 + _base_display_dell_branding(struct MPT3SAS_ADAPTER *ioc) 2302 + { 2303 + if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_DELL) 2304 + return; 2305 + 2306 + switch (ioc->pdev->device) { 2307 + case MPI25_MFGPAGE_DEVID_SAS3008: 2308 + switch (ioc->pdev->subsystem_device) { 2309 + case MPT3SAS_DELL_12G_HBA_SSDID: 2310 + pr_info(MPT3SAS_FMT "%s\n", ioc->name, 2311 + MPT3SAS_DELL_12G_HBA_BRANDING); 2312 + break; 2313 + default: 2314 + pr_info(MPT3SAS_FMT 2315 + "Dell 12Gbps HBA: Subsystem ID: 0x%X\n", ioc->name, 2316 + ioc->pdev->subsystem_device); 2317 + break; 2318 + } 2319 + break; 2320 + default: 2321 + pr_info(MPT3SAS_FMT 2322 + "Dell 12Gbps HBA: Subsystem ID: 0x%X\n", ioc->name, 2323 + ioc->pdev->subsystem_device); 2324 + break; 2325 + } 2326 + } 2327 + 2328 + /** 2329 + * _base_display_cisco_branding - Display branding string 2330 + * @ioc: per adapter object 2331 + * 2332 + * Return nothing. 
2333 + */ 2334 + static void 2335 + _base_display_cisco_branding(struct MPT3SAS_ADAPTER *ioc) 2336 + { 2337 + if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_CISCO) 2338 + return; 2339 + 2340 + switch (ioc->pdev->device) { 2341 + case MPI25_MFGPAGE_DEVID_SAS3008: 2342 + switch (ioc->pdev->subsystem_device) { 2343 + case MPT3SAS_CISCO_12G_8E_HBA_SSDID: 2344 + pr_info(MPT3SAS_FMT "%s\n", ioc->name, 2345 + MPT3SAS_CISCO_12G_8E_HBA_BRANDING); 2346 + break; 2347 + case MPT3SAS_CISCO_12G_8I_HBA_SSDID: 2348 + pr_info(MPT3SAS_FMT "%s\n", ioc->name, 2349 + MPT3SAS_CISCO_12G_8I_HBA_BRANDING); 2350 + break; 2351 + case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID: 2352 + pr_info(MPT3SAS_FMT "%s\n", ioc->name, 2353 + MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING); 2354 + break; 2355 + default: 2356 + pr_info(MPT3SAS_FMT 2357 + "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n", 2358 + ioc->name, ioc->pdev->subsystem_device); 2359 + break; 2360 + } 2361 + break; 2362 + case MPI25_MFGPAGE_DEVID_SAS3108_1: 2363 + switch (ioc->pdev->subsystem_device) { 2364 + case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID: 2365 + pr_info(MPT3SAS_FMT "%s\n", ioc->name, 2366 + MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING); 2367 + break; 2368 + case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID: 2369 + pr_info(MPT3SAS_FMT "%s\n", ioc->name, 2370 + MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING); 2371 + break; 2372 + default: 2373 + pr_info(MPT3SAS_FMT 2374 + "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n", 2375 + ioc->name, ioc->pdev->subsystem_device); 2376 + break; 2377 + } 2378 + break; 2379 + default: 2380 + pr_info(MPT3SAS_FMT 2381 + "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n", 2382 + ioc->name, ioc->pdev->subsystem_device); 2383 + break; 2384 + } 2385 + } 2386 + 2387 + /** 2370 2388 * _base_display_ioc_capabilities - Disply IOC's capabilities. 
2371 2389 * @ioc: per adapter object 2372 2390 * ··· 2489 2321 bios_version & 0x000000FF); 2490 2322 2491 2323 _base_display_intel_branding(ioc); 2324 + _base_display_dell_branding(ioc); 2325 + _base_display_cisco_branding(ioc); 2492 2326 2493 2327 pr_info(MPT3SAS_FMT "Protocol=(", ioc->name); 2494 2328 ··· 3309 3139 * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell. 3310 3140 */ 3311 3141 static int 3142 + _base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag); 3143 + 3144 + static int 3312 3145 _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout, 3313 3146 int sleep_flag) 3314 3147 { ··· 3854 3681 } 3855 3682 3856 3683 /** 3684 + * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL 3685 + * @ioc: per adapter object 3686 + * @timeout: 3687 + * @sleep_flag: CAN_SLEEP or NO_SLEEP 3688 + * 3689 + * Returns 0 for success, non-zero for failure. 3690 + */ 3691 + static int 3692 + _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout, 3693 + int sleep_flag) 3694 + { 3695 + u32 ioc_state; 3696 + int rc; 3697 + 3698 + dinitprintk(ioc, printk(MPT3SAS_FMT "%s\n", ioc->name, 3699 + __func__)); 3700 + 3701 + if (ioc->pci_error_recovery) { 3702 + dfailprintk(ioc, printk(MPT3SAS_FMT 3703 + "%s: host in pci error recovery\n", ioc->name, __func__)); 3704 + return -EFAULT; 3705 + } 3706 + 3707 + ioc_state = mpt3sas_base_get_iocstate(ioc, 0); 3708 + dhsprintk(ioc, printk(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n", 3709 + ioc->name, __func__, ioc_state)); 3710 + 3711 + if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) || 3712 + (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL) 3713 + return 0; 3714 + 3715 + if (ioc_state & MPI2_DOORBELL_USED) { 3716 + dhsprintk(ioc, printk(MPT3SAS_FMT 3717 + "unexpected doorbell active!\n", ioc->name)); 3718 + goto issue_diag_reset; 3719 + } 3720 + 3721 + if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { 3722 + 
mpt3sas_base_fault_info(ioc, ioc_state & 3723 + MPI2_DOORBELL_DATA_MASK); 3724 + goto issue_diag_reset; 3725 + } 3726 + 3727 + ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 3728 + timeout, sleep_flag); 3729 + if (ioc_state) { 3730 + dfailprintk(ioc, printk(MPT3SAS_FMT 3731 + "%s: failed going to ready state (ioc_state=0x%x)\n", 3732 + ioc->name, __func__, ioc_state)); 3733 + return -EFAULT; 3734 + } 3735 + 3736 + issue_diag_reset: 3737 + rc = _base_diag_reset(ioc, sleep_flag); 3738 + return rc; 3739 + } 3740 + 3741 + /** 3857 3742 * _base_get_ioc_facts - obtain ioc facts reply and save in ioc 3858 3743 * @ioc: per adapter object 3859 3744 * @sleep_flag: CAN_SLEEP or NO_SLEEP ··· 3929 3698 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 3930 3699 __func__)); 3931 3700 3701 + r = _base_wait_for_iocstate(ioc, 10, sleep_flag); 3702 + if (r) { 3703 + dfailprintk(ioc, printk(MPT3SAS_FMT 3704 + "%s: failed getting to correct state\n", 3705 + ioc->name, __func__)); 3706 + return r; 3707 + } 3932 3708 mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t); 3933 3709 mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t); 3934 3710 memset(&mpi_request, 0, mpi_request_sz); ··· 4021 3783 mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER; 4022 3784 mpi_request.VF_ID = 0; /* TODO */ 4023 3785 mpi_request.VP_ID = 0; 4024 - mpi_request.MsgVersion = cpu_to_le16(MPI2_VERSION); 3786 + mpi_request.MsgVersion = cpu_to_le16(MPI25_VERSION); 4025 3787 mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION); 4026 3788 4027 3789 if (_base_is_controller_msix_enabled(ioc)) ··· 4762 4524 4763 4525 /* initialize reply post host index */ 4764 4526 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { 4765 - writel(reply_q->msix_index << MPI2_RPHI_MSIX_INDEX_SHIFT, 4766 - &ioc->chip->ReplyPostHostIndex); 4527 + if (ioc->msix96_vector) 4528 + writel((reply_q->msix_index & 7)<< 4529 + MPI2_RPHI_MSIX_INDEX_SHIFT, 4530 + ioc->replyPostRegisterIndex[reply_q->msix_index/8]); 4531 + else 
4532 + writel(reply_q->msix_index << 4533 + MPI2_RPHI_MSIX_INDEX_SHIFT, 4534 + &ioc->chip->ReplyPostHostIndex); 4535 + 4767 4536 if (!_base_is_controller_msix_enabled(ioc)) 4768 4537 goto skip_init_reply_post_host_index; 4769 4538 } ··· 4809 4564 void 4810 4565 mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc) 4811 4566 { 4812 - struct pci_dev *pdev = ioc->pdev; 4813 - 4814 4567 dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 4815 4568 __func__)); 4816 4569 ··· 4819 4576 ioc->shost_recovery = 0; 4820 4577 } 4821 4578 4822 - _base_free_irq(ioc); 4823 - _base_disable_msix(ioc); 4824 - 4825 - if (ioc->chip_phys && ioc->chip) 4826 - iounmap(ioc->chip); 4827 - ioc->chip_phys = 0; 4828 - 4829 - if (pci_is_enabled(pdev)) { 4830 - pci_release_selected_regions(ioc->pdev, ioc->bars); 4831 - pci_disable_pcie_error_reporting(pdev); 4832 - pci_disable_device(pdev); 4833 - } 4579 + mpt3sas_base_unmap_resources(ioc); 4834 4580 return; 4835 4581 } 4836 4582 ··· 4834 4602 { 4835 4603 int r, i; 4836 4604 int cpu_id, last_cpu_id = 0; 4605 + u8 revision; 4837 4606 4838 4607 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 4839 4608 __func__)); ··· 4853 4620 r = -ENOMEM; 4854 4621 goto out_free_resources; 4855 4622 } 4623 + 4624 + /* Check whether the controller revision is C0 or above. 4625 + * only C0 and above revision controllers support 96 MSI-X vectors. 
4626 + */ 4627 + revision = ioc->pdev->revision; 4628 + 4629 + if ((ioc->pdev->device == MPI25_MFGPAGE_DEVID_SAS3004 || 4630 + ioc->pdev->device == MPI25_MFGPAGE_DEVID_SAS3008 || 4631 + ioc->pdev->device == MPI25_MFGPAGE_DEVID_SAS3108_1 || 4632 + ioc->pdev->device == MPI25_MFGPAGE_DEVID_SAS3108_2 || 4633 + ioc->pdev->device == MPI25_MFGPAGE_DEVID_SAS3108_5 || 4634 + ioc->pdev->device == MPI25_MFGPAGE_DEVID_SAS3108_6) && 4635 + (revision >= 0x02)) 4636 + ioc->msix96_vector = 1; 4856 4637 4857 4638 ioc->rdpq_array_enable_assigned = 0; 4858 4639 ioc->dma_mask = 0; ··· 4890 4643 ioc->build_sg_scmd = &_base_build_sg_scmd_ieee; 4891 4644 ioc->build_sg = &_base_build_sg_ieee; 4892 4645 ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee; 4893 - ioc->mpi25 = 1; 4894 4646 ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t); 4895 4647 4896 4648 /*
+52 -5
drivers/scsi/mpt3sas/mpt3sas_base.h
··· 71 71 #define MPT3SAS_DRIVER_NAME "mpt3sas" 72 72 #define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>" 73 73 #define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver" 74 - #define MPT3SAS_DRIVER_VERSION "04.100.00.00" 75 - #define MPT3SAS_MAJOR_VERSION 4 74 + #define MPT3SAS_DRIVER_VERSION "09.100.00.00" 75 + #define MPT3SAS_MAJOR_VERSION 9 76 76 #define MPT3SAS_MINOR_VERSION 100 77 77 #define MPT3SAS_BUILD_VERSION 0 78 78 #define MPT3SAS_RELEASE_VERSION 00 ··· 152 152 #define MPT3SAS_INTEL_RS3UC080_SSDID 0x3524 153 153 154 154 /* 155 + * Dell HBA branding 156 + */ 157 + #define MPT3SAS_DELL_12G_HBA_BRANDING \ 158 + "Dell 12Gbps HBA" 159 + 160 + /* 161 + * Dell HBA SSDIDs 162 + */ 163 + #define MPT3SAS_DELL_12G_HBA_SSDID 0x1F46 164 + 165 + /* 166 + * Cisco HBA branding 167 + */ 168 + #define MPT3SAS_CISCO_12G_8E_HBA_BRANDING \ 169 + "Cisco 9300-8E 12G SAS HBA" 170 + #define MPT3SAS_CISCO_12G_8I_HBA_BRANDING \ 171 + "Cisco 9300-8i 12G SAS HBA" 172 + #define MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING \ 173 + "Cisco 12G Modular SAS Pass through Controller" 174 + #define MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING \ 175 + "UCS C3X60 12G SAS Pass through Controller" 176 + /* 177 + * Cisco HBA SSSDIDs 178 + */ 179 + #define MPT3SAS_CISCO_12G_8E_HBA_SSDID 0x14C 180 + #define MPT3SAS_CISCO_12G_8I_HBA_SSDID 0x154 181 + #define MPT3SAS_CISCO_12G_AVILA_HBA_SSDID 0x155 182 + #define MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID 0x156 183 + 184 + /* 155 185 * status bits for ioc->diag_buffer_status 156 186 */ 157 187 #define MPT3_DIAG_BUFFER_IS_REGISTERED (0x01) 158 188 #define MPT3_DIAG_BUFFER_IS_RELEASED (0x02) 159 189 #define MPT3_DIAG_BUFFER_IS_DIAG_RESET (0x04) 160 190 191 + /* 192 + * Combined Reply Queue constants, 193 + * There are twelve Supplemental Reply Post Host Index Registers 194 + * and each register is at offset 0x10 bytes from the previous one. 
195 + */ 196 + #define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT 12 197 + #define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET (0x10) 161 198 162 199 /* OEM Identifiers */ 163 200 #define MFG10_OEM_ID_INVALID (0x00000000) ··· 209 172 #define MFG10_GF0_R10_DISPLAY (0x00000004) 210 173 #define MFG10_GF0_SSD_DATA_SCRUB_DISABLE (0x00000008) 211 174 #define MFG10_GF0_SINGLE_DRIVE_R0 (0x00000010) 175 + 176 + #define VIRTUAL_IO_FAILED_RETRY (0x32010081) 212 177 213 178 /* OEM Specific Flags will come from OEM specific header files */ 214 179 struct Mpi2ManufacturingPage10_t { ··· 333 294 * @responding: used in _scsih_sas_device_mark_responding 334 295 * @fast_path: fast path feature enable bit 335 296 * @pfa_led_on: flag for PFA LED status 336 - * 297 + * @pend_sas_rphy_add: flag to check if device is in sas_rphy_add() 298 + * addition routine. 337 299 */ 338 300 struct _sas_device { 339 301 struct list_head list; ··· 355 315 u8 responding; 356 316 u8 fast_path; 357 317 u8 pfa_led_on; 318 + u8 pend_sas_rphy_add; 319 + u8 enclosure_level; 320 + u8 connector_name[4]; 358 321 }; 359 322 360 323 /** ··· 771 728 * is assigned only ones 772 729 * @reply_queue_count: number of reply queue's 773 730 * @reply_queue_list: link list contaning the reply queue info 774 - * @reply_post_host_index: head index in the pool where FW completes IO 731 + * @msix96_vector: 96 MSI-X vector support 732 + * @replyPostRegisterIndex: index of next position in Reply Desc Post Queue 775 733 * @delayed_tr_list: target reset link list 776 734 * @delayed_tr_volume_list: volume target reset link list 777 735 * @@temp_sensors_count: flag to carry the number of temperature sensors ··· 858 814 MPT_BUILD_SG_SCMD build_sg_scmd; 859 815 MPT_BUILD_SG build_sg; 860 816 MPT_BUILD_ZERO_LEN_SGE build_zero_len_sge; 861 - u8 mpi25; 862 817 u16 sge_size_ieee; 863 818 864 819 /* function ptr for MPI sg elements only */ ··· 979 936 struct dma_pool *reply_post_free_dma_pool; 980 937 u8 reply_queue_count; 981 938 struct 
list_head reply_queue_list; 939 + 940 + u8 msix96_vector; 941 + /* reply post register index */ 942 + resource_size_t **replyPostRegisterIndex; 982 943 983 944 struct list_head delayed_tr_list; 984 945 struct list_head delayed_tr_volume_list;
+288 -55
drivers/scsi/mpt3sas/mpt3sas_scsih.c
··· 585 585 586 586 if (!sas_device) 587 587 return; 588 + pr_info(MPT3SAS_FMT 589 + "removing handle(0x%04x), sas_addr(0x%016llx)\n", 590 + ioc->name, sas_device->handle, 591 + (unsigned long long) sas_device->sas_address); 592 + 593 + if (sas_device->enclosure_handle != 0) 594 + pr_info(MPT3SAS_FMT 595 + "removing enclosure logical id(0x%016llx), slot(%d)\n", 596 + ioc->name, (unsigned long long) 597 + sas_device->enclosure_logical_id, sas_device->slot); 598 + 599 + if (sas_device->connector_name[0] != '\0') 600 + pr_info(MPT3SAS_FMT 601 + "removing enclosure level(0x%04x), connector name( %s)\n", 602 + ioc->name, sas_device->enclosure_level, 603 + sas_device->connector_name); 588 604 589 605 spin_lock_irqsave(&ioc->sas_device_lock, flags); 590 606 list_del(&sas_device->list); ··· 679 663 ioc->name, __func__, sas_device->handle, 680 664 (unsigned long long)sas_device->sas_address)); 681 665 666 + if (sas_device->enclosure_handle != 0) 667 + dewtprintk(ioc, pr_info(MPT3SAS_FMT 668 + "%s: enclosure logical id(0x%016llx), slot( %d)\n", 669 + ioc->name, __func__, (unsigned long long) 670 + sas_device->enclosure_logical_id, sas_device->slot)); 671 + 672 + if (sas_device->connector_name[0] != '\0') 673 + dewtprintk(ioc, pr_info(MPT3SAS_FMT 674 + "%s: enclosure level(0x%04x), connector name( %s)\n", 675 + ioc->name, __func__, 676 + sas_device->enclosure_level, sas_device->connector_name)); 677 + 682 678 spin_lock_irqsave(&ioc->sas_device_lock, flags); 683 679 list_add_tail(&sas_device->list, &ioc->sas_device_list); 684 680 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); ··· 731 703 "%s: handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, 732 704 __func__, sas_device->handle, 733 705 (unsigned long long)sas_device->sas_address)); 706 + 707 + if (sas_device->enclosure_handle != 0) 708 + dewtprintk(ioc, pr_info(MPT3SAS_FMT 709 + "%s: enclosure logical id(0x%016llx), slot( %d)\n", 710 + ioc->name, __func__, (unsigned long long) 711 + sas_device->enclosure_logical_id, 
sas_device->slot)); 712 + 713 + if (sas_device->connector_name[0] != '\0') 714 + dewtprintk(ioc, pr_info(MPT3SAS_FMT 715 + "%s: enclosure level(0x%04x), connector name( %s)\n", 716 + ioc->name, __func__, sas_device->enclosure_level, 717 + sas_device->connector_name)); 734 718 735 719 spin_lock_irqsave(&ioc->sas_device_lock, flags); 736 720 list_add_tail(&sas_device->list, &ioc->sas_device_init_list); ··· 1812 1772 "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n", 1813 1773 ds, handle, (unsigned long long)sas_device->sas_address, 1814 1774 sas_device->phy, (unsigned long long)sas_device->device_name); 1815 - sdev_printk(KERN_INFO, sdev, 1816 - "%s: enclosure_logical_id(0x%016llx), slot(%d)\n", 1817 - ds, (unsigned long long) 1818 - sas_device->enclosure_logical_id, sas_device->slot); 1775 + if (sas_device->enclosure_handle != 0) 1776 + sdev_printk(KERN_INFO, sdev, 1777 + "%s: enclosure_logical_id(0x%016llx), slot(%d)\n", 1778 + ds, (unsigned long long) 1779 + sas_device->enclosure_logical_id, sas_device->slot); 1780 + if (sas_device->connector_name[0] != '\0') 1781 + sdev_printk(KERN_INFO, sdev, 1782 + "%s: enclosure level(0x%04x), connector name( %s)\n", 1783 + ds, sas_device->enclosure_level, 1784 + sas_device->connector_name); 1819 1785 1820 1786 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 1821 1787 ··· 2235 2189 sas_device->handle, 2236 2190 (unsigned long long)sas_device->sas_address, 2237 2191 sas_device->phy); 2238 - starget_printk(KERN_INFO, starget, 2239 - "enclosure_logical_id(0x%016llx), slot(%d)\n", 2240 - (unsigned long long)sas_device->enclosure_logical_id, 2241 - sas_device->slot); 2192 + if (sas_device->enclosure_handle != 0) 2193 + starget_printk(KERN_INFO, starget, 2194 + "enclosure_logical_id(0x%016llx), slot(%d)\n", 2195 + (unsigned long long) 2196 + sas_device->enclosure_logical_id, 2197 + sas_device->slot); 2198 + if (sas_device->connector_name) 2199 + starget_printk(KERN_INFO, starget, 2200 + "enclosure 
level(0x%04x),connector name(%s)\n", 2201 + sas_device->enclosure_level, 2202 + sas_device->connector_name); 2242 2203 } 2243 2204 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 2244 2205 } ··· 2605 2552 } 2606 2553 2607 2554 /** 2555 + * _scsih_internal_device_block - block the sdev device 2556 + * @sdev: per device object 2557 + * @sas_device_priv_data : per device driver private data 2558 + * 2559 + * make sure device is blocked without error, if not 2560 + * print an error 2561 + */ 2562 + static void 2563 + _scsih_internal_device_block(struct scsi_device *sdev, 2564 + struct MPT3SAS_DEVICE *sas_device_priv_data) 2565 + { 2566 + int r = 0; 2567 + 2568 + sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n", 2569 + sas_device_priv_data->sas_target->handle); 2570 + sas_device_priv_data->block = 1; 2571 + 2572 + r = scsi_internal_device_block(sdev); 2573 + if (r == -EINVAL) 2574 + sdev_printk(KERN_WARNING, sdev, 2575 + "device_block failed with return(%d) for handle(0x%04x)\n", 2576 + sas_device_priv_data->sas_target->handle, r); 2577 + } 2578 + 2579 + /** 2580 + * _scsih_internal_device_unblock - unblock the sdev device 2581 + * @sdev: per device object 2582 + * @sas_device_priv_data : per device driver private data 2583 + * make sure device is unblocked without error, if not retry 2584 + * by blocking and then unblocking 2585 + */ 2586 + 2587 + static void 2588 + _scsih_internal_device_unblock(struct scsi_device *sdev, 2589 + struct MPT3SAS_DEVICE *sas_device_priv_data) 2590 + { 2591 + int r = 0; 2592 + 2593 + sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, " 2594 + "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle); 2595 + sas_device_priv_data->block = 0; 2596 + r = scsi_internal_device_unblock(sdev, SDEV_RUNNING); 2597 + if (r == -EINVAL) { 2598 + /* The device has been set to SDEV_RUNNING by SD layer during 2599 + * device addition but the request queue is still stopped by 2600 + * our earlier block call. 
We need to perform a block again 2601 + * to get the device to SDEV_BLOCK and then to SDEV_RUNNING */ 2602 + 2603 + sdev_printk(KERN_WARNING, sdev, 2604 + "device_unblock failed with return(%d) for handle(0x%04x) " 2605 + "performing a block followed by an unblock\n", 2606 + sas_device_priv_data->sas_target->handle, r); 2607 + sas_device_priv_data->block = 1; 2608 + r = scsi_internal_device_block(sdev); 2609 + if (r) 2610 + sdev_printk(KERN_WARNING, sdev, "retried device_block " 2611 + "failed with return(%d) for handle(0x%04x)\n", 2612 + sas_device_priv_data->sas_target->handle, r); 2613 + 2614 + sas_device_priv_data->block = 0; 2615 + r = scsi_internal_device_unblock(sdev, SDEV_RUNNING); 2616 + if (r) 2617 + sdev_printk(KERN_WARNING, sdev, "retried device_unblock" 2618 + " failed with return(%d) for handle(0x%04x)\n", 2619 + sas_device_priv_data->sas_target->handle, r); 2620 + } 2621 + } 2622 + 2623 + /** 2608 2624 * _scsih_ublock_io_all_device - unblock every device 2609 2625 * @ioc: per adapter object 2610 2626 * ··· 2692 2570 if (!sas_device_priv_data->block) 2693 2571 continue; 2694 2572 2695 - sas_device_priv_data->block = 0; 2696 2573 dewtprintk(ioc, sdev_printk(KERN_INFO, sdev, 2697 2574 "device_running, handle(0x%04x)\n", 2698 2575 sas_device_priv_data->sas_target->handle)); 2699 - scsi_internal_device_unblock(sdev, SDEV_RUNNING); 2576 + _scsih_internal_device_unblock(sdev, sas_device_priv_data); 2700 2577 } 2701 2578 } 2702 2579 ··· 2720 2599 if (sas_device_priv_data->sas_target->sas_address 2721 2600 != sas_address) 2722 2601 continue; 2723 - if (sas_device_priv_data->block) { 2724 - sas_device_priv_data->block = 0; 2725 - scsi_internal_device_unblock(sdev, SDEV_RUNNING); 2726 - } 2602 + if (sas_device_priv_data->block) 2603 + _scsih_internal_device_unblock(sdev, 2604 + sas_device_priv_data); 2727 2605 } 2728 2606 } 2729 2607 ··· 2745 2625 continue; 2746 2626 if (sas_device_priv_data->block) 2747 2627 continue; 2748 - sas_device_priv_data->block = 1; 
2749 - scsi_internal_device_block(sdev); 2750 - sdev_printk(KERN_INFO, sdev, "device_blocked, handle(0x%04x)\n", 2751 - sas_device_priv_data->sas_target->handle); 2628 + _scsih_internal_device_block(sdev, sas_device_priv_data); 2752 2629 } 2753 2630 } 2754 2631 ··· 2761 2644 { 2762 2645 struct MPT3SAS_DEVICE *sas_device_priv_data; 2763 2646 struct scsi_device *sdev; 2647 + struct _sas_device *sas_device; 2648 + 2649 + sas_device = _scsih_sas_device_find_by_handle(ioc, handle); 2650 + if (!sas_device) 2651 + return; 2764 2652 2765 2653 shost_for_each_device(sdev, ioc->shost) { 2766 2654 sas_device_priv_data = sdev->hostdata; ··· 2775 2653 continue; 2776 2654 if (sas_device_priv_data->block) 2777 2655 continue; 2778 - sas_device_priv_data->block = 1; 2779 - scsi_internal_device_block(sdev); 2780 - sdev_printk(KERN_INFO, sdev, 2781 - "device_blocked, handle(0x%04x)\n", handle); 2656 + if (sas_device->pend_sas_rphy_add) 2657 + continue; 2658 + _scsih_internal_device_block(sdev, sas_device_priv_data); 2782 2659 } 2783 2660 } 2784 2661 ··· 2927 2806 "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n", 2928 2807 ioc->name, handle, 2929 2808 (unsigned long long)sas_address)); 2809 + if (sas_device->enclosure_handle != 0) 2810 + dewtprintk(ioc, pr_info(MPT3SAS_FMT 2811 + "setting delete flag:enclosure logical id(0x%016llx)," 2812 + " slot(%d)\n", ioc->name, (unsigned long long) 2813 + sas_device->enclosure_logical_id, 2814 + sas_device->slot)); 2815 + if (sas_device->connector_name) 2816 + dewtprintk(ioc, pr_info(MPT3SAS_FMT 2817 + "setting delete flag: enclosure level(0x%04x)," 2818 + " connector name( %s)\n", ioc->name, 2819 + sas_device->enclosure_level, 2820 + sas_device->connector_name)); 2930 2821 _scsih_ublock_io_device(ioc, sas_address); 2931 2822 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE; 2932 2823 } ··· 3954 3821 "\tsas_address(0x%016llx), phy(%d)\n", 3955 3822 ioc->name, (unsigned long long) 3956 3823 sas_device->sas_address, 
sas_device->phy); 3957 - pr_warn(MPT3SAS_FMT 3958 - "\tenclosure_logical_id(0x%016llx), slot(%d)\n", 3959 - ioc->name, (unsigned long long) 3960 - sas_device->enclosure_logical_id, sas_device->slot); 3824 + if (sas_device->enclosure_handle != 0) 3825 + pr_warn(MPT3SAS_FMT 3826 + "\tenclosure_logical_id(0x%016llx)," 3827 + "slot(%d)\n", ioc->name, 3828 + (unsigned long long) 3829 + sas_device->enclosure_logical_id, 3830 + sas_device->slot); 3831 + if (sas_device->connector_name[0]) 3832 + pr_warn(MPT3SAS_FMT 3833 + "\tenclosure level(0x%04x)," 3834 + " connector name( %s)\n", ioc->name, 3835 + sas_device->enclosure_level, 3836 + sas_device->connector_name); 3961 3837 } 3962 3838 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 3963 3839 } ··· 4141 3999 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 4142 4000 return; 4143 4001 } 4144 - starget_printk(KERN_WARNING, starget, "predicted fault\n"); 4002 + if (sas_device->enclosure_handle != 0) 4003 + starget_printk(KERN_INFO, starget, "predicted fault, " 4004 + "enclosure logical id(0x%016llx), slot(%d)\n", 4005 + (unsigned long long)sas_device->enclosure_logical_id, 4006 + sas_device->slot); 4007 + if (sas_device->connector_name[0] != '\0') 4008 + starget_printk(KERN_WARNING, starget, "predicted fault, " 4009 + "enclosure level(0x%04x), connector name( %s)\n", 4010 + sas_device->enclosure_level, 4011 + sas_device->connector_name); 4145 4012 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 4146 4013 4147 4014 if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) ··· 4270 4119 _scsih_smart_predicted_fault(ioc, 4271 4120 le16_to_cpu(mpi_reply->DevHandle)); 4272 4121 mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq); 4273 - } 4274 4122 4123 + #ifdef CONFIG_SCSI_MPT3SAS_LOGGING 4124 + if (!(ioc->logging_level & MPT_DEBUG_REPLY) && 4125 + ((scmd->sense_buffer[2] == UNIT_ATTENTION) || 4126 + (scmd->sense_buffer[2] == MEDIUM_ERROR) || 4127 + (scmd->sense_buffer[2] == HARDWARE_ERROR))) 4128 + 
_scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid); 4129 + #endif 4130 + } 4275 4131 switch (ioc_status) { 4276 4132 case MPI2_IOCSTATUS_BUSY: 4277 4133 case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES: ··· 4303 4145 scmd->result = DID_SOFT_ERROR << 16; 4304 4146 scmd->device->expecting_cc_ua = 1; 4305 4147 } 4148 + break; 4149 + } else if (log_info == VIRTUAL_IO_FAILED_RETRY) { 4150 + scmd->result = DID_RESET << 16; 4306 4151 break; 4307 4152 } 4308 4153 scmd->result = DID_SOFT_ERROR << 16; ··· 4949 4788 sas_device->handle, handle); 4950 4789 sas_target_priv_data->handle = handle; 4951 4790 sas_device->handle = handle; 4791 + if (sas_device_pg0.Flags & 4792 + MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { 4793 + sas_device->enclosure_level = 4794 + le16_to_cpu(sas_device_pg0.EnclosureLevel); 4795 + memcpy(&sas_device->connector_name[0], 4796 + &sas_device_pg0.ConnectorName[0], 4); 4797 + } else { 4798 + sas_device->enclosure_level = 0; 4799 + sas_device->connector_name[0] = '\0'; 4800 + } 4952 4801 } 4953 4802 4954 4803 /* check if device is present */ ··· 5065 4894 ioc->name, __FILE__, __LINE__, __func__); 5066 4895 sas_device->enclosure_handle = 5067 4896 le16_to_cpu(sas_device_pg0.EnclosureHandle); 5068 - sas_device->slot = 5069 - le16_to_cpu(sas_device_pg0.Slot); 4897 + if (sas_device->enclosure_handle != 0) 4898 + sas_device->slot = 4899 + le16_to_cpu(sas_device_pg0.Slot); 5070 4900 sas_device->device_info = device_info; 5071 4901 sas_device->sas_address = sas_address; 5072 4902 sas_device->phy = sas_device_pg0.PhyNum; 5073 4903 sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) & 5074 4904 MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 
1 : 0; 5075 4905 4906 + if (sas_device_pg0.Flags & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { 4907 + sas_device->enclosure_level = 4908 + le16_to_cpu(sas_device_pg0.EnclosureLevel); 4909 + memcpy(&sas_device->connector_name[0], 4910 + &sas_device_pg0.ConnectorName[0], 4); 4911 + } else { 4912 + sas_device->enclosure_level = 0; 4913 + sas_device->connector_name[0] = '\0'; 4914 + } 5076 4915 /* get enclosure_logical_id */ 5077 4916 if (sas_device->enclosure_handle && !(mpt3sas_config_get_enclosure_pg0( 5078 4917 ioc, &mpi_reply, &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE, ··· 5124 4943 ioc->name, __func__, 5125 4944 sas_device->handle, (unsigned long long) 5126 4945 sas_device->sas_address)); 4946 + if (sas_device->enclosure_handle != 0) 4947 + dewtprintk(ioc, pr_info(MPT3SAS_FMT 4948 + "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n", 4949 + ioc->name, __func__, 4950 + (unsigned long long)sas_device->enclosure_logical_id, 4951 + sas_device->slot)); 4952 + if (sas_device->connector_name[0] != '\0') 4953 + dewtprintk(ioc, pr_info(MPT3SAS_FMT 4954 + "%s: enter: enclosure level(0x%04x), connector name( %s)\n", 4955 + ioc->name, __func__, 4956 + sas_device->enclosure_level, 4957 + sas_device->connector_name)); 5127 4958 5128 4959 if (sas_device->starget && sas_device->starget->hostdata) { 5129 4960 sas_target_priv_data = sas_device->starget->hostdata; ··· 5152 4959 "removing handle(0x%04x), sas_addr(0x%016llx)\n", 5153 4960 ioc->name, sas_device->handle, 5154 4961 (unsigned long long) sas_device->sas_address); 4962 + if (sas_device->enclosure_handle != 0) 4963 + pr_info(MPT3SAS_FMT 4964 + "removing : enclosure logical id(0x%016llx), slot(%d)\n", 4965 + ioc->name, 4966 + (unsigned long long)sas_device->enclosure_logical_id, 4967 + sas_device->slot); 4968 + if (sas_device->connector_name[0] != '\0') 4969 + pr_info(MPT3SAS_FMT 4970 + "removing enclosure level(0x%04x), connector name( %s)\n", 4971 + ioc->name, sas_device->enclosure_level, 4972 + 
sas_device->connector_name); 5155 4973 5156 4974 dewtprintk(ioc, pr_info(MPT3SAS_FMT 5157 4975 "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n", 5158 4976 ioc->name, __func__, 5159 - sas_device->handle, (unsigned long long) 5160 - sas_device->sas_address)); 4977 + sas_device->handle, (unsigned long long) 4978 + sas_device->sas_address)); 4979 + if (sas_device->enclosure_handle != 0) 4980 + dewtprintk(ioc, pr_info(MPT3SAS_FMT 4981 + "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n", 4982 + ioc->name, __func__, 4983 + (unsigned long long)sas_device->enclosure_logical_id, 4984 + sas_device->slot)); 4985 + if (sas_device->connector_name[0] != '\0') 4986 + dewtprintk(ioc, pr_info(MPT3SAS_FMT 4987 + "%s: exit: enclosure level(0x%04x), connector name(%s)\n", 4988 + ioc->name, __func__, sas_device->enclosure_level, 4989 + sas_device->connector_name)); 5161 4990 5162 4991 kfree(sas_device); 5163 4992 } ··· 6572 6357 /** 6573 6358 * _scsih_mark_responding_sas_device - mark a sas_devices as responding 6574 6359 * @ioc: per adapter object 6575 - * @sas_address: sas address 6576 - * @slot: enclosure slot id 6577 - * @handle: device handle 6360 + * @sas_device_pg0: SAS Device page 0 6578 6361 * 6579 6362 * After host reset, find out whether devices are still responding. 6580 6363 * Used in _scsih_remove_unresponsive_sas_devices. ··· 6580 6367 * Return nothing. 
6581 6368 */ 6582 6369 static void 6583 - _scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, 6584 - u16 slot, u16 handle) 6370 + _scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc, 6371 + Mpi2SasDevicePage0_t *sas_device_pg0) 6585 6372 { 6586 6373 struct MPT3SAS_TARGET *sas_target_priv_data = NULL; 6587 6374 struct scsi_target *starget; ··· 6590 6377 6591 6378 spin_lock_irqsave(&ioc->sas_device_lock, flags); 6592 6379 list_for_each_entry(sas_device, &ioc->sas_device_list, list) { 6593 - if (sas_device->sas_address == sas_address && 6594 - sas_device->slot == slot) { 6380 + if ((sas_device->sas_address == sas_device_pg0->SASAddress) && 6381 + (sas_device->slot == sas_device_pg0->Slot)) { 6595 6382 sas_device->responding = 1; 6596 6383 starget = sas_device->starget; 6597 6384 if (starget && starget->hostdata) { ··· 6600 6387 sas_target_priv_data->deleted = 0; 6601 6388 } else 6602 6389 sas_target_priv_data = NULL; 6603 - if (starget) 6390 + if (starget) { 6604 6391 starget_printk(KERN_INFO, starget, 6605 - "handle(0x%04x), sas_addr(0x%016llx), " 6606 - "enclosure logical id(0x%016llx), " 6607 - "slot(%d)\n", handle, 6608 - (unsigned long long)sas_device->sas_address, 6392 + "handle(0x%04x), sas_addr(0x%016llx)\n", 6393 + sas_device_pg0->DevHandle, 6609 6394 (unsigned long long) 6610 - sas_device->enclosure_logical_id, 6611 - sas_device->slot); 6612 - if (sas_device->handle == handle) 6395 + sas_device->sas_address); 6396 + 6397 + if (sas_device->enclosure_handle != 0) 6398 + starget_printk(KERN_INFO, starget, 6399 + "enclosure logical id(0x%016llx)," 6400 + " slot(%d)\n", 6401 + (unsigned long long) 6402 + sas_device->enclosure_logical_id, 6403 + sas_device->slot); 6404 + } 6405 + if (sas_device_pg0->Flags & 6406 + MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { 6407 + sas_device->enclosure_level = 6408 + le16_to_cpu(sas_device_pg0->EnclosureLevel); 6409 + memcpy(&sas_device->connector_name[0], 6410 + 
&sas_device_pg0->ConnectorName[0], 4); 6411 + } else { 6412 + sas_device->enclosure_level = 0; 6413 + sas_device->connector_name[0] = '\0'; 6414 + } 6415 + 6416 + if (sas_device->handle == sas_device_pg0->DevHandle) 6613 6417 goto out; 6614 6418 pr_info("\thandle changed from(0x%04x)!!!\n", 6615 6419 sas_device->handle); 6616 - sas_device->handle = handle; 6420 + sas_device->handle = sas_device_pg0->DevHandle; 6617 6421 if (sas_target_priv_data) 6618 - sas_target_priv_data->handle = handle; 6422 + sas_target_priv_data->handle = 6423 + sas_device_pg0->DevHandle; 6619 6424 goto out; 6620 6425 } 6621 6426 } ··· 6672 6441 MPI2_IOCSTATUS_MASK; 6673 6442 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) 6674 6443 break; 6675 - handle = le16_to_cpu(sas_device_pg0.DevHandle); 6444 + handle = sas_device_pg0.DevHandle = 6445 + le16_to_cpu(sas_device_pg0.DevHandle); 6676 6446 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); 6677 6447 if (!(_scsih_is_end_device(device_info))) 6678 6448 continue; 6679 - _scsih_mark_responding_sas_device(ioc, 6680 - le64_to_cpu(sas_device_pg0.SASAddress), 6681 - le16_to_cpu(sas_device_pg0.Slot), handle); 6449 + sas_device_pg0.SASAddress = 6450 + le64_to_cpu(sas_device_pg0.SASAddress); 6451 + sas_device_pg0.Slot = le16_to_cpu(sas_device_pg0.Slot); 6452 + _scsih_mark_responding_sas_device(ioc, &sas_device_pg0); 6682 6453 } 6683 6454 6684 6455 out: ··· 8087 7854 /* event thread */ 8088 7855 snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name), 8089 7856 "fw_event%d", ioc->id); 8090 - ioc->firmware_event_thread = create_singlethread_workqueue( 8091 - ioc->firmware_event_name); 7857 + ioc->firmware_event_thread = alloc_ordered_workqueue( 7858 + ioc->firmware_event_name, WQ_MEM_RECLAIM); 8092 7859 if (!ioc->firmware_event_thread) { 8093 7860 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", 8094 7861 ioc->name, __FILE__, __LINE__, __func__);
+20 -2
drivers/scsi/mpt3sas/mpt3sas_transport.c
··· 649 649 unsigned long flags; 650 650 struct _sas_node *sas_node; 651 651 struct sas_rphy *rphy; 652 + struct _sas_device *sas_device = NULL; 652 653 int i; 653 654 struct sas_port *port; 654 655 ··· 732 731 mpt3sas_port->remote_identify.device_type); 733 732 734 733 rphy->identify = mpt3sas_port->remote_identify; 734 + 735 + if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) { 736 + sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc, 737 + mpt3sas_port->remote_identify.sas_address); 738 + if (!sas_device) { 739 + dfailprintk(ioc, printk(MPT3SAS_FMT 740 + "failure at %s:%d/%s()!\n", 741 + ioc->name, __FILE__, __LINE__, __func__)); 742 + goto out_fail; 743 + } 744 + sas_device->pend_sas_rphy_add = 1; 745 + } 746 + 735 747 if ((sas_rphy_add(rphy))) { 736 748 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", 737 749 ioc->name, __FILE__, __LINE__, __func__); 738 750 } 751 + 752 + if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) 753 + sas_device->pend_sas_rphy_add = 0; 754 + 739 755 if ((ioc->logging_level & MPT_DEBUG_TRANSPORT)) 740 756 dev_printk(KERN_INFO, &rphy->dev, 741 757 "add: handle(0x%04x), sas_addr(0x%016llx)\n", ··· 1964 1946 } else { 1965 1947 dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio), 1966 1948 blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL); 1967 - if (!dma_addr_out) { 1949 + if (pci_dma_mapping_error(ioc->pdev, dma_addr_out)) { 1968 1950 pr_info(MPT3SAS_FMT "%s(): DMA Addr out = NULL\n", 1969 1951 ioc->name, __func__); 1970 1952 rc = -ENOMEM; ··· 1986 1968 } else { 1987 1969 dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio), 1988 1970 blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL); 1989 - if (!dma_addr_in) { 1971 + if (pci_dma_mapping_error(ioc->pdev, dma_addr_in)) { 1990 1972 pr_info(MPT3SAS_FMT "%s(): DMA Addr in = NULL\n", 1991 1973 ioc->name, __func__); 1992 1974 rc = -ENOMEM;
+1
drivers/scsi/pm8001/pm8001_hwi.c
··· 2642 2642 ts->resp = SAS_TASK_COMPLETE; 2643 2643 ts->stat = SAS_OPEN_REJECT; 2644 2644 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; 2645 + break; 2645 2646 default: 2646 2647 PM8001_IO_DBG(pm8001_ha, 2647 2648 pm8001_printk("Unknown status 0x%x\n", status));
+1
drivers/scsi/pm8001/pm80xx_hwi.c
··· 2337 2337 ts->resp = SAS_TASK_COMPLETE; 2338 2338 ts->stat = SAS_OPEN_REJECT; 2339 2339 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; 2340 + break; 2340 2341 default: 2341 2342 PM8001_IO_DBG(pm8001_ha, 2342 2343 pm8001_printk("Unknown status 0x%x\n", status));
+74 -84
drivers/scsi/scsi_debug.c
··· 25 25 * module options to "modprobe scsi_debug num_tgts=2" [20021221] 26 26 */ 27 27 28 + 29 + #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ 30 + 28 31 #include <linux/module.h> 29 32 30 33 #include <linux/kernel.h> ··· 204 201 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1) 205 202 * or "peripheral device" addressing (value 0) */ 206 203 #define SAM2_LUN_ADDRESS_METHOD 0 207 - #define SAM2_WLUN_REPORT_LUNS 0xc101 208 204 209 205 /* SCSI_DEBUG_CANQUEUE is the maximum number of commands that can be queued 210 206 * (for response) at one time. Can be reduced by max_queue option. Command ··· 700 698 else 701 699 hpnt->max_id = scsi_debug_num_tgts; 702 700 /* scsi_debug_max_luns; */ 703 - hpnt->max_lun = SAM2_WLUN_REPORT_LUNS; 701 + hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1; 704 702 } 705 703 spin_unlock(&sdebug_host_list_lock); 706 704 } ··· 1290 1288 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC); 1291 1289 if (! arr) 1292 1290 return DID_REQUEUE << 16; 1293 - have_wlun = (scp->device->lun == SAM2_WLUN_REPORT_LUNS); 1291 + have_wlun = (scp->device->lun == SCSI_W_LUN_REPORT_LUNS); 1294 1292 if (have_wlun) 1295 1293 pq_pdt = 0x1e; /* present, wlun */ 1296 1294 else if (scsi_debug_no_lun_0 && (0 == devip->lun)) ··· 1429 1427 unsigned char * sbuff; 1430 1428 unsigned char *cmd = scp->cmnd; 1431 1429 unsigned char arr[SCSI_SENSE_BUFFERSIZE]; 1432 - bool dsense, want_dsense; 1430 + bool dsense; 1433 1431 int len = 18; 1434 1432 1435 1433 memset(arr, 0, sizeof(arr)); 1436 1434 dsense = !!(cmd[1] & 1); 1437 - want_dsense = dsense || scsi_debug_dsense; 1438 1435 sbuff = scp->sense_buffer; 1439 1436 if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) { 1440 1437 if (dsense) { ··· 2447 2446 __be16 csum = dif_compute_csum(data, scsi_debug_sector_size); 2448 2447 2449 2448 if (sdt->guard_tag != csum) { 2450 - pr_err("%s: GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n", 2451 - __func__, 2449 + pr_err("GUARD check failed on 
sector %lu rcvd 0x%04x, data 0x%04x\n", 2452 2450 (unsigned long)sector, 2453 2451 be16_to_cpu(sdt->guard_tag), 2454 2452 be16_to_cpu(csum)); ··· 2455 2455 } 2456 2456 if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION && 2457 2457 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) { 2458 - pr_err("%s: REF check failed on sector %lu\n", 2459 - __func__, (unsigned long)sector); 2458 + pr_err("REF check failed on sector %lu\n", 2459 + (unsigned long)sector); 2460 2460 return 0x03; 2461 2461 } 2462 2462 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION && 2463 2463 be32_to_cpu(sdt->ref_tag) != ei_lba) { 2464 - pr_err("%s: REF check failed on sector %lu\n", 2465 - __func__, (unsigned long)sector); 2464 + pr_err("REF check failed on sector %lu\n", 2465 + (unsigned long)sector); 2466 2466 return 0x03; 2467 2467 } 2468 2468 return 0; ··· 2680 2680 return 0; 2681 2681 } 2682 2682 2683 - void dump_sector(unsigned char *buf, int len) 2683 + static void dump_sector(unsigned char *buf, int len) 2684 2684 { 2685 2685 int i, j, n; 2686 2686 ··· 3365 3365 one_lun[i].scsi_lun[1] = lun & 0xff; 3366 3366 } 3367 3367 if (want_wlun) { 3368 - one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff; 3369 - one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff; 3368 + one_lun[i].scsi_lun[0] = (SCSI_W_LUN_REPORT_LUNS >> 8) & 0xff; 3369 + one_lun[i].scsi_lun[1] = SCSI_W_LUN_REPORT_LUNS & 0xff; 3370 3370 i++; 3371 3371 } 3372 3372 alloc_len = (unsigned char *)(one_lun + i) - arr; ··· 3449 3449 atomic_inc(&sdebug_completions); 3450 3450 qa_indx = indx; 3451 3451 if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) { 3452 - pr_err("%s: wild qa_indx=%d\n", __func__, qa_indx); 3452 + pr_err("wild qa_indx=%d\n", qa_indx); 3453 3453 return; 3454 3454 } 3455 3455 spin_lock_irqsave(&queued_arr_lock, iflags); ··· 3457 3457 scp = sqcp->a_cmnd; 3458 3458 if (NULL == scp) { 3459 3459 spin_unlock_irqrestore(&queued_arr_lock, iflags); 3460 - pr_err("%s: scp is NULL\n", __func__); 3460 + pr_err("scp is 
NULL\n"); 3461 3461 return; 3462 3462 } 3463 3463 devip = (struct sdebug_dev_info *)scp->device->hostdata; 3464 3464 if (devip) 3465 3465 atomic_dec(&devip->num_in_q); 3466 3466 else 3467 - pr_err("%s: devip=NULL\n", __func__); 3467 + pr_err("devip=NULL\n"); 3468 3468 if (atomic_read(&retired_max_queue) > 0) 3469 3469 retiring = 1; 3470 3470 3471 3471 sqcp->a_cmnd = NULL; 3472 3472 if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) { 3473 3473 spin_unlock_irqrestore(&queued_arr_lock, iflags); 3474 - pr_err("%s: Unexpected completion\n", __func__); 3474 + pr_err("Unexpected completion\n"); 3475 3475 return; 3476 3476 } 3477 3477 ··· 3481 3481 retval = atomic_read(&retired_max_queue); 3482 3482 if (qa_indx >= retval) { 3483 3483 spin_unlock_irqrestore(&queued_arr_lock, iflags); 3484 - pr_err("%s: index %d too large\n", __func__, retval); 3484 + pr_err("index %d too large\n", retval); 3485 3485 return; 3486 3486 } 3487 3487 k = find_last_bit(queued_in_use_bm, retval); ··· 3509 3509 atomic_inc(&sdebug_completions); 3510 3510 qa_indx = sd_hrtp->qa_indx; 3511 3511 if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) { 3512 - pr_err("%s: wild qa_indx=%d\n", __func__, qa_indx); 3512 + pr_err("wild qa_indx=%d\n", qa_indx); 3513 3513 goto the_end; 3514 3514 } 3515 3515 spin_lock_irqsave(&queued_arr_lock, iflags); ··· 3517 3517 scp = sqcp->a_cmnd; 3518 3518 if (NULL == scp) { 3519 3519 spin_unlock_irqrestore(&queued_arr_lock, iflags); 3520 - pr_err("%s: scp is NULL\n", __func__); 3520 + pr_err("scp is NULL\n"); 3521 3521 goto the_end; 3522 3522 } 3523 3523 devip = (struct sdebug_dev_info *)scp->device->hostdata; 3524 3524 if (devip) 3525 3525 atomic_dec(&devip->num_in_q); 3526 3526 else 3527 - pr_err("%s: devip=NULL\n", __func__); 3527 + pr_err("devip=NULL\n"); 3528 3528 if (atomic_read(&retired_max_queue) > 0) 3529 3529 retiring = 1; 3530 3530 3531 3531 sqcp->a_cmnd = NULL; 3532 3532 if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) { 3533 3533 
spin_unlock_irqrestore(&queued_arr_lock, iflags); 3534 - pr_err("%s: Unexpected completion\n", __func__); 3534 + pr_err("Unexpected completion\n"); 3535 3535 goto the_end; 3536 3536 } 3537 3537 ··· 3541 3541 retval = atomic_read(&retired_max_queue); 3542 3542 if (qa_indx >= retval) { 3543 3543 spin_unlock_irqrestore(&queued_arr_lock, iflags); 3544 - pr_err("%s: index %d too large\n", __func__, retval); 3544 + pr_err("index %d too large\n", retval); 3545 3545 goto the_end; 3546 3546 } 3547 3547 k = find_last_bit(queued_in_use_bm, retval); ··· 3580 3580 return devip; 3581 3581 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host); 3582 3582 if (!sdbg_host) { 3583 - pr_err("%s: Host info NULL\n", __func__); 3583 + pr_err("Host info NULL\n"); 3584 3584 return NULL; 3585 3585 } 3586 3586 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) { ··· 3596 3596 if (!open_devip) { /* try and make a new one */ 3597 3597 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC); 3598 3598 if (!open_devip) { 3599 - printk(KERN_ERR "%s: out of memory at line %d\n", 3600 - __func__, __LINE__); 3599 + pr_err("out of memory at line %d\n", __LINE__); 3601 3600 return NULL; 3602 3601 } 3603 3602 } ··· 3614 3615 static int scsi_debug_slave_alloc(struct scsi_device *sdp) 3615 3616 { 3616 3617 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) 3617 - printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %llu>\n", 3618 + pr_info("slave_alloc <%u %u %u %llu>\n", 3618 3619 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); 3619 3620 queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue); 3620 3621 return 0; ··· 3625 3626 struct sdebug_dev_info *devip; 3626 3627 3627 3628 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) 3628 - printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %llu>\n", 3629 + pr_info("slave_configure <%u %u %u %llu>\n", 3629 3630 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); 3630 3631 if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN) 3631 3632 
sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN; ··· 3645 3646 (struct sdebug_dev_info *)sdp->hostdata; 3646 3647 3647 3648 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) 3648 - printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %llu>\n", 3649 + pr_info("slave_destroy <%u %u %u %llu>\n", 3649 3650 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); 3650 3651 if (devip) { 3651 3652 /* make this slot available for re-use */ ··· 3896 3897 return; 3897 3898 if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) { 3898 3899 scsi_debug_num_parts = SDEBUG_MAX_PARTS; 3899 - pr_warn("%s: reducing partitions to %d\n", __func__, 3900 - SDEBUG_MAX_PARTS); 3900 + pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS); 3901 3901 } 3902 3902 num_sectors = (int)sdebug_store_sectors; 3903 3903 sectors_per_part = (num_sectors - sdebug_sectors_per) ··· 3940 3942 unsigned long iflags; 3941 3943 int k, num_in_q, qdepth, inject; 3942 3944 struct sdebug_queued_cmd *sqcp = NULL; 3943 - struct scsi_device *sdp = cmnd->device; 3945 + struct scsi_device *sdp; 3944 3946 3945 - if (NULL == cmnd || NULL == devip) { 3946 - pr_warn("%s: called with NULL cmnd or devip pointer\n", 3947 - __func__); 3947 + /* this should never happen */ 3948 + if (WARN_ON(!cmnd)) 3949 + return SCSI_MLQUEUE_HOST_BUSY; 3950 + 3951 + if (NULL == devip) { 3952 + pr_warn("called devip == NULL\n"); 3948 3953 /* no particularly good error to report back */ 3949 3954 return SCSI_MLQUEUE_HOST_BUSY; 3950 3955 } 3956 + 3957 + sdp = cmnd->device; 3958 + 3951 3959 if ((scsi_result) && (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) 3952 3960 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n", 3953 3961 __func__, scsi_result); ··· 4387 4383 4388 4384 fake_storep = vmalloc(sz); 4389 4385 if (NULL == fake_storep) { 4390 - pr_err("%s: out of memory, 9\n", 4391 - __func__); 4386 + pr_err("out of memory, 9\n"); 4392 4387 return -ENOMEM; 4393 4388 } 4394 4389 memset(fake_storep, 0, sz); ··· 4787 4784 atomic_set(&retired_max_queue, 0); 4788 
4785 4789 4786 if (scsi_debug_ndelay >= 1000000000) { 4790 - pr_warn("%s: ndelay must be less than 1 second, ignored\n", 4791 - __func__); 4787 + pr_warn("ndelay must be less than 1 second, ignored\n"); 4792 4788 scsi_debug_ndelay = 0; 4793 4789 } else if (scsi_debug_ndelay > 0) 4794 4790 scsi_debug_delay = DELAY_OVERRIDDEN; ··· 4799 4797 case 4096: 4800 4798 break; 4801 4799 default: 4802 - pr_err("%s: invalid sector_size %d\n", __func__, 4803 - scsi_debug_sector_size); 4800 + pr_err("invalid sector_size %d\n", scsi_debug_sector_size); 4804 4801 return -EINVAL; 4805 4802 } 4806 4803 ··· 4812 4811 break; 4813 4812 4814 4813 default: 4815 - pr_err("%s: dif must be 0, 1, 2 or 3\n", __func__); 4814 + pr_err("dif must be 0, 1, 2 or 3\n"); 4816 4815 return -EINVAL; 4817 4816 } 4818 4817 4819 4818 if (scsi_debug_guard > 1) { 4820 - pr_err("%s: guard must be 0 or 1\n", __func__); 4819 + pr_err("guard must be 0 or 1\n"); 4821 4820 return -EINVAL; 4822 4821 } 4823 4822 4824 4823 if (scsi_debug_ato > 1) { 4825 - pr_err("%s: ato must be 0 or 1\n", __func__); 4824 + pr_err("ato must be 0 or 1\n"); 4826 4825 return -EINVAL; 4827 4826 } 4828 4827 4829 4828 if (scsi_debug_physblk_exp > 15) { 4830 - pr_err("%s: invalid physblk_exp %u\n", __func__, 4831 - scsi_debug_physblk_exp); 4829 + pr_err("invalid physblk_exp %u\n", scsi_debug_physblk_exp); 4832 4830 return -EINVAL; 4833 4831 } 4834 4832 4835 4833 if (scsi_debug_lowest_aligned > 0x3fff) { 4836 - pr_err("%s: lowest_aligned too big: %u\n", __func__, 4837 - scsi_debug_lowest_aligned); 4834 + pr_err("lowest_aligned too big: %u\n", 4835 + scsi_debug_lowest_aligned); 4838 4836 return -EINVAL; 4839 4837 } 4840 4838 ··· 4863 4863 if (0 == scsi_debug_fake_rw) { 4864 4864 fake_storep = vmalloc(sz); 4865 4865 if (NULL == fake_storep) { 4866 - pr_err("%s: out of memory, 1\n", __func__); 4866 + pr_err("out of memory, 1\n"); 4867 4867 return -ENOMEM; 4868 4868 } 4869 4869 memset(fake_storep, 0, sz); ··· 4877 4877 dif_size = 
sdebug_store_sectors * sizeof(struct sd_dif_tuple); 4878 4878 dif_storep = vmalloc(dif_size); 4879 4879 4880 - pr_err("%s: dif_storep %u bytes @ %p\n", __func__, dif_size, 4881 - dif_storep); 4880 + pr_err("dif_storep %u bytes @ %p\n", dif_size, dif_storep); 4882 4881 4883 4882 if (dif_storep == NULL) { 4884 - pr_err("%s: out of mem. (DIX)\n", __func__); 4883 + pr_err("out of mem. (DIX)\n"); 4885 4884 ret = -ENOMEM; 4886 4885 goto free_vm; 4887 4886 } ··· 4902 4903 if (scsi_debug_unmap_alignment && 4903 4904 scsi_debug_unmap_granularity <= 4904 4905 scsi_debug_unmap_alignment) { 4905 - pr_err("%s: ERR: unmap_granularity <= unmap_alignment\n", 4906 - __func__); 4906 + pr_err("ERR: unmap_granularity <= unmap_alignment\n"); 4907 4907 return -EINVAL; 4908 4908 } 4909 4909 4910 4910 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1; 4911 4911 map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long)); 4912 4912 4913 - pr_info("%s: %lu provisioning blocks\n", __func__, map_size); 4913 + pr_info("%lu provisioning blocks\n", map_size); 4914 4914 4915 4915 if (map_storep == NULL) { 4916 - pr_err("%s: out of mem. (MAP)\n", __func__); 4916 + pr_err("out of mem. 
(MAP)\n"); 4917 4917 ret = -ENOMEM; 4918 4918 goto free_vm; 4919 4919 } ··· 4926 4928 4927 4929 pseudo_primary = root_device_register("pseudo_0"); 4928 4930 if (IS_ERR(pseudo_primary)) { 4929 - pr_warn("%s: root_device_register() error\n", __func__); 4931 + pr_warn("root_device_register() error\n"); 4930 4932 ret = PTR_ERR(pseudo_primary); 4931 4933 goto free_vm; 4932 4934 } 4933 4935 ret = bus_register(&pseudo_lld_bus); 4934 4936 if (ret < 0) { 4935 - pr_warn("%s: bus_register error: %d\n", __func__, ret); 4937 + pr_warn("bus_register error: %d\n", ret); 4936 4938 goto dev_unreg; 4937 4939 } 4938 4940 ret = driver_register(&sdebug_driverfs_driver); 4939 4941 if (ret < 0) { 4940 - pr_warn("%s: driver_register error: %d\n", __func__, ret); 4942 + pr_warn("driver_register error: %d\n", ret); 4941 4943 goto bus_unreg; 4942 4944 } 4943 4945 ··· 4946 4948 4947 4949 for (k = 0; k < host_to_add; k++) { 4948 4950 if (sdebug_add_adapter()) { 4949 - pr_err("%s: sdebug_add_adapter failed k=%d\n", 4950 - __func__, k); 4951 + pr_err("sdebug_add_adapter failed k=%d\n", k); 4951 4952 break; 4952 4953 } 4953 4954 } 4954 4955 4955 - if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) { 4956 - pr_info("%s: built %d host(s)\n", __func__, 4957 - scsi_debug_add_host); 4958 - } 4956 + if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) 4957 + pr_info("built %d host(s)\n", scsi_debug_add_host); 4958 + 4959 4959 return 0; 4960 4960 4961 4961 bus_unreg: ··· 4961 4965 dev_unreg: 4962 4966 root_device_unregister(pseudo_primary); 4963 4967 free_vm: 4964 - if (map_storep) 4965 - vfree(map_storep); 4966 - if (dif_storep) 4967 - vfree(dif_storep); 4968 + vfree(map_storep); 4969 + vfree(dif_storep); 4968 4970 vfree(fake_storep); 4969 4971 4970 4972 return ret; ··· 4980 4986 bus_unregister(&pseudo_lld_bus); 4981 4987 root_device_unregister(pseudo_primary); 4982 4988 4983 - if (dif_storep) 4984 - vfree(dif_storep); 4985 - 4989 + vfree(dif_storep); 4986 4990 vfree(fake_storep); 4987 4991 } 4988 4992 ··· 5004 5012 
5005 5013 sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL); 5006 5014 if (NULL == sdbg_host) { 5007 - printk(KERN_ERR "%s: out of memory at line %d\n", 5008 - __func__, __LINE__); 5015 + pr_err("out of memory at line %d\n", __LINE__); 5009 5016 return -ENOMEM; 5010 5017 } 5011 5018 ··· 5014 5023 for (k = 0; k < devs_per_host; k++) { 5015 5024 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL); 5016 5025 if (!sdbg_devinfo) { 5017 - printk(KERN_ERR "%s: out of memory at line %d\n", 5018 - __func__, __LINE__); 5026 + pr_err("out of memory at line %d\n", __LINE__); 5019 5027 error = -ENOMEM; 5020 5028 goto clean; 5021 5029 } ··· 5168 5178 } 5169 5179 sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name, b); 5170 5180 } 5171 - has_wlun_rl = (sdp->lun == SAM2_WLUN_REPORT_LUNS); 5181 + has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS); 5172 5182 if ((sdp->lun >= scsi_debug_max_luns) && !has_wlun_rl) 5173 5183 return schedule_resp(scp, NULL, errsts_no_connect, 0); 5174 5184 ··· 5328 5338 sdebug_driver_template.use_clustering = ENABLE_CLUSTERING; 5329 5339 hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host)); 5330 5340 if (NULL == hpnt) { 5331 - pr_err("%s: scsi_host_alloc failed\n", __func__); 5341 + pr_err("scsi_host_alloc failed\n"); 5332 5342 error = -ENODEV; 5333 5343 return error; 5334 5344 } ··· 5339 5349 hpnt->max_id = scsi_debug_num_tgts + 1; 5340 5350 else 5341 5351 hpnt->max_id = scsi_debug_num_tgts; 5342 - hpnt->max_lun = SAM2_WLUN_REPORT_LUNS; /* = scsi_debug_max_luns; */ 5352 + /* = scsi_debug_max_luns; */ 5353 + hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1; 5343 5354 5344 5355 host_prot = 0; 5345 5356 ··· 5372 5381 5373 5382 scsi_host_set_prot(hpnt, host_prot); 5374 5383 5375 - printk(KERN_INFO "scsi_debug: host protection%s%s%s%s%s%s%s\n", 5384 + pr_info("host protection%s%s%s%s%s%s%s\n", 5376 5385 (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "", 5377 5386 (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? 
" DIF2" : "", 5378 5387 (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "", ··· 5400 5409 5401 5410 error = scsi_add_host(hpnt, &sdbg_host->dev); 5402 5411 if (error) { 5403 - printk(KERN_ERR "%s: scsi_add_host failed\n", __func__); 5412 + pr_err("scsi_add_host failed\n"); 5404 5413 error = -ENODEV; 5405 5414 scsi_host_put(hpnt); 5406 5415 } else ··· 5417 5426 sdbg_host = to_sdebug_host(dev); 5418 5427 5419 5428 if (!sdbg_host) { 5420 - printk(KERN_ERR "%s: Unable to locate host info\n", 5421 - __func__); 5429 + pr_err("Unable to locate host info\n"); 5422 5430 return -ENODEV; 5423 5431 } 5424 5432
+437
drivers/scsi/scsi_dh.c
··· 1 + /* 2 + * SCSI device handler infrastruture. 3 + * 4 + * This program is free software; you can redistribute it and/or modify it 5 + * under the terms of the GNU General Public License as published by the 6 + * Free Software Foundation; either version 2 of the License, or (at your 7 + * option) any later version. 8 + * 9 + * This program is distributed in the hope that it will be useful, but 10 + * WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 12 + * General Public License for more details. 13 + * 14 + * You should have received a copy of the GNU General Public License along 15 + * with this program; if not, write to the Free Software Foundation, Inc., 16 + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 17 + * 18 + * Copyright IBM Corporation, 2007 19 + * Authors: 20 + * Chandra Seetharaman <sekharan@us.ibm.com> 21 + * Mike Anderson <andmike@linux.vnet.ibm.com> 22 + */ 23 + 24 + #include <linux/slab.h> 25 + #include <linux/module.h> 26 + #include <scsi/scsi_dh.h> 27 + #include "scsi_priv.h" 28 + 29 + static DEFINE_SPINLOCK(list_lock); 30 + static LIST_HEAD(scsi_dh_list); 31 + 32 + struct scsi_dh_blist { 33 + const char *vendor; 34 + const char *model; 35 + const char *driver; 36 + }; 37 + 38 + static const struct scsi_dh_blist scsi_dh_blist[] = { 39 + {"DGC", "RAID", "clariion" }, 40 + {"DGC", "DISK", "clariion" }, 41 + {"DGC", "VRAID", "clariion" }, 42 + 43 + {"COMPAQ", "MSA1000 VOLUME", "hp_sw" }, 44 + {"COMPAQ", "HSV110", "hp_sw" }, 45 + {"HP", "HSV100", "hp_sw"}, 46 + {"DEC", "HSG80", "hp_sw"}, 47 + 48 + {"IBM", "1722", "rdac", }, 49 + {"IBM", "1724", "rdac", }, 50 + {"IBM", "1726", "rdac", }, 51 + {"IBM", "1742", "rdac", }, 52 + {"IBM", "1745", "rdac", }, 53 + {"IBM", "1746", "rdac", }, 54 + {"IBM", "1813", "rdac", }, 55 + {"IBM", "1814", "rdac", }, 56 + {"IBM", "1815", "rdac", }, 57 + {"IBM", "1818", "rdac", }, 58 + {"IBM", "3526", "rdac", }, 59 + {"SGI", 
"TP9", "rdac", }, 60 + {"SGI", "IS", "rdac", }, 61 + {"STK", "OPENstorage D280", "rdac", }, 62 + {"STK", "FLEXLINE 380", "rdac", }, 63 + {"SUN", "CSM", "rdac", }, 64 + {"SUN", "LCSM100", "rdac", }, 65 + {"SUN", "STK6580_6780", "rdac", }, 66 + {"SUN", "SUN_6180", "rdac", }, 67 + {"SUN", "ArrayStorage", "rdac", }, 68 + {"DELL", "MD3", "rdac", }, 69 + {"NETAPP", "INF-01-00", "rdac", }, 70 + {"LSI", "INF-01-00", "rdac", }, 71 + {"ENGENIO", "INF-01-00", "rdac", }, 72 + {NULL, NULL, NULL }, 73 + }; 74 + 75 + static const char * 76 + scsi_dh_find_driver(struct scsi_device *sdev) 77 + { 78 + const struct scsi_dh_blist *b; 79 + 80 + if (scsi_device_tpgs(sdev)) 81 + return "alua"; 82 + 83 + for (b = scsi_dh_blist; b->vendor; b++) { 84 + if (!strncmp(sdev->vendor, b->vendor, strlen(b->vendor)) && 85 + !strncmp(sdev->model, b->model, strlen(b->model))) { 86 + return b->driver; 87 + } 88 + } 89 + return NULL; 90 + } 91 + 92 + 93 + static struct scsi_device_handler *__scsi_dh_lookup(const char *name) 94 + { 95 + struct scsi_device_handler *tmp, *found = NULL; 96 + 97 + spin_lock(&list_lock); 98 + list_for_each_entry(tmp, &scsi_dh_list, list) { 99 + if (!strncmp(tmp->name, name, strlen(tmp->name))) { 100 + found = tmp; 101 + break; 102 + } 103 + } 104 + spin_unlock(&list_lock); 105 + return found; 106 + } 107 + 108 + static struct scsi_device_handler *scsi_dh_lookup(const char *name) 109 + { 110 + struct scsi_device_handler *dh; 111 + 112 + dh = __scsi_dh_lookup(name); 113 + if (!dh) { 114 + request_module(name); 115 + dh = __scsi_dh_lookup(name); 116 + } 117 + 118 + return dh; 119 + } 120 + 121 + /* 122 + * scsi_dh_handler_attach - Attach a device handler to a device 123 + * @sdev - SCSI device the device handler should attach to 124 + * @scsi_dh - The device handler to attach 125 + */ 126 + static int scsi_dh_handler_attach(struct scsi_device *sdev, 127 + struct scsi_device_handler *scsi_dh) 128 + { 129 + int error; 130 + 131 + if (!try_module_get(scsi_dh->module)) 132 + return 
-EINVAL; 133 + 134 + error = scsi_dh->attach(sdev); 135 + if (error) { 136 + sdev_printk(KERN_ERR, sdev, "%s: Attach failed (%d)\n", 137 + scsi_dh->name, error); 138 + module_put(scsi_dh->module); 139 + } else 140 + sdev->handler = scsi_dh; 141 + 142 + return error; 143 + } 144 + 145 + /* 146 + * scsi_dh_handler_detach - Detach a device handler from a device 147 + * @sdev - SCSI device the device handler should be detached from 148 + */ 149 + static void scsi_dh_handler_detach(struct scsi_device *sdev) 150 + { 151 + sdev->handler->detach(sdev); 152 + sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", sdev->handler->name); 153 + module_put(sdev->handler->module); 154 + } 155 + 156 + /* 157 + * Functions for sysfs attribute 'dh_state' 158 + */ 159 + static ssize_t 160 + store_dh_state(struct device *dev, struct device_attribute *attr, 161 + const char *buf, size_t count) 162 + { 163 + struct scsi_device *sdev = to_scsi_device(dev); 164 + struct scsi_device_handler *scsi_dh; 165 + int err = -EINVAL; 166 + 167 + if (sdev->sdev_state == SDEV_CANCEL || 168 + sdev->sdev_state == SDEV_DEL) 169 + return -ENODEV; 170 + 171 + if (!sdev->handler) { 172 + /* 173 + * Attach to a device handler 174 + */ 175 + scsi_dh = scsi_dh_lookup(buf); 176 + if (!scsi_dh) 177 + return err; 178 + err = scsi_dh_handler_attach(sdev, scsi_dh); 179 + } else { 180 + if (!strncmp(buf, "detach", 6)) { 181 + /* 182 + * Detach from a device handler 183 + */ 184 + sdev_printk(KERN_WARNING, sdev, 185 + "can't detach handler %s.\n", 186 + sdev->handler->name); 187 + err = -EINVAL; 188 + } else if (!strncmp(buf, "activate", 8)) { 189 + /* 190 + * Activate a device handler 191 + */ 192 + if (sdev->handler->activate) 193 + err = sdev->handler->activate(sdev, NULL, NULL); 194 + else 195 + err = 0; 196 + } 197 + } 198 + 199 + return err<0?err:count; 200 + } 201 + 202 + static ssize_t 203 + show_dh_state(struct device *dev, struct device_attribute *attr, char *buf) 204 + { 205 + struct scsi_device *sdev = 
to_scsi_device(dev); 206 + 207 + if (!sdev->handler) 208 + return snprintf(buf, 20, "detached\n"); 209 + 210 + return snprintf(buf, 20, "%s\n", sdev->handler->name); 211 + } 212 + 213 + static struct device_attribute scsi_dh_state_attr = 214 + __ATTR(dh_state, S_IRUGO | S_IWUSR, show_dh_state, 215 + store_dh_state); 216 + 217 + int scsi_dh_add_device(struct scsi_device *sdev) 218 + { 219 + struct scsi_device_handler *devinfo = NULL; 220 + const char *drv; 221 + int err; 222 + 223 + err = device_create_file(&sdev->sdev_gendev, &scsi_dh_state_attr); 224 + if (err) 225 + return err; 226 + 227 + drv = scsi_dh_find_driver(sdev); 228 + if (drv) 229 + devinfo = scsi_dh_lookup(drv); 230 + if (devinfo) 231 + err = scsi_dh_handler_attach(sdev, devinfo); 232 + return err; 233 + } 234 + 235 + void scsi_dh_remove_device(struct scsi_device *sdev) 236 + { 237 + if (sdev->handler) 238 + scsi_dh_handler_detach(sdev); 239 + device_remove_file(&sdev->sdev_gendev, &scsi_dh_state_attr); 240 + } 241 + 242 + /* 243 + * scsi_register_device_handler - register a device handler personality 244 + * module. 245 + * @scsi_dh - device handler to be registered. 246 + * 247 + * Returns 0 on success, -EBUSY if handler already registered. 248 + */ 249 + int scsi_register_device_handler(struct scsi_device_handler *scsi_dh) 250 + { 251 + if (__scsi_dh_lookup(scsi_dh->name)) 252 + return -EBUSY; 253 + 254 + if (!scsi_dh->attach || !scsi_dh->detach) 255 + return -EINVAL; 256 + 257 + spin_lock(&list_lock); 258 + list_add(&scsi_dh->list, &scsi_dh_list); 259 + spin_unlock(&list_lock); 260 + 261 + printk(KERN_INFO "%s: device handler registered\n", scsi_dh->name); 262 + 263 + return SCSI_DH_OK; 264 + } 265 + EXPORT_SYMBOL_GPL(scsi_register_device_handler); 266 + 267 + /* 268 + * scsi_unregister_device_handler - register a device handler personality 269 + * module. 270 + * @scsi_dh - device handler to be unregistered. 271 + * 272 + * Returns 0 on success, -ENODEV if handler not registered. 
273 + */ 274 + int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh) 275 + { 276 + if (!__scsi_dh_lookup(scsi_dh->name)) 277 + return -ENODEV; 278 + 279 + spin_lock(&list_lock); 280 + list_del(&scsi_dh->list); 281 + spin_unlock(&list_lock); 282 + printk(KERN_INFO "%s: device handler unregistered\n", scsi_dh->name); 283 + 284 + return SCSI_DH_OK; 285 + } 286 + EXPORT_SYMBOL_GPL(scsi_unregister_device_handler); 287 + 288 + static struct scsi_device *get_sdev_from_queue(struct request_queue *q) 289 + { 290 + struct scsi_device *sdev; 291 + unsigned long flags; 292 + 293 + spin_lock_irqsave(q->queue_lock, flags); 294 + sdev = q->queuedata; 295 + if (!sdev || !get_device(&sdev->sdev_gendev)) 296 + sdev = NULL; 297 + spin_unlock_irqrestore(q->queue_lock, flags); 298 + 299 + return sdev; 300 + } 301 + 302 + /* 303 + * scsi_dh_activate - activate the path associated with the scsi_device 304 + * corresponding to the given request queue. 305 + * Returns immediately without waiting for activation to be completed. 306 + * @q - Request queue that is associated with the scsi_device to be 307 + * activated. 308 + * @fn - Function to be called upon completion of the activation. 309 + * Function fn is called with data (below) and the error code. 310 + * Function fn may be called from the same calling context. So, 311 + * do not hold the lock in the caller which may be needed in fn. 312 + * @data - data passed to the function fn upon completion. 
313 + * 314 + */ 315 + int scsi_dh_activate(struct request_queue *q, activate_complete fn, void *data) 316 + { 317 + struct scsi_device *sdev; 318 + int err = SCSI_DH_NOSYS; 319 + 320 + sdev = get_sdev_from_queue(q); 321 + if (!sdev) { 322 + if (fn) 323 + fn(data, err); 324 + return err; 325 + } 326 + 327 + if (!sdev->handler) 328 + goto out_fn; 329 + err = SCSI_DH_NOTCONN; 330 + if (sdev->sdev_state == SDEV_CANCEL || 331 + sdev->sdev_state == SDEV_DEL) 332 + goto out_fn; 333 + 334 + err = SCSI_DH_DEV_OFFLINED; 335 + if (sdev->sdev_state == SDEV_OFFLINE) 336 + goto out_fn; 337 + 338 + if (sdev->handler->activate) 339 + err = sdev->handler->activate(sdev, fn, data); 340 + 341 + out_put_device: 342 + put_device(&sdev->sdev_gendev); 343 + return err; 344 + 345 + out_fn: 346 + if (fn) 347 + fn(data, err); 348 + goto out_put_device; 349 + } 350 + EXPORT_SYMBOL_GPL(scsi_dh_activate); 351 + 352 + /* 353 + * scsi_dh_set_params - set the parameters for the device as per the 354 + * string specified in params. 355 + * @q - Request queue that is associated with the scsi_device for 356 + * which the parameters to be set. 357 + * @params - parameters in the following format 358 + * "no_of_params\0param1\0param2\0param3\0...\0" 359 + * for example, string for 2 parameters with value 10 and 21 360 + * is specified as "2\010\021\0". 
361 + */ 362 + int scsi_dh_set_params(struct request_queue *q, const char *params) 363 + { 364 + struct scsi_device *sdev; 365 + int err = -SCSI_DH_NOSYS; 366 + 367 + sdev = get_sdev_from_queue(q); 368 + if (!sdev) 369 + return err; 370 + 371 + if (sdev->handler && sdev->handler->set_params) 372 + err = sdev->handler->set_params(sdev, params); 373 + put_device(&sdev->sdev_gendev); 374 + return err; 375 + } 376 + EXPORT_SYMBOL_GPL(scsi_dh_set_params); 377 + 378 + /* 379 + * scsi_dh_attach - Attach device handler 380 + * @q - Request queue that is associated with the scsi_device 381 + * the handler should be attached to 382 + * @name - name of the handler to attach 383 + */ 384 + int scsi_dh_attach(struct request_queue *q, const char *name) 385 + { 386 + struct scsi_device *sdev; 387 + struct scsi_device_handler *scsi_dh; 388 + int err = 0; 389 + 390 + sdev = get_sdev_from_queue(q); 391 + if (!sdev) 392 + return -ENODEV; 393 + 394 + scsi_dh = scsi_dh_lookup(name); 395 + if (!scsi_dh) { 396 + err = -EINVAL; 397 + goto out_put_device; 398 + } 399 + 400 + if (sdev->handler) { 401 + if (sdev->handler != scsi_dh) 402 + err = -EBUSY; 403 + goto out_put_device; 404 + } 405 + 406 + err = scsi_dh_handler_attach(sdev, scsi_dh); 407 + 408 + out_put_device: 409 + put_device(&sdev->sdev_gendev); 410 + return err; 411 + } 412 + EXPORT_SYMBOL_GPL(scsi_dh_attach); 413 + 414 + /* 415 + * scsi_dh_attached_handler_name - Get attached device handler's name 416 + * @q - Request queue that is associated with the scsi_device 417 + * that may have a device handler attached 418 + * @gfp - the GFP mask used in the kmalloc() call when allocating memory 419 + * 420 + * Returns name of attached handler, NULL if no handler is attached. 421 + * Caller must take care to free the returned string. 
422 + */ 423 + const char *scsi_dh_attached_handler_name(struct request_queue *q, gfp_t gfp) 424 + { 425 + struct scsi_device *sdev; 426 + const char *handler_name = NULL; 427 + 428 + sdev = get_sdev_from_queue(q); 429 + if (!sdev) 430 + return NULL; 431 + 432 + if (sdev->handler) 433 + handler_name = kstrdup(sdev->handler->name, gfp); 434 + put_device(&sdev->sdev_gendev); 435 + return handler_name; 436 + } 437 + EXPORT_SYMBOL_GPL(scsi_dh_attached_handler_name);
+13 -4
drivers/scsi/scsi_error.c
··· 36 36 #include <scsi/scsi_transport.h> 37 37 #include <scsi/scsi_host.h> 38 38 #include <scsi/scsi_ioctl.h> 39 + #include <scsi/scsi_dh.h> 39 40 #include <scsi/sg.h> 40 41 41 42 #include "scsi_priv.h" ··· 464 463 if (scsi_sense_is_deferred(&sshdr)) 465 464 return NEEDS_RETRY; 466 465 467 - if (sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh && 468 - sdev->scsi_dh_data->scsi_dh->check_sense) { 466 + if (sdev->handler && sdev->handler->check_sense) { 469 467 int rc; 470 468 471 - rc = sdev->scsi_dh_data->scsi_dh->check_sense(sdev, &sshdr); 469 + rc = sdev->handler->check_sense(sdev, &sshdr); 472 470 if (rc != SCSI_RETURN_NOT_HANDLED) 473 471 return rc; 474 472 /* handler does not care. Drop down to default handling */ ··· 2178 2178 * We never actually get interrupted because kthread_run 2179 2179 * disables signal delivery for the created thread. 2180 2180 */ 2181 - while (!kthread_should_stop()) { 2181 + while (true) { 2182 + /* 2183 + * The sequence in kthread_stop() sets the stop flag first 2184 + * then wakes the process. To avoid missed wakeups, the task 2185 + * should always be in a non running state before the stop 2186 + * flag is checked 2187 + */ 2182 2188 set_current_state(TASK_INTERRUPTIBLE); 2189 + if (kthread_should_stop()) 2190 + break; 2191 + 2183 2192 if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) || 2184 2193 shost->host_failed != atomic_read(&shost->host_busy)) { 2185 2194 SCSI_LOG_ERROR_RECOVERY(1,
+3 -3
drivers/scsi/scsi_lib.c
··· 31 31 #include <scsi/scsi_driver.h> 32 32 #include <scsi/scsi_eh.h> 33 33 #include <scsi/scsi_host.h> 34 + #include <scsi/scsi_dh.h> 34 35 35 36 #include <trace/events/scsi.h> 36 37 ··· 1249 1248 { 1250 1249 struct scsi_cmnd *cmd = req->special; 1251 1250 1252 - if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh 1253 - && sdev->scsi_dh_data->scsi_dh->prep_fn)) { 1254 - int ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req); 1251 + if (unlikely(sdev->handler && sdev->handler->prep_fn)) { 1252 + int ret = sdev->handler->prep_fn(sdev, req); 1255 1253 if (ret != BLKPREP_OK) 1256 1254 return ret; 1257 1255 }
+9
drivers/scsi/scsi_priv.h
··· 170 170 extern struct async_domain scsi_sd_pm_domain; 171 171 extern struct async_domain scsi_sd_probe_domain; 172 172 173 + /* scsi_dh.c */ 174 + #ifdef CONFIG_SCSI_DH 175 + int scsi_dh_add_device(struct scsi_device *sdev); 176 + void scsi_dh_remove_device(struct scsi_device *sdev); 177 + #else 178 + static inline int scsi_dh_add_device(struct scsi_device *sdev) { return 0; } 179 + static inline void scsi_dh_remove_device(struct scsi_device *sdev) { } 180 + #endif 181 + 173 182 /* 174 183 * internal scsi timeout functions: for use by mid-layer and transport 175 184 * classes.
+10
drivers/scsi/scsi_sysfs.c
··· 1030 1030 "failed to add device: %d\n", error); 1031 1031 return error; 1032 1032 } 1033 + 1034 + error = scsi_dh_add_device(sdev); 1035 + if (error) { 1036 + sdev_printk(KERN_INFO, sdev, 1037 + "failed to add device handler: %d\n", error); 1038 + return error; 1039 + } 1040 + 1033 1041 device_enable_async_suspend(&sdev->sdev_dev); 1034 1042 error = device_add(&sdev->sdev_dev); 1035 1043 if (error) { 1036 1044 sdev_printk(KERN_INFO, sdev, 1037 1045 "failed to add class device: %d\n", error); 1046 + scsi_dh_remove_device(sdev); 1038 1047 device_del(&sdev->sdev_gendev); 1039 1048 return error; 1040 1049 } ··· 1083 1074 bsg_unregister_queue(sdev->request_queue); 1084 1075 device_unregister(&sdev->sdev_dev); 1085 1076 transport_remove_device(dev); 1077 + scsi_dh_remove_device(sdev); 1086 1078 device_del(dev); 1087 1079 } else 1088 1080 put_device(&sdev->sdev_dev);
-10
drivers/scsi/scsi_transport_sas.c
··· 1222 1222 u64 identifier; 1223 1223 int error; 1224 1224 1225 - /* 1226 - * Only devices behind an expander are supported, because the 1227 - * enclosure identifier is a SMP feature. 1228 - */ 1229 - if (scsi_is_sas_phy_local(phy)) 1230 - return -EINVAL; 1231 - 1232 1225 error = i->f->get_enclosure_identifier(rphy, &identifier); 1233 1226 if (error) 1234 1227 return error; ··· 1240 1247 struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); 1241 1248 struct sas_internal *i = to_sas_internal(shost->transportt); 1242 1249 int val; 1243 - 1244 - if (scsi_is_sas_phy_local(phy)) 1245 - return -EINVAL; 1246 1250 1247 1251 val = i->f->get_bay_identifier(rphy); 1248 1252 if (val < 0)
+3 -24
include/scsi/scsi_device.h
··· 196 196 struct execute_work ew; /* used to get process context on put */ 197 197 struct work_struct requeue_work; 198 198 199 - struct scsi_dh_data *scsi_dh_data; 199 + struct scsi_device_handler *handler; 200 + void *handler_data; 201 + 200 202 enum scsi_device_state sdev_state; 201 203 unsigned long sdev_data[0]; 202 204 } __attribute__((aligned(sizeof(unsigned long)))); 203 - 204 - typedef void (*activate_complete)(void *, int); 205 - struct scsi_device_handler { 206 - /* Used by the infrastructure */ 207 - struct list_head list; /* list of scsi_device_handlers */ 208 - 209 - /* Filled by the hardware handler */ 210 - struct module *module; 211 - const char *name; 212 - int (*check_sense)(struct scsi_device *, struct scsi_sense_hdr *); 213 - struct scsi_dh_data *(*attach)(struct scsi_device *); 214 - void (*detach)(struct scsi_device *); 215 - int (*activate)(struct scsi_device *, activate_complete, void *); 216 - int (*prep_fn)(struct scsi_device *, struct request *); 217 - int (*set_params)(struct scsi_device *, const char *); 218 - bool (*match)(struct scsi_device *); 219 - }; 220 - 221 - struct scsi_dh_data { 222 - struct scsi_device_handler *scsi_dh; 223 - struct scsi_device *sdev; 224 - struct kref kref; 225 - }; 226 205 227 206 #define to_scsi_device(d) \ 228 207 container_of(d, struct scsi_device, sdev_gendev)
+18 -11
include/scsi/scsi_dh.h
··· 55 55 SCSI_DH_NOSYS, 56 56 SCSI_DH_DRIVER_MAX, 57 57 }; 58 - #if defined(CONFIG_SCSI_DH) || defined(CONFIG_SCSI_DH_MODULE) 58 + 59 + typedef void (*activate_complete)(void *, int); 60 + struct scsi_device_handler { 61 + /* Used by the infrastructure */ 62 + struct list_head list; /* list of scsi_device_handlers */ 63 + 64 + /* Filled by the hardware handler */ 65 + struct module *module; 66 + const char *name; 67 + int (*check_sense)(struct scsi_device *, struct scsi_sense_hdr *); 68 + int (*attach)(struct scsi_device *); 69 + void (*detach)(struct scsi_device *); 70 + int (*activate)(struct scsi_device *, activate_complete, void *); 71 + int (*prep_fn)(struct scsi_device *, struct request *); 72 + int (*set_params)(struct scsi_device *, const char *); 73 + }; 74 + 75 + #ifdef CONFIG_SCSI_DH 59 76 extern int scsi_dh_activate(struct request_queue *, activate_complete, void *); 60 - extern int scsi_dh_handler_exist(const char *); 61 77 extern int scsi_dh_attach(struct request_queue *, const char *); 62 - extern void scsi_dh_detach(struct request_queue *); 63 78 extern const char *scsi_dh_attached_handler_name(struct request_queue *, gfp_t); 64 79 extern int scsi_dh_set_params(struct request_queue *, const char *); 65 80 #else ··· 84 69 fn(data, 0); 85 70 return 0; 86 71 } 87 - static inline int scsi_dh_handler_exist(const char *name) 88 - { 89 - return 0; 90 - } 91 72 static inline int scsi_dh_attach(struct request_queue *req, const char *name) 92 73 { 93 74 return SCSI_DH_NOSYS; 94 - } 95 - static inline void scsi_dh_detach(struct request_queue *q) 96 - { 97 - return; 98 75 } 99 76 static inline const char *scsi_dh_attached_handler_name(struct request_queue *q, 100 77 gfp_t gfp)