Merge master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6

+225 -133
+9
MAINTAINERS
··· 1636 W: http://ldm.sourceforge.net 1637 S: Maintained 1638 1639 LSILOGIC/SYMBIOS/NCR 53C8XX and 53C1010 PCI-SCSI drivers 1640 P: Matthew Wilcox 1641 M: matthew@wil.cx
··· 1636 W: http://ldm.sourceforge.net 1637 S: Maintained 1638 1639 + LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI) 1640 + P: Eric Moore 1641 + M: Eric.Moore@lsil.com 1642 + M: support@lsil.com 1643 + L: mpt_linux_developer@lsil.com 1644 + L: linux-scsi@vger.kernel.org 1645 + W: http://www.lsilogic.com/support 1646 + S: Supported 1647 + 1648 LSILOGIC/SYMBIOS/NCR 53C8XX and 53C1010 PCI-SCSI drivers 1649 P: Matthew Wilcox 1650 M: matthew@wil.cx
+62 -2
drivers/message/fusion/mptbase.c
··· 1118 return -1; 1119 } 1120 1121 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 1122 /* 1123 * mpt_attach - Install a PCI intelligent MPT adapter. ··· 1245 ioc->pcidev = pdev; 1246 ioc->diagPending = 0; 1247 spin_lock_init(&ioc->diagLock); 1248 1249 /* Initialize the event logging. 1250 */ ··· 1468 */ 1469 mpt_detect_bound_ports(ioc, pdev); 1470 1471 - if ((r = mpt_do_ioc_recovery(ioc, 1472 - MPT_HOSTEVENT_IOC_BRINGUP, CAN_SLEEP)) != 0) { 1473 printk(KERN_WARNING MYNAM 1474 ": WARNING - %s did not initialize properly! (%d)\n", 1475 ioc->name, r); ··· 6357 EXPORT_SYMBOL(mpt_alloc_fw_memory); 6358 EXPORT_SYMBOL(mpt_free_fw_memory); 6359 EXPORT_SYMBOL(mptbase_sas_persist_operation); 6360 6361 6362 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
··· 1118 return -1; 1119 } 1120 1121 + int 1122 + mpt_alt_ioc_wait(MPT_ADAPTER *ioc) 1123 + { 1124 + int loop_count = 30 * 4; /* Wait 30 seconds */ 1125 + int status = -1; /* -1 means failed to get board READY */ 1126 + 1127 + do { 1128 + spin_lock(&ioc->initializing_hba_lock); 1129 + if (ioc->initializing_hba_lock_flag == 0) { 1130 + ioc->initializing_hba_lock_flag=1; 1131 + spin_unlock(&ioc->initializing_hba_lock); 1132 + status = 0; 1133 + break; 1134 + } 1135 + spin_unlock(&ioc->initializing_hba_lock); 1136 + set_current_state(TASK_INTERRUPTIBLE); 1137 + schedule_timeout(HZ/4); 1138 + } while (--loop_count); 1139 + 1140 + return status; 1141 + } 1142 + 1143 + /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 1144 + /* 1145 + * mpt_bringup_adapter - This is a wrapper function for mpt_do_ioc_recovery 1146 + * @ioc: Pointer to MPT adapter structure 1147 + * @sleepFlag: Use schedule if CAN_SLEEP else use udelay. 1148 + * 1149 + * This routine performs all the steps necessary to bring the IOC 1150 + * to a OPERATIONAL state. 1151 + * 1152 + * Special Note: This function was added with spin lock's so as to allow 1153 + * the dv(domain validation) work thread to succeed on the other channel 1154 + * that maybe occuring at the same time when this function is called. 1155 + * Without this lock, the dv would fail when message frames were 1156 + * requested during hba bringup on the alternate ioc. 1157 + */ 1158 + static int 1159 + mpt_bringup_adapter(MPT_ADAPTER *ioc, int sleepFlag) 1160 + { 1161 + int r; 1162 + 1163 + if(ioc->alt_ioc) { 1164 + if((r=mpt_alt_ioc_wait(ioc->alt_ioc)!=0)) 1165 + return r; 1166 + } 1167 + 1168 + r = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_BRINGUP, 1169 + CAN_SLEEP); 1170 + 1171 + if(ioc->alt_ioc) { 1172 + spin_lock(&ioc->alt_ioc->initializing_hba_lock); 1173 + ioc->alt_ioc->initializing_hba_lock_flag=0; 1174 + spin_unlock(&ioc->alt_ioc->initializing_hba_lock); 1175 + } 1176 + 1177 + return r; 1178 + } 1179 + 1180 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 1181 /* 1182 * mpt_attach - Install a PCI intelligent MPT adapter. ··· 1186 ioc->pcidev = pdev; 1187 ioc->diagPending = 0; 1188 spin_lock_init(&ioc->diagLock); 1189 + spin_lock_init(&ioc->initializing_hba_lock); 1190 1191 /* Initialize the event logging. 1192 */ ··· 1408 */ 1409 mpt_detect_bound_ports(ioc, pdev); 1410 1411 + if ((r = mpt_bringup_adapter(ioc, CAN_SLEEP)) != 0){ 1412 printk(KERN_WARNING MYNAM 1413 ": WARNING - %s did not initialize properly! (%d)\n", 1414 ioc->name, r); ··· 6298 EXPORT_SYMBOL(mpt_alloc_fw_memory); 6299 EXPORT_SYMBOL(mpt_free_fw_memory); 6300 EXPORT_SYMBOL(mptbase_sas_persist_operation); 6301 + EXPORT_SYMBOL(mpt_alt_ioc_wait); 6302 6303 6304 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
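The mptbase.c hunk above serializes adapter bring-up against domain-validation work on the partner port: mpt_alt_ioc_wait() claims a per-IOC flag under a spinlock (polling for up to 30 seconds), and mpt_bringup_adapter() wraps mpt_do_ioc_recovery() in a claim/release of the alternate IOC's flag. Below is a minimal compilable userspace sketch of that pattern; the pthread mutex stands in for the kernel spinlock, usleep() for schedule_timeout(), and every name (struct ioc, alt_ioc_wait, bringup_adapter, do_recovery) is an illustrative stand-in, not the driver's API.

    #include <pthread.h>
    #include <unistd.h>

    struct ioc {
        pthread_mutex_t initializing_lock;
        int initializing_flag;        /* 1 while someone owns the bring-up/DV slot */
        struct ioc *alt_ioc;          /* partner port on the same chip, may be NULL */
    };

    /* Poll for up to ~30 seconds (120 tries, 250 ms apart) to claim the flag. */
    static int alt_ioc_wait(struct ioc *ioc)
    {
        int tries = 30 * 4;

        do {
            pthread_mutex_lock(&ioc->initializing_lock);
            if (ioc->initializing_flag == 0) {
                ioc->initializing_flag = 1;            /* claimed */
                pthread_mutex_unlock(&ioc->initializing_lock);
                return 0;
            }
            pthread_mutex_unlock(&ioc->initializing_lock);
            usleep(250 * 1000);                        /* HZ/4 in the kernel code */
        } while (--tries);

        return -1;                                     /* partner stayed busy */
    }

    static int do_recovery(struct ioc *ioc)            /* stand-in for mpt_do_ioc_recovery() */
    {
        (void)ioc;
        return 0;
    }

    static int bringup_adapter(struct ioc *ioc)
    {
        int r;

        /* Claim the partner before requesting message frames, so a DV
         * thread running on the other channel cannot race this bring-up. */
        if (ioc->alt_ioc && alt_ioc_wait(ioc->alt_ioc) != 0)
            return -1;

        r = do_recovery(ioc);

        if (ioc->alt_ioc) {
            pthread_mutex_lock(&ioc->alt_ioc->initializing_lock);
            ioc->alt_ioc->initializing_flag = 0;       /* release the claim */
            pthread_mutex_unlock(&ioc->alt_ioc->initializing_lock);
        }
        return r;
    }

The same helper is exported (EXPORT_SYMBOL(mpt_alt_ioc_wait)) and declared in mptbase.h so the SCSI host side can take the flag around domain validation, as the mptscsih.c hunk further down shows.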
+3
drivers/message/fusion/mptbase.h
··· 611 int DoneCtx; 612 int TaskCtx; 613 int InternalCtx; 614 struct list_head list; 615 struct net_device *netdev; 616 struct list_head sas_topology; ··· 1003 extern int mpt_findImVolumes(MPT_ADAPTER *ioc); 1004 extern int mpt_read_ioc_pg_3(MPT_ADAPTER *ioc); 1005 extern int mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode); 1006 1007 /* 1008 * Public data decl's...
··· 611 int DoneCtx; 612 int TaskCtx; 613 int InternalCtx; 614 + spinlock_t initializing_hba_lock; 615 + int initializing_hba_lock_flag; 616 struct list_head list; 617 struct net_device *netdev; 618 struct list_head sas_topology; ··· 1001 extern int mpt_findImVolumes(MPT_ADAPTER *ioc); 1002 extern int mpt_read_ioc_pg_3(MPT_ADAPTER *ioc); 1003 extern int mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode); 1004 + extern int mpt_alt_ioc_wait(MPT_ADAPTER *ioc); 1005 1006 /* 1007 * Public data decl's...
+10
drivers/message/fusion/mptscsih.c
··· 4162 } 4163 } 4164 4165 if (mptscsih_doDv(hd, 0, id) == 1) { 4166 /* Untagged device was busy, try again 4167 */ ··· 4178 */ 4179 hd->ioc->spi_data.dvStatus[id] &= ~(MPT_SCSICFG_DV_NOT_DONE | MPT_SCSICFG_DV_PENDING); 4180 } 4181 4182 if (isPhysDisk) { 4183 for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) {
··· 4162 } 4163 } 4164 4165 + if(mpt_alt_ioc_wait(hd->ioc)!=0) { 4166 + ddvprintk((MYIOC_s_WARN_FMT "alt_ioc busy!\n", 4167 + hd->ioc->name)); 4168 + continue; 4169 + } 4170 + 4171 if (mptscsih_doDv(hd, 0, id) == 1) { 4172 /* Untagged device was busy, try again 4173 */ ··· 4172 */ 4173 hd->ioc->spi_data.dvStatus[id] &= ~(MPT_SCSICFG_DV_NOT_DONE | MPT_SCSICFG_DV_PENDING); 4174 } 4175 + 4176 + spin_lock(&hd->ioc->initializing_hba_lock); 4177 + hd->ioc->initializing_hba_lock_flag=0; 4178 + spin_unlock(&hd->ioc->initializing_hba_lock); 4179 4180 if (isPhysDisk) { 4181 for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) {
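This mptscsih.c hunk is the consumer side of the same flag: the DV loop claims the IOC with the exported mpt_alt_ioc_wait() before calling mptscsih_doDv(), skips the target (continue) if the IOC is busy, and clears the flag afterwards so the partner's bring-up can proceed. A hedged outline of that bracket, reusing the types from the sketch after the mptbase.c hunk above (all names illustrative):

    /* Stand-in for the DV path in mptscsih.c: claim the IOC, validate one
     * target, release the claim. */
    static void dv_one_target(struct ioc *ioc, int id)
    {
        if (alt_ioc_wait(ioc) != 0)
            return;                          /* IOC busy; the driver retries this target later */

        /* ... mptscsih_doDv(hd, 0, id) would run here ... */
        (void)id;

        pthread_mutex_lock(&ioc->initializing_lock);
        ioc->initializing_flag = 0;          /* let the partner's bring-up proceed */
        pthread_mutex_unlock(&ioc->initializing_lock);
    }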
+14
drivers/s390/scsi/zfcp_aux.c
··· 996 spin_lock_init(&adapter->fsf_req_list_lock); 997 INIT_LIST_HEAD(&adapter->fsf_req_list_head); 998 999 /* initialize abort lock */ 1000 rwlock_init(&adapter->abort_lock); 1001
··· 996 spin_lock_init(&adapter->fsf_req_list_lock); 997 INIT_LIST_HEAD(&adapter->fsf_req_list_head); 998 999 + /* initialize debug locks */ 1000 + 1001 + spin_lock_init(&adapter->erp_dbf_lock); 1002 + spin_lock_init(&adapter->hba_dbf_lock); 1003 + spin_lock_init(&adapter->san_dbf_lock); 1004 + spin_lock_init(&adapter->scsi_dbf_lock); 1005 + 1006 + /* initialize error recovery stuff */ 1007 + 1008 + rwlock_init(&adapter->erp_lock); 1009 + sema_init(&adapter->erp_ready_sem, 0); 1010 + INIT_LIST_HEAD(&adapter->erp_ready_head); 1011 + INIT_LIST_HEAD(&adapter->erp_running_head); 1012 + 1013 /* initialize abort lock */ 1014 rwlock_init(&adapter->abort_lock); 1015
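The zfcp_aux.c hunk moves initialization of the four debug-trace locks and the ERP lock, semaphore and lists into adapter allocation, so the corresponding init calls can be dropped from zfcp_dbf.c (next hunk) and zfcp_erp.c. A minimal userspace sketch of the init-at-allocation idea, with invented names (struct adapter, adapter_enqueue) standing in for the zfcp structures:

    #include <pthread.h>
    #include <semaphore.h>
    #include <stdlib.h>

    struct adapter {
        pthread_mutex_t erp_dbf_lock;        /* debug-trace locks */
        pthread_mutex_t hba_dbf_lock;
        pthread_rwlock_t erp_lock;           /* error-recovery bookkeeping */
        sem_t erp_ready_sem;
    };

    /* Initialize every lock the object owns before it is published, so the
     * debug and recovery code can never touch an uninitialized lock, no
     * matter how early it runs. */
    static struct adapter *adapter_enqueue(void)
    {
        struct adapter *a = calloc(1, sizeof(*a));

        if (!a)
            return NULL;
        pthread_mutex_init(&a->erp_dbf_lock, NULL);
        pthread_mutex_init(&a->hba_dbf_lock, NULL);
        pthread_rwlock_init(&a->erp_lock, NULL);
        sem_init(&a->erp_ready_sem, 0, 0);   /* matches sema_init(..., 0) */
        return a;
    }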
-4
drivers/s390/scsi/zfcp_dbf.c
··· 926 char dbf_name[DEBUG_MAX_NAME_LEN]; 927 928 /* debug feature area which records recovery activity */ 929 - spin_lock_init(&adapter->erp_dbf_lock); 930 sprintf(dbf_name, "zfcp_%s_erp", zfcp_get_busid_by_adapter(adapter)); 931 adapter->erp_dbf = debug_register(dbf_name, dbfsize, 2, 932 sizeof(struct zfcp_erp_dbf_record)); ··· 935 debug_set_level(adapter->erp_dbf, 3); 936 937 /* debug feature area which records HBA (FSF and QDIO) conditions */ 938 - spin_lock_init(&adapter->hba_dbf_lock); 939 sprintf(dbf_name, "zfcp_%s_hba", zfcp_get_busid_by_adapter(adapter)); 940 adapter->hba_dbf = debug_register(dbf_name, dbfsize, 1, 941 sizeof(struct zfcp_hba_dbf_record)); ··· 945 debug_set_level(adapter->hba_dbf, 3); 946 947 /* debug feature area which records SAN command failures and recovery */ 948 - spin_lock_init(&adapter->san_dbf_lock); 949 sprintf(dbf_name, "zfcp_%s_san", zfcp_get_busid_by_adapter(adapter)); 950 adapter->san_dbf = debug_register(dbf_name, dbfsize, 1, 951 sizeof(struct zfcp_san_dbf_record)); ··· 955 debug_set_level(adapter->san_dbf, 6); 956 957 /* debug feature area which records SCSI command failures and recovery */ 958 - spin_lock_init(&adapter->scsi_dbf_lock); 959 sprintf(dbf_name, "zfcp_%s_scsi", zfcp_get_busid_by_adapter(adapter)); 960 adapter->scsi_dbf = debug_register(dbf_name, dbfsize, 1, 961 sizeof(struct zfcp_scsi_dbf_record));
··· 926 char dbf_name[DEBUG_MAX_NAME_LEN]; 927 928 /* debug feature area which records recovery activity */ 929 sprintf(dbf_name, "zfcp_%s_erp", zfcp_get_busid_by_adapter(adapter)); 930 adapter->erp_dbf = debug_register(dbf_name, dbfsize, 2, 931 sizeof(struct zfcp_erp_dbf_record)); ··· 936 debug_set_level(adapter->erp_dbf, 3); 937 938 /* debug feature area which records HBA (FSF and QDIO) conditions */ 939 sprintf(dbf_name, "zfcp_%s_hba", zfcp_get_busid_by_adapter(adapter)); 940 adapter->hba_dbf = debug_register(dbf_name, dbfsize, 1, 941 sizeof(struct zfcp_hba_dbf_record)); ··· 947 debug_set_level(adapter->hba_dbf, 3); 948 949 /* debug feature area which records SAN command failures and recovery */ 950 sprintf(dbf_name, "zfcp_%s_san", zfcp_get_busid_by_adapter(adapter)); 951 adapter->san_dbf = debug_register(dbf_name, dbfsize, 1, 952 sizeof(struct zfcp_san_dbf_record)); ··· 958 debug_set_level(adapter->san_dbf, 6); 959 960 /* debug feature area which records SCSI command failures and recovery */ 961 sprintf(dbf_name, "zfcp_%s_scsi", zfcp_get_busid_by_adapter(adapter)); 962 adapter->scsi_dbf = debug_register(dbf_name, dbfsize, 1, 963 sizeof(struct zfcp_scsi_dbf_record));
+32 -62
drivers/s390/scsi/zfcp_erp.c
··· 1071 1072 atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status); 1073 1074 - rwlock_init(&adapter->erp_lock); 1075 - INIT_LIST_HEAD(&adapter->erp_ready_head); 1076 - INIT_LIST_HEAD(&adapter->erp_running_head); 1077 - sema_init(&adapter->erp_ready_sem, 0); 1078 - 1079 retval = kernel_thread(zfcp_erp_thread, adapter, SIGCHLD); 1080 if (retval < 0) { 1081 ZFCP_LOG_NORMAL("error: creation of erp thread failed for " ··· 2243 return retval; 2244 } 2245 2246 - /* 2247 - * function: zfcp_fsf_init 2248 - * 2249 - * purpose: initializes FSF operation for the specified adapter 2250 - * 2251 - * returns: 0 - succesful initialization of FSF operation 2252 - * !0 - failed to initialize FSF operation 2253 - */ 2254 static int 2255 zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *erp_action) 2256 { 2257 - int xconfig, xport; 2258 2259 - if (atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, 2260 - &erp_action->adapter->status)) { 2261 zfcp_erp_adapter_strategy_open_fsf_xport(erp_action); 2262 atomic_set(&erp_action->adapter->erp_counter, 0); 2263 return ZFCP_ERP_FAILED; 2264 } 2265 2266 - xconfig = zfcp_erp_adapter_strategy_open_fsf_xconfig(erp_action); 2267 - xport = zfcp_erp_adapter_strategy_open_fsf_xport(erp_action); 2268 - if ((xconfig == ZFCP_ERP_FAILED) || (xport == ZFCP_ERP_FAILED)) 2269 return ZFCP_ERP_FAILED; 2270 2271 return zfcp_erp_adapter_strategy_open_fsf_statusread(erp_action); ··· 2351 static int 2352 zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *erp_action) 2353 { 2354 - int retval = ZFCP_ERP_SUCCEEDED; 2355 int retries; 2356 int sleep; 2357 struct zfcp_adapter *adapter = erp_action->adapter; 2358 2359 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status); 2360 2361 - for (retries = 0; ; retries++) { 2362 - ZFCP_LOG_DEBUG("Doing exchange port data\n"); 2363 zfcp_erp_action_to_running(erp_action); 2364 zfcp_erp_timeout_init(erp_action); 2365 - if (zfcp_fsf_exchange_port_data(erp_action, adapter, NULL)) { 2366 - retval = ZFCP_ERP_FAILED; 2367 - debug_text_event(adapter->erp_dbf, 5, "a_fstx_xf"); 2368 - ZFCP_LOG_INFO("error: initiation of exchange of " 2369 - "port data failed for adapter %s\n", 2370 - zfcp_get_busid_by_adapter(adapter)); 2371 - break; 2372 } 2373 - debug_text_event(adapter->erp_dbf, 6, "a_fstx_xok"); 2374 - ZFCP_LOG_DEBUG("Xchange underway\n"); 2375 2376 - /* 2377 - * Why this works: 2378 - * Both the normal completion handler as well as the timeout 2379 - * handler will do an 'up' when the 'exchange port data' 2380 - * request completes or times out. Thus, the signal to go on 2381 - * won't be lost utilizing this semaphore. 2382 - * Furthermore, this 'adapter_reopen' action is 2383 - * guaranteed to be the only action being there (highest action 2384 - * which prevents other actions from being created). 2385 - * Resulting from that, the wake signal recognized here 2386 - * _must_ be the one belonging to the 'exchange port 2387 - * data' request. 2388 - */ 2389 down(&adapter->erp_ready_sem); 2390 if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) { 2391 ZFCP_LOG_INFO("error: exchange of port data " ··· 2381 zfcp_get_busid_by_adapter(adapter)); 2382 break; 2383 } 2384 - 2385 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, 2386 &adapter->status)) 2387 break; 2388 2389 - ZFCP_LOG_DEBUG("host connection still initialising... " 2390 - "waiting and retrying...\n"); 2391 - /* sleep a little bit before retry */ 2392 - sleep = retries < ZFCP_EXCHANGE_PORT_DATA_SHORT_RETRIES ? 
2393 - ZFCP_EXCHANGE_PORT_DATA_SHORT_SLEEP : 2394 - ZFCP_EXCHANGE_PORT_DATA_LONG_SLEEP; 2395 - msleep(jiffies_to_msecs(sleep)); 2396 - } 2397 2398 - if (atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, 2399 - &adapter->status)) { 2400 - ZFCP_LOG_INFO("error: exchange of port data for " 2401 - "adapter %s failed\n", 2402 - zfcp_get_busid_by_adapter(adapter)); 2403 - retval = ZFCP_ERP_FAILED; 2404 - } 2405 - 2406 - return retval; 2407 } 2408 2409 /*
··· 1071 1072 atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status); 1073 1074 retval = kernel_thread(zfcp_erp_thread, adapter, SIGCHLD); 1075 if (retval < 0) { 1076 ZFCP_LOG_NORMAL("error: creation of erp thread failed for " ··· 2248 return retval; 2249 } 2250 2251 static int 2252 zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *erp_action) 2253 { 2254 + int retval; 2255 2256 + if ((atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, 2257 + &erp_action->adapter->status)) && 2258 + (erp_action->adapter->adapter_features & 2259 + FSF_FEATURE_HBAAPI_MANAGEMENT)) { 2260 zfcp_erp_adapter_strategy_open_fsf_xport(erp_action); 2261 atomic_set(&erp_action->adapter->erp_counter, 0); 2262 return ZFCP_ERP_FAILED; 2263 } 2264 2265 + retval = zfcp_erp_adapter_strategy_open_fsf_xconfig(erp_action); 2266 + if (retval == ZFCP_ERP_FAILED) 2267 + return ZFCP_ERP_FAILED; 2268 + 2269 + retval = zfcp_erp_adapter_strategy_open_fsf_xport(erp_action); 2270 + if (retval == ZFCP_ERP_FAILED) 2271 return ZFCP_ERP_FAILED; 2272 2273 return zfcp_erp_adapter_strategy_open_fsf_statusread(erp_action); ··· 2359 static int 2360 zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *erp_action) 2361 { 2362 + int ret; 2363 int retries; 2364 int sleep; 2365 struct zfcp_adapter *adapter = erp_action->adapter; 2366 2367 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status); 2368 2369 + retries = 0; 2370 + do { 2371 + write_lock(&adapter->erp_lock); 2372 zfcp_erp_action_to_running(erp_action); 2373 + write_unlock(&adapter->erp_lock); 2374 zfcp_erp_timeout_init(erp_action); 2375 + ret = zfcp_fsf_exchange_port_data(erp_action, adapter, NULL); 2376 + if (ret == -EOPNOTSUPP) { 2377 + debug_text_event(adapter->erp_dbf, 3, "a_xport_notsupp"); 2378 + return ZFCP_ERP_SUCCEEDED; 2379 + } else if (ret) { 2380 + debug_text_event(adapter->erp_dbf, 3, "a_xport_failed"); 2381 + return ZFCP_ERP_FAILED; 2382 } 2383 + debug_text_event(adapter->erp_dbf, 6, "a_xport_ok"); 2384 2385 down(&adapter->erp_ready_sem); 2386 if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) { 2387 ZFCP_LOG_INFO("error: exchange of port data " ··· 2401 zfcp_get_busid_by_adapter(adapter)); 2402 break; 2403 } 2404 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, 2405 &adapter->status)) 2406 break; 2407 2408 + if (retries < ZFCP_EXCHANGE_PORT_DATA_SHORT_RETRIES) { 2409 + sleep = ZFCP_EXCHANGE_PORT_DATA_SHORT_SLEEP; 2410 + retries++; 2411 + } else 2412 + sleep = ZFCP_EXCHANGE_PORT_DATA_LONG_SLEEP; 2413 + schedule_timeout(sleep); 2414 + } while (1); 2415 2416 + return ZFCP_ERP_SUCCEEDED; 2417 } 2418 2419 /*
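The zfcp_erp.c hunk splits the xconfig/xport steps so each failure is checked separately, and rewrites the exchange-port-data helper as a retry loop: it takes the adapter's erp_lock around zfcp_erp_action_to_running() (which is why zfcp_aux.c now initializes that lock at allocation), treats -EOPNOTSUPP from zfcp_fsf_exchange_port_data() as success, gives up if the request cannot be sent, and otherwise re-issues the exchange with a short sleep for the first few tries and a longer one after that until the link comes back. A compilable userspace sketch of that graduated-backoff loop follows; the constants, enum and helpers are illustrative, not the zfcp values.

    #include <stdbool.h>
    #include <unistd.h>

    #define SHORT_RETRIES   10                   /* illustrative, not the zfcp constants */
    #define SHORT_SLEEP_US  (100 * 1000)
    #define LONG_SLEEP_US   (1000 * 1000)

    enum result { SUCCEEDED, FAILED, NOT_SUPPORTED };

    /* Stand-ins for issuing the exchange-port-data request and for checking
     * whether the link has come back up. */
    static enum result send_xport_request(void) { return SUCCEEDED; }
    static bool link_is_up(void)                { return true; }

    static enum result open_fsf_xport(void)
    {
        int retries = 0;

        do {
            enum result r = send_xport_request();
            if (r == NOT_SUPPORTED)
                return SUCCEEDED;            /* firmware has nothing to exchange */
            if (r == FAILED)
                return FAILED;               /* could not even send the request */

            /* the real code blocks on erp_ready_sem here until the
             * request completes or times out */

            if (link_is_up())
                break;                       /* adapter is usable, stop retrying */

            /* link still unplugged: back off briefly at first, then longer */
            if (retries < SHORT_RETRIES) {
                usleep(SHORT_SLEEP_US);
                retries++;
            } else {
                usleep(LONG_SLEEP_US);
            }
        } while (1);

        return SUCCEEDED;
    }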
+63 -47
drivers/s390/scsi/zfcp_fsf.c
··· 554 zfcp_fsf_link_down_info_eval(struct zfcp_adapter *adapter, 555 struct fsf_link_down_info *link_down) 556 { 557 switch (link_down->error_code) { 558 case FSF_PSQ_LINK_NO_LIGHT: 559 ZFCP_LOG_NORMAL("The local link to adapter %s is down " ··· 645 link_down->explanation_code, 646 link_down->vendor_specific_code); 647 648 - if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, 649 - &adapter->status)) { 650 - atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, 651 - &adapter->status); 652 - switch (link_down->error_code) { 653 - case FSF_PSQ_LINK_NO_LIGHT: 654 - case FSF_PSQ_LINK_WRAP_PLUG: 655 - case FSF_PSQ_LINK_NO_FCP: 656 - case FSF_PSQ_LINK_FIRMWARE_UPDATE: 657 - zfcp_erp_adapter_reopen(adapter, 0); 658 - break; 659 - default: 660 - zfcp_erp_adapter_failed(adapter); 661 - } 662 } 663 } 664 ··· 925 case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK: 926 ZFCP_LOG_INFO("Physical link to adapter %s is down\n", 927 zfcp_get_busid_by_adapter(adapter)); 928 break; 929 case FSF_STATUS_READ_SUB_FDISC_FAILED: 930 ZFCP_LOG_INFO("Local link to adapter %s is down " 931 "due to failed FDISC login\n", 932 - zfcp_get_busid_by_adapter(adapter)); 933 break; 934 case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE: 935 ZFCP_LOG_INFO("Local link to adapter %s is down " 936 "due to firmware update on adapter\n", 937 zfcp_get_busid_by_adapter(adapter)); 938 break; 939 default: 940 ZFCP_LOG_INFO("Local link to adapter %s is down " 941 "due to unknown reason\n", 942 zfcp_get_busid_by_adapter(adapter)); 943 }; 944 - zfcp_fsf_link_down_info_eval(adapter, 945 - (struct fsf_link_down_info *) &status_buffer->payload); 946 break; 947 948 case FSF_STATUS_READ_LINK_UP: 949 ZFCP_LOG_NORMAL("Local link to adapter %s was replugged. " 950 - "Restarting operations on this adapter\n", 951 - zfcp_get_busid_by_adapter(adapter)); 952 /* All ports should be marked as ready to run again */ 953 zfcp_erp_modify_adapter_status(adapter, 954 ZFCP_STATUS_COMMON_RUNNING, ··· 2203 return -EOPNOTSUPP; 2204 } 2205 2206 - timer = kmalloc(sizeof(struct timer_list), GFP_KERNEL); 2207 - if (!timer) 2208 - return -ENOMEM; 2209 - 2210 /* setup new FSF request */ 2211 retval = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, 2212 - 0, 0, &lock_flags, &fsf_req); 2213 if (retval < 0) { 2214 ZFCP_LOG_INFO("error: Out of resources. 
Could not create an " 2215 "exchange port data request for" ··· 2214 zfcp_get_busid_by_adapter(adapter)); 2215 write_unlock_irqrestore(&adapter->request_queue.queue_lock, 2216 lock_flags); 2217 - goto out; 2218 - } 2219 - 2220 - if (erp_action) { 2221 - erp_action->fsf_req = fsf_req; 2222 - fsf_req->erp_action = erp_action; 2223 } 2224 2225 if (data) 2226 - fsf_req->data = (unsigned long) data; 2227 2228 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); 2229 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 2230 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 2231 2232 - init_timer(timer); 2233 - timer->function = zfcp_fsf_request_timeout_handler; 2234 - timer->data = (unsigned long) adapter; 2235 - timer->expires = ZFCP_FSF_REQUEST_TIMEOUT; 2236 2237 retval = zfcp_fsf_req_send(fsf_req, timer); 2238 if (retval) { ··· 2250 zfcp_fsf_req_free(fsf_req); 2251 if (erp_action) 2252 erp_action->fsf_req = NULL; 2253 write_unlock_irqrestore(&adapter->request_queue.queue_lock, 2254 lock_flags); 2255 - goto out; 2256 } 2257 2258 - ZFCP_LOG_DEBUG("Exchange Port Data request initiated (adapter %s)\n", 2259 - zfcp_get_busid_by_adapter(adapter)); 2260 2261 - write_unlock_irqrestore(&adapter->request_queue.queue_lock, 2262 - lock_flags); 2263 - 2264 - wait_event(fsf_req->completion_wq, 2265 - fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED); 2266 - del_timer_sync(timer); 2267 - zfcp_fsf_req_free(fsf_req); 2268 - out: 2269 - kfree(timer); 2270 return retval; 2271 } 2272
··· 554 zfcp_fsf_link_down_info_eval(struct zfcp_adapter *adapter, 555 struct fsf_link_down_info *link_down) 556 { 557 + if (atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, 558 + &adapter->status)) 559 + return; 560 + 561 + atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status); 562 + 563 + if (link_down == NULL) { 564 + zfcp_erp_adapter_reopen(adapter, 0); 565 + return; 566 + } 567 + 568 switch (link_down->error_code) { 569 case FSF_PSQ_LINK_NO_LIGHT: 570 ZFCP_LOG_NORMAL("The local link to adapter %s is down " ··· 634 link_down->explanation_code, 635 link_down->vendor_specific_code); 636 637 + switch (link_down->error_code) { 638 + case FSF_PSQ_LINK_NO_LIGHT: 639 + case FSF_PSQ_LINK_WRAP_PLUG: 640 + case FSF_PSQ_LINK_NO_FCP: 641 + case FSF_PSQ_LINK_FIRMWARE_UPDATE: 642 + zfcp_erp_adapter_reopen(adapter, 0); 643 + break; 644 + default: 645 + zfcp_erp_adapter_failed(adapter); 646 } 647 } 648 ··· 919 case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK: 920 ZFCP_LOG_INFO("Physical link to adapter %s is down\n", 921 zfcp_get_busid_by_adapter(adapter)); 922 + zfcp_fsf_link_down_info_eval(adapter, 923 + (struct fsf_link_down_info *) 924 + &status_buffer->payload); 925 break; 926 case FSF_STATUS_READ_SUB_FDISC_FAILED: 927 ZFCP_LOG_INFO("Local link to adapter %s is down " 928 "due to failed FDISC login\n", 929 + zfcp_get_busid_by_adapter(adapter)); 930 + zfcp_fsf_link_down_info_eval(adapter, 931 + (struct fsf_link_down_info *) 932 + &status_buffer->payload); 933 break; 934 case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE: 935 ZFCP_LOG_INFO("Local link to adapter %s is down " 936 "due to firmware update on adapter\n", 937 zfcp_get_busid_by_adapter(adapter)); 938 + zfcp_fsf_link_down_info_eval(adapter, NULL); 939 break; 940 default: 941 ZFCP_LOG_INFO("Local link to adapter %s is down " 942 "due to unknown reason\n", 943 zfcp_get_busid_by_adapter(adapter)); 944 + zfcp_fsf_link_down_info_eval(adapter, NULL); 945 }; 946 break; 947 948 case FSF_STATUS_READ_LINK_UP: 949 ZFCP_LOG_NORMAL("Local link to adapter %s was replugged. " 950 + "Restarting operations on this adapter\n", 951 + zfcp_get_busid_by_adapter(adapter)); 952 /* All ports should be marked as ready to run again */ 953 zfcp_erp_modify_adapter_status(adapter, 954 ZFCP_STATUS_COMMON_RUNNING, ··· 2191 return -EOPNOTSUPP; 2192 } 2193 2194 /* setup new FSF request */ 2195 retval = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, 2196 + erp_action ? ZFCP_REQ_AUTO_CLEANUP : 0, 2197 + 0, &lock_flags, &fsf_req); 2198 if (retval < 0) { 2199 ZFCP_LOG_INFO("error: Out of resources. 
Could not create an " 2200 "exchange port data request for" ··· 2205 zfcp_get_busid_by_adapter(adapter)); 2206 write_unlock_irqrestore(&adapter->request_queue.queue_lock, 2207 lock_flags); 2208 + return retval; 2209 } 2210 2211 if (data) 2212 + fsf_req->data = (unsigned long) data; 2213 2214 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); 2215 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 2216 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 2217 2218 + if (erp_action) { 2219 + erp_action->fsf_req = fsf_req; 2220 + fsf_req->erp_action = erp_action; 2221 + timer = &erp_action->timer; 2222 + } else { 2223 + timer = kmalloc(sizeof(struct timer_list), GFP_ATOMIC); 2224 + if (!timer) { 2225 + write_unlock_irqrestore(&adapter->request_queue.queue_lock, 2226 + lock_flags); 2227 + zfcp_fsf_req_free(fsf_req); 2228 + return -ENOMEM; 2229 + } 2230 + init_timer(timer); 2231 + timer->function = zfcp_fsf_request_timeout_handler; 2232 + timer->data = (unsigned long) adapter; 2233 + timer->expires = ZFCP_FSF_REQUEST_TIMEOUT; 2234 + } 2235 2236 retval = zfcp_fsf_req_send(fsf_req, timer); 2237 if (retval) { ··· 2233 zfcp_fsf_req_free(fsf_req); 2234 if (erp_action) 2235 erp_action->fsf_req = NULL; 2236 + else 2237 + kfree(timer); 2238 write_unlock_irqrestore(&adapter->request_queue.queue_lock, 2239 lock_flags); 2240 + return retval; 2241 } 2242 2243 + write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags); 2244 2245 + if (!erp_action) { 2246 + wait_event(fsf_req->completion_wq, 2247 + fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED); 2248 + del_timer_sync(timer); 2249 + zfcp_fsf_req_free(fsf_req); 2250 + kfree(timer); 2251 + } 2252 return retval; 2253 } 2254
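Two things change in zfcp_fsf.c: zfcp_fsf_link_down_info_eval() now returns early if the link is already marked unplugged, sets the flag once, and accepts a NULL payload (in which case it simply reopens the adapter), with the status-read handler calling it for each link-down sub-status; and zfcp_fsf_exchange_port_data() only allocates a private timer and waits synchronously when no erp_action drives it, while an ERP-driven call borrows erp_action->timer and uses ZFCP_REQ_AUTO_CLEANUP. Below is a compilable sketch of that timer-ownership split; every type and helper (struct timer, struct request, struct erp_action, send_request, wait_for_completion) is a stand-in, not the zfcp API.

    #include <stdlib.h>

    struct timer      { long expires; };
    struct request    { struct timer *timer; };
    struct erp_action { struct timer timer; struct request *req; };

    /* Stand-ins for queueing the request and waiting for its completion. */
    static int  send_request(struct request *req)        { (void)req; return 0; }
    static void wait_for_completion(struct request *req) { (void)req; }

    /* If ERP drives the exchange, reuse the timer embedded in the erp_action
     * and let the normal ERP completion path clean the request up.  Only a
     * caller without an erp_action gets a private timer and a synchronous
     * wait, and is responsible for freeing what it allocated. */
    static int exchange_port_data(struct erp_action *erp, struct request *req)
    {
        struct timer *timer;
        int ret;

        if (erp) {
            erp->req = req;
            timer = &erp->timer;             /* borrowed, never freed here */
        } else {
            timer = malloc(sizeof(*timer));
            if (!timer)
                return -1;                   /* -ENOMEM in the kernel code */
            timer->expires = 0;
        }
        req->timer = timer;

        ret = send_request(req);
        if (ret) {
            if (!erp)
                free(timer);                 /* undo only what we allocated */
            return ret;
        }

        if (!erp) {
            wait_for_completion(req);        /* synchronous caller blocks here */
            free(timer);
        }
        return 0;
    }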
+1 -1
drivers/s390/scsi/zfcp_scsi.c
··· 179 struct zfcp_adapter *adapter; 180 struct zfcp_unit *unit; 181 unsigned long flags; 182 - int retval = -ENODEV; 183 184 adapter = (struct zfcp_adapter *) sdp->host->hostdata[0]; 185 if (!adapter)
··· 179 struct zfcp_adapter *adapter; 180 struct zfcp_unit *unit; 181 unsigned long flags; 182 + int retval = -ENXIO; 183 184 adapter = (struct zfcp_adapter *) sdp->host->hostdata[0]; 185 if (!adapter)
+2
drivers/scsi/aacraid/linit.c
··· 325 * translations ( 64/32, 128/32, 255/63 ). 326 */ 327 buf = scsi_bios_ptable(bdev); 328 if(*(__le16 *)(buf + 0x40) == cpu_to_le16(0xaa55)) { 329 struct partition *first = (struct partition * )buf; 330 struct partition *entry = first;
··· 325 * translations ( 64/32, 128/32, 255/63 ). 326 */ 327 buf = scsi_bios_ptable(bdev); 328 + if (!buf) 329 + return 0; 330 if(*(__le16 *)(buf + 0x40) == cpu_to_le16(0xaa55)) { 331 struct partition *first = (struct partition * )buf; 332 struct partition *entry = first;
+1 -1
drivers/scsi/aic7xxx/aic79xx_osm.c
··· 2105 scmd_id(cmd), 2106 scmd_channel(cmd) + 'A', 2107 CAM_LUN_WILDCARD, 2108 - SCB_LIST_NULL, ROLE_INITIATOR) == 0) 2109 break; 2110 } 2111 }
··· 2105 scmd_id(cmd), 2106 scmd_channel(cmd) + 'A', 2107 CAM_LUN_WILDCARD, 2108 + SCB_LIST_NULL, ROLE_INITIATOR)) 2109 break; 2110 } 2111 }
+1 -1
drivers/scsi/aic7xxx/aic7xxx_osm.c
··· 2169 if (ahc_match_scb(ahc, pending_scb, scmd_id(cmd), 2170 scmd_channel(cmd) + 'A', 2171 CAM_LUN_WILDCARD, 2172 - SCB_LIST_NULL, ROLE_INITIATOR) == 0) 2173 break; 2174 } 2175 }
··· 2169 if (ahc_match_scb(ahc, pending_scb, scmd_id(cmd), 2170 scmd_channel(cmd) + 'A', 2171 CAM_LUN_WILDCARD, 2172 + SCB_LIST_NULL, ROLE_INITIATOR)) 2173 break; 2174 } 2175 }
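The aic79xx and aic7xxx hunks flip the sense of the match test in the abort path: the loop walks the pending SCB list looking for the SCB that belongs to the command being aborted, so it must stop when the match function reports a match (non-zero, assuming the usual truth-value convention the fixed test implies), not on the first entry that does not match. A tiny sketch of the corrected find loop, with invented names:

    #include <stddef.h>

    struct scb { struct scb *next; int target; int lun; };

    /* Stand-in for ahc_match_scb(): non-zero when the SCB addresses the
     * same target/lun as the command being aborted. */
    static int scb_matches(const struct scb *scb, int target, int lun)
    {
        return scb->target == target && scb->lun == lun;
    }

    static struct scb *find_pending_scb(struct scb *head, int target, int lun)
    {
        struct scb *scb;

        for (scb = head; scb != NULL; scb = scb->next) {
            if (scb_matches(scb, target, lun))
                break;                       /* stop on the match, not on the first miss */
        }
        return scb;
    }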
+19 -9
drivers/scsi/scsi_transport_spi.c
··· 812 if (!scsi_device_sync(sdev) && !scsi_device_dt(sdev)) 813 return; 814 815 - /* see if the device has an echo buffer. If it does we can 816 - * do the SPI pattern write tests */ 817 - 818 - len = 0; 819 - if (scsi_device_dt(sdev)) 820 - len = spi_dv_device_get_echo_buffer(sdev, buffer); 821 822 retry: 823 ··· 838 if (spi_min_period(starget) == 8) 839 DV_SET(pcomp_en, 1); 840 } 841 842 - if (len == 0) { 843 starget_printk(KERN_INFO, starget, "Domain Validation skipping write tests\n"); 844 - spi_dv_retrain(sdev, buffer, buffer + len, 845 - spi_dv_device_compare_inquiry); 846 return; 847 } 848
··· 812 if (!scsi_device_sync(sdev) && !scsi_device_dt(sdev)) 813 return; 814 815 + /* len == -1 is the signal that we need to ascertain the 816 + * presence of an echo buffer before trying to use it. len == 817 + * 0 means we don't have an echo buffer */ 818 + len = -1; 819 820 retry: 821 ··· 840 if (spi_min_period(starget) == 8) 841 DV_SET(pcomp_en, 1); 842 } 843 + /* Do the read only INQUIRY tests */ 844 + spi_dv_retrain(sdev, buffer, buffer + sdev->inquiry_len, 845 + spi_dv_device_compare_inquiry); 846 + /* See if we actually managed to negotiate and sustain DT */ 847 + if (i->f->get_dt) 848 + i->f->get_dt(starget); 849 850 + /* see if the device has an echo buffer. If it does we can do 851 + * the SPI pattern write tests. Because of some broken 852 + * devices, we *only* try this on a device that has actually 853 + * negotiated DT */ 854 + 855 + if (len == -1 && spi_dt(starget)) 856 + len = spi_dv_device_get_echo_buffer(sdev, buffer); 857 + 858 + if (len <= 0) { 859 starget_printk(KERN_INFO, starget, "Domain Validation skipping write tests\n"); 860 return; 861 } 862
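The scsi_transport_spi.c hunk defers the echo-buffer probe: len starts at -1 ("not probed yet"), the read-only INQUIRY compare runs unconditionally, get_dt refreshes the negotiated state, and only a device that actually sustained DT is asked for an echo buffer; len <= 0 then skips the pattern write tests. In the driver the retry label sits after the initialization, so repeated passes reuse the probed length instead of probing again. A compilable sketch of the sentinel-then-conditional-probe pattern, with invented names:

    #include <stdbool.h>

    #define ECHO_UNKNOWN  (-1)               /* sentinel: have not looked for an echo buffer yet */

    /* Stand-ins: read-only compare test, DT query, and echo-buffer probe. */
    static void inquiry_compare_test(void)  { }
    static bool dt_negotiated(void)         { return true; }
    static int  probe_echo_buffer(void)     { return 0; }   /* buffer length, 0 if none */

    static void dv_device(void)
    {
        int len = ECHO_UNKNOWN;

        inquiry_compare_test();              /* safe on every device */

        /* Only devices that really negotiated DT are asked for an echo
         * buffer; some broken devices misbehave when probed otherwise. */
        if (len == ECHO_UNKNOWN && dt_negotiated())
            len = probe_echo_buffer();

        if (len <= 0)
            return;                          /* skip the pattern write tests */

        /* ... echo-buffer write tests would run here ... */
    }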
+3 -3
drivers/scsi/sg.c
··· 1860 unlock_page(pages[j]); */ 1861 res = 0; 1862 out_unmap: 1863 - if (res > 0) 1864 for (j=0; j < res; j++) 1865 page_cache_release(pages[j]); 1866 kfree(pages); 1867 return res; 1868 } ··· 1880 for (i=0; i < nr_pages; i++) { 1881 struct page *page = sgl[i].page; 1882 1883 - /* XXX: just for debug. Remove when PageReserved is removed */ 1884 - BUG_ON(PageReserved(page)); 1885 if (dirtied) 1886 SetPageDirty(page); 1887 /* unlock_page(page); */
··· 1860 unlock_page(pages[j]); */ 1861 res = 0; 1862 out_unmap: 1863 + if (res > 0) { 1864 for (j=0; j < res; j++) 1865 page_cache_release(pages[j]); 1866 + res = 0; 1867 + } 1868 kfree(pages); 1869 return res; 1870 } ··· 1878 for (i=0; i < nr_pages; i++) { 1879 struct page *page = sgl[i].page; 1880 1881 if (dirtied) 1882 SetPageDirty(page); 1883 /* unlock_page(page); */
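This sg.c hunk, and the matching st.c hunk below, fix the partial-pin error path: when fewer pages were pinned than requested, the code now releases what it did pin and zeroes res before returning, so the caller does not mistake a partial count for success; both hunks also drop the temporary PageReserved BUG_ON. A minimal sketch of the undo-partial-work pattern, with invented names standing in for get_user_pages()/page_cache_release():

    struct page;                             /* opaque in this sketch */

    /* Stand-ins for pinning the pages and unpinning one page. */
    static int  pin_pages(struct page **pages, int nr) { (void)pages; return nr / 2; } /* pretend partial */
    static void unpin_page(struct page *page)          { (void)page; }

    /* Returns the number of pages mapped, or 0 when the mapping was abandoned. */
    static int map_user_pages(struct page **pages, int nr_pages)
    {
        int res = pin_pages(pages, nr_pages);

        if (res < nr_pages)
            goto out_unmap;
        return res;                          /* all pages pinned */

     out_unmap:
        if (res > 0) {
            int j;

            for (j = 0; j < res; j++)
                unpin_page(pages[j]);        /* undo the partial pin */
            res = 0;                         /* report "nothing mapped", not a partial count */
        }
        return res;
    }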
+1 -2
drivers/scsi/st.c
··· 4509 if (res > 0) { 4510 for (j=0; j < res; j++) 4511 page_cache_release(pages[j]); 4512 } 4513 kfree(pages); 4514 return res; ··· 4525 for (i=0; i < nr_pages; i++) { 4526 struct page *page = sgl[i].page; 4527 4528 - /* XXX: just for debug. Remove when PageReserved is removed */ 4529 - BUG_ON(PageReserved(page)); 4530 if (dirtied) 4531 SetPageDirty(page); 4532 /* FIXME: cache flush missing for rw==READ
··· 4509 if (res > 0) { 4510 for (j=0; j < res; j++) 4511 page_cache_release(pages[j]); 4512 + res = 0; 4513 } 4514 kfree(pages); 4515 return res; ··· 4524 for (i=0; i < nr_pages; i++) { 4525 struct page *page = sgl[i].page; 4526 4527 if (dirtied) 4528 SetPageDirty(page); 4529 /* FIXME: cache flush missing for rw==READ
+4 -1
drivers/scsi/sym53c8xx_2/sym_glue.c
··· 2086 tp->tgoal.check_nego = 1; 2087 } 2088 2089 static void sym2_set_iu(struct scsi_target *starget, int iu) 2090 { 2091 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); ··· 2112 tp->tgoal.qas = 0; 2113 tp->tgoal.check_nego = 1; 2114 } 2115 - 2116 2117 static struct spi_function_template sym2_transport_functions = { 2118 .set_offset = sym2_set_offset, ··· 2123 .show_width = 1, 2124 .set_dt = sym2_set_dt, 2125 .show_dt = 1, 2126 .set_iu = sym2_set_iu, 2127 .show_iu = 1, 2128 .set_qas = sym2_set_qas, 2129 .show_qas = 1, 2130 .get_signalling = sym2_get_signalling, 2131 }; 2132
··· 2086 tp->tgoal.check_nego = 1; 2087 } 2088 2089 + #if 0 2090 static void sym2_set_iu(struct scsi_target *starget, int iu) 2091 { 2092 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); ··· 2111 tp->tgoal.qas = 0; 2112 tp->tgoal.check_nego = 1; 2113 } 2114 + #endif 2115 2116 static struct spi_function_template sym2_transport_functions = { 2117 .set_offset = sym2_set_offset, ··· 2122 .show_width = 1, 2123 .set_dt = sym2_set_dt, 2124 .show_dt = 1, 2125 + #if 0 2126 .set_iu = sym2_set_iu, 2127 .show_iu = 1, 2128 .set_qas = sym2_set_qas, 2129 .show_qas = 1, 2130 + #endif 2131 .get_signalling = sym2_get_signalling, 2132 }; 2133