Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[SCSI] lpfc 8.3.28: Critical Miscellaneous fixes

- Make lpfc_sli4_pci_mem_unset interface type aware (CR 124390)
- Convert byte count to word count when calling __iowrite32_copy (CR 122550)
- Check the ERR1 and ERR2 registers for error attention due to the SLI
Port state being affected by a forced debug dump. (CR 122986, 122426, 124859)
- Use the lpfc_readl routine instead of the readl for the port status
register read in lpfc_handle_eratt_s4 (CR 125403)
- Call lpfc_sli4_queue_destroy inside of lpfc_sli4_brdreset before doing
a pci function reset (CR 125124, 125168, 125572, 125622)
- Zero out the HBQ when it is allocated (CR 125663)
- Alter port reset log messages to indicate error type (CR 125989)
- Add proper NULL pointer checking to all the places that access
the queue memory (CR 125832)

Signed-off-by: Alex Iannicelli <alex.iannicelli@emulex.com>
Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>

authored by

James Smart and committed by
James Bottomley
2e90f4b5 df9e1b59

+350 -139
+3 -2
drivers/scsi/lpfc/lpfc_compat.h
··· 1 1 /******************************************************************* 2 2 * This file is part of the Emulex Linux Device Driver for * 3 3 * Fibre Channel Host Bus Adapters. * 4 - * Copyright (C) 2004-2005 Emulex. All rights reserved. * 4 + * Copyright (C) 2004-2011 Emulex. All rights reserved. * 5 5 * EMULEX and SLI are trademarks of Emulex. * 6 6 * www.emulex.com * 7 7 * * ··· 82 82 static inline void 83 83 lpfc_memcpy_to_slim( void __iomem *dest, void *src, unsigned int bytes) 84 84 { 85 - __iowrite32_copy(dest, src, bytes); 85 + /* convert bytes in argument list to word count for copy function */ 86 + __iowrite32_copy(dest, src, bytes / sizeof(uint32_t)); 86 87 } 87 88 88 89 static inline void
+113 -59
drivers/scsi/lpfc/lpfc_debugfs.c
··· 1997 1997 /* Get slow-path event queue information */ 1998 1998 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 1999 1999 "Slow-path EQ information:\n"); 2000 - len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2000 + if (phba->sli4_hba.sp_eq) { 2001 + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2001 2002 "\tEQID[%02d], " 2002 2003 "QE-COUNT[%04d], QE-SIZE[%04d], " 2003 2004 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n", ··· 2007 2006 phba->sli4_hba.sp_eq->entry_size, 2008 2007 phba->sli4_hba.sp_eq->host_index, 2009 2008 phba->sli4_hba.sp_eq->hba_index); 2009 + } 2010 2010 2011 2011 /* Get fast-path event queue information */ 2012 2012 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2013 2013 "Fast-path EQ information:\n"); 2014 - for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) { 2015 - len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2014 + if (phba->sli4_hba.fp_eq) { 2015 + for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; 2016 + fcp_qidx++) { 2017 + if (phba->sli4_hba.fp_eq[fcp_qidx]) { 2018 + len += snprintf(pbuffer+len, 2019 + LPFC_QUE_INFO_GET_BUF_SIZE-len, 2016 2020 "\tEQID[%02d], " 2017 2021 "QE-COUNT[%04d], QE-SIZE[%04d], " 2018 2022 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n", ··· 2026 2020 phba->sli4_hba.fp_eq[fcp_qidx]->entry_size, 2027 2021 phba->sli4_hba.fp_eq[fcp_qidx]->host_index, 2028 2022 phba->sli4_hba.fp_eq[fcp_qidx]->hba_index); 2023 + } 2024 + } 2029 2025 } 2030 2026 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); 2031 2027 2032 2028 /* Get mailbox complete queue information */ 2033 2029 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2034 2030 "Slow-path MBX CQ information:\n"); 2035 - len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2031 + if (phba->sli4_hba.mbx_cq) { 2032 + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2036 2033 "Associated EQID[%02d]:\n", 2037 2034 phba->sli4_hba.mbx_cq->assoc_qid); 
2038 - len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2035 + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2039 2036 "\tCQID[%02d], " 2040 2037 "QE-COUNT[%04d], QE-SIZE[%04d], " 2041 2038 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n", ··· 2047 2038 phba->sli4_hba.mbx_cq->entry_size, 2048 2039 phba->sli4_hba.mbx_cq->host_index, 2049 2040 phba->sli4_hba.mbx_cq->hba_index); 2041 + } 2050 2042 2051 2043 /* Get slow-path complete queue information */ 2052 2044 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2053 2045 "Slow-path ELS CQ information:\n"); 2054 - len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2046 + if (phba->sli4_hba.els_cq) { 2047 + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2055 2048 "Associated EQID[%02d]:\n", 2056 2049 phba->sli4_hba.els_cq->assoc_qid); 2057 - len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2050 + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2058 2051 "\tCQID [%02d], " 2059 2052 "QE-COUNT[%04d], QE-SIZE[%04d], " 2060 2053 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n", ··· 2065 2054 phba->sli4_hba.els_cq->entry_size, 2066 2055 phba->sli4_hba.els_cq->host_index, 2067 2056 phba->sli4_hba.els_cq->hba_index); 2057 + } 2068 2058 2069 2059 /* Get fast-path complete queue information */ 2070 2060 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2071 2061 "Fast-path FCP CQ information:\n"); 2072 2062 fcp_qidx = 0; 2073 - do { 2074 - len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2063 + if (phba->sli4_hba.fcp_cq) { 2064 + do { 2065 + if (phba->sli4_hba.fcp_cq[fcp_qidx]) { 2066 + len += snprintf(pbuffer+len, 2067 + LPFC_QUE_INFO_GET_BUF_SIZE-len, 2075 2068 "Associated EQID[%02d]:\n", 2076 2069 phba->sli4_hba.fcp_cq[fcp_qidx]->assoc_qid); 2077 - len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2070 + len += snprintf(pbuffer+len, 2071 + LPFC_QUE_INFO_GET_BUF_SIZE-len, 2078 2072 "\tCQID[%02d], " 2079 2073 
"QE-COUNT[%04d], QE-SIZE[%04d], " 2080 2074 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n", ··· 2088 2072 phba->sli4_hba.fcp_cq[fcp_qidx]->entry_size, 2089 2073 phba->sli4_hba.fcp_cq[fcp_qidx]->host_index, 2090 2074 phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index); 2091 - } while (++fcp_qidx < phba->cfg_fcp_eq_count); 2092 - len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); 2075 + } 2076 + } while (++fcp_qidx < phba->cfg_fcp_eq_count); 2077 + len += snprintf(pbuffer+len, 2078 + LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); 2079 + } 2093 2080 2094 2081 /* Get mailbox queue information */ 2095 2082 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2096 2083 "Slow-path MBX MQ information:\n"); 2097 - len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2084 + if (phba->sli4_hba.mbx_wq) { 2085 + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2098 2086 "Associated CQID[%02d]:\n", 2099 2087 phba->sli4_hba.mbx_wq->assoc_qid); 2100 - len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2088 + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2101 2089 "\tWQID[%02d], " 2102 2090 "QE-COUNT[%04d], QE-SIZE[%04d], " 2103 2091 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n", ··· 2110 2090 phba->sli4_hba.mbx_wq->entry_size, 2111 2091 phba->sli4_hba.mbx_wq->host_index, 2112 2092 phba->sli4_hba.mbx_wq->hba_index); 2093 + } 2113 2094 2114 2095 /* Get slow-path work queue information */ 2115 2096 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2116 2097 "Slow-path ELS WQ information:\n"); 2117 - len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2098 + if (phba->sli4_hba.els_wq) { 2099 + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2118 2100 "Associated CQID[%02d]:\n", 2119 2101 phba->sli4_hba.els_wq->assoc_qid); 2120 - len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2102 + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2121 2103 "\tWQID[%02d], " 2122 2104 "QE-COUNT[%04d], 
QE-SIZE[%04d], " 2123 2105 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n", ··· 2128 2106 phba->sli4_hba.els_wq->entry_size, 2129 2107 phba->sli4_hba.els_wq->host_index, 2130 2108 phba->sli4_hba.els_wq->hba_index); 2109 + } 2131 2110 2132 2111 /* Get fast-path work queue information */ 2133 2112 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2134 2113 "Fast-path FCP WQ information:\n"); 2135 - for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) { 2136 - len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2114 + if (phba->sli4_hba.fcp_wq) { 2115 + for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; 2116 + fcp_qidx++) { 2117 + if (!phba->sli4_hba.fcp_wq[fcp_qidx]) 2118 + continue; 2119 + len += snprintf(pbuffer+len, 2120 + LPFC_QUE_INFO_GET_BUF_SIZE-len, 2137 2121 "Associated CQID[%02d]:\n", 2138 2122 phba->sli4_hba.fcp_wq[fcp_qidx]->assoc_qid); 2139 - len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2123 + len += snprintf(pbuffer+len, 2124 + LPFC_QUE_INFO_GET_BUF_SIZE-len, 2140 2125 "\tWQID[%02d], " 2141 2126 "QE-COUNT[%04d], WQE-SIZE[%04d], " 2142 2127 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n", ··· 2152 2123 phba->sli4_hba.fcp_wq[fcp_qidx]->entry_size, 2153 2124 phba->sli4_hba.fcp_wq[fcp_qidx]->host_index, 2154 2125 phba->sli4_hba.fcp_wq[fcp_qidx]->hba_index); 2126 + } 2127 + len += snprintf(pbuffer+len, 2128 + LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); 2155 2129 } 2156 - len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); 2157 2130 2158 2131 /* Get receive queue information */ 2159 2132 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2160 2133 "Slow-path RQ information:\n"); 2161 - len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2134 + if (phba->sli4_hba.hdr_rq && phba->sli4_hba.dat_rq) { 2135 + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2162 2136 "Associated CQID[%02d]:\n", 2163 2137 phba->sli4_hba.hdr_rq->assoc_qid); 2164 - len += snprintf(pbuffer+len, 
LPFC_QUE_INFO_GET_BUF_SIZE-len, 2138 + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2165 2139 "\tHQID[%02d], " 2166 2140 "QE-COUNT[%04d], QE-SIZE[%04d], " 2167 2141 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n", ··· 2173 2141 phba->sli4_hba.hdr_rq->entry_size, 2174 2142 phba->sli4_hba.hdr_rq->host_index, 2175 2143 phba->sli4_hba.hdr_rq->hba_index); 2176 - len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2144 + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2177 2145 "\tDQID[%02d], " 2178 2146 "QE-COUNT[%04d], QE-SIZE[%04d], " 2179 2147 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n", ··· 2182 2150 phba->sli4_hba.dat_rq->entry_size, 2183 2151 phba->sli4_hba.dat_rq->host_index, 2184 2152 phba->sli4_hba.dat_rq->hba_index); 2185 - 2153 + } 2186 2154 return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); 2187 2155 } 2188 2156 ··· 2392 2360 switch (quetp) { 2393 2361 case LPFC_IDIAG_EQ: 2394 2362 /* Slow-path event queue */ 2395 - if (phba->sli4_hba.sp_eq->queue_id == queid) { 2363 + if (phba->sli4_hba.sp_eq && 2364 + phba->sli4_hba.sp_eq->queue_id == queid) { 2396 2365 /* Sanity check */ 2397 2366 rc = lpfc_idiag_que_param_check( 2398 2367 phba->sli4_hba.sp_eq, index, count); ··· 2403 2370 goto pass_check; 2404 2371 } 2405 2372 /* Fast-path event queue */ 2406 - for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) { 2407 - if (phba->sli4_hba.fp_eq[qidx]->queue_id == queid) { 2408 - /* Sanity check */ 2409 - rc = lpfc_idiag_que_param_check( 2373 + if (phba->sli4_hba.fp_eq) { 2374 + for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) { 2375 + if (phba->sli4_hba.fp_eq[qidx] && 2376 + phba->sli4_hba.fp_eq[qidx]->queue_id == 2377 + queid) { 2378 + /* Sanity check */ 2379 + rc = lpfc_idiag_que_param_check( 2410 2380 phba->sli4_hba.fp_eq[qidx], 2411 2381 index, count); 2412 - if (rc) 2413 - goto error_out; 2414 - idiag.ptr_private = phba->sli4_hba.fp_eq[qidx]; 2415 - goto pass_check; 2382 + if (rc) 2383 + goto error_out; 2384 + 
idiag.ptr_private = 2385 + phba->sli4_hba.fp_eq[qidx]; 2386 + goto pass_check; 2387 + } 2416 2388 } 2417 2389 } 2418 2390 goto error_out; 2419 2391 break; 2420 2392 case LPFC_IDIAG_CQ: 2421 2393 /* MBX complete queue */ 2422 - if (phba->sli4_hba.mbx_cq->queue_id == queid) { 2394 + if (phba->sli4_hba.mbx_cq && 2395 + phba->sli4_hba.mbx_cq->queue_id == queid) { 2423 2396 /* Sanity check */ 2424 2397 rc = lpfc_idiag_que_param_check( 2425 2398 phba->sli4_hba.mbx_cq, index, count); ··· 2435 2396 goto pass_check; 2436 2397 } 2437 2398 /* ELS complete queue */ 2438 - if (phba->sli4_hba.els_cq->queue_id == queid) { 2399 + if (phba->sli4_hba.els_cq && 2400 + phba->sli4_hba.els_cq->queue_id == queid) { 2439 2401 /* Sanity check */ 2440 2402 rc = lpfc_idiag_que_param_check( 2441 2403 phba->sli4_hba.els_cq, index, count); ··· 2446 2406 goto pass_check; 2447 2407 } 2448 2408 /* FCP complete queue */ 2449 - qidx = 0; 2450 - do { 2451 - if (phba->sli4_hba.fcp_cq[qidx]->queue_id == queid) { 2452 - /* Sanity check */ 2453 - rc = lpfc_idiag_que_param_check( 2409 + if (phba->sli4_hba.fcp_cq) { 2410 + qidx = 0; 2411 + do { 2412 + if (phba->sli4_hba.fcp_cq[qidx] && 2413 + phba->sli4_hba.fcp_cq[qidx]->queue_id == 2414 + queid) { 2415 + /* Sanity check */ 2416 + rc = lpfc_idiag_que_param_check( 2454 2417 phba->sli4_hba.fcp_cq[qidx], 2455 2418 index, count); 2456 - if (rc) 2457 - goto error_out; 2458 - idiag.ptr_private = 2419 + if (rc) 2420 + goto error_out; 2421 + idiag.ptr_private = 2459 2422 phba->sli4_hba.fcp_cq[qidx]; 2460 - goto pass_check; 2461 - } 2462 - } while (++qidx < phba->cfg_fcp_eq_count); 2423 + goto pass_check; 2424 + } 2425 + } while (++qidx < phba->cfg_fcp_eq_count); 2426 + } 2463 2427 goto error_out; 2464 2428 break; 2465 2429 case LPFC_IDIAG_MQ: 2466 2430 /* MBX work queue */ 2467 - if (phba->sli4_hba.mbx_wq->queue_id == queid) { 2431 + if (phba->sli4_hba.mbx_wq && 2432 + phba->sli4_hba.mbx_wq->queue_id == queid) { 2468 2433 /* Sanity check */ 2469 2434 rc = 
lpfc_idiag_que_param_check( 2470 2435 phba->sli4_hba.mbx_wq, index, count); ··· 2478 2433 idiag.ptr_private = phba->sli4_hba.mbx_wq; 2479 2434 goto pass_check; 2480 2435 } 2436 + goto error_out; 2481 2437 break; 2482 2438 case LPFC_IDIAG_WQ: 2483 2439 /* ELS work queue */ 2484 - if (phba->sli4_hba.els_wq->queue_id == queid) { 2440 + if (phba->sli4_hba.els_wq && 2441 + phba->sli4_hba.els_wq->queue_id == queid) { 2485 2442 /* Sanity check */ 2486 2443 rc = lpfc_idiag_que_param_check( 2487 2444 phba->sli4_hba.els_wq, index, count); ··· 2493 2446 goto pass_check; 2494 2447 } 2495 2448 /* FCP work queue */ 2496 - for (qidx = 0; qidx < phba->cfg_fcp_wq_count; qidx++) { 2497 - if (phba->sli4_hba.fcp_wq[qidx]->queue_id == queid) { 2498 - /* Sanity check */ 2499 - rc = lpfc_idiag_que_param_check( 2449 + if (phba->sli4_hba.fcp_wq) { 2450 + for (qidx = 0; qidx < phba->cfg_fcp_wq_count; qidx++) { 2451 + if (!phba->sli4_hba.fcp_wq[qidx]) 2452 + continue; 2453 + if (phba->sli4_hba.fcp_wq[qidx]->queue_id == 2454 + queid) { 2455 + /* Sanity check */ 2456 + rc = lpfc_idiag_que_param_check( 2500 2457 phba->sli4_hba.fcp_wq[qidx], 2501 2458 index, count); 2502 - if (rc) 2503 - goto error_out; 2504 - idiag.ptr_private = 2505 - phba->sli4_hba.fcp_wq[qidx]; 2506 - goto pass_check; 2459 + if (rc) 2460 + goto error_out; 2461 + idiag.ptr_private = 2462 + phba->sli4_hba.fcp_wq[qidx]; 2463 + goto pass_check; 2464 + } 2507 2465 } 2508 2466 } 2509 2467 goto error_out; 2510 2468 break; 2511 2469 case LPFC_IDIAG_RQ: 2512 2470 /* HDR queue */ 2513 - if (phba->sli4_hba.hdr_rq->queue_id == queid) { 2471 + if (phba->sli4_hba.hdr_rq && 2472 + phba->sli4_hba.hdr_rq->queue_id == queid) { 2514 2473 /* Sanity check */ 2515 2474 rc = lpfc_idiag_que_param_check( 2516 2475 phba->sli4_hba.hdr_rq, index, count); ··· 2526 2473 goto pass_check; 2527 2474 } 2528 2475 /* DAT queue */ 2529 - if (phba->sli4_hba.dat_rq->queue_id == queid) { 2476 + if (phba->sli4_hba.dat_rq && 2477 + phba->sli4_hba.dat_rq->queue_id == 
queid) { 2530 2478 /* Sanity check */ 2531 2479 rc = lpfc_idiag_que_param_check( 2532 2480 phba->sli4_hba.dat_rq, index, count);
+116 -48
drivers/scsi/lpfc/lpfc_init.c
··· 1417 1417 uint32_t event_data; 1418 1418 struct Scsi_Host *shost; 1419 1419 uint32_t if_type; 1420 - struct lpfc_register portstat_reg; 1420 + struct lpfc_register portstat_reg = {0}; 1421 + uint32_t reg_err1, reg_err2; 1422 + uint32_t uerrlo_reg, uemasklo_reg; 1423 + uint32_t pci_rd_rc1, pci_rd_rc2; 1421 1424 int rc; 1422 1425 1423 1426 /* If the pci channel is offline, ignore possible errors, since ··· 1432 1429 if (!phba->cfg_enable_hba_reset) 1433 1430 return; 1434 1431 1435 - /* Send an internal error event to mgmt application */ 1436 - lpfc_board_errevt_to_mgmt(phba); 1437 - 1438 - /* For now, the actual action for SLI4 device handling is not 1439 - * specified yet, just treated it as adaptor hardware failure 1440 - */ 1441 - event_data = FC_REG_DUMP_EVENT; 1442 - shost = lpfc_shost_from_vport(vport); 1443 - fc_host_post_vendor_event(shost, fc_get_event_number(), 1444 - sizeof(event_data), (char *) &event_data, 1445 - SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1446 - 1447 1432 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 1448 1433 switch (if_type) { 1449 1434 case LPFC_SLI_INTF_IF_TYPE_0: 1435 + pci_rd_rc1 = lpfc_readl( 1436 + phba->sli4_hba.u.if_type0.UERRLOregaddr, 1437 + &uerrlo_reg); 1438 + pci_rd_rc2 = lpfc_readl( 1439 + phba->sli4_hba.u.if_type0.UEMASKLOregaddr, 1440 + &uemasklo_reg); 1441 + /* consider PCI bus read error as pci_channel_offline */ 1442 + if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO) 1443 + return; 1450 1444 lpfc_sli4_offline_eratt(phba); 1451 1445 break; 1452 1446 case LPFC_SLI_INTF_IF_TYPE_2: 1453 - portstat_reg.word0 = 1454 - readl(phba->sli4_hba.u.if_type2.STATUSregaddr); 1455 - 1447 + pci_rd_rc1 = lpfc_readl( 1448 + phba->sli4_hba.u.if_type2.STATUSregaddr, 1449 + &portstat_reg.word0); 1450 + /* consider PCI bus read error as pci_channel_offline */ 1451 + if (pci_rd_rc1 == -EIO) 1452 + return; 1453 + reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr); 1454 + reg_err2 = 
readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 1456 1455 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) { 1457 1456 /* TODO: Register for Overtemp async events. */ 1458 1457 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, ··· 1464 1459 phba->over_temp_state = HBA_OVER_TEMP; 1465 1460 spin_unlock_irq(&phba->hbalock); 1466 1461 lpfc_sli4_offline_eratt(phba); 1467 - return; 1462 + break; 1468 1463 } 1464 + if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1465 + reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) 1466 + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1467 + "3143 Port Down: Firmware Restarted\n"); 1468 + else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1469 + reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 1470 + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1471 + "3144 Port Down: Debug Dump\n"); 1472 + else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1473 + reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON) 1474 + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1475 + "3145 Port Down: Provisioning\n"); 1469 1476 /* 1470 1477 * On error status condition, driver need to wait for port 1471 1478 * ready before performing reset. 
··· 1486 1469 if (!rc) { 1487 1470 /* need reset: attempt for port recovery */ 1488 1471 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1489 - "2887 Port Error: Attempting " 1490 - "Port Recovery\n"); 1472 + "2887 Reset Needed: Attempting Port " 1473 + "Recovery...\n"); 1491 1474 lpfc_offline_prep(phba); 1492 1475 lpfc_offline(phba); 1493 1476 lpfc_sli_brdrestart(phba); 1494 1477 if (lpfc_online(phba) == 0) { 1495 1478 lpfc_unblock_mgmt_io(phba); 1496 - return; 1479 + /* don't report event on forced debug dump */ 1480 + if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1481 + reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 1482 + return; 1483 + else 1484 + break; 1497 1485 } 1498 1486 /* fall through for not able to recover */ 1499 1487 } ··· 1508 1486 default: 1509 1487 break; 1510 1488 } 1489 + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1490 + "3123 Report dump event to upper layer\n"); 1491 + /* Send an internal error event to mgmt application */ 1492 + lpfc_board_errevt_to_mgmt(phba); 1493 + 1494 + event_data = FC_REG_DUMP_EVENT; 1495 + shost = lpfc_shost_from_vport(vport); 1496 + fc_host_post_vendor_event(shost, fc_get_event_number(), 1497 + sizeof(event_data), (char *) &event_data, 1498 + SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1511 1499 } 1512 1500 1513 1501 /** ··· 6507 6475 phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL; 6508 6476 } 6509 6477 kfree(phba->sli4_hba.fcp_wq); 6478 + phba->sli4_hba.fcp_wq = NULL; 6510 6479 out_free_els_wq: 6511 6480 lpfc_sli4_queue_free(phba->sli4_hba.els_wq); 6512 6481 phba->sli4_hba.els_wq = NULL; ··· 6520 6487 phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL; 6521 6488 } 6522 6489 kfree(phba->sli4_hba.fcp_cq); 6490 + phba->sli4_hba.fcp_cq = NULL; 6523 6491 out_free_els_cq: 6524 6492 lpfc_sli4_queue_free(phba->sli4_hba.els_cq); 6525 6493 phba->sli4_hba.els_cq = NULL; ··· 6533 6499 phba->sli4_hba.fp_eq[fcp_eqidx] = NULL; 6534 6500 } 6535 6501 kfree(phba->sli4_hba.fp_eq); 6502 + phba->sli4_hba.fp_eq = NULL; 6536 6503 out_free_sp_eq: 6537 6504 
lpfc_sli4_queue_free(phba->sli4_hba.sp_eq); 6538 6505 phba->sli4_hba.sp_eq = NULL; ··· 6567 6532 phba->sli4_hba.els_wq = NULL; 6568 6533 6569 6534 /* Release FCP work queue */ 6570 - for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) 6571 - lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]); 6535 + if (phba->sli4_hba.fcp_wq != NULL) 6536 + for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; 6537 + fcp_qidx++) 6538 + lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]); 6572 6539 kfree(phba->sli4_hba.fcp_wq); 6573 6540 phba->sli4_hba.fcp_wq = NULL; 6574 6541 ··· 6590 6553 6591 6554 /* Release FCP response complete queue */ 6592 6555 fcp_qidx = 0; 6593 - do 6594 - lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]); 6595 - while (++fcp_qidx < phba->cfg_fcp_eq_count); 6556 + if (phba->sli4_hba.fcp_cq != NULL) 6557 + do 6558 + lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]); 6559 + while (++fcp_qidx < phba->cfg_fcp_eq_count); 6596 6560 kfree(phba->sli4_hba.fcp_cq); 6597 6561 phba->sli4_hba.fcp_cq = NULL; 6598 6562 6599 6563 /* Release fast-path event queue */ 6600 - for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 6601 - lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]); 6564 + if (phba->sli4_hba.fp_eq != NULL) 6565 + for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; 6566 + fcp_qidx++) 6567 + lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]); 6602 6568 kfree(phba->sli4_hba.fp_eq); 6603 6569 phba->sli4_hba.fp_eq = NULL; 6604 6570 ··· 6654 6614 phba->sli4_hba.sp_eq->queue_id); 6655 6615 6656 6616 /* Set up fast-path event queue */ 6617 + if (!phba->sli4_hba.fp_eq) { 6618 + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6619 + "3147 Fast-path EQs not allocated\n"); 6620 + goto out_destroy_sp_eq; 6621 + } 6657 6622 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 6658 6623 if (!phba->sli4_hba.fp_eq[fcp_eqidx]) { 6659 6624 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, ··· 6723 6678 
phba->sli4_hba.sp_eq->queue_id); 6724 6679 6725 6680 /* Set up fast-path FCP Response Complete Queue */ 6681 + if (!phba->sli4_hba.fcp_cq) { 6682 + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6683 + "3148 Fast-path FCP CQ array not " 6684 + "allocated\n"); 6685 + goto out_destroy_els_cq; 6686 + } 6726 6687 fcp_cqidx = 0; 6727 6688 do { 6728 6689 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { ··· 6808 6757 phba->sli4_hba.els_cq->queue_id); 6809 6758 6810 6759 /* Set up fast-path FCP Work Queue */ 6760 + if (!phba->sli4_hba.fcp_wq) { 6761 + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6762 + "3149 Fast-path FCP WQ array not " 6763 + "allocated\n"); 6764 + goto out_destroy_els_wq; 6765 + } 6811 6766 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) { 6812 6767 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) { 6813 6768 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, ··· 6875 6818 out_destroy_fcp_wq: 6876 6819 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) 6877 6820 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]); 6821 + out_destroy_els_wq: 6878 6822 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 6879 6823 out_destroy_mbx_wq: 6880 6824 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 6881 6825 out_destroy_fcp_cq: 6882 6826 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) 6883 6827 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]); 6828 + out_destroy_els_cq: 6884 6829 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 6885 6830 out_destroy_mbx_cq: 6886 6831 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 6887 6832 out_destroy_fp_eq: 6888 6833 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) 6889 6834 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]); 6835 + out_destroy_sp_eq: 6890 6836 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq); 6891 6837 out_error: 6892 6838 return rc; ··· 6926 6866 /* Unset ELS complete queue */ 6927 6867 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 6928 6868 /* Unset FCP response complete queue */ 6929 - fcp_qidx = 0; 6930 - do { 6931 - 
lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]); 6932 - } while (++fcp_qidx < phba->cfg_fcp_eq_count); 6869 + if (phba->sli4_hba.fcp_cq) { 6870 + fcp_qidx = 0; 6871 + do { 6872 + lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]); 6873 + } while (++fcp_qidx < phba->cfg_fcp_eq_count); 6874 + } 6933 6875 /* Unset fast-path event queue */ 6934 - for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 6935 - lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]); 6876 + if (phba->sli4_hba.fp_eq) { 6877 + for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; 6878 + fcp_qidx++) 6879 + lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]); 6880 + } 6936 6881 /* Unset slow-path event queue */ 6937 6882 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq); 6938 6883 } ··· 7476 7411 static void 7477 7412 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) 7478 7413 { 7479 - struct pci_dev *pdev; 7414 + uint32_t if_type; 7415 + if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 7480 7416 7481 - /* Obtain PCI device reference */ 7482 - if (!phba->pcidev) 7483 - return; 7484 - else 7485 - pdev = phba->pcidev; 7486 - 7487 - /* Free coherent DMA memory allocated */ 7488 - 7489 - /* Unmap I/O memory space */ 7490 - iounmap(phba->sli4_hba.drbl_regs_memmap_p); 7491 - iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 7492 - iounmap(phba->sli4_hba.conf_regs_memmap_p); 7493 - 7494 - return; 7417 + switch (if_type) { 7418 + case LPFC_SLI_INTF_IF_TYPE_0: 7419 + iounmap(phba->sli4_hba.drbl_regs_memmap_p); 7420 + iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 7421 + iounmap(phba->sli4_hba.conf_regs_memmap_p); 7422 + break; 7423 + case LPFC_SLI_INTF_IF_TYPE_2: 7424 + iounmap(phba->sli4_hba.conf_regs_memmap_p); 7425 + break; 7426 + case LPFC_SLI_INTF_IF_TYPE_1: 7427 + default: 7428 + dev_printk(KERN_ERR, &phba->pcidev->dev, 7429 + "FATAL - unsupported SLI4 interface type - %d\n", 7430 + if_type); 7431 + break; 7432 + } 7495 7433 } 7496 7434 7497 7435 /**
+2 -2
drivers/scsi/lpfc/lpfc_mem.c
··· 389 389 { 390 390 struct hbq_dmabuf *hbqbp; 391 391 392 - hbqbp = kmalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL); 392 + hbqbp = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL); 393 393 if (!hbqbp) 394 394 return NULL; 395 395 ··· 441 441 { 442 442 struct hbq_dmabuf *dma_buf; 443 443 444 - dma_buf = kmalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL); 444 + dma_buf = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL); 445 445 if (!dma_buf) 446 446 return NULL; 447 447
+107 -28
drivers/scsi/lpfc/lpfc_sli.c
··· 89 89 static uint32_t 90 90 lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe) 91 91 { 92 - union lpfc_wqe *temp_wqe = q->qe[q->host_index].wqe; 92 + union lpfc_wqe *temp_wqe; 93 93 struct lpfc_register doorbell; 94 94 uint32_t host_index; 95 + 96 + /* sanity check on queue memory */ 97 + if (unlikely(!q)) 98 + return -ENOMEM; 99 + temp_wqe = q->qe[q->host_index].wqe; 95 100 96 101 /* If the host has not yet processed the next entry then we are done */ 97 102 if (((q->host_index + 1) % q->entry_count) == q->hba_index) ··· 139 134 { 140 135 uint32_t released = 0; 141 136 137 + /* sanity check on queue memory */ 138 + if (unlikely(!q)) 139 + return 0; 140 + 142 141 if (q->hba_index == index) 143 142 return 0; 144 143 do { ··· 167 158 static uint32_t 168 159 lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe) 169 160 { 170 - struct lpfc_mqe *temp_mqe = q->qe[q->host_index].mqe; 161 + struct lpfc_mqe *temp_mqe; 171 162 struct lpfc_register doorbell; 172 163 uint32_t host_index; 164 + 165 + /* sanity check on queue memory */ 166 + if (unlikely(!q)) 167 + return -ENOMEM; 168 + temp_mqe = q->qe[q->host_index].mqe; 173 169 174 170 /* If the host has not yet processed the next entry then we are done */ 175 171 if (((q->host_index + 1) % q->entry_count) == q->hba_index) ··· 209 195 static uint32_t 210 196 lpfc_sli4_mq_release(struct lpfc_queue *q) 211 197 { 198 + /* sanity check on queue memory */ 199 + if (unlikely(!q)) 200 + return 0; 201 + 212 202 /* Clear the mailbox pointer for completion */ 213 203 q->phba->mbox = NULL; 214 204 q->hba_index = ((q->hba_index + 1) % q->entry_count); ··· 231 213 static struct lpfc_eqe * 232 214 lpfc_sli4_eq_get(struct lpfc_queue *q) 233 215 { 234 - struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe; 216 + struct lpfc_eqe *eqe; 217 + 218 + /* sanity check on queue memory */ 219 + if (unlikely(!q)) 220 + return NULL; 221 + eqe = q->qe[q->hba_index].eqe; 235 222 236 223 /* If the next EQE is not valid then we are done */ 
237 224 if (!bf_get_le32(lpfc_eqe_valid, eqe)) ··· 270 247 uint32_t released = 0; 271 248 struct lpfc_eqe *temp_eqe; 272 249 struct lpfc_register doorbell; 250 + 251 + /* sanity check on queue memory */ 252 + if (unlikely(!q)) 253 + return 0; 273 254 274 255 /* while there are valid entries */ 275 256 while (q->hba_index != q->host_index) { ··· 315 288 { 316 289 struct lpfc_cqe *cqe; 317 290 291 + /* sanity check on queue memory */ 292 + if (unlikely(!q)) 293 + return NULL; 294 + 318 295 /* If the next CQE is not valid then we are done */ 319 296 if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe)) 320 297 return NULL; ··· 353 322 struct lpfc_cqe *temp_qe; 354 323 struct lpfc_register doorbell; 355 324 325 + /* sanity check on queue memory */ 326 + if (unlikely(!q)) 327 + return 0; 356 328 /* while there are valid entries */ 357 329 while (q->hba_index != q->host_index) { 358 330 temp_qe = q->qe[q->host_index].cqe; ··· 393 359 lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq, 394 360 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe) 395 361 { 396 - struct lpfc_rqe *temp_hrqe = hq->qe[hq->host_index].rqe; 397 - struct lpfc_rqe *temp_drqe = dq->qe[dq->host_index].rqe; 362 + struct lpfc_rqe *temp_hrqe; 363 + struct lpfc_rqe *temp_drqe; 398 364 struct lpfc_register doorbell; 399 365 int put_index = hq->host_index; 366 + 367 + /* sanity check on queue memory */ 368 + if (unlikely(!hq) || unlikely(!dq)) 369 + return -ENOMEM; 370 + temp_hrqe = hq->qe[hq->host_index].rqe; 371 + temp_drqe = dq->qe[dq->host_index].rqe; 400 372 401 373 if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ) 402 374 return -EINVAL; ··· 442 402 static uint32_t 443 403 lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq) 444 404 { 405 + /* sanity check on queue memory */ 406 + if (unlikely(!hq) || unlikely(!dq)) 407 + return 0; 408 + 445 409 if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ)) 446 410 return 0; 447 411 hq->hba_index = ((hq->hba_index + 1) % 
hq->entry_count); ··· 3895 3851 { 3896 3852 struct lpfc_sli *psli = &phba->sli; 3897 3853 uint16_t cfg_value; 3898 - uint8_t qindx; 3899 3854 3900 3855 /* Reset HBA */ 3901 3856 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, ··· 3910 3867 spin_lock_irq(&phba->hbalock); 3911 3868 psli->sli_flag &= ~(LPFC_PROCESS_LA); 3912 3869 phba->fcf.fcf_flag = 0; 3913 - /* Clean up the child queue list for the CQs */ 3914 - list_del_init(&phba->sli4_hba.mbx_wq->list); 3915 - list_del_init(&phba->sli4_hba.els_wq->list); 3916 - list_del_init(&phba->sli4_hba.hdr_rq->list); 3917 - list_del_init(&phba->sli4_hba.dat_rq->list); 3918 - list_del_init(&phba->sli4_hba.mbx_cq->list); 3919 - list_del_init(&phba->sli4_hba.els_cq->list); 3920 - for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++) 3921 - list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list); 3922 - qindx = 0; 3923 - do 3924 - list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list); 3925 - while (++qindx < phba->cfg_fcp_eq_count); 3926 3870 spin_unlock_irq(&phba->hbalock); 3927 3871 3928 3872 /* Now physically reset the device */ ··· 3922 3892 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 3923 3893 3924 3894 /* Perform FCoE PCI function reset */ 3895 + lpfc_sli4_queue_destroy(phba); 3925 3896 lpfc_pci_function_reset(phba); 3926 3897 3927 3898 /* Restore PCI cmd register */ ··· 4900 4869 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM); 4901 4870 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM); 4902 4871 fcp_eqidx = 0; 4903 - do 4904 - lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx], 4905 - LPFC_QUEUE_REARM); 4906 - while (++fcp_eqidx < phba->cfg_fcp_eq_count); 4872 + if (phba->sli4_hba.fcp_cq) { 4873 + do 4874 + lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx], 4875 + LPFC_QUEUE_REARM); 4876 + while (++fcp_eqidx < phba->cfg_fcp_eq_count); 4877 + } 4907 4878 lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM); 4908 - for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) 4909 - 
lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx], 4910 - LPFC_QUEUE_REARM); 4879 + if (phba->sli4_hba.fp_eq) { 4880 + for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; 4881 + fcp_eqidx++) 4882 + lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx], 4883 + LPFC_QUEUE_REARM); 4884 + } 4911 4885 } 4912 4886 4913 4887 /** ··· 8119 8083 */ 8120 8084 if (piocb->iocb_flag & LPFC_IO_FCP) 8121 8085 piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba); 8086 + if (unlikely(!phba->sli4_hba.fcp_wq)) 8087 + return IOCB_ERROR; 8122 8088 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx], 8123 8089 &wqe)) 8124 8090 return IOCB_ERROR; ··· 9938 9900 phba->work_status[1] = 9939 9901 readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 9940 9902 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9941 - "2885 Port Error Detected: " 9903 + "2885 Port Status Event: " 9942 9904 "port status reg 0x%x, " 9943 9905 "port smphr reg 0x%x, " 9944 9906 "error 1=0x%x, error 2=0x%x\n", ··· 10944 10906 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba, 10945 10907 struct lpfc_wcqe_release *wcqe) 10946 10908 { 10909 + /* sanity check on queue memory */ 10910 + if (unlikely(!phba->sli4_hba.els_wq)) 10911 + return; 10947 10912 /* Check for the slow-path ELS work queue */ 10948 10913 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id) 10949 10914 lpfc_sli4_wq_release(phba->sli4_hba.els_wq, ··· 11035 10994 struct hbq_dmabuf *dma_buf; 11036 10995 uint32_t status, rq_id; 11037 10996 unsigned long iflags; 10997 + 10998 + /* sanity check on queue memory */ 10999 + if (unlikely(!hrq) || unlikely(!drq)) 11000 + return workposted; 11038 11001 11039 11002 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1) 11040 11003 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe); ··· 11174 11129 11175 11130 /* Search for completion queue pointer matching this cqid */ 11176 11131 speq = phba->sli4_hba.sp_eq; 11132 + /* sanity check on queue memory */ 11133 + if (unlikely(!speq)) 11134 + return; 11177 
11135 list_for_each_entry(childq, &speq->child_list, list) { 11178 11136 if (childq->queue_id == cqid) { 11179 11137 cq = childq; ··· 11418 11370 return; 11419 11371 } 11420 11372 11373 + if (unlikely(!phba->sli4_hba.fcp_cq)) { 11374 + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11375 + "3146 Fast-path completion queues " 11376 + "does not exist\n"); 11377 + return; 11378 + } 11421 11379 cq = phba->sli4_hba.fcp_cq[fcp_cqidx]; 11422 11380 if (unlikely(!cq)) { 11423 11381 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 11424 11382 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11425 11383 "0367 Fast-path completion queue " 11426 - "does not exist\n"); 11384 + "(%d) does not exist\n", fcp_cqidx); 11427 11385 return; 11428 11386 } 11429 11387 ··· 11600 11546 11601 11547 /* Get to the EQ struct associated with this vector */ 11602 11548 fpeq = phba->sli4_hba.fp_eq[fcp_eqidx]; 11549 + if (unlikely(!fpeq)) 11550 + return IRQ_NONE; 11603 11551 11604 11552 /* Check device state for handling interrupt */ 11605 11553 if (unlikely(lpfc_intr_state_check(phba))) { ··· 11820 11764 uint16_t dmult; 11821 11765 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 11822 11766 11767 + /* sanity check on queue memory */ 11768 + if (!eq) 11769 + return -ENODEV; 11823 11770 if (!phba->sli4_hba.pc_sli4_params.supported) 11824 11771 hw_page_size = SLI4_PAGE_SIZE; 11825 11772 ··· 11939 11880 union lpfc_sli4_cfg_shdr *shdr; 11940 11881 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 11941 11882 11883 + /* sanity check on queue memory */ 11884 + if (!cq || !eq) 11885 + return -ENODEV; 11942 11886 if (!phba->sli4_hba.pc_sli4_params.supported) 11943 11887 hw_page_size = SLI4_PAGE_SIZE; 11944 11888 ··· 12124 12062 union lpfc_sli4_cfg_shdr *shdr; 12125 12063 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 12126 12064 12065 + /* sanity check on queue memory */ 12066 + if (!mq || !cq) 12067 + return -ENODEV; 12127 12068 if 
(!phba->sli4_hba.pc_sli4_params.supported) 12128 12069 hw_page_size = SLI4_PAGE_SIZE; 12129 12070 ··· 12277 12212 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 12278 12213 struct dma_address *page; 12279 12214 12215 + /* sanity check on queue memory */ 12216 + if (!wq || !cq) 12217 + return -ENODEV; 12280 12218 if (!phba->sli4_hba.pc_sli4_params.supported) 12281 12219 hw_page_size = SLI4_PAGE_SIZE; 12282 12220 ··· 12372 12304 { 12373 12305 uint32_t cnt; 12374 12306 12307 + /* sanity check on queue memory */ 12308 + if (!rq) 12309 + return; 12375 12310 cnt = lpfc_hbq_defs[qno]->entry_count; 12376 12311 12377 12312 /* Recalc repost for RQs based on buffers initially posted */ ··· 12420 12349 union lpfc_sli4_cfg_shdr *shdr; 12421 12350 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 12422 12351 12352 + /* sanity check on queue memory */ 12353 + if (!hrq || !drq || !cq) 12354 + return -ENODEV; 12423 12355 if (!phba->sli4_hba.pc_sli4_params.supported) 12424 12356 hw_page_size = SLI4_PAGE_SIZE; 12425 12357 ··· 12624 12550 uint32_t shdr_status, shdr_add_status; 12625 12551 union lpfc_sli4_cfg_shdr *shdr; 12626 12552 12553 + /* sanity check on queue memory */ 12627 12554 if (!eq) 12628 12555 return -ENODEV; 12629 12556 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL); ··· 12680 12605 uint32_t shdr_status, shdr_add_status; 12681 12606 union lpfc_sli4_cfg_shdr *shdr; 12682 12607 12608 + /* sanity check on queue memory */ 12683 12609 if (!cq) 12684 12610 return -ENODEV; 12685 12611 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL); ··· 12734 12658 uint32_t shdr_status, shdr_add_status; 12735 12659 union lpfc_sli4_cfg_shdr *shdr; 12736 12660 12661 + /* sanity check on queue memory */ 12737 12662 if (!mq) 12738 12663 return -ENODEV; 12739 12664 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL); ··· 12788 12711 uint32_t shdr_status, shdr_add_status; 12789 12712 union lpfc_sli4_cfg_shdr *shdr; 12790 12713 12714 + /* 
sanity check on queue memory */ 12791 12715 if (!wq) 12792 12716 return -ENODEV; 12793 12717 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL); ··· 12842 12764 uint32_t shdr_status, shdr_add_status; 12843 12765 union lpfc_sli4_cfg_shdr *shdr; 12844 12766 12767 + /* sanity check on queue memory */ 12845 12768 if (!hrq || !drq) 12846 12769 return -ENODEV; 12847 12770 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
+9
drivers/scsi/lpfc/lpfc_sli4.h
··· 420 420 void __iomem *STATUSregaddr; 421 421 void __iomem *CTRLregaddr; 422 422 void __iomem *ERR1regaddr; 423 + #define SLIPORT_ERR1_REG_ERR_CODE_1 0x1 424 + #define SLIPORT_ERR1_REG_ERR_CODE_2 0x2 423 425 void __iomem *ERR2regaddr; 426 + #define SLIPORT_ERR2_REG_FW_RESTART 0x0 427 + #define SLIPORT_ERR2_REG_FUNC_PROVISON 0x1 428 + #define SLIPORT_ERR2_REG_FORCED_DUMP 0x2 429 + #define SLIPORT_ERR2_REG_FAILURE_EQ 0x3 430 + #define SLIPORT_ERR2_REG_FAILURE_CQ 0x4 431 + #define SLIPORT_ERR2_REG_FAILURE_BUS 0x5 432 + #define SLIPORT_ERR2_REG_FAILURE_RQ 0x6 424 433 } if_type2; 425 434 } u; 426 435