Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[SCSI] lpfc 8.2.8 v2 : Add statistical reporting control and additional fc vendor events

Added support for new sysfs attributes: lpfc_stat_data_ctrl and
lpfc_max_scsicmpl_time. These attributes control statistical reporting
of I/O load.

Added support for new FC vendor events for error reporting.

Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>

authored by

James Smart and committed by
James Bottomley
ea2151b4 977b5a0a

+1207 -15
+15 -12
drivers/scsi/lpfc/lpfc.h
··· 40 40 #define LPFC_MIN_TGT_QDEPTH 100 41 41 #define LPFC_MAX_TGT_QDEPTH 0xFFFF 42 42 43 + #define LPFC_MAX_BUCKET_COUNT 20 /* Maximum no. of buckets for stat data 44 + collection. */ 43 45 /* 44 46 * Following time intervals are used of adjusting SCSI device 45 47 * queue depths when there are driver resource error or Firmware ··· 383 381 struct lpfc_debugfs_trc *disc_trc; 384 382 atomic_t disc_trc_cnt; 385 383 #endif 384 + uint8_t stat_data_enabled; 385 + uint8_t stat_data_blocked; 386 386 }; 387 387 388 388 struct hbq_s { ··· 645 641 uint32_t buffer_tag_count; 646 642 int wait_4_mlo_maint_flg; 647 643 wait_queue_head_t wait_4_mlo_m_q; 644 + /* data structure used for latency data collection */ 645 + #define LPFC_NO_BUCKET 0 646 + #define LPFC_LINEAR_BUCKET 1 647 + #define LPFC_POWER2_BUCKET 2 648 + uint8_t bucket_type; 649 + uint32_t bucket_base; 650 + uint32_t bucket_step; 651 + 652 + /* Maximum number of events that can be outstanding at any time*/ 653 + #define LPFC_MAX_EVT_COUNT 512 654 + atomic_t fast_event_count; 648 655 }; 649 656 650 657 static inline struct Scsi_Host * ··· 714 699 return; 715 700 } 716 701 717 - #define FC_REG_DUMP_EVENT 0x10 /* Register for Dump events */ 718 - #define FC_REG_TEMPERATURE_EVENT 0x20 /* Register for temperature 719 - event */ 720 - 721 - struct temp_event { 722 - uint32_t event_type; 723 - uint32_t event_code; 724 - uint32_t data; 725 - }; 726 - #define LPFC_CRIT_TEMP 0x1 727 - #define LPFC_THRESHOLD_TEMP 0x2 728 - #define LPFC_NORMAL_TEMP 0x3
+342 -1
drivers/scsi/lpfc/lpfc_attr.c
··· 32 32 33 33 #include "lpfc_hw.h" 34 34 #include "lpfc_sli.h" 35 + #include "lpfc_nl.h" 35 36 #include "lpfc_disc.h" 36 37 #include "lpfc_scsi.h" 37 38 #include "lpfc.h" ··· 2184 2183 static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR, 2185 2184 lpfc_topology_show, lpfc_topology_store); 2186 2185 2186 + 2187 + /** 2188 + * lpfc_stat_data_ctrl_store: write call back for lpfc_stat_data_ctrl 2189 + * sysfs file. 2190 + * @dev: Pointer to class device. 2191 + * @buf: Data buffer. 2192 + * @count: Size of the data buffer. 2193 + * 2194 + * This function get called when an user write to the lpfc_stat_data_ctrl 2195 + * sysfs file. This function parse the command written to the sysfs file 2196 + * and take appropriate action. These commands are used for controlling 2197 + * driver statistical data collection. 2198 + * Following are the command this function handles. 2199 + * 2200 + * setbucket <bucket_type> <base> <step> 2201 + * = Set the latency buckets. 2202 + * destroybucket = destroy all the buckets. 
2203 + * start = start data collection 2204 + * stop = stop data collection 2205 + * reset = reset the collected data 2206 + **/ 2207 + static ssize_t 2208 + lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr, 2209 + const char *buf, size_t count) 2210 + { 2211 + struct Scsi_Host *shost = class_to_shost(dev); 2212 + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2213 + struct lpfc_hba *phba = vport->phba; 2214 + #define LPFC_MAX_DATA_CTRL_LEN 1024 2215 + static char bucket_data[LPFC_MAX_DATA_CTRL_LEN]; 2216 + unsigned long i; 2217 + char *str_ptr, *token; 2218 + struct lpfc_vport **vports; 2219 + struct Scsi_Host *v_shost; 2220 + char *bucket_type_str, *base_str, *step_str; 2221 + unsigned long base, step, bucket_type; 2222 + 2223 + if (!strncmp(buf, "setbucket", strlen("setbucket"))) { 2224 + if (strlen(buf) > LPFC_MAX_DATA_CTRL_LEN) 2225 + return -EINVAL; 2226 + 2227 + strcpy(bucket_data, buf); 2228 + str_ptr = &bucket_data[0]; 2229 + /* Ignore this token - this is command token */ 2230 + token = strsep(&str_ptr, "\t "); 2231 + if (!token) 2232 + return -EINVAL; 2233 + 2234 + bucket_type_str = strsep(&str_ptr, "\t "); 2235 + if (!bucket_type_str) 2236 + return -EINVAL; 2237 + 2238 + if (!strncmp(bucket_type_str, "linear", strlen("linear"))) 2239 + bucket_type = LPFC_LINEAR_BUCKET; 2240 + else if (!strncmp(bucket_type_str, "power2", strlen("power2"))) 2241 + bucket_type = LPFC_POWER2_BUCKET; 2242 + else 2243 + return -EINVAL; 2244 + 2245 + base_str = strsep(&str_ptr, "\t "); 2246 + if (!base_str) 2247 + return -EINVAL; 2248 + base = simple_strtoul(base_str, NULL, 0); 2249 + 2250 + step_str = strsep(&str_ptr, "\t "); 2251 + if (!step_str) 2252 + return -EINVAL; 2253 + step = simple_strtoul(step_str, NULL, 0); 2254 + if (!step) 2255 + return -EINVAL; 2256 + 2257 + /* Block the data collection for every vport */ 2258 + vports = lpfc_create_vport_work_array(phba); 2259 + if (vports == NULL) 2260 + return -ENOMEM; 2261 + 2262 
+ for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 2263 + v_shost = lpfc_shost_from_vport(vports[i]); 2264 + spin_lock_irq(v_shost->host_lock); 2265 + /* Block and reset data collection */ 2266 + vports[i]->stat_data_blocked = 1; 2267 + if (vports[i]->stat_data_enabled) 2268 + lpfc_vport_reset_stat_data(vports[i]); 2269 + spin_unlock_irq(v_shost->host_lock); 2270 + } 2271 + 2272 + /* Set the bucket attributes */ 2273 + phba->bucket_type = bucket_type; 2274 + phba->bucket_base = base; 2275 + phba->bucket_step = step; 2276 + 2277 + for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 2278 + v_shost = lpfc_shost_from_vport(vports[i]); 2279 + 2280 + /* Unblock data collection */ 2281 + spin_lock_irq(v_shost->host_lock); 2282 + vports[i]->stat_data_blocked = 0; 2283 + spin_unlock_irq(v_shost->host_lock); 2284 + } 2285 + lpfc_destroy_vport_work_array(phba, vports); 2286 + return strlen(buf); 2287 + } 2288 + 2289 + if (!strncmp(buf, "destroybucket", strlen("destroybucket"))) { 2290 + vports = lpfc_create_vport_work_array(phba); 2291 + if (vports == NULL) 2292 + return -ENOMEM; 2293 + 2294 + for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 2295 + v_shost = lpfc_shost_from_vport(vports[i]); 2296 + spin_lock_irq(shost->host_lock); 2297 + vports[i]->stat_data_blocked = 1; 2298 + lpfc_free_bucket(vport); 2299 + vport->stat_data_enabled = 0; 2300 + vports[i]->stat_data_blocked = 0; 2301 + spin_unlock_irq(shost->host_lock); 2302 + } 2303 + lpfc_destroy_vport_work_array(phba, vports); 2304 + phba->bucket_type = LPFC_NO_BUCKET; 2305 + phba->bucket_base = 0; 2306 + phba->bucket_step = 0; 2307 + return strlen(buf); 2308 + } 2309 + 2310 + if (!strncmp(buf, "start", strlen("start"))) { 2311 + /* If no buckets configured return error */ 2312 + if (phba->bucket_type == LPFC_NO_BUCKET) 2313 + return -EINVAL; 2314 + spin_lock_irq(shost->host_lock); 2315 + if (vport->stat_data_enabled) { 2316 + spin_unlock_irq(shost->host_lock); 2317 + return strlen(buf); 2318 
+ } 2319 + lpfc_alloc_bucket(vport); 2320 + vport->stat_data_enabled = 1; 2321 + spin_unlock_irq(shost->host_lock); 2322 + return strlen(buf); 2323 + } 2324 + 2325 + if (!strncmp(buf, "stop", strlen("stop"))) { 2326 + spin_lock_irq(shost->host_lock); 2327 + if (vport->stat_data_enabled == 0) { 2328 + spin_unlock_irq(shost->host_lock); 2329 + return strlen(buf); 2330 + } 2331 + lpfc_free_bucket(vport); 2332 + vport->stat_data_enabled = 0; 2333 + spin_unlock_irq(shost->host_lock); 2334 + return strlen(buf); 2335 + } 2336 + 2337 + if (!strncmp(buf, "reset", strlen("reset"))) { 2338 + if ((phba->bucket_type == LPFC_NO_BUCKET) 2339 + || !vport->stat_data_enabled) 2340 + return strlen(buf); 2341 + spin_lock_irq(shost->host_lock); 2342 + vport->stat_data_blocked = 1; 2343 + lpfc_vport_reset_stat_data(vport); 2344 + vport->stat_data_blocked = 0; 2345 + spin_unlock_irq(shost->host_lock); 2346 + return strlen(buf); 2347 + } 2348 + return -EINVAL; 2349 + } 2350 + 2351 + 2352 + /** 2353 + * lpfc_stat_data_ctrl_show: Read callback function for 2354 + * lpfc_stat_data_ctrl sysfs file. 2355 + * @dev: Pointer to class device object. 2356 + * @buf: Data buffer. 2357 + * 2358 + * This function is the read call back function for 2359 + * lpfc_stat_data_ctrl sysfs file. This function report the 2360 + * current statistical data collection state. 
2361 + **/ 2362 + static ssize_t 2363 + lpfc_stat_data_ctrl_show(struct device *dev, struct device_attribute *attr, 2364 + char *buf) 2365 + { 2366 + struct Scsi_Host *shost = class_to_shost(dev); 2367 + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2368 + struct lpfc_hba *phba = vport->phba; 2369 + int index = 0; 2370 + int i; 2371 + char *bucket_type; 2372 + unsigned long bucket_value; 2373 + 2374 + switch (phba->bucket_type) { 2375 + case LPFC_LINEAR_BUCKET: 2376 + bucket_type = "linear"; 2377 + break; 2378 + case LPFC_POWER2_BUCKET: 2379 + bucket_type = "power2"; 2380 + break; 2381 + default: 2382 + bucket_type = "No Bucket"; 2383 + break; 2384 + } 2385 + 2386 + sprintf(&buf[index], "Statistical Data enabled :%d, " 2387 + "blocked :%d, Bucket type :%s, Bucket base :%d," 2388 + " Bucket step :%d\nLatency Ranges :", 2389 + vport->stat_data_enabled, vport->stat_data_blocked, 2390 + bucket_type, phba->bucket_base, phba->bucket_step); 2391 + index = strlen(buf); 2392 + if (phba->bucket_type != LPFC_NO_BUCKET) { 2393 + for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) { 2394 + if (phba->bucket_type == LPFC_LINEAR_BUCKET) 2395 + bucket_value = phba->bucket_base + 2396 + phba->bucket_step * i; 2397 + else 2398 + bucket_value = phba->bucket_base + 2399 + (1 << i) * phba->bucket_step; 2400 + 2401 + if (index + 10 > PAGE_SIZE) 2402 + break; 2403 + sprintf(&buf[index], "%08ld ", bucket_value); 2404 + index = strlen(buf); 2405 + } 2406 + } 2407 + sprintf(&buf[index], "\n"); 2408 + return strlen(buf); 2409 + } 2410 + 2411 + /* 2412 + * Sysfs attribute to control the statistical data collection. 2413 + */ 2414 + static DEVICE_ATTR(lpfc_stat_data_ctrl, S_IRUGO | S_IWUSR, 2415 + lpfc_stat_data_ctrl_show, lpfc_stat_data_ctrl_store); 2416 + 2417 + /* 2418 + * lpfc_drvr_stat_data: sysfs attr to get driver statistical data. 2419 + */ 2420 + 2421 + /* 2422 + * Each Bucket takes 11 characters and 1 new line + 17 bytes WWN 2423 + * for each target. 
2424 + */ 2425 + #define STAT_DATA_SIZE_PER_TARGET(NUM_BUCKETS) ((NUM_BUCKETS) * 11 + 18) 2426 + #define MAX_STAT_DATA_SIZE_PER_TARGET \ 2427 + STAT_DATA_SIZE_PER_TARGET(LPFC_MAX_BUCKET_COUNT) 2428 + 2429 + 2430 + /** 2431 + * sysfs_drvr_stat_data_read: Read callback function for lpfc_drvr_stat_data 2432 + * sysfs attribute. 2433 + * @kobj: Pointer to the kernel object 2434 + * @bin_attr: Attribute object 2435 + * @buff: Buffer pointer 2436 + * @off: File offset 2437 + * @count: Buffer size 2438 + * 2439 + * This function is the read call back function for lpfc_drvr_stat_data 2440 + * sysfs file. This function export the statistical data to user 2441 + * applications. 2442 + **/ 2443 + static ssize_t 2444 + sysfs_drvr_stat_data_read(struct kobject *kobj, struct bin_attribute *bin_attr, 2445 + char *buf, loff_t off, size_t count) 2446 + { 2447 + struct device *dev = container_of(kobj, struct device, 2448 + kobj); 2449 + struct Scsi_Host *shost = class_to_shost(dev); 2450 + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2451 + struct lpfc_hba *phba = vport->phba; 2452 + int i = 0, index = 0; 2453 + unsigned long nport_index; 2454 + struct lpfc_nodelist *ndlp = NULL; 2455 + nport_index = (unsigned long)off / 2456 + MAX_STAT_DATA_SIZE_PER_TARGET; 2457 + 2458 + if (!vport->stat_data_enabled || vport->stat_data_blocked 2459 + || (phba->bucket_type == LPFC_NO_BUCKET)) 2460 + return 0; 2461 + 2462 + spin_lock_irq(shost->host_lock); 2463 + list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 2464 + if (!NLP_CHK_NODE_ACT(ndlp) || !ndlp->lat_data) 2465 + continue; 2466 + 2467 + if (nport_index > 0) { 2468 + nport_index--; 2469 + continue; 2470 + } 2471 + 2472 + if ((index + MAX_STAT_DATA_SIZE_PER_TARGET) 2473 + > count) 2474 + break; 2475 + 2476 + if (!ndlp->lat_data) 2477 + continue; 2478 + 2479 + /* Print the WWN */ 2480 + sprintf(&buf[index], "%02x%02x%02x%02x%02x%02x%02x%02x:", 2481 + ndlp->nlp_portname.u.wwn[0], 2482 + ndlp->nlp_portname.u.wwn[1], 
2483 + ndlp->nlp_portname.u.wwn[2], 2484 + ndlp->nlp_portname.u.wwn[3], 2485 + ndlp->nlp_portname.u.wwn[4], 2486 + ndlp->nlp_portname.u.wwn[5], 2487 + ndlp->nlp_portname.u.wwn[6], 2488 + ndlp->nlp_portname.u.wwn[7]); 2489 + 2490 + index = strlen(buf); 2491 + 2492 + for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) { 2493 + sprintf(&buf[index], "%010u,", 2494 + ndlp->lat_data[i].cmd_count); 2495 + index = strlen(buf); 2496 + } 2497 + sprintf(&buf[index], "\n"); 2498 + index = strlen(buf); 2499 + } 2500 + spin_unlock_irq(shost->host_lock); 2501 + return index; 2502 + } 2503 + 2504 + static struct bin_attribute sysfs_drvr_stat_data_attr = { 2505 + .attr = { 2506 + .name = "lpfc_drvr_stat_data", 2507 + .mode = S_IRUSR, 2508 + .owner = THIS_MODULE, 2509 + }, 2510 + .size = LPFC_MAX_TARGET * MAX_STAT_DATA_SIZE_PER_TARGET, 2511 + .read = sysfs_drvr_stat_data_read, 2512 + .write = NULL, 2513 + }; 2514 + 2187 2515 /* 2188 2516 # lpfc_link_speed: Link speed selection for initializing the Fibre Channel 2189 2517 # connection. 
··· 2832 2502 &dev_attr_lpfc_enable_hba_heartbeat, 2833 2503 &dev_attr_lpfc_sg_seg_cnt, 2834 2504 &dev_attr_lpfc_max_scsicmpl_time, 2505 + &dev_attr_lpfc_stat_data_ctrl, 2835 2506 NULL, 2836 2507 }; 2837 2508 ··· 2855 2524 &dev_attr_nport_evt_cnt, 2856 2525 &dev_attr_npiv_info, 2857 2526 &dev_attr_lpfc_enable_da_id, 2527 + &dev_attr_lpfc_max_scsicmpl_time, 2528 + &dev_attr_lpfc_stat_data_ctrl, 2858 2529 NULL, 2859 2530 }; 2860 2531 ··· 3291 2958 if (error) 3292 2959 goto out_remove_ctlreg_attr; 3293 2960 2961 + error = sysfs_create_bin_file(&shost->shost_dev.kobj, 2962 + &sysfs_drvr_stat_data_attr); 2963 + if (error) 2964 + goto out_remove_mbox_attr; 2965 + 3294 2966 return 0; 2967 + out_remove_mbox_attr: 2968 + sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr); 3295 2969 out_remove_ctlreg_attr: 3296 2970 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr); 3297 2971 out: ··· 3313 2973 lpfc_free_sysfs_attr(struct lpfc_vport *vport) 3314 2974 { 3315 2975 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3316 - 2976 + sysfs_remove_bin_file(&shost->shost_dev.kobj, 2977 + &sysfs_drvr_stat_data_attr); 3317 2978 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr); 3318 2979 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr); 3319 2980 }
+6
drivers/scsi/lpfc/lpfc_crtn.h
··· 294 294 void lpfc_ramp_up_queue_handler(struct lpfc_hba *); 295 295 void lpfc_scsi_dev_block(struct lpfc_hba *); 296 296 297 + void 298 + lpfc_send_els_failure_event(struct lpfc_hba *, struct lpfc_iocbq *, 299 + struct lpfc_iocbq *); 300 + struct lpfc_fast_path_event *lpfc_alloc_fast_evt(struct lpfc_hba *); 301 + void lpfc_free_fast_evt(struct lpfc_hba *, struct lpfc_fast_path_event *); 302 + 297 303 #define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code) 298 304 #define HBA_EVENT_RSCN 5 299 305 #define HBA_EVENT_LINK_UP 2
+1
drivers/scsi/lpfc/lpfc_ct.c
··· 34 34 35 35 #include "lpfc_hw.h" 36 36 #include "lpfc_sli.h" 37 + #include "lpfc_nl.h" 37 38 #include "lpfc_disc.h" 38 39 #include "lpfc_scsi.h" 39 40 #include "lpfc.h"
+1
drivers/scsi/lpfc/lpfc_debugfs.c
··· 35 35 36 36 #include "lpfc_hw.h" 37 37 #include "lpfc_sli.h" 38 + #include "lpfc_nl.h" 38 39 #include "lpfc_disc.h" 39 40 #include "lpfc_scsi.h" 40 41 #include "lpfc.h"
+20
drivers/scsi/lpfc/lpfc_disc.h
··· 37 37 LPFC_EVT_KILL, 38 38 LPFC_EVT_ELS_RETRY, 39 39 LPFC_EVT_DEV_LOSS, 40 + LPFC_EVT_FASTPATH_MGMT_EVT, 40 41 }; 41 42 42 43 /* structure used to queue event to the discovery tasklet */ ··· 48 47 enum lpfc_work_type evt; 49 48 }; 50 49 50 + struct lpfc_scsi_check_condition_event; 51 + struct lpfc_scsi_varqueuedepth_event; 52 + struct lpfc_scsi_event_header; 53 + struct lpfc_fabric_event_header; 54 + struct lpfc_fcprdchkerr_event; 55 + 56 + /* structure used for sending events from fast path */ 57 + struct lpfc_fast_path_event { 58 + struct lpfc_work_evt work_evt; 59 + struct lpfc_vport *vport; 60 + union { 61 + struct lpfc_scsi_check_condition_event check_cond_evt; 62 + struct lpfc_scsi_varqueuedepth_event queue_depth_evt; 63 + struct lpfc_scsi_event_header scsi_evt; 64 + struct lpfc_fabric_event_header fabric_evt; 65 + struct lpfc_fcprdchkerr_event read_check_error; 66 + } un; 67 + }; 51 68 52 69 struct lpfc_nodelist { 53 70 struct list_head nlp_listp; ··· 110 91 atomic_t cmd_pending; 111 92 uint32_t cmd_qdepth; 112 93 unsigned long last_change_time; 94 + struct lpfc_scsicmd_bkt *lat_data; /* Latency data */ 113 95 }; 114 96 115 97 /* Defines for nlp_flag (uint32) */
+114
drivers/scsi/lpfc/lpfc_els.c
··· 30 30 31 31 #include "lpfc_hw.h" 32 32 #include "lpfc_sli.h" 33 + #include "lpfc_nl.h" 33 34 #include "lpfc_disc.h" 34 35 #include "lpfc_scsi.h" 35 36 #include "lpfc.h" ··· 5086 5085 } 5087 5086 5088 5087 /** 5088 + * lpfc_send_els_failure_event: Posts an ELS command failure event. 5089 + * @phba: Pointer to hba context object. 5090 + * @cmdiocbp: Pointer to command iocb which reported error. 5091 + * @rspiocbp: Pointer to response iocb which reported error. 5092 + * 5093 + * This function sends an event when there is an ELS command 5094 + * failure. 5095 + **/ 5096 + void 5097 + lpfc_send_els_failure_event(struct lpfc_hba *phba, 5098 + struct lpfc_iocbq *cmdiocbp, 5099 + struct lpfc_iocbq *rspiocbp) 5100 + { 5101 + struct lpfc_vport *vport = cmdiocbp->vport; 5102 + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 5103 + struct lpfc_lsrjt_event lsrjt_event; 5104 + struct lpfc_fabric_event_header fabric_event; 5105 + struct ls_rjt stat; 5106 + struct lpfc_nodelist *ndlp; 5107 + uint32_t *pcmd; 5108 + 5109 + ndlp = cmdiocbp->context1; 5110 + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) 5111 + return; 5112 + 5113 + if (rspiocbp->iocb.ulpStatus == IOSTAT_LS_RJT) { 5114 + lsrjt_event.header.event_type = FC_REG_ELS_EVENT; 5115 + lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV; 5116 + memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname, 5117 + sizeof(struct lpfc_name)); 5118 + memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename, 5119 + sizeof(struct lpfc_name)); 5120 + pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 5121 + cmdiocbp->context2)->virt); 5122 + lsrjt_event.command = *pcmd; 5123 + stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]); 5124 + lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode; 5125 + lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp; 5126 + fc_host_post_vendor_event(shost, 5127 + fc_get_event_number(), 5128 + sizeof(lsrjt_event), 5129 + (char *)&lsrjt_event, 5130 + SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 5131 + return; 
5132 + } 5133 + if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) || 5134 + (rspiocbp->iocb.ulpStatus == IOSTAT_FABRIC_BSY)) { 5135 + fabric_event.event_type = FC_REG_FABRIC_EVENT; 5136 + if (rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) 5137 + fabric_event.subcategory = LPFC_EVENT_PORT_BUSY; 5138 + else 5139 + fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY; 5140 + memcpy(fabric_event.wwpn, &ndlp->nlp_portname, 5141 + sizeof(struct lpfc_name)); 5142 + memcpy(fabric_event.wwnn, &ndlp->nlp_nodename, 5143 + sizeof(struct lpfc_name)); 5144 + fc_host_post_vendor_event(shost, 5145 + fc_get_event_number(), 5146 + sizeof(fabric_event), 5147 + (char *)&fabric_event, 5148 + SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 5149 + return; 5150 + } 5151 + 5152 + } 5153 + 5154 + /** 5155 + * lpfc_send_els_event: Posts unsolicited els event. 5156 + * @vport: Pointer to vport object. 5157 + * @ndlp: Pointer FC node object. 5158 + * @cmd: ELS command code. 5159 + * 5160 + * This function posts an event when there is an incoming 5161 + * unsolicited ELS command. 
5162 + **/ 5163 + static void 5164 + lpfc_send_els_event(struct lpfc_vport *vport, 5165 + struct lpfc_nodelist *ndlp, 5166 + uint32_t cmd) 5167 + { 5168 + struct lpfc_els_event_header els_data; 5169 + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 5170 + 5171 + els_data.event_type = FC_REG_ELS_EVENT; 5172 + switch (cmd) { 5173 + case ELS_CMD_PLOGI: 5174 + els_data.subcategory = LPFC_EVENT_PLOGI_RCV; 5175 + break; 5176 + case ELS_CMD_PRLO: 5177 + els_data.subcategory = LPFC_EVENT_PRLO_RCV; 5178 + break; 5179 + case ELS_CMD_ADISC: 5180 + els_data.subcategory = LPFC_EVENT_ADISC_RCV; 5181 + break; 5182 + default: 5183 + return; 5184 + } 5185 + memcpy(els_data.wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name)); 5186 + memcpy(els_data.wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name)); 5187 + fc_host_post_vendor_event(shost, 5188 + fc_get_event_number(), 5189 + sizeof(els_data), 5190 + (char *)&els_data, 5191 + SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 5192 + 5193 + return; 5194 + } 5195 + 5196 + 5197 + /** 5089 5198 * lpfc_els_unsol_buffer: Process an unsolicited event data buffer. 5090 5199 * @phba: pointer to lpfc hba data structure. 5091 5200 * @pring: pointer to a SLI ring. 
··· 5296 5185 phba->fc_stat.elsRcvPLOGI++; 5297 5186 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); 5298 5187 5188 + lpfc_send_els_event(vport, ndlp, cmd); 5299 5189 if (vport->port_state < LPFC_DISC_AUTH) { 5300 5190 if (!(phba->pport->fc_flag & FC_PT2PT) || 5301 5191 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { ··· 5346 5234 did, vport->port_state, ndlp->nlp_flag); 5347 5235 5348 5236 phba->fc_stat.elsRcvPRLO++; 5237 + lpfc_send_els_event(vport, ndlp, cmd); 5349 5238 if (vport->port_state < LPFC_DISC_AUTH) { 5350 5239 rjt_err = LSRJT_UNABLE_TPC; 5351 5240 break; ··· 5364 5251 "RCV ADISC: did:x%x/ste:x%x flg:x%x", 5365 5252 did, vport->port_state, ndlp->nlp_flag); 5366 5253 5254 + lpfc_send_els_event(vport, ndlp, cmd); 5367 5255 phba->fc_stat.elsRcvADISC++; 5368 5256 if (vport->port_state < LPFC_DISC_AUTH) { 5369 5257 rjt_err = LSRJT_UNABLE_TPC;
+142 -1
drivers/scsi/lpfc/lpfc_hbadisc.c
··· 30 30 #include <scsi/scsi_transport_fc.h> 31 31 32 32 #include "lpfc_hw.h" 33 + #include "lpfc_nl.h" 33 34 #include "lpfc_disc.h" 34 35 #include "lpfc_sli.h" 35 36 #include "lpfc_scsi.h" ··· 275 274 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 276 275 } 277 276 277 + /** 278 + * lpfc_alloc_fast_evt: Allocates data structure for posting event. 279 + * @phba: Pointer to hba context object. 280 + * 281 + * This function is called from the functions which need to post 282 + * events from interrupt context. This function allocates data 283 + * structure required for posting event. It also keeps track of 284 + * number of events pending and prevent event storm when there are 285 + * too many events. 286 + **/ 287 + struct lpfc_fast_path_event * 288 + lpfc_alloc_fast_evt(struct lpfc_hba *phba) { 289 + struct lpfc_fast_path_event *ret; 290 + 291 + /* If there are lot of fast event do not exhaust memory due to this */ 292 + if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT) 293 + return NULL; 294 + 295 + ret = kzalloc(sizeof(struct lpfc_fast_path_event), 296 + GFP_ATOMIC); 297 + if (ret) 298 + atomic_inc(&phba->fast_event_count); 299 + INIT_LIST_HEAD(&ret->work_evt.evt_listp); 300 + ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT; 301 + return ret; 302 + } 303 + 304 + /** 305 + * lpfc_free_fast_evt: Frees event data structure. 306 + * @phba: Pointer to hba context object. 307 + * @evt: Event object which need to be freed. 308 + * 309 + * This function frees the data structure required for posting 310 + * events. 311 + **/ 312 + void 313 + lpfc_free_fast_evt(struct lpfc_hba *phba, 314 + struct lpfc_fast_path_event *evt) { 315 + 316 + atomic_dec(&phba->fast_event_count); 317 + kfree(evt); 318 + } 319 + 320 + /** 321 + * lpfc_send_fastpath_evt: Posts events generated from fast path. 322 + * @phba: Pointer to hba context object. 323 + * @evtp: Event data structure. 
324 + * 325 + * This function is called from worker thread, when the interrupt 326 + * context need to post an event. This function posts the event 327 + * to fc transport netlink interface. 328 + **/ 329 + static void 330 + lpfc_send_fastpath_evt(struct lpfc_hba *phba, 331 + struct lpfc_work_evt *evtp) 332 + { 333 + unsigned long evt_category, evt_sub_category; 334 + struct lpfc_fast_path_event *fast_evt_data; 335 + char *evt_data; 336 + uint32_t evt_data_size; 337 + struct Scsi_Host *shost; 338 + 339 + fast_evt_data = container_of(evtp, struct lpfc_fast_path_event, 340 + work_evt); 341 + 342 + evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type; 343 + evt_sub_category = (unsigned long) fast_evt_data->un. 344 + fabric_evt.subcategory; 345 + shost = lpfc_shost_from_vport(fast_evt_data->vport); 346 + if (evt_category == FC_REG_FABRIC_EVENT) { 347 + if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) { 348 + evt_data = (char *) &fast_evt_data->un.read_check_error; 349 + evt_data_size = sizeof(fast_evt_data->un. 350 + read_check_error); 351 + } else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) || 352 + (evt_sub_category == IOSTAT_NPORT_BSY)) { 353 + evt_data = (char *) &fast_evt_data->un.fabric_evt; 354 + evt_data_size = sizeof(fast_evt_data->un.fabric_evt); 355 + } else { 356 + lpfc_free_fast_evt(phba, fast_evt_data); 357 + return; 358 + } 359 + } else if (evt_category == FC_REG_SCSI_EVENT) { 360 + switch (evt_sub_category) { 361 + case LPFC_EVENT_QFULL: 362 + case LPFC_EVENT_DEVBSY: 363 + evt_data = (char *) &fast_evt_data->un.scsi_evt; 364 + evt_data_size = sizeof(fast_evt_data->un.scsi_evt); 365 + break; 366 + case LPFC_EVENT_CHECK_COND: 367 + evt_data = (char *) &fast_evt_data->un.check_cond_evt; 368 + evt_data_size = sizeof(fast_evt_data->un. 369 + check_cond_evt); 370 + break; 371 + case LPFC_EVENT_VARQUEDEPTH: 372 + evt_data = (char *) &fast_evt_data->un.queue_depth_evt; 373 + evt_data_size = sizeof(fast_evt_data->un. 
374 + queue_depth_evt); 375 + break; 376 + default: 377 + lpfc_free_fast_evt(phba, fast_evt_data); 378 + return; 379 + } 380 + } else { 381 + lpfc_free_fast_evt(phba, fast_evt_data); 382 + return; 383 + } 384 + 385 + fc_host_post_vendor_event(shost, 386 + fc_get_event_number(), 387 + evt_data_size, 388 + evt_data, 389 + SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 390 + 391 + lpfc_free_fast_evt(phba, fast_evt_data); 392 + return; 393 + } 394 + 278 395 static void 279 396 lpfc_work_list_done(struct lpfc_hba *phba) 280 397 { ··· 463 344 ? 0 : lpfc_sli_brdkill(phba); 464 345 lpfc_unblock_mgmt_io(phba); 465 346 complete((struct completion *)(evtp->evt_arg2)); 347 + break; 348 + case LPFC_EVT_FASTPATH_MGMT_EVT: 349 + lpfc_send_fastpath_evt(phba, evtp); 350 + free_evt = 0; 466 351 break; 467 352 } 468 353 if (free_evt) ··· 1723 1600 * sure to unblock any attached scsi devices 1724 1601 */ 1725 1602 lpfc_register_remote_port(vport, ndlp); 1603 + } 1604 + if ((new_state == NLP_STE_MAPPED_NODE) && 1605 + (vport->stat_data_enabled)) { 1606 + /* 1607 + * A new target is discovered, if there is no buffer for 1608 + * statistical data collection allocate buffer. 1609 + */ 1610 + ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT, 1611 + sizeof(struct lpfc_scsicmd_bkt), 1612 + GFP_KERNEL); 1613 + 1614 + if (!ndlp->lat_data) 1615 + lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 1616 + "0286 lpfc_nlp_state_cleanup failed to " 1617 + "allocate statistical data buffer DID " 1618 + "0x%x\n", ndlp->nlp_DID); 1726 1619 } 1727 1620 /* 1728 1621 * if we added to Mapped list, but the remote port ··· 3168 3029 spin_unlock_irqrestore(&phba->ndlp_lock, flags); 3169 3030 3170 3031 /* free ndlp memory for final ndlp release */ 3171 - if (NLP_CHK_FREE_REQ(ndlp)) 3032 + if (NLP_CHK_FREE_REQ(ndlp)) { 3033 + kfree(ndlp->lat_data); 3172 3034 mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool); 3035 + } 3173 3036 } 3174 3037 3175 3038 /* This routine bumps the reference count for a ndlp structure to ensure
+22
drivers/scsi/lpfc/lpfc_init.c
··· 36 36 37 37 #include "lpfc_hw.h" 38 38 #include "lpfc_sli.h" 39 + #include "lpfc_nl.h" 39 40 #include "lpfc_disc.h" 40 41 #include "lpfc_scsi.h" 41 42 #include "lpfc.h" ··· 816 815 unsigned long temperature; 817 816 struct temp_event temp_event_data; 818 817 struct Scsi_Host *shost; 818 + struct lpfc_board_event_header board_event; 819 819 820 820 /* If the pci channel is offline, ignore possible errors, 821 821 * since we cannot communicate with the pci card anyway. */ ··· 825 823 /* If resets are disabled then leave the HBA alone and return */ 826 824 if (!phba->cfg_enable_hba_reset) 827 825 return; 826 + 827 + /* Send an internal error event to mgmt application */ 828 + board_event.event_type = FC_REG_BOARD_EVENT; 829 + board_event.subcategory = LPFC_EVENT_PORTINTERR; 830 + shost = lpfc_shost_from_vport(phba->pport); 831 + fc_host_post_vendor_event(shost, fc_get_event_number(), 832 + sizeof(board_event), 833 + (char *) &board_event, 834 + SCSI_NL_VID_TYPE_PCI 835 + | PCI_VENDOR_ID_EMULEX); 828 836 829 837 if (phba->work_hs & HS_FFER6) { 830 838 /* Re-establishing Link */ ··· 2357 2345 int i, hbq_count; 2358 2346 uint16_t iotag; 2359 2347 int bars = pci_select_bars(pdev, IORESOURCE_MEM); 2348 + struct lpfc_adapter_event_header adapter_event; 2360 2349 2361 2350 if (pci_enable_device_mem(pdev)) 2362 2351 goto out; ··· 2368 2355 if (!phba) 2369 2356 goto out_release_regions; 2370 2357 2358 + atomic_set(&phba->fast_event_count, 0); 2371 2359 spin_lock_init(&phba->hbalock); 2372 2360 2373 2361 /* Initialize ndlp management spinlock */ ··· 2640 2626 2641 2627 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2642 2628 "0428 Perform SCSI scan\n"); 2629 + /* Send board arrival event to upper layer */ 2630 + adapter_event.event_type = FC_REG_ADAPTER_EVENT; 2631 + adapter_event.subcategory = LPFC_EVENT_ARRIVAL; 2632 + fc_host_post_vendor_event(shost, fc_get_event_number(), 2633 + sizeof(adapter_event), 2634 + (char *) &adapter_event, 2635 + SCSI_NL_VID_TYPE_PCI | 
PCI_VENDOR_ID_EMULEX); 2636 + 2643 2637 scsi_scan_host(shost); 2644 2638 2645 2639 return 0;
+1
drivers/scsi/lpfc/lpfc_mbox.c
··· 30 30 31 31 #include "lpfc_hw.h" 32 32 #include "lpfc_sli.h" 33 + #include "lpfc_nl.h" 33 34 #include "lpfc_disc.h" 34 35 #include "lpfc_scsi.h" 35 36 #include "lpfc.h"
+1
drivers/scsi/lpfc/lpfc_mem.c
··· 30 30 31 31 #include "lpfc_hw.h" 32 32 #include "lpfc_sli.h" 33 + #include "lpfc_nl.h" 33 34 #include "lpfc_disc.h" 34 35 #include "lpfc_scsi.h" 35 36 #include "lpfc.h"
+163
drivers/scsi/lpfc/lpfc_nl.h
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2008 Emulex.  All rights reserved.                *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

/* Event definitions for RegisterForEvent */
#define FC_REG_LINK_EVENT		0x0001	/* link up / down events */
#define FC_REG_RSCN_EVENT		0x0002	/* RSCN events */
#define FC_REG_CT_EVENT			0x0004	/* CT request events */
#define FC_REG_DUMP_EVENT		0x0008	/* Dump events */
#define FC_REG_TEMPERATURE_EVENT	0x0010	/* temperature events */
#define FC_REG_ELS_EVENT		0x0020	/* lpfc els events */
#define FC_REG_FABRIC_EVENT		0x0040	/* lpfc fabric events */
#define FC_REG_SCSI_EVENT		0x0080	/* lpfc scsi events */
#define FC_REG_BOARD_EVENT		0x0100	/* lpfc board events */
#define FC_REG_ADAPTER_EVENT		0x0200	/* lpfc adapter events */
#define FC_REG_EVENT_MASK		(FC_REG_LINK_EVENT | \
					 FC_REG_RSCN_EVENT | \
					 FC_REG_CT_EVENT | \
					 FC_REG_DUMP_EVENT | \
					 FC_REG_TEMPERATURE_EVENT | \
					 FC_REG_ELS_EVENT | \
					 FC_REG_FABRIC_EVENT | \
					 FC_REG_SCSI_EVENT | \
					 FC_REG_BOARD_EVENT | \
					 FC_REG_ADAPTER_EVENT)
/*
 * All net link event payloads will begin with an event type
 * and subcategory. The event type must come first.
 * The subcategory further defines the data that follows in the rest
 * of the payload. Each category will have its own unique header plus
 * any additional data unique to the subcategory.
 * The payload sent via the fc transport is one-way driver->application.
 */

/* els event header */
struct lpfc_els_event_header {
	uint32_t event_type;
	uint32_t subcategory;
	uint8_t wwpn[8];
	uint8_t wwnn[8];
};

/* subcategory codes for FC_REG_ELS_EVENT */
#define LPFC_EVENT_PLOGI_RCV		0x01
#define LPFC_EVENT_PRLO_RCV		0x02
#define LPFC_EVENT_ADISC_RCV		0x04
#define LPFC_EVENT_LSRJT_RCV		0x08

/* special els lsrjt event */
struct lpfc_lsrjt_event {
	struct lpfc_els_event_header header;
	uint32_t command;
	uint32_t reason_code;
	uint32_t explanation;
};


/* fabric event header */
struct lpfc_fabric_event_header {
	uint32_t event_type;
	uint32_t subcategory;
	uint8_t wwpn[8];
	uint8_t wwnn[8];
};

/* subcategory codes for FC_REG_FABRIC_EVENT */
#define LPFC_EVENT_FABRIC_BUSY		0x01
#define LPFC_EVENT_PORT_BUSY		0x02
#define LPFC_EVENT_FCPRDCHKERR		0x04

/* special case fabric fcprdchkerr event */
struct lpfc_fcprdchkerr_event {
	struct lpfc_fabric_event_header header;
	uint32_t lun;
	uint32_t opcode;
	uint32_t fcpiparam;
};


/* scsi event header */
struct lpfc_scsi_event_header {
	uint32_t event_type;
	uint32_t subcategory;
	uint32_t lun;
	uint8_t wwpn[8];
	uint8_t wwnn[8];
};

/* subcategory codes for FC_REG_SCSI_EVENT */
#define LPFC_EVENT_QFULL	0x0001
#define LPFC_EVENT_DEVBSY	0x0002
#define LPFC_EVENT_CHECK_COND	0x0004
#define LPFC_EVENT_LUNRESET	0x0008
#define LPFC_EVENT_TGTRESET	0x0010
#define LPFC_EVENT_BUSRESET	0x0020
#define LPFC_EVENT_VARQUEDEPTH	0x0040

/* special case scsi varqueuedepth event */
struct lpfc_scsi_varqueuedepth_event {
	struct lpfc_scsi_event_header scsi_event;
	uint32_t oldval;
	uint32_t newval;
};

/* special case scsi check condition event */
struct lpfc_scsi_check_condition_event {
	struct lpfc_scsi_event_header scsi_event;
	uint8_t sense_key;
	uint8_t asc;
	uint8_t ascq;
};

/* event codes for FC_REG_BOARD_EVENT */
#define LPFC_EVENT_PORTINTERR		0x01

/* board event header */
struct lpfc_board_event_header {
	uint32_t event_type;
	uint32_t subcategory;
};


/* event codes for FC_REG_ADAPTER_EVENT */
#define LPFC_EVENT_ARRIVAL	0x01

/* adapter event header */
struct lpfc_adapter_event_header {
	uint32_t event_type;
	uint32_t subcategory;
};


/*
 * event codes for temp_event
 * (single authoritative definition; an identical duplicate block that
 * previously appeared above, next to the FC_REG_* defines, was removed)
 */
#define LPFC_CRIT_TEMP		0x1
#define LPFC_THRESHOLD_TEMP	0x2
#define LPFC_NORMAL_TEMP	0x3

struct temp_event {
	uint32_t event_type;
	uint32_t event_code;
	uint32_t data;
};
+1
drivers/scsi/lpfc/lpfc_nportdisc.c
··· 30 30 31 31 #include "lpfc_hw.h" 32 32 #include "lpfc_sli.h" 33 + #include "lpfc_nl.h" 33 34 #include "lpfc_disc.h" 34 35 #include "lpfc_scsi.h" 35 36 #include "lpfc.h"
+278 -1
drivers/scsi/lpfc/lpfc_scsi.c
··· 32 32 #include "lpfc_version.h" 33 33 #include "lpfc_hw.h" 34 34 #include "lpfc_sli.h" 35 + #include "lpfc_nl.h" 35 36 #include "lpfc_disc.h" 36 37 #include "lpfc_scsi.h" 37 38 #include "lpfc.h" ··· 42 41 43 42 #define LPFC_RESET_WAIT 2 44 43 #define LPFC_ABORT_WAIT 2 44 + 45 + /** 46 + * lpfc_update_stats: Update statistical data for the command completion. 47 + * @phba: Pointer to HBA object. 48 + * @lpfc_cmd: lpfc scsi command object pointer. 49 + * 50 + * This function is called when there is a command completion and this 51 + * function updates the statistical data for the command completion. 52 + **/ 53 + static void 54 + lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) 55 + { 56 + struct lpfc_rport_data *rdata = lpfc_cmd->rdata; 57 + struct lpfc_nodelist *pnode = rdata->pnode; 58 + struct scsi_cmnd *cmd = lpfc_cmd->pCmd; 59 + unsigned long flags; 60 + struct Scsi_Host *shost = cmd->device->host; 61 + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 62 + unsigned long latency; 63 + int i; 64 + 65 + if (cmd->result) 66 + return; 67 + 68 + spin_lock_irqsave(shost->host_lock, flags); 69 + if (!vport->stat_data_enabled || 70 + vport->stat_data_blocked || 71 + !pnode->lat_data || 72 + (phba->bucket_type == LPFC_NO_BUCKET)) { 73 + spin_unlock_irqrestore(shost->host_lock, flags); 74 + return; 75 + } 76 + latency = jiffies_to_msecs(jiffies - lpfc_cmd->start_time); 77 + 78 + if (phba->bucket_type == LPFC_LINEAR_BUCKET) { 79 + i = (latency + phba->bucket_step - 1 - phba->bucket_base)/ 80 + phba->bucket_step; 81 + if (i >= LPFC_MAX_BUCKET_COUNT) 82 + i = LPFC_MAX_BUCKET_COUNT; 83 + } else { 84 + for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++) 85 + if (latency <= (phba->bucket_base + 86 + ((1<<i)*phba->bucket_step))) 87 + break; 88 + } 89 + 90 + pnode->lat_data[i].cmd_count++; 91 + spin_unlock_irqrestore(shost->host_lock, flags); 92 + } 93 + 94 + 95 + /** 96 + * lpfc_send_sdev_queuedepth_change_event: Posts a queuedepth change 
97 + * event. 98 + * @phba: Pointer to HBA context object. 99 + * @vport: Pointer to vport object. 100 + * @ndlp: Pointer to FC node associated with the target. 101 + * @lun: Lun number of the scsi device. 102 + * @old_val: Old value of the queue depth. 103 + * @new_val: New value of the queue depth. 104 + * 105 + * This function sends an event to the mgmt application indicating 106 + * there is a change in the scsi device queue depth. 107 + **/ 108 + static void 109 + lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba, 110 + struct lpfc_vport *vport, 111 + struct lpfc_nodelist *ndlp, 112 + uint32_t lun, 113 + uint32_t old_val, 114 + uint32_t new_val) 115 + { 116 + struct lpfc_fast_path_event *fast_path_evt; 117 + unsigned long flags; 118 + 119 + fast_path_evt = lpfc_alloc_fast_evt(phba); 120 + if (!fast_path_evt) 121 + return; 122 + 123 + fast_path_evt->un.queue_depth_evt.scsi_event.event_type = 124 + FC_REG_SCSI_EVENT; 125 + fast_path_evt->un.queue_depth_evt.scsi_event.subcategory = 126 + LPFC_EVENT_VARQUEDEPTH; 127 + 128 + /* Report all luns with change in queue depth */ 129 + fast_path_evt->un.queue_depth_evt.scsi_event.lun = lun; 130 + if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { 131 + memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwpn, 132 + &ndlp->nlp_portname, sizeof(struct lpfc_name)); 133 + memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwnn, 134 + &ndlp->nlp_nodename, sizeof(struct lpfc_name)); 135 + } 136 + 137 + fast_path_evt->un.queue_depth_evt.oldval = old_val; 138 + fast_path_evt->un.queue_depth_evt.newval = new_val; 139 + fast_path_evt->vport = vport; 140 + 141 + fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT; 142 + spin_lock_irqsave(&phba->hbalock, flags); 143 + list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list); 144 + spin_unlock_irqrestore(&phba->hbalock, flags); 145 + lpfc_worker_wake_up(phba); 146 + 147 + return; 148 + } 45 149 46 150 /* 47 151 * This function is called with no lock held when there is 
a resource ··· 223 117 struct lpfc_vport **vports; 224 118 struct Scsi_Host *shost; 225 119 struct scsi_device *sdev; 226 - unsigned long new_queue_depth; 120 + unsigned long new_queue_depth, old_queue_depth; 227 121 unsigned long num_rsrc_err, num_cmd_success; 228 122 int i; 123 + struct lpfc_rport_data *rdata; 229 124 230 125 num_rsrc_err = atomic_read(&phba->num_rsrc_err); 231 126 num_cmd_success = atomic_read(&phba->num_cmd_success); ··· 244 137 else 245 138 new_queue_depth = sdev->queue_depth - 246 139 new_queue_depth; 140 + old_queue_depth = sdev->queue_depth; 247 141 if (sdev->ordered_tags) 248 142 scsi_adjust_queue_depth(sdev, 249 143 MSG_ORDERED_TAG, ··· 253 145 scsi_adjust_queue_depth(sdev, 254 146 MSG_SIMPLE_TAG, 255 147 new_queue_depth); 148 + rdata = sdev->hostdata; 149 + if (rdata) 150 + lpfc_send_sdev_queuedepth_change_event( 151 + phba, vports[i], 152 + rdata->pnode, 153 + sdev->lun, old_queue_depth, 154 + new_queue_depth); 256 155 } 257 156 } 258 157 lpfc_destroy_vport_work_array(phba, vports); ··· 274 159 struct Scsi_Host *shost; 275 160 struct scsi_device *sdev; 276 161 int i; 162 + struct lpfc_rport_data *rdata; 277 163 278 164 vports = lpfc_create_vport_work_array(phba); 279 165 if (vports != NULL) ··· 292 176 scsi_adjust_queue_depth(sdev, 293 177 MSG_SIMPLE_TAG, 294 178 sdev->queue_depth+1); 179 + rdata = sdev->hostdata; 180 + if (rdata) 181 + lpfc_send_sdev_queuedepth_change_event( 182 + phba, vports[i], 183 + rdata->pnode, 184 + sdev->lun, 185 + sdev->queue_depth - 1, 186 + sdev->queue_depth); 295 187 } 296 188 } 297 189 lpfc_destroy_vport_work_array(phba, vports); ··· 590 466 return 0; 591 467 } 592 468 469 + /** 470 + * lpfc_send_scsi_error_event: Posts an event when there is SCSI error. 471 + * @phba: Pointer to hba context object. 472 + * @vport: Pointer to vport object. 473 + * @lpfc_cmd: Pointer to lpfc scsi command which reported the error. 474 + * @rsp_iocb: Pointer to response iocb object which reported error. 
475 + * 476 + * This function posts an event when there is a SCSI command reporting 477 + * error from the scsi device. 478 + **/ 479 + static void 480 + lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport, 481 + struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) { 482 + struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; 483 + struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; 484 + uint32_t resp_info = fcprsp->rspStatus2; 485 + uint32_t scsi_status = fcprsp->rspStatus3; 486 + uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm; 487 + struct lpfc_fast_path_event *fast_path_evt = NULL; 488 + struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode; 489 + unsigned long flags; 490 + 491 + /* If there is queuefull or busy condition send a scsi event */ 492 + if ((cmnd->result == SAM_STAT_TASK_SET_FULL) || 493 + (cmnd->result == SAM_STAT_BUSY)) { 494 + fast_path_evt = lpfc_alloc_fast_evt(phba); 495 + if (!fast_path_evt) 496 + return; 497 + fast_path_evt->un.scsi_evt.event_type = 498 + FC_REG_SCSI_EVENT; 499 + fast_path_evt->un.scsi_evt.subcategory = 500 + (cmnd->result == SAM_STAT_TASK_SET_FULL) ? 
501 + LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY; 502 + fast_path_evt->un.scsi_evt.lun = cmnd->device->lun; 503 + memcpy(&fast_path_evt->un.scsi_evt.wwpn, 504 + &pnode->nlp_portname, sizeof(struct lpfc_name)); 505 + memcpy(&fast_path_evt->un.scsi_evt.wwnn, 506 + &pnode->nlp_nodename, sizeof(struct lpfc_name)); 507 + } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen && 508 + ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) { 509 + fast_path_evt = lpfc_alloc_fast_evt(phba); 510 + if (!fast_path_evt) 511 + return; 512 + fast_path_evt->un.check_cond_evt.scsi_event.event_type = 513 + FC_REG_SCSI_EVENT; 514 + fast_path_evt->un.check_cond_evt.scsi_event.subcategory = 515 + LPFC_EVENT_CHECK_COND; 516 + fast_path_evt->un.check_cond_evt.scsi_event.lun = 517 + cmnd->device->lun; 518 + memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn, 519 + &pnode->nlp_portname, sizeof(struct lpfc_name)); 520 + memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn, 521 + &pnode->nlp_nodename, sizeof(struct lpfc_name)); 522 + fast_path_evt->un.check_cond_evt.sense_key = 523 + cmnd->sense_buffer[2] & 0xf; 524 + fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12]; 525 + fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13]; 526 + } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) && 527 + fcpi_parm && 528 + ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) || 529 + ((scsi_status == SAM_STAT_GOOD) && 530 + !(resp_info & (RESID_UNDER | RESID_OVER))))) { 531 + /* 532 + * If status is good or resid does not match with fcp_param and 533 + * there is valid fcpi_parm, then there is a read_check error 534 + */ 535 + fast_path_evt = lpfc_alloc_fast_evt(phba); 536 + if (!fast_path_evt) 537 + return; 538 + fast_path_evt->un.read_check_error.header.event_type = 539 + FC_REG_FABRIC_EVENT; 540 + fast_path_evt->un.read_check_error.header.subcategory = 541 + LPFC_EVENT_FCPRDCHKERR; 542 + memcpy(&fast_path_evt->un.read_check_error.header.wwpn, 543 + 
&pnode->nlp_portname, sizeof(struct lpfc_name)); 544 + memcpy(&fast_path_evt->un.read_check_error.header.wwnn, 545 + &pnode->nlp_nodename, sizeof(struct lpfc_name)); 546 + fast_path_evt->un.read_check_error.lun = cmnd->device->lun; 547 + fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0]; 548 + fast_path_evt->un.read_check_error.fcpiparam = 549 + fcpi_parm; 550 + } else 551 + return; 552 + 553 + fast_path_evt->vport = vport; 554 + spin_lock_irqsave(&phba->hbalock, flags); 555 + list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list); 556 + spin_unlock_irqrestore(&phba->hbalock, flags); 557 + lpfc_worker_wake_up(phba); 558 + return; 559 + } 593 560 static void 594 561 lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) 595 562 { ··· 708 493 uint32_t host_status = DID_OK; 709 494 uint32_t rsplen = 0; 710 495 uint32_t logit = LOG_FCP | LOG_FCP_ERROR; 496 + 711 497 712 498 /* 713 499 * If this is a task management command, there is no ··· 825 609 826 610 out: 827 611 cmnd->result = ScsiResult(host_status, scsi_status); 612 + lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb); 828 613 } 829 614 830 615 static void ··· 842 625 struct scsi_device *sdev, *tmp_sdev; 843 626 int depth = 0; 844 627 unsigned long flags; 628 + struct lpfc_fast_path_event *fast_path_evt; 845 629 846 630 lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4]; 847 631 lpfc_cmd->status = pIocbOut->iocb.ulpStatus; ··· 873 655 case IOSTAT_NPORT_BSY: 874 656 case IOSTAT_FABRIC_BSY: 875 657 cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0); 658 + fast_path_evt = lpfc_alloc_fast_evt(phba); 659 + if (!fast_path_evt) 660 + break; 661 + fast_path_evt->un.fabric_evt.event_type = 662 + FC_REG_FABRIC_EVENT; 663 + fast_path_evt->un.fabric_evt.subcategory = 664 + (lpfc_cmd->status == IOSTAT_NPORT_BSY) ? 
665 + LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY; 666 + if (pnode && NLP_CHK_NODE_ACT(pnode)) { 667 + memcpy(&fast_path_evt->un.fabric_evt.wwpn, 668 + &pnode->nlp_portname, 669 + sizeof(struct lpfc_name)); 670 + memcpy(&fast_path_evt->un.fabric_evt.wwnn, 671 + &pnode->nlp_nodename, 672 + sizeof(struct lpfc_name)); 673 + } 674 + fast_path_evt->vport = vport; 675 + fast_path_evt->work_evt.evt = 676 + LPFC_EVT_FASTPATH_MGMT_EVT; 677 + spin_lock_irqsave(&phba->hbalock, flags); 678 + list_add_tail(&fast_path_evt->work_evt.evt_listp, 679 + &phba->work_list); 680 + spin_unlock_irqrestore(&phba->hbalock, flags); 681 + lpfc_worker_wake_up(phba); 876 682 break; 877 683 case IOSTAT_LOCAL_REJECT: 878 684 if (lpfc_cmd->result == IOERR_INVALID_RPI || ··· 929 687 scsi_get_resid(cmd)); 930 688 } 931 689 690 + lpfc_update_stats(phba, lpfc_cmd); 932 691 result = cmd->result; 933 692 sdev = cmd->device; 934 693 if (vport->cfg_max_scsicmpl_time && ··· 998 755 pnode->last_ramp_up_time = jiffies; 999 756 } 1000 757 } 758 + lpfc_send_sdev_queuedepth_change_event(phba, vport, pnode, 759 + 0xFFFFFFFF, 760 + sdev->queue_depth - 1, sdev->queue_depth); 1001 761 } 1002 762 1003 763 /* ··· 1030 784 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 1031 785 "0711 detected queue full - lun queue " 1032 786 "depth adjusted to %d.\n", depth); 787 + lpfc_send_sdev_queuedepth_change_event(phba, vport, 788 + pnode, 0xFFFFFFFF, 789 + depth+1, depth); 1033 790 } 1034 791 } 1035 792 ··· 1361 1112 goto out_host_busy; 1362 1113 } 1363 1114 1115 + lpfc_cmd->start_time = jiffies; 1364 1116 /* 1365 1117 * Store the midlayer's command structure for the completion phase 1366 1118 * and complete the command initialization. 
··· 1530 1280 int ret = SUCCESS; 1531 1281 int status; 1532 1282 int cnt; 1283 + struct lpfc_scsi_event_header scsi_event; 1533 1284 1534 1285 lpfc_block_error_handler(cmnd); 1535 1286 /* ··· 1549 1298 break; 1550 1299 pnode = rdata->pnode; 1551 1300 } 1301 + 1302 + scsi_event.event_type = FC_REG_SCSI_EVENT; 1303 + scsi_event.subcategory = LPFC_EVENT_TGTRESET; 1304 + scsi_event.lun = 0; 1305 + memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name)); 1306 + memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name)); 1307 + 1308 + fc_host_post_vendor_event(shost, 1309 + fc_get_event_number(), 1310 + sizeof(scsi_event), 1311 + (char *)&scsi_event, 1312 + SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1313 + 1552 1314 if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) { 1553 1315 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 1554 1316 "0721 LUN Reset rport " ··· 1645 1381 int cnt; 1646 1382 struct lpfc_scsi_buf * lpfc_cmd; 1647 1383 unsigned long later; 1384 + struct lpfc_scsi_event_header scsi_event; 1385 + 1386 + scsi_event.event_type = FC_REG_SCSI_EVENT; 1387 + scsi_event.subcategory = LPFC_EVENT_BUSRESET; 1388 + scsi_event.lun = 0; 1389 + memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name)); 1390 + memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name)); 1391 + 1392 + fc_host_post_vendor_event(shost, 1393 + fc_get_event_number(), 1394 + sizeof(scsi_event), 1395 + (char *)&scsi_event, 1396 + SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1648 1397 1649 1398 lpfc_block_error_handler(cmnd); 1650 1399 /*
+4
drivers/scsi/lpfc/lpfc_scsi.h
··· 107 107 108 108 }; 109 109 110 + struct lpfc_scsicmd_bkt { 111 + uint32_t cmd_count; 112 + }; 113 + 110 114 struct lpfc_scsi_buf { 111 115 struct list_head list; 112 116 struct scsi_cmnd *pCmd;
+12
drivers/scsi/lpfc/lpfc_sli.c
··· 32 32 33 33 #include "lpfc_hw.h" 34 34 #include "lpfc_sli.h" 35 + #include "lpfc_nl.h" 35 36 #include "lpfc_disc.h" 36 37 #include "lpfc_scsi.h" 37 38 #include "lpfc.h" ··· 1611 1610 1612 1611 if (cmdiocbp) { 1613 1612 if (cmdiocbp->iocb_cmpl) { 1613 + /* 1614 + * If an ELS command failed send an event to mgmt 1615 + * application. 1616 + */ 1617 + if (saveq->iocb.ulpStatus && 1618 + (pring->ringno == LPFC_ELS_RING) && 1619 + (cmdiocbp->iocb.ulpCommand == 1620 + CMD_ELS_REQUEST64_CR)) 1621 + lpfc_send_els_failure_event(phba, 1622 + cmdiocbp, saveq); 1623 + 1614 1624 /* 1615 1625 * Post all ELS completions to the worker thread. 1616 1626 * All other are passed to the completion callback.
+80
drivers/scsi/lpfc/lpfc_vport.c
··· 34 34 #include <scsi/scsi_transport_fc.h> 35 35 #include "lpfc_hw.h" 36 36 #include "lpfc_sli.h" 37 + #include "lpfc_nl.h" 37 38 #include "lpfc_disc.h" 38 39 #include "lpfc_scsi.h" 39 40 #include "lpfc.h" ··· 745 744 for (i=0; vports[i] != NULL && i <= phba->max_vpi; i++) 746 745 scsi_host_put(lpfc_shost_from_vport(vports[i])); 747 746 kfree(vports); 747 + } 748 + 749 + 750 + /** 751 + * lpfc_vport_reset_stat_data: Reset the statistical data for the vport. 752 + * @vport: Pointer to vport object. 753 + * 754 + * This function resets the statistical data for the vport. This function 755 + * is called with the host_lock held 756 + **/ 757 + void 758 + lpfc_vport_reset_stat_data(struct lpfc_vport *vport) 759 + { 760 + struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL; 761 + 762 + list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 763 + if (!NLP_CHK_NODE_ACT(ndlp)) 764 + continue; 765 + if (ndlp->lat_data) 766 + memset(ndlp->lat_data, 0, LPFC_MAX_BUCKET_COUNT * 767 + sizeof(struct lpfc_scsicmd_bkt)); 768 + } 769 + } 770 + 771 + 772 + /** 773 + * lpfc_alloc_bucket: Allocate data buffer required for collecting 774 + * statistical data. 775 + * @vport: Pointer to vport object. 776 + * 777 + * This function allocates data buffer required for all the FC 778 + * nodes of the vport to collect statistical data. 
779 + **/ 780 + void 781 + lpfc_alloc_bucket(struct lpfc_vport *vport) 782 + { 783 + struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL; 784 + 785 + list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 786 + if (!NLP_CHK_NODE_ACT(ndlp)) 787 + continue; 788 + 789 + kfree(ndlp->lat_data); 790 + ndlp->lat_data = NULL; 791 + 792 + if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) { 793 + ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT, 794 + sizeof(struct lpfc_scsicmd_bkt), 795 + GFP_ATOMIC); 796 + 797 + if (!ndlp->lat_data) 798 + lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 799 + "0287 lpfc_alloc_bucket failed to " 800 + "allocate statistical data buffer DID " 801 + "0x%x\n", ndlp->nlp_DID); 802 + } 803 + } 804 + } 805 + 806 + /** 807 + * lpfc_free_bucket: Free data buffer required for collecting 808 + * statistical data. 809 + * @vport: Pointer to vport object. 810 + * 811 + * Th function frees statistical data buffer of all the FC 812 + * nodes of the vport. 813 + **/ 814 + void 815 + lpfc_free_bucket(struct lpfc_vport *vport) 816 + { 817 + struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL; 818 + 819 + list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 820 + if (!NLP_CHK_NODE_ACT(ndlp)) 821 + continue; 822 + 823 + kfree(ndlp->lat_data); 824 + ndlp->lat_data = NULL; 825 + } 748 826 }
+4
drivers/scsi/lpfc/lpfc_vport.h
··· 112 112 void lpfc_vport_set_state(struct lpfc_vport *vport, 113 113 enum fc_vport_state new_state); 114 114 115 + void lpfc_vport_reset_stat_data(struct lpfc_vport *); 116 + void lpfc_alloc_bucket(struct lpfc_vport *); 117 + void lpfc_free_bucket(struct lpfc_vport *); 118 + 115 119 #endif /* H_LPFC_VPORT */