[SCSI] lpfc 8.3.42: Fix crash on driver load due to cpu affinity logic

Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>

Authored by James Smart, committed by James Bottomley
ec2087a7 14660f4f

2 files changed: +27 -6

drivers/scsi/lpfc/lpfc_attr.c  (+4 -1)
···
 # For [0], FCP commands are issued to Work Queues ina round robin fashion.
 # For [1], FCP commands are issued to a Work Queue associated with the
 #          current CPU.
+# It would be set to 1 by the driver if it's able to set up cpu affinity
+# for FCP I/Os through Work Queue associated with the current CPU. Otherwise,
+# roundrobin scheduling of FCP I/Os through WQs will be used.
 */
-LPFC_ATTR_RW(fcp_io_sched, 0, 0, 1, "Determine scheduling algrithmn for "
+LPFC_ATTR_RW(fcp_io_sched, 0, 0, 1, "Determine scheduling algorithm for "
 	"issuing commands [0] - Round Robin, [1] - Current CPU");
 
 /*
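For context on what the flag controls: with [0] each new FCP command goes to the next work queue in turn, while with [1] it goes to the work queue tied to the CPU that issued it; per the new comment block, the driver only keeps mode 1 if it managed to build that per-CPU mapping at init time. The following is a minimal sketch of the two policies, not lpfc code; the names pick_io_channel, cur_cpu and nr_channels are illustrative assumptions.

/*
 * Illustrative sketch only (not lpfc code): the two fcp_io_sched policies.
 * [0] spreads commands over the work queues in turn; [1] sends each command
 * to the work queue mapped to the CPU that issued it.
 */
static unsigned int pick_io_channel(int sched_by_cpu, unsigned int cur_cpu,
				    unsigned int nr_channels)
{
	static unsigned int rr_next;	/* round-robin cursor */

	if (sched_by_cpu)
		return cur_cpu % nr_channels;	/* [1] - Current CPU */
	return rr_next++ % nr_channels;		/* [0] - Round Robin */
}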

drivers/scsi/lpfc/lpfc_init.c  (+23 -5)
···
 lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
 {
 	int i, idx, saved_chann, used_chann, cpu, phys_id;
-	int max_phys_id, num_io_channel, first_cpu;
+	int max_phys_id, min_phys_id;
+	int num_io_channel, first_cpu, chan;
 	struct lpfc_vector_map_info *cpup;
 #ifdef CONFIG_X86
 	struct cpuinfo_x86 *cpuinfo;
···
 			phba->sli4_hba.num_present_cpu));
 
 	max_phys_id = 0;
+	min_phys_id = 0xff;
 	phys_id = 0;
 	num_io_channel = 0;
 	first_cpu = LPFC_VECTOR_MAP_EMPTY;
···
 
 		if (cpup->phys_id > max_phys_id)
 			max_phys_id = cpup->phys_id;
+		if (cpup->phys_id < min_phys_id)
+			min_phys_id = cpup->phys_id;
 		cpup++;
 	}
 
+	phys_id = min_phys_id;
 	/* Now associate the HBA vectors with specific CPUs */
 	for (idx = 0; idx < vectors; idx++) {
 		cpup = phba->sli4_hba.cpu_map;
···
 			for (i = 1; i < max_phys_id; i++) {
 				phys_id++;
 				if (phys_id > max_phys_id)
-					phys_id = 0;
+					phys_id = min_phys_id;
 				cpu = lpfc_find_next_cpu(phba, phys_id);
 				if (cpu == LPFC_VECTOR_MAP_EMPTY)
 					continue;
 				goto found;
+			}
+
+			/* Use round robin for scheduling */
+			phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_ROUND_ROBIN;
+			chan = 0;
+			cpup = phba->sli4_hba.cpu_map;
+			for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+				cpup->channel_id = chan;
+				cpup++;
+				chan++;
+				if (chan >= phba->cfg_fcp_io_channel)
+					chan = 0;
 			}
 
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
···
 		/* Spread vector mapping across multple physical CPU nodes */
 		phys_id++;
 		if (phys_id > max_phys_id)
-			phys_id = 0;
+			phys_id = min_phys_id;
 	}
 
 	/*
···
 	 * Base the remaining IO channel assigned, to IO channels already
 	 * assigned to other CPUs on the same phys_id.
 	 */
-	for (i = 0; i <= max_phys_id; i++) {
+	for (i = min_phys_id; i <= max_phys_id; i++) {
 		/*
 		 * If there are no io channels already mapped to
 		 * this phys_id, just round robin thru the io_channels.
···
 	if (num_io_channel != phba->sli4_hba.num_present_cpu)
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"3333 Set affinity mismatch:"
-				"%d chann != %d cpus: %d vactors\n",
+				"%d chann != %d cpus: %d vectors\n",
 				num_io_channel, phba->sli4_hba.num_present_cpu,
 				vectors);
 
+	/* Enable using cpu affinity for scheduling */
 	phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU;
 	return 1;
 }
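Taken together, the lpfc_init.c changes do two things: the phys_id walk now wraps back to min_phys_id (the lowest physical package ID actually present) instead of to 0, and when no CPU can be found for an interrupt vector the driver drops back to round-robin scheduling after first giving every present CPU's cpu_map entry a valid channel_id, so I/O submission always sees a usable mapping. A standalone sketch of that fallback loop follows; the struct and function names (cpu_map_entry, assign_channels_round_robin) are illustrative, not the driver's.

/*
 * Standalone illustration of the round-robin fallback added above; the
 * struct and function names here are hypothetical, not lpfc's.  Every
 * present CPU gets a valid channel_id, cycling through the available IO
 * channels, so no map entry is left without a channel.
 */
struct cpu_map_entry {
	unsigned short	phys_id;
	unsigned short	channel_id;
};

static void assign_channels_round_robin(struct cpu_map_entry *map,
					unsigned int nr_present_cpus,
					unsigned int nr_io_channels)
{
	unsigned int i, chan = 0;

	for (i = 0; i < nr_present_cpus; i++) {
		map[i].channel_id = chan;
		if (++chan >= nr_io_channels)
			chan = 0;
	}
}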