Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

scsi: csiostor: switch to pci_alloc_irq_vectors

And get automatic MSI-X affinity for free.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Varun Prakash <varun@chelsio.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>

Authored by Christoph Hellwig; committed by Martin K. Petersen.
104d9c7f 75106523

+47 -82
-1
drivers/scsi/csiostor/csio_hw.h
··· 95 95 }; 96 96 97 97 struct csio_msix_entries { 98 - unsigned short vector; /* Assigned MSI-X vector */ 99 98 void *dev_id; /* Priv object associated w/ this msix*/ 100 99 char desc[24]; /* Description of this vector */ 101 100 };
+47 -81
drivers/scsi/csiostor/csio_isr.c
··· 383 383 int rv, i, j, k = 0; 384 384 struct csio_msix_entries *entryp = &hw->msix_entries[0]; 385 385 struct csio_scsi_cpu_info *info; 386 + struct pci_dev *pdev = hw->pdev; 386 387 387 388 if (hw->intr_mode != CSIO_IM_MSIX) { 388 - rv = request_irq(hw->pdev->irq, csio_fcoe_isr, 389 - (hw->intr_mode == CSIO_IM_MSI) ? 390 - 0 : IRQF_SHARED, 391 - KBUILD_MODNAME, hw); 389 + rv = request_irq(pci_irq_vector(pdev, 0), csio_fcoe_isr, 390 + hw->intr_mode == CSIO_IM_MSI ? 0 : IRQF_SHARED, 391 + KBUILD_MODNAME, hw); 392 392 if (rv) { 393 - if (hw->intr_mode == CSIO_IM_MSI) 394 - pci_disable_msi(hw->pdev); 395 393 csio_err(hw, "Failed to allocate interrupt line.\n"); 396 - return -EINVAL; 394 + goto out_free_irqs; 397 395 } 398 396 399 397 goto out; ··· 400 402 /* Add the MSIX vector descriptions */ 401 403 csio_add_msix_desc(hw); 402 404 403 - rv = request_irq(entryp[k].vector, csio_nondata_isr, 0, 405 + rv = request_irq(pci_irq_vector(pdev, k), csio_nondata_isr, 0, 404 406 entryp[k].desc, hw); 405 407 if (rv) { 406 408 csio_err(hw, "IRQ request failed for vec %d err:%d\n", 407 - entryp[k].vector, rv); 408 - goto err; 409 + pci_irq_vector(pdev, k), rv); 410 + goto out_free_irqs; 409 411 } 410 412 411 - entryp[k++].dev_id = (void *)hw; 413 + entryp[k++].dev_id = hw; 412 414 413 - rv = request_irq(entryp[k].vector, csio_fwevt_isr, 0, 415 + rv = request_irq(pci_irq_vector(pdev, k), csio_fwevt_isr, 0, 414 416 entryp[k].desc, hw); 415 417 if (rv) { 416 418 csio_err(hw, "IRQ request failed for vec %d err:%d\n", 417 - entryp[k].vector, rv); 418 - goto err; 419 + pci_irq_vector(pdev, k), rv); 420 + goto out_free_irqs; 419 421 } 420 422 421 423 entryp[k++].dev_id = (void *)hw; ··· 427 429 struct csio_scsi_qset *sqset = &hw->sqset[i][j]; 428 430 struct csio_q *q = hw->wrm.q_arr[sqset->iq_idx]; 429 431 430 - rv = request_irq(entryp[k].vector, csio_scsi_isr, 0, 432 + rv = request_irq(pci_irq_vector(pdev, k), csio_scsi_isr, 0, 431 433 entryp[k].desc, q); 432 434 if (rv) { 433 435 
csio_err(hw, 434 436 "IRQ request failed for vec %d err:%d\n", 435 - entryp[k].vector, rv); 436 - goto err; 437 + pci_irq_vector(pdev, k), rv); 438 + goto out_free_irqs; 437 439 } 438 440 439 - entryp[k].dev_id = (void *)q; 441 + entryp[k].dev_id = q; 440 442 441 443 } /* for all scsi cpus */ 442 444 } /* for all ports */ 443 445 444 446 out: 445 447 hw->flags |= CSIO_HWF_HOST_INTR_ENABLED; 446 - 447 448 return 0; 448 449 449 - err: 450 - for (i = 0; i < k; i++) { 451 - entryp = &hw->msix_entries[i]; 452 - free_irq(entryp->vector, entryp->dev_id); 453 - } 454 - pci_disable_msix(hw->pdev); 455 - 450 + out_free_irqs: 451 + for (i = 0; i < k; i++) 452 + free_irq(pci_irq_vector(pdev, i), hw->msix_entries[i].dev_id); 453 + pci_free_irq_vectors(hw->pdev); 456 454 return -EINVAL; 457 - } 458 - 459 - static void 460 - csio_disable_msix(struct csio_hw *hw, bool free) 461 - { 462 - int i; 463 - struct csio_msix_entries *entryp; 464 - int cnt = hw->num_sqsets + CSIO_EXTRA_VECS; 465 - 466 - if (free) { 467 - for (i = 0; i < cnt; i++) { 468 - entryp = &hw->msix_entries[i]; 469 - free_irq(entryp->vector, entryp->dev_id); 470 - } 471 - } 472 - pci_disable_msix(hw->pdev); 473 455 } 474 456 475 457 /* Reduce per-port max possible CPUs */ ··· 478 500 csio_enable_msix(struct csio_hw *hw) 479 501 { 480 502 int i, j, k, n, min, cnt; 481 - struct csio_msix_entries *entryp; 482 - struct msix_entry *entries; 483 503 int extra = CSIO_EXTRA_VECS; 484 504 struct csio_scsi_cpu_info *info; 505 + struct irq_affinity desc = { .pre_vectors = 2 }; 485 506 486 507 min = hw->num_pports + extra; 487 508 cnt = hw->num_sqsets + extra; ··· 489 512 if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS || !csio_is_hw_master(hw)) 490 513 cnt = min_t(uint8_t, hw->cfg_niq, cnt); 491 514 492 - entries = kzalloc(sizeof(struct msix_entry) * cnt, GFP_KERNEL); 493 - if (!entries) 494 - return -ENOMEM; 495 - 496 - for (i = 0; i < cnt; i++) 497 - entries[i].entry = (uint16_t)i; 498 - 499 515 csio_dbg(hw, "FW supp #niq:%d, 
trying %d msix's\n", hw->cfg_niq, cnt); 500 516 501 - cnt = pci_enable_msix_range(hw->pdev, entries, min, cnt); 502 - if (cnt < 0) { 503 - kfree(entries); 517 + cnt = pci_alloc_irq_vectors_affinity(hw->pdev, min, cnt, 518 + PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc); 519 + if (cnt < 0) 504 520 return cnt; 505 - } 506 521 507 522 if (cnt < (hw->num_sqsets + extra)) { 508 523 csio_dbg(hw, "Reducing sqsets to %d\n", cnt - extra); 509 524 csio_reduce_sqsets(hw, cnt - extra); 510 525 } 511 526 512 - /* Save off vectors */ 513 - for (i = 0; i < cnt; i++) { 514 - entryp = &hw->msix_entries[i]; 515 - entryp->vector = entries[i].vector; 516 - } 517 - 518 527 /* Distribute vectors */ 519 528 k = 0; 520 - csio_set_nondata_intr_idx(hw, entries[k].entry); 521 - csio_set_mb_intr_idx(csio_hw_to_mbm(hw), entries[k++].entry); 522 - csio_set_fwevt_intr_idx(hw, entries[k++].entry); 529 + csio_set_nondata_intr_idx(hw, k); 530 + csio_set_mb_intr_idx(csio_hw_to_mbm(hw), k++); 531 + csio_set_fwevt_intr_idx(hw, k++); 523 532 524 533 for (i = 0; i < hw->num_pports; i++) { 525 534 info = &hw->scsi_cpu_info[i]; 526 535 527 536 for (j = 0; j < hw->num_scsi_msix_cpus; j++) { 528 537 n = (j % info->max_cpus) + k; 529 - hw->sqset[i][j].intr_idx = entries[n].entry; 538 + hw->sqset[i][j].intr_idx = n; 530 539 } 531 540 532 541 k += info->max_cpus; 533 542 } 534 543 535 - kfree(entries); 536 544 return 0; 537 545 } 538 546 ··· 559 597 { 560 598 csio_hw_intr_disable(hw); 561 599 562 - switch (hw->intr_mode) { 563 - case CSIO_IM_MSIX: 564 - csio_disable_msix(hw, free); 565 - break; 566 - case CSIO_IM_MSI: 567 - if (free) 568 - free_irq(hw->pdev->irq, hw); 569 - pci_disable_msi(hw->pdev); 570 - break; 571 - case CSIO_IM_INTX: 572 - if (free) 573 - free_irq(hw->pdev->irq, hw); 574 - break; 575 - default: 576 - break; 600 + if (free) { 601 + int i; 602 + 603 + switch (hw->intr_mode) { 604 + case CSIO_IM_MSIX: 605 + for (i = 0; i < hw->num_sqsets + CSIO_EXTRA_VECS; i++) { 606 + 
free_irq(pci_irq_vector(hw->pdev, i), 607 + hw->msix_entries[i].dev_id); 608 + } 609 + break; 610 + case CSIO_IM_MSI: 611 + case CSIO_IM_INTX: 612 + free_irq(pci_irq_vector(hw->pdev, 0), hw); 613 + break; 614 + default: 615 + break; 616 + } 577 617 } 618 + 619 + pci_free_irq_vectors(hw->pdev); 578 620 hw->intr_mode = CSIO_IM_NONE; 579 621 hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED; 580 622 }