[PATCH] pcnet32: Handle memory allocation failures cleanly when resizing tx/rx rings

Fix pcnet32_set_ringparam to handle memory allocation errors without
leaving the adapter in an inoperative state or leaving NULL pointers
waiting to be dereferenced.

Tested on ia32 and ppc64.

Signed-off-by: Don Fry <brazilnut@us.ibm.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>

Authored by Don Fry, committed by Jeff Garzik (06c87850, 12fa30f3)

+251 -24
drivers/net/pcnet32.c
···
 #define PCNET32_TOTAL_SIZE 0x20
 
+#define CSR0            0
+#define CSR0_INIT       0x1
+#define CSR0_START      0x2
+#define CSR0_STOP       0x4
+#define CSR0_TXPOLL     0x8
+#define CSR0_INTEN      0x40
+#define CSR0_IDON       0x0100
+#define CSR0_NORMAL     (CSR0_START | CSR0_INTEN)
+#define PCNET32_INIT_LOW        1
+#define PCNET32_INIT_HIGH       2
+#define CSR3            3
+#define CSR4            4
+#define CSR5            5
+#define CSR5_SUSPEND    0x0001
+#define CSR15           15
+#define PCNET32_MC_FILTER       8
+
 /* The PCNET32 Rx and Tx ring descriptors. */
 struct pcnet32_rx_head {
         u32 base;
···
         .reset = pcnet32_dwio_reset
 };
 
+static void pcnet32_netif_stop(struct net_device *dev)
+{
+        dev->trans_start = jiffies;
+        netif_poll_disable(dev);
+        netif_tx_disable(dev);
+}
+
+static void pcnet32_netif_start(struct net_device *dev)
+{
+        netif_wake_queue(dev);
+        netif_poll_enable(dev);
+}
+
+/*
+ * Allocate space for the new sized tx ring.
+ * Free old resources
+ * Save new resources.
+ * Any failure keeps old resources.
+ * Must be called with lp->lock held.
+ */
+static void pcnet32_realloc_tx_ring(struct net_device *dev,
+                                    struct pcnet32_private *lp,
+                                    unsigned int size)
+{
+        dma_addr_t new_ring_dma_addr;
+        dma_addr_t *new_dma_addr_list;
+        struct pcnet32_tx_head *new_tx_ring;
+        struct sk_buff **new_skb_list;
+
+        pcnet32_purge_tx_ring(dev);
+
+        new_tx_ring = pci_alloc_consistent(lp->pci_dev,
+                                           sizeof(struct pcnet32_tx_head) *
+                                           (1 << size),
+                                           &new_ring_dma_addr);
+        if (new_tx_ring == NULL) {
+                if (netif_msg_drv(lp))
+                        printk("\n" KERN_ERR
+                               "%s: Consistent memory allocation failed.\n",
+                               dev->name);
+                return;
+        }
+        memset(new_tx_ring, 0, sizeof(struct pcnet32_tx_head) * (1 << size));
+
+        new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
+                                    GFP_ATOMIC);
+        if (!new_dma_addr_list) {
+                if (netif_msg_drv(lp))
+                        printk("\n" KERN_ERR
+                               "%s: Memory allocation failed.\n", dev->name);
+                goto free_new_tx_ring;
+        }
+
+        new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
+                               GFP_ATOMIC);
+        if (!new_skb_list) {
+                if (netif_msg_drv(lp))
+                        printk("\n" KERN_ERR
+                               "%s: Memory allocation failed.\n", dev->name);
+                goto free_new_lists;
+        }
+
+        kfree(lp->tx_skbuff);
+        kfree(lp->tx_dma_addr);
+        pci_free_consistent(lp->pci_dev,
+                            sizeof(struct pcnet32_tx_head) *
+                            lp->tx_ring_size, lp->tx_ring,
+                            lp->tx_ring_dma_addr);
+
+        lp->tx_ring_size = (1 << size);
+        lp->tx_mod_mask = lp->tx_ring_size - 1;
+        lp->tx_len_bits = (size << 12);
+        lp->tx_ring = new_tx_ring;
+        lp->tx_ring_dma_addr = new_ring_dma_addr;
+        lp->tx_dma_addr = new_dma_addr_list;
+        lp->tx_skbuff = new_skb_list;
+        return;
+
+free_new_lists:
+        kfree(new_dma_addr_list);
+free_new_tx_ring:
+        pci_free_consistent(lp->pci_dev,
+                            sizeof(struct pcnet32_tx_head) *
+                            (1 << size),
+                            new_tx_ring,
+                            new_ring_dma_addr);
+        return;
+}
+
+/*
+ * Allocate space for the new sized rx ring.
+ * Re-use old receive buffers.
+ *   alloc extra buffers
+ *   free unneeded buffers
+ *   free unneeded buffers
+ * Save new resources.
+ * Any failure keeps old resources.
+ * Must be called with lp->lock held.
+ */
+static void pcnet32_realloc_rx_ring(struct net_device *dev,
+                                    struct pcnet32_private *lp,
+                                    unsigned int size)
+{
+        dma_addr_t new_ring_dma_addr;
+        dma_addr_t *new_dma_addr_list;
+        struct pcnet32_rx_head *new_rx_ring;
+        struct sk_buff **new_skb_list;
+        int new, overlap;
+
+        new_rx_ring = pci_alloc_consistent(lp->pci_dev,
+                                           sizeof(struct pcnet32_rx_head) *
+                                           (1 << size),
+                                           &new_ring_dma_addr);
+        if (new_rx_ring == NULL) {
+                if (netif_msg_drv(lp))
+                        printk("\n" KERN_ERR
+                               "%s: Consistent memory allocation failed.\n",
+                               dev->name);
+                return;
+        }
+        memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * (1 << size));
+
+        new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
+                                    GFP_ATOMIC);
+        if (!new_dma_addr_list) {
+                if (netif_msg_drv(lp))
+                        printk("\n" KERN_ERR
+                               "%s: Memory allocation failed.\n", dev->name);
+                goto free_new_rx_ring;
+        }
+
+        new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
+                               GFP_ATOMIC);
+        if (!new_skb_list) {
+                if (netif_msg_drv(lp))
+                        printk("\n" KERN_ERR
+                               "%s: Memory allocation failed.\n", dev->name);
+                goto free_new_lists;
+        }
+
+        /* first copy the current receive buffers */
+        overlap = min(size, lp->rx_ring_size);
+        for (new = 0; new < overlap; new++) {
+                new_rx_ring[new] = lp->rx_ring[new];
+                new_dma_addr_list[new] = lp->rx_dma_addr[new];
+                new_skb_list[new] = lp->rx_skbuff[new];
+        }
+        /* now allocate any new buffers needed */
+        for (; new < size; new++) {
+                struct sk_buff *rx_skbuff;
+                new_skb_list[new] = dev_alloc_skb(PKT_BUF_SZ);
+                if (!(rx_skbuff = new_skb_list[new])) {
+                        /* keep the original lists and buffers */
+                        if (netif_msg_drv(lp))
+                                printk(KERN_ERR
+                                       "%s: pcnet32_realloc_rx_ring dev_alloc_skb failed.\n",
+                                       dev->name);
+                        goto free_all_new;
+                }
+                skb_reserve(rx_skbuff, 2);
+
+                new_dma_addr_list[new] =
+                    pci_map_single(lp->pci_dev, rx_skbuff->data,
+                                   PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
+                new_rx_ring[new].base = (u32) le32_to_cpu(new_dma_addr_list[new]);
+                new_rx_ring[new].buf_length = le16_to_cpu(2 - PKT_BUF_SZ);
+                new_rx_ring[new].status = le16_to_cpu(0x8000);
+        }
+        /* and free any unneeded buffers */
+        for (; new < lp->rx_ring_size; new++) {
+                if (lp->rx_skbuff[new]) {
+                        pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[new],
+                                         PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
+                        dev_kfree_skb(lp->rx_skbuff[new]);
+                }
+        }
+
+        kfree(lp->rx_skbuff);
+        kfree(lp->rx_dma_addr);
+        pci_free_consistent(lp->pci_dev,
+                            sizeof(struct pcnet32_rx_head) *
+                            lp->rx_ring_size, lp->rx_ring,
+                            lp->rx_ring_dma_addr);
+
+        lp->rx_ring_size = (1 << size);
+        lp->rx_mod_mask = lp->rx_ring_size - 1;
+        lp->rx_len_bits = (size << 4);
+        lp->rx_ring = new_rx_ring;
+        lp->rx_ring_dma_addr = new_ring_dma_addr;
+        lp->rx_dma_addr = new_dma_addr_list;
+        lp->rx_skbuff = new_skb_list;
+        return;
+
+free_all_new:
+        for (; --new >= lp->rx_ring_size;) {
+                if (new_skb_list[new]) {
+                        pci_unmap_single(lp->pci_dev, new_dma_addr_list[new],
+                                         PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
+                        dev_kfree_skb(new_skb_list[new]);
+                }
+        }
+        kfree(new_skb_list);
+free_new_lists:
+        kfree(new_dma_addr_list);
+free_new_rx_ring:
+        pci_free_consistent(lp->pci_dev,
+                            sizeof(struct pcnet32_rx_head) *
+                            (1 << size),
+                            new_rx_ring,
+                            new_ring_dma_addr);
+        return;
+}
+
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void pcnet32_poll_controller(struct net_device *dev)
 {
···
 {
         struct pcnet32_private *lp = dev->priv;
         unsigned long flags;
+        unsigned int size;
+        ulong ioaddr = dev->base_addr;
         int i;
 
         if (ering->rx_mini_pending || ering->rx_jumbo_pending)
                 return -EINVAL;
 
         if (netif_running(dev))
-                pcnet32_close(dev);
+                pcnet32_netif_stop(dev);
 
         spin_lock_irqsave(&lp->lock, flags);
-        pcnet32_free_ring(dev);
-        lp->tx_ring_size =
-            min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
-        lp->rx_ring_size =
-            min(ering->rx_pending, (unsigned int)RX_MAX_RING_SIZE);
+        lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);       /* stop the chip */
+
+        size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
 
         /* set the minimum ring size to 4, to allow the loopback test to work
          * unchanged.
          */
         for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) {
-                if (lp->tx_ring_size <= (1 << i))
+                if (size <= (1 << i))
                         break;
         }
-        lp->tx_ring_size = (1 << i);
-        lp->tx_mod_mask = lp->tx_ring_size - 1;
-        lp->tx_len_bits = (i << 12);
-
+        if ((1 << i) != lp->tx_ring_size)
+                pcnet32_realloc_tx_ring(dev, lp, i);
+
+        size = min(ering->rx_pending, (unsigned int)RX_MAX_RING_SIZE);
         for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) {
-                if (lp->rx_ring_size <= (1 << i))
+                if (size <= (1 << i))
                         break;
         }
-        lp->rx_ring_size = (1 << i);
-        lp->rx_mod_mask = lp->rx_ring_size - 1;
-        lp->rx_len_bits = (i << 4);
+        if ((1 << i) != lp->rx_ring_size)
+                pcnet32_realloc_rx_ring(dev, lp, i);
+
+        dev->weight = lp->rx_ring_size / 2;
 
-        if (pcnet32_alloc_ring(dev, dev->name)) {
-                pcnet32_free_ring(dev);
-                spin_unlock_irqrestore(&lp->lock, flags);
-                return -ENOMEM;
+        if (netif_running(dev)) {
+                pcnet32_netif_start(dev);
+                pcnet32_restart(dev, CSR0_NORMAL);
         }
 
         spin_unlock_irqrestore(&lp->lock, flags);
 
-        if (pcnet32_debug & NETIF_MSG_DRV)
-                printk(KERN_INFO PFX
+        if (netif_msg_drv(lp))
+                printk(KERN_INFO
                        "%s: Ring Param Settings: RX: %d, TX: %d\n", dev->name,
                        lp->rx_ring_size, lp->tx_ring_size);
-
-        if (netif_running(dev))
-                pcnet32_open(dev);
 
         return 0;
 }
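
For illustration only (not part of the patch): the error-handling strategy above
comes down to allocating every replacement resource first and swapping it in only
once everything has succeeded, so a failed resize leaves the old ring intact. The
sketch below shows that pattern in plain userspace C; the struct ring type, the
ring_realloc() helper, and the calloc()/free() calls are hypothetical stand-ins
for the driver's descriptor rings and PCI consistent-memory helpers.

#include <stdio.h>
#include <stdlib.h>

struct ring {
        void *descs;            /* descriptor array */
        unsigned int entries;   /* number of descriptors */
};

/*
 * Resize 'r' to hold 'entries' descriptors of 'entry_sz' bytes each.
 * Every new resource is allocated before anything old is freed, so an
 * allocation failure leaves the original ring untouched.
 */
static int ring_realloc(struct ring *r, unsigned int entries, size_t entry_sz)
{
        void *new_descs = calloc(entries, entry_sz);

        if (!new_descs)
                return -1;      /* keep the old ring */

        free(r->descs);         /* release old storage only after success */
        r->descs = new_descs;
        r->entries = entries;
        return 0;
}

int main(void)
{
        struct ring rx = { .descs = calloc(32, 16), .entries = 32 };

        if (ring_realloc(&rx, 128, 16) < 0)
                fprintf(stderr, "resize failed, old ring still usable\n");
        printf("ring now has %u entries\n", rx.entries);
        free(rx.descs);
        return 0;
}

The driver applies the same idea to three resources per direction (descriptor
ring, DMA-address list, and skb list), using goto-based unwinding to free the
partially built replacement when a later allocation fails.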