Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] ibmveth fix buffer pool management

This patch changes the way the ibmveth driver handles the receive
buffers. The old code mallocs and maps all the buffers in the pools
regardless of MTU size and it also limits the number of buffer pools to
three. This patch makes the driver malloc and map the buffers necessary
to support the current MTU. It also replaces the hardcoded buffer pool
count, sizes, and element counts with arrays, to make them easier to
change (with the hope of making them runtime parameters in the future).

Signed-off-by: Santiago Leon <santil@us.ibm.com>
Signed-off-by: Jeff Garzik <jgarzik@pobox.com>

Authored by Santiago Leon; committed by Jeff Garzik.
Commit b6d35182 (parent 0abe791e)

+85 -35
+74 -28
drivers/net/ibmveth.c
··· 97 97 static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter); 98 98 static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs); 99 99 static inline void ibmveth_schedule_replenishing(struct ibmveth_adapter*); 100 + static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter); 100 101 101 102 #ifdef CONFIG_PROC_FS 102 103 #define IBMVETH_PROC_DIR "net/ibmveth" ··· 182 181 atomic_set(&pool->available, 0); 183 182 pool->producer_index = 0; 184 183 pool->consumer_index = 0; 184 + pool->active = 0; 185 185 186 186 return 0; 187 187 } ··· 260 258 /* check if replenishing is needed. */ 261 259 static inline int ibmveth_is_replenishing_needed(struct ibmveth_adapter *adapter) 262 260 { 263 - return ((atomic_read(&adapter->rx_buff_pool[0].available) < adapter->rx_buff_pool[0].threshold) || 264 - (atomic_read(&adapter->rx_buff_pool[1].available) < adapter->rx_buff_pool[1].threshold) || 265 - (atomic_read(&adapter->rx_buff_pool[2].available) < adapter->rx_buff_pool[2].threshold)); 261 + int i; 262 + 263 + for(i = 0; i < IbmVethNumBufferPools; i++) 264 + if(adapter->rx_buff_pool[i].active && 265 + (atomic_read(&adapter->rx_buff_pool[i].available) < 266 + adapter->rx_buff_pool[i].threshold)) 267 + return 1; 268 + return 0; 266 269 } 267 270 268 271 /* kick the replenish tasklet if we need replenishing and it isn't already running */ ··· 282 275 /* replenish tasklet routine */ 283 276 static void ibmveth_replenish_task(struct ibmveth_adapter *adapter) 284 277 { 278 + int i; 279 + 285 280 adapter->replenish_task_cycles++; 286 281 287 - ibmveth_replenish_buffer_pool(adapter, &adapter->rx_buff_pool[0]); 288 - ibmveth_replenish_buffer_pool(adapter, &adapter->rx_buff_pool[1]); 289 - ibmveth_replenish_buffer_pool(adapter, &adapter->rx_buff_pool[2]); 282 + for(i = 0; i < IbmVethNumBufferPools; i++) 283 + if(adapter->rx_buff_pool[i].active) 284 + ibmveth_replenish_buffer_pool(adapter, 285 + 
&adapter->rx_buff_pool[i]); 290 286 291 287 adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8); 292 288 ··· 331 321 kfree(pool->skbuff); 332 322 pool->skbuff = NULL; 333 323 } 324 + pool->active = 0; 334 325 } 335 326 336 327 /* remove a buffer from a pool */ ··· 390 379 ibmveth_assert(pool < IbmVethNumBufferPools); 391 380 ibmveth_assert(index < adapter->rx_buff_pool[pool].size); 392 381 382 + if(!adapter->rx_buff_pool[pool].active) { 383 + ibmveth_rxq_harvest_buffer(adapter); 384 + ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]); 385 + return; 386 + } 387 + 393 388 desc.desc = 0; 394 389 desc.fields.valid = 1; 395 390 desc.fields.length = adapter->rx_buff_pool[pool].buff_size; ··· 426 409 427 410 static void ibmveth_cleanup(struct ibmveth_adapter *adapter) 428 411 { 412 + int i; 413 + 429 414 if(adapter->buffer_list_addr != NULL) { 430 415 if(!dma_mapping_error(adapter->buffer_list_dma)) { 431 416 dma_unmap_single(&adapter->vdev->dev, ··· 462 443 adapter->rx_queue.queue_addr = NULL; 463 444 } 464 445 465 - ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[0]); 466 - ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[1]); 467 - ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[2]); 446 + for(i = 0; i<IbmVethNumBufferPools; i++) 447 + ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[i]); 468 448 } 469 449 470 450 static int ibmveth_open(struct net_device *netdev) 471 451 { 472 452 struct ibmveth_adapter *adapter = netdev->priv; 473 453 u64 mac_address = 0; 474 - int rxq_entries; 454 + int rxq_entries = 1; 475 455 unsigned long lpar_rc; 476 456 int rc; 477 457 union ibmveth_buf_desc rxq_desc; 458 + int i; 478 459 479 460 ibmveth_debug_printk("open starting\n"); 480 461 481 - rxq_entries = 482 - adapter->rx_buff_pool[0].size + 483 - adapter->rx_buff_pool[1].size + 484 - adapter->rx_buff_pool[2].size + 1; 462 + for(i = 0; i<IbmVethNumBufferPools; i++) 463 + rxq_entries += 
adapter->rx_buff_pool[i].size; 485 464 486 465 adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL); 487 466 adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL); ··· 519 502 adapter->rx_queue.num_slots = rxq_entries; 520 503 adapter->rx_queue.toggle = 1; 521 504 522 - if(ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[0]) || 523 - ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[1]) || 524 - ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[2])) 525 - { 526 - ibmveth_error_printk("unable to allocate buffer pools\n"); 527 - ibmveth_cleanup(adapter); 528 - return -ENOMEM; 529 - } 505 + /* call change_mtu to init the buffer pools based in initial mtu */ 506 + ibmveth_change_mtu(netdev, netdev->mtu); 530 507 531 508 memcpy(&mac_address, netdev->dev_addr, netdev->addr_len); 532 509 mac_address = mac_address >> 16; ··· 896 885 897 886 static int ibmveth_change_mtu(struct net_device *dev, int new_mtu) 898 887 { 899 - if ((new_mtu < 68) || (new_mtu > (1<<20))) 888 + struct ibmveth_adapter *adapter = dev->priv; 889 + int i; 890 + int prev_smaller = 1; 891 + 892 + if ((new_mtu < 68) || 893 + (new_mtu > (pool_size[IbmVethNumBufferPools-1]) - IBMVETH_BUFF_OH)) 900 894 return -EINVAL; 895 + 896 + for(i = 0; i<IbmVethNumBufferPools; i++) { 897 + int activate = 0; 898 + if (new_mtu > (pool_size[i] - IBMVETH_BUFF_OH)) { 899 + activate = 1; 900 + prev_smaller= 1; 901 + } else { 902 + if (prev_smaller) 903 + activate = 1; 904 + prev_smaller= 0; 905 + } 906 + 907 + if (activate && !adapter->rx_buff_pool[i].active) { 908 + struct ibmveth_buff_pool *pool = 909 + &adapter->rx_buff_pool[i]; 910 + if(ibmveth_alloc_buffer_pool(pool)) { 911 + ibmveth_error_printk("unable to alloc pool\n"); 912 + return -ENOMEM; 913 + } 914 + adapter->rx_buff_pool[i].active = 1; 915 + } else if (!activate && adapter->rx_buff_pool[i].active) { 916 + adapter->rx_buff_pool[i].active = 0; 917 + h_free_logical_lan_buffer(adapter->vdev->unit_address, 918 + (u64)pool_size[i]); 919 + } 920 + 
921 + } 922 + 923 + 924 + ibmveth_schedule_replenishing(adapter); 901 925 dev->mtu = new_mtu; 902 926 return 0; 903 927 } 904 928 905 929 static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) 906 930 { 907 - int rc; 931 + int rc, i; 908 932 struct net_device *netdev; 909 - struct ibmveth_adapter *adapter; 933 + struct ibmveth_adapter *adapter = NULL; 910 934 911 935 unsigned char *mac_addr_p; 912 936 unsigned int *mcastFilterSize_p; ··· 1011 965 1012 966 memcpy(&netdev->dev_addr, &adapter->mac_addr, netdev->addr_len); 1013 967 1014 - ibmveth_init_buffer_pool(&adapter->rx_buff_pool[0], 0, IbmVethPool0DftCnt, IbmVethPool0DftSize); 1015 - ibmveth_init_buffer_pool(&adapter->rx_buff_pool[1], 1, IbmVethPool1DftCnt, IbmVethPool1DftSize); 1016 - ibmveth_init_buffer_pool(&adapter->rx_buff_pool[2], 2, IbmVethPool2DftCnt, IbmVethPool2DftSize); 968 + for(i = 0; i<IbmVethNumBufferPools; i++) 969 + ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i, 970 + pool_count[i], pool_size[i]); 1017 971 1018 972 ibmveth_debug_printk("adapter @ 0x%p\n", adapter); 1019 973
+11 -7
drivers/net/ibmveth.h
··· 49 49 #define H_SEND_LOGICAL_LAN 0x120 50 50 #define H_MULTICAST_CTRL 0x130 51 51 #define H_CHANGE_LOGICAL_LAN_MAC 0x14C 52 + #define H_FREE_LOGICAL_LAN_BUFFER 0x1D4 52 53 53 54 /* hcall macros */ 54 55 #define h_register_logical_lan(ua, buflst, rxq, fltlst, mac) \ ··· 70 69 #define h_change_logical_lan_mac(ua, mac) \ 71 70 plpar_hcall_norets(H_CHANGE_LOGICAL_LAN_MAC, ua, mac) 72 71 73 - #define IbmVethNumBufferPools 3 74 - #define IbmVethPool0DftSize (1024 * 2) 75 - #define IbmVethPool1DftSize (1024 * 4) 76 - #define IbmVethPool2DftSize (1024 * 10) 77 - #define IbmVethPool0DftCnt 256 78 - #define IbmVethPool1DftCnt 256 79 - #define IbmVethPool2DftCnt 256 72 + #define h_free_logical_lan_buffer(ua, bufsize) \ 73 + plpar_hcall_norets(H_FREE_LOGICAL_LAN_BUFFER, ua, bufsize) 74 + 75 + #define IbmVethNumBufferPools 5 76 + #define IBMVETH_BUFF_OH 22 /* Overhead: 14 ethernet header + 8 opaque handle */ 77 + 78 + /* pool_size should be sorted */ 79 + static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 }; 80 + static int pool_count[] = { 256, 768, 256, 256, 256 }; 80 81 81 82 #define IBM_VETH_INVALID_MAP ((u16)0xffff) 82 83 ··· 93 90 u16 *free_map; 94 91 dma_addr_t *dma_addr; 95 92 struct sk_buff **skbuff; 93 + int active; 96 94 }; 97 95 98 96 struct ibmveth_rx_q {