Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
drivers/net/ibmveth.c at v2.6.15-rc3 (1204 lines, 36 kB)
/**************************************************************************/
/*                                                                        */
/* IBM eServer i/pSeries Virtual Ethernet Device Driver                   */
/* Copyright (C) 2003 IBM Corp.                                           */
/* Originally written by Dave Larson (larson1@us.ibm.com)                 */
/* Maintained by Santiago Leon (santil@us.ibm.com)                        */
/*                                                                        */
/* This program is free software; you can redistribute it and/or modify   */
/* it under the terms of the GNU General Public License as published by   */
/* the Free Software Foundation; either version 2 of the License, or      */
/* (at your option) any later version.                                    */
/*                                                                        */
/* This program is distributed in the hope that it will be useful,        */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of         */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the          */
/* GNU General Public License for more details.                           */
/*                                                                        */
/* You should have received a copy of the GNU General Public License      */
/* along with this program; if not, write to the Free Software            */
/* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307    */
/* USA                                                                    */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device   */
/* for use with IBM i/pSeries LPAR Linux.  It utilizes the logical LAN    */
/* option of the RS/6000 Platform Architecture to interface with virtual  */
/* ethernet NICs that are presented to the partition by the hypervisor.   */
/*                                                                        */
/**************************************************************************/
/*
  TODO:
  - remove frag processing code - no longer needed
  - add support for sysfs
  - possibly remove procfs support
*/

#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <asm/semaphore.h>
#include <asm/hvcall.h>
#include <asm/atomic.h>
#include <asm/iommu.h>
#include <asm/vio.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>

#include "ibmveth.h"

#undef DEBUG

#define ibmveth_printk(fmt, args...) \
	printk(KERN_INFO "%s: " fmt, __FILE__, ## args)

#define ibmveth_error_printk(fmt, args...) \
	printk(KERN_ERR "(%s:%3.3d ua:%x) ERROR: " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args)

#ifdef DEBUG
#define ibmveth_debug_printk_no_adapter(fmt, args...) \
	printk(KERN_DEBUG "(%s:%3.3d): " fmt, __FILE__, __LINE__ , ## args)
#define ibmveth_debug_printk(fmt, args...) \
	printk(KERN_DEBUG "(%s:%3.3d ua:%x): " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args)
#define ibmveth_assert(expr) \
	if(!(expr)) { \
		printk(KERN_DEBUG "assertion failed (%s:%3.3d ua:%x): %s\n", __FILE__, __LINE__, adapter->vdev->unit_address, #expr); \
		BUG(); \
	}
#else
#define ibmveth_debug_printk_no_adapter(fmt, args...)
#define ibmveth_debug_printk(fmt, args...)
#define ibmveth_assert(expr)
#endif

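/* forward declarations for the net_device callbacks and procfs helpers
 * implemented below */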
static int ibmveth_open(struct net_device *dev);
static int ibmveth_close(struct net_device *dev);
static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static int ibmveth_poll(struct net_device *dev, int *budget);
static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *dev);
static struct net_device_stats *ibmveth_get_stats(struct net_device *dev);
static void ibmveth_set_multicast_list(struct net_device *dev);
static int ibmveth_change_mtu(struct net_device *dev, int new_mtu);
static void ibmveth_proc_register_driver(void);
static void ibmveth_proc_unregister_driver(void);
static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter);
static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);

#ifdef CONFIG_PROC_FS
#define IBMVETH_PROC_DIR "net/ibmveth"
static struct proc_dir_entry *ibmveth_proc_dir;
#endif

static const char ibmveth_driver_name[] = "ibmveth";
static const char ibmveth_driver_string[] = "IBM i/pSeries Virtual Ethernet Driver";
#define ibmveth_driver_version "1.03"

MODULE_AUTHOR("Santiago Leon <santil@us.ibm.com>");
MODULE_DESCRIPTION("IBM i/pSeries Virtual Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(ibmveth_driver_version);

/* simple methods of getting data from the current rxq entry */
static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
{
	return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].toggle == adapter->rx_queue.toggle);
}

static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
{
	return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].valid);
}

static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
{
	return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].offset);
}

static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
{
	return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
}

/* setup the initial settings for a buffer pool */
static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_index, u32 pool_size, u32 buff_size)
{
	pool->size = pool_size;
	pool->index = pool_index;
	pool->buff_size = buff_size;
	pool->threshold = pool_size / 2;
}

/* allocate and setup a buffer pool - called during open */
static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
{
	int i;

	pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);

	if(!pool->free_map) {
		return -1;
	}

	pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
	if(!pool->dma_addr) {
		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	pool->skbuff = kmalloc(sizeof(void*) * pool->size, GFP_KERNEL);

	if(!pool->skbuff) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;

		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	memset(pool->skbuff, 0, sizeof(void*) * pool->size);
	memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);

	for(i = 0; i < pool->size; ++i) {
		pool->free_map[i] = i;
	}

	atomic_set(&pool->available, 0);
	pool->producer_index = 0;
	pool->consumer_index = 0;
	pool->active = 0;

	return 0;
}

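/* Pool bookkeeping: free_map is a ring of free buffer indices.
 * consumer_index points at the next slot to take a free buffer from when
 * posting it to the hypervisor, producer_index at the next slot to return
 * a harvested buffer to, and 'available' counts buffers currently posted. */
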
/* replenish the buffers for a pool.  note that we don't need to
 * skb_reserve these since they are used for incoming...
 */
static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool)
{
	u32 i;
	u32 count = pool->size - atomic_read(&pool->available);
	u32 buffers_added = 0;

	mb();

	for(i = 0; i < count; ++i) {
		struct sk_buff *skb;
		unsigned int free_index, index;
		u64 correlator;
		union ibmveth_buf_desc desc;
		unsigned long lpar_rc;
		dma_addr_t dma_addr;

		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);

		if(!skb) {
			ibmveth_debug_printk("replenish: unable to allocate skb\n");
			adapter->replenish_no_mem++;
			break;
		}

		free_index = pool->consumer_index++ % pool->size;
		index = pool->free_map[free_index];

		ibmveth_assert(index != IBM_VETH_INVALID_MAP);
		ibmveth_assert(pool->skbuff[index] == NULL);

		dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
				pool->buff_size, DMA_FROM_DEVICE);

		pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
		pool->dma_addr[index] = dma_addr;
		pool->skbuff[index] = skb;

		correlator = ((u64)pool->index << 32) | index;
		*(u64*)skb->data = correlator;

		desc.desc = 0;
		desc.fields.valid = 1;
		desc.fields.length = pool->buff_size;
		desc.fields.address = dma_addr;

		lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

		if(lpar_rc != H_Success) {
			pool->free_map[free_index] = index;
			pool->skbuff[index] = NULL;
			pool->consumer_index--;
			dma_unmap_single(&adapter->vdev->dev,
					pool->dma_addr[index], pool->buff_size,
					DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			adapter->replenish_add_buff_failure++;
			break;
		} else {
			buffers_added++;
			adapter->replenish_add_buff_success++;
		}
	}

	mb();
	atomic_add(buffers_added, &(pool->available));
}

/* replenish routine */
static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;

	for(i = 0; i < IbmVethNumBufferPools; i++)
		if(adapter->rx_buff_pool[i].active)
			ibmveth_replenish_buffer_pool(adapter,
						      &adapter->rx_buff_pool[i]);

	adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
}

/* empty and free a buffer pool - also used to do cleanup in error paths */
static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool)
{
	int i;

	kfree(pool->free_map);
	pool->free_map = NULL;

	if(pool->skbuff && pool->dma_addr) {
		for(i = 0; i < pool->size; ++i) {
			struct sk_buff *skb = pool->skbuff[i];
			if(skb) {
				dma_unmap_single(&adapter->vdev->dev,
						 pool->dma_addr[i],
						 pool->buff_size,
						 DMA_FROM_DEVICE);
				dev_kfree_skb_any(skb);
				pool->skbuff[i] = NULL;
			}
		}
	}

	if(pool->dma_addr) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;
	}

	if(pool->skbuff) {
		kfree(pool->skbuff);
		pool->skbuff = NULL;
	}
	pool->active = 0;
}

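/* Each buffer posted to the hypervisor is tagged with a 64-bit correlator
 * (also written into the first 8 bytes of the buffer): the pool index in
 * the upper 32 bits, the buffer's index within that pool in the lower 32.
 * The receive-queue helpers below use it to find the owning skb. */
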
/* remove a buffer from a pool */
static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, u64 correlator)
{
	unsigned int pool  = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	unsigned int free_index;
	struct sk_buff *skb;

	ibmveth_assert(pool < IbmVethNumBufferPools);
	ibmveth_assert(index < adapter->rx_buff_pool[pool].size);

	skb = adapter->rx_buff_pool[pool].skbuff[index];

	ibmveth_assert(skb != NULL);

	adapter->rx_buff_pool[pool].skbuff[index] = NULL;

	dma_unmap_single(&adapter->vdev->dev,
			 adapter->rx_buff_pool[pool].dma_addr[index],
			 adapter->rx_buff_pool[pool].buff_size,
			 DMA_FROM_DEVICE);

	free_index = adapter->rx_buff_pool[pool].producer_index++ % adapter->rx_buff_pool[pool].size;
	adapter->rx_buff_pool[pool].free_map[free_index] = index;

	mb();

	atomic_dec(&(adapter->rx_buff_pool[pool].available));
}

/* get the current buffer on the rx queue */
static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
{
	u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;

	ibmveth_assert(pool < IbmVethNumBufferPools);
	ibmveth_assert(index < adapter->rx_buff_pool[pool].size);

	return adapter->rx_buff_pool[pool].skbuff[index];
}

/* recycle the current buffer on the rx queue */
static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
{
	u32 q_index = adapter->rx_queue.index;
	u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	union ibmveth_buf_desc desc;
	unsigned long lpar_rc;

	ibmveth_assert(pool < IbmVethNumBufferPools);
	ibmveth_assert(index < adapter->rx_buff_pool[pool].size);

	if(!adapter->rx_buff_pool[pool].active) {
		ibmveth_rxq_harvest_buffer(adapter);
		ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
		return;
	}

	desc.desc = 0;
	desc.fields.valid = 1;
	desc.fields.length = adapter->rx_buff_pool[pool].buff_size;
	desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];

	lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

	if(lpar_rc != H_Success) {
		ibmveth_debug_printk("h_add_logical_lan_buffer failed during recycle rc=%ld", lpar_rc);
		ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
	}

	if(++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}
}

static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
{
	ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);

	if(++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}
}

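/* undo everything ibmveth_open() set up: unmap and free the buffer list
 * page, the filter list page and the receive queue, then drain every
 * buffer pool */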
static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
{
	int i;

	if(adapter->buffer_list_addr != NULL) {
		if(!dma_mapping_error(adapter->buffer_list_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					adapter->buffer_list_dma, 4096,
					DMA_BIDIRECTIONAL);
			adapter->buffer_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->buffer_list_addr);
		adapter->buffer_list_addr = NULL;
	}

	if(adapter->filter_list_addr != NULL) {
		if(!dma_mapping_error(adapter->filter_list_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					adapter->filter_list_dma, 4096,
					DMA_BIDIRECTIONAL);
			adapter->filter_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->filter_list_addr);
		adapter->filter_list_addr = NULL;
	}

	if(adapter->rx_queue.queue_addr != NULL) {
		if(!dma_mapping_error(adapter->rx_queue.queue_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					adapter->rx_queue.queue_dma,
					adapter->rx_queue.queue_len,
					DMA_BIDIRECTIONAL);
			adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
		}
		kfree(adapter->rx_queue.queue_addr);
		adapter->rx_queue.queue_addr = NULL;
	}

	for(i = 0; i < IbmVethNumBufferPools; i++)
		ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[i]);
}

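/* open: allocate the buffer and filter list pages and the receive queue,
 * DMA-map them, register the logical LAN with the hypervisor, then hook
 * up the interrupt and trigger the initial replenish cycle */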
static int ibmveth_open(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev->priv;
	u64 mac_address = 0;
	int rxq_entries = 1;
	unsigned long lpar_rc;
	int rc;
	union ibmveth_buf_desc rxq_desc;
	int i;

	ibmveth_debug_printk("open starting\n");

	for(i = 0; i < IbmVethNumBufferPools; i++)
		rxq_entries += adapter->rx_buff_pool[i].size;

	adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
	adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);

	if(!adapter->buffer_list_addr || !adapter->filter_list_addr) {
		ibmveth_error_printk("unable to allocate filter or buffer list pages\n");
		ibmveth_cleanup(adapter);
		return -ENOMEM;
	}

	adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) * rxq_entries;
	adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len, GFP_KERNEL);

	if(!adapter->rx_queue.queue_addr) {
		ibmveth_error_printk("unable to allocate rx queue pages\n");
		ibmveth_cleanup(adapter);
		return -ENOMEM;
	}

	adapter->buffer_list_dma = dma_map_single(&adapter->vdev->dev,
			adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->filter_list_dma = dma_map_single(&adapter->vdev->dev,
			adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->rx_queue.queue_dma = dma_map_single(&adapter->vdev->dev,
			adapter->rx_queue.queue_addr,
			adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);

	if((dma_mapping_error(adapter->buffer_list_dma)) ||
	   (dma_mapping_error(adapter->filter_list_dma)) ||
	   (dma_mapping_error(adapter->rx_queue.queue_dma))) {
		ibmveth_error_printk("unable to map filter or buffer list pages\n");
		ibmveth_cleanup(adapter);
		return -ENOMEM;
	}

	adapter->rx_queue.index = 0;
	adapter->rx_queue.num_slots = rxq_entries;
	adapter->rx_queue.toggle = 1;

	/* call change_mtu to init the buffer pools based on initial mtu */
	ibmveth_change_mtu(netdev, netdev->mtu);

	memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
	mac_address = mac_address >> 16;

	rxq_desc.desc = 0;
	rxq_desc.fields.valid = 1;
	rxq_desc.fields.length = adapter->rx_queue.queue_len;
	rxq_desc.fields.address = adapter->rx_queue.queue_dma;

	ibmveth_debug_printk("buffer list @ 0x%p\n", adapter->buffer_list_addr);
	ibmveth_debug_printk("filter list @ 0x%p\n", adapter->filter_list_addr);
	ibmveth_debug_printk("receive q   @ 0x%p\n", adapter->rx_queue.queue_addr);


	lpar_rc = h_register_logical_lan(adapter->vdev->unit_address,
					 adapter->buffer_list_dma,
					 rxq_desc.desc,
					 adapter->filter_list_dma,
					 mac_address);

	if(lpar_rc != H_Success) {
		ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc);
		ibmveth_error_printk("buffer TCE:0x%lx filter TCE:0x%lx rxq desc:0x%lx MAC:0x%lx\n",
				     adapter->buffer_list_dma,
				     adapter->filter_list_dma,
				     rxq_desc.desc,
				     mac_address);
		ibmveth_cleanup(adapter);
		return -ENONET;
	}

	ibmveth_debug_printk("registering irq 0x%x\n", netdev->irq);
	if((rc = request_irq(netdev->irq, &ibmveth_interrupt, 0, netdev->name, netdev)) != 0) {
		ibmveth_error_printk("unable to request irq 0x%x, rc %d\n", netdev->irq, rc);
		do {
			rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_isLongBusy(rc) || (rc == H_Busy));

		ibmveth_cleanup(adapter);
		return rc;
	}

	ibmveth_debug_printk("initial replenish cycle\n");
	ibmveth_interrupt(netdev->irq, netdev, NULL);

	netif_start_queue(netdev);

	ibmveth_debug_printk("open complete\n");

	return 0;
}

static int ibmveth_close(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev->priv;
	long lpar_rc;

	ibmveth_debug_printk("close starting\n");

	netif_stop_queue(netdev);

	free_irq(netdev->irq, netdev);

	do {
		lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
	} while (H_isLongBusy(lpar_rc) || (lpar_rc == H_Busy));

	if(lpar_rc != H_Success)
	{
		ibmveth_error_printk("h_free_logical_lan failed with %lx, continuing with close\n",
				     lpar_rc);
	}

	adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);

	ibmveth_cleanup(adapter);

	ibmveth_debug_printk("close complete\n");

	return 0;
}

static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) {
	cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE);
	cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | ADVERTISED_FIBRE);
	cmd->speed = SPEED_1000;
	cmd->duplex = DUPLEX_FULL;
	cmd->port = PORT_FIBRE;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_ENABLE;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 1;
	return 0;
}

static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info) {
	strncpy(info->driver, ibmveth_driver_name, sizeof(info->driver) - 1);
	strncpy(info->version, ibmveth_driver_version, sizeof(info->version) - 1);
}

static u32 netdev_get_link(struct net_device *dev) {
	return 1;
}

static struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo	= netdev_get_drvinfo,
	.get_settings	= netdev_get_settings,
	.get_link	= netdev_get_link,
	.get_sg		= ethtool_op_get_sg,
	.get_tx_csum	= ethtool_op_get_tx_csum,
};

static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	return -EOPNOTSUPP;
}

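/* Transmit path: the skb's linear data and any page fragments are each
 * described by an ibmveth_buf_desc; all six descriptors (unused ones stay
 * zeroed) are handed to the hypervisor in one h_send_logical_lan call. */
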
#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))

static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev->priv;
	union ibmveth_buf_desc desc[IbmVethMaxSendFrags];
	unsigned long lpar_rc;
	int nfrags = 0, curfrag;
	unsigned long correlator;
	unsigned long flags;
	unsigned int retry_count;
	unsigned int tx_dropped = 0;
	unsigned int tx_bytes = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_send_failed = 0;
	unsigned int tx_map_failed = 0;


	if ((skb_shinfo(skb)->nr_frags + 1) > IbmVethMaxSendFrags) {
		tx_dropped++;
		goto out;
	}

	memset(&desc, 0, sizeof(desc));

	/* nfrags = number of frags after the initial fragment */
	nfrags = skb_shinfo(skb)->nr_frags;

	if(nfrags)
		adapter->tx_multidesc_send++;

	/* map the initial fragment */
	desc[0].fields.length = nfrags ? skb->len - skb->data_len : skb->len;
	desc[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
					desc[0].fields.length, DMA_TO_DEVICE);
	desc[0].fields.valid = 1;

	if(dma_mapping_error(desc[0].fields.address)) {
		ibmveth_error_printk("tx: unable to map initial fragment\n");
		tx_map_failed++;
		tx_dropped++;
		goto out;
	}

	curfrag = nfrags;

	/* map fragments past the initial portion if there are any */
	while(curfrag--) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[curfrag];
		desc[curfrag+1].fields.address
			= dma_map_single(&adapter->vdev->dev,
				page_address(frag->page) + frag->page_offset,
				frag->size, DMA_TO_DEVICE);
		desc[curfrag+1].fields.length = frag->size;
		desc[curfrag+1].fields.valid  = 1;

		if(dma_mapping_error(desc[curfrag+1].fields.address)) {
			ibmveth_error_printk("tx: unable to map fragment %d\n", curfrag);
			tx_map_failed++;
			tx_dropped++;
			/* Free all the mappings we just created */
			while(curfrag < nfrags) {
				dma_unmap_single(&adapter->vdev->dev,
						 desc[curfrag+1].fields.address,
						 desc[curfrag+1].fields.length,
						 DMA_TO_DEVICE);
				curfrag++;
			}
			goto out;
		}
	}

	/* send the frame. Arbitrarily set retrycount to 1024 */
	correlator = 0;
	retry_count = 1024;
	do {
		lpar_rc = h_send_logical_lan(adapter->vdev->unit_address,
					     desc[0].desc,
					     desc[1].desc,
					     desc[2].desc,
					     desc[3].desc,
					     desc[4].desc,
					     desc[5].desc,
					     correlator);
	} while ((lpar_rc == H_Busy) && (retry_count--));

	if(lpar_rc != H_Success && lpar_rc != H_Dropped) {
		int i;
		ibmveth_error_printk("tx: h_send_logical_lan failed with rc=%ld\n", lpar_rc);
		for(i = 0; i < 6; i++) {
			ibmveth_error_printk("tx: desc[%i] valid=%d, len=%d, address=0x%x\n", i,
					     desc[i].fields.valid, desc[i].fields.length, desc[i].fields.address);
		}
		tx_send_failed++;
		tx_dropped++;
	} else {
		tx_packets++;
		tx_bytes += skb->len;
		netdev->trans_start = jiffies;
	}

	do {
		dma_unmap_single(&adapter->vdev->dev,
				 desc[nfrags].fields.address,
				 desc[nfrags].fields.length, DMA_TO_DEVICE);
	} while(--nfrags >= 0);

out:	spin_lock_irqsave(&adapter->stats_lock, flags);
	adapter->stats.tx_dropped += tx_dropped;
	adapter->stats.tx_bytes += tx_bytes;
	adapter->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;
	spin_unlock_irqrestore(&adapter->stats_lock, flags);

	dev_kfree_skb(skb);
	return 0;
}

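/* NAPI poll: harvest received frames from the shared receive queue
 * (recycling any marked invalid), replenish the pools, and only re-enable
 * the VIO interrupt once the queue is empty */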
static int ibmveth_poll(struct net_device *netdev, int *budget)
{
	struct ibmveth_adapter *adapter = netdev->priv;
	int max_frames_to_process = netdev->quota;
	int frames_processed = 0;
	int more_work = 1;
	unsigned long lpar_rc;

 restart_poll:
	do {
		struct net_device *netdev = adapter->netdev;

		if(ibmveth_rxq_pending_buffer(adapter)) {
			struct sk_buff *skb;

			rmb();

			if(!ibmveth_rxq_buffer_valid(adapter)) {
				wmb(); /* suggested by larson1 */
				adapter->rx_invalid_buffer++;
				ibmveth_debug_printk("recycling invalid buffer\n");
				ibmveth_rxq_recycle_buffer(adapter);
			} else {
				int length = ibmveth_rxq_frame_length(adapter);
				int offset = ibmveth_rxq_frame_offset(adapter);
				skb = ibmveth_rxq_get_buffer(adapter);

				ibmveth_rxq_harvest_buffer(adapter);

				skb_reserve(skb, offset);
				skb_put(skb, length);
				skb->dev = netdev;
				skb->protocol = eth_type_trans(skb, netdev);

				netif_receive_skb(skb);	/* send it up */

				adapter->stats.rx_packets++;
				adapter->stats.rx_bytes += length;
				frames_processed++;
				netdev->last_rx = jiffies;
			}
		} else {
			more_work = 0;
		}
	} while(more_work && (frames_processed < max_frames_to_process));

	ibmveth_replenish_task(adapter);

	if(more_work) {
		/* more work to do - return that we are not done yet */
		netdev->quota -= frames_processed;
		*budget -= frames_processed;
		return 1;
	}

	/* we think we are done - reenable interrupts, then check once more to make sure we are done */
	lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_ENABLE);

	ibmveth_assert(lpar_rc == H_Success);

	netif_rx_complete(netdev);

	if(ibmveth_rxq_pending_buffer(adapter) && netif_rx_reschedule(netdev, frames_processed))
	{
		lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
		ibmveth_assert(lpar_rc == H_Success);
		more_work = 1;
		goto restart_poll;
	}

	netdev->quota -= frames_processed;
	*budget -= frames_processed;

	/* we really are done */
	return 0;
}

static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *netdev = dev_instance;
	struct ibmveth_adapter *adapter = netdev->priv;
	unsigned long lpar_rc;

	if(netif_rx_schedule_prep(netdev)) {
		lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
		ibmveth_assert(lpar_rc == H_Success);
		__netif_rx_schedule(netdev);
	}
	return IRQ_HANDLED;
}

static struct net_device_stats *ibmveth_get_stats(struct net_device *dev)
{
	struct ibmveth_adapter *adapter = dev->priv;
	return &adapter->stats;
}

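/* multicast is filtered by the hypervisor: promiscuous mode (or more
 * addresses than the firmware filter can hold) turns filtering off;
 * otherwise the filter table is cleared and rebuilt entry by entry */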
static void ibmveth_set_multicast_list(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev->priv;
	unsigned long lpar_rc;

	if((netdev->flags & IFF_PROMISC) || (netdev->mc_count > adapter->mcastFilterSize)) {
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering,
					   0);
		if(lpar_rc != H_Success) {
			ibmveth_error_printk("h_multicast_ctrl rc=%ld when entering promisc mode\n", lpar_rc);
		}
	} else {
		struct dev_mc_list *mclist = netdev->mc_list;
		int i;
		/* clear the filter table & disable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering |
					   IbmVethMcastClearFilterTable,
					   0);
		if(lpar_rc != H_Success) {
			ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc);
		}
		/* add the addresses to the filter table */
		for(i = 0; i < netdev->mc_count; ++i, mclist = mclist->next) {
			// add the multicast address to the filter table
			unsigned long mcast_addr = 0;
			memcpy(((char *)&mcast_addr)+2, mclist->dmi_addr, 6);
			lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
						   IbmVethMcastAddFilter,
						   mcast_addr);
			if(lpar_rc != H_Success) {
				ibmveth_error_printk("h_multicast_ctrl rc=%ld when adding an entry to the filter table\n", lpar_rc);
			}
		}

		/* re-enable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableFiltering,
					   0);
		if(lpar_rc != H_Success) {
			ibmveth_error_printk("h_multicast_ctrl rc=%ld when enabling filtering\n", lpar_rc);
		}
	}
}

static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ibmveth_adapter *adapter = dev->priv;
	int i;
	int prev_smaller = 1;

	if ((new_mtu < 68) ||
	    (new_mtu > (pool_size[IbmVethNumBufferPools-1]) - IBMVETH_BUFF_OH))
		return -EINVAL;

	for(i = 0; i < IbmVethNumBufferPools; i++) {
		int activate = 0;
		if (new_mtu > (pool_size[i] - IBMVETH_BUFF_OH)) {
			activate = 1;
			prev_smaller = 1;
		} else {
			if (prev_smaller)
				activate = 1;
			prev_smaller = 0;
		}

		if (activate && !adapter->rx_buff_pool[i].active) {
			struct ibmveth_buff_pool *pool =
				&adapter->rx_buff_pool[i];
			if(ibmveth_alloc_buffer_pool(pool)) {
				ibmveth_error_printk("unable to alloc pool\n");
				return -ENOMEM;
			}
			adapter->rx_buff_pool[i].active = 1;
		} else if (!activate && adapter->rx_buff_pool[i].active) {
			adapter->rx_buff_pool[i].active = 0;
			h_free_logical_lan_buffer(adapter->vdev->unit_address,
						  (u64)pool_size[i]);
		}

	}

	/* kick the interrupt handler so that the new buffer pools get
	   replenished or deallocated */
	ibmveth_interrupt(dev->irq, dev, NULL);

	dev->mtu = new_mtu;
	return 0;
}

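/* probe: read the MAC address and multicast filter size from the device
 * tree, wire up the net_device callbacks and buffer pools, and register
 * the interface */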
static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	int rc, i;
	struct net_device *netdev;
	struct ibmveth_adapter *adapter = NULL;

	unsigned char *mac_addr_p;
	unsigned int *mcastFilterSize_p;


	ibmveth_debug_printk_no_adapter("entering ibmveth_probe for UA 0x%x\n",
					dev->unit_address);

	mac_addr_p = (unsigned char *) vio_get_attribute(dev, VETH_MAC_ADDR, 0);
	if(!mac_addr_p) {
		printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find VETH_MAC_ADDR "
				"attribute\n", __FILE__, __LINE__);
		return 0;
	}

	mcastFilterSize_p = (unsigned int *) vio_get_attribute(dev, VETH_MCAST_FILTER_SIZE, 0);
	if(!mcastFilterSize_p) {
		printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find "
				"VETH_MCAST_FILTER_SIZE attribute\n",
				__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));

	if(!netdev)
		return -ENOMEM;

	SET_MODULE_OWNER(netdev);

	adapter = netdev->priv;
	memset(adapter, 0, sizeof(*adapter));
	dev->dev.driver_data = netdev;

	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->mcastFilterSize = *mcastFilterSize_p;

	/* Some older boxes running PHYP non-natively have an OF that
	   returns an 8-byte local-mac-address field (and the first
	   2 bytes have to be ignored) while newer boxes' OF return
	   a 6-byte field. Note that IEEE 1275 specifies that
	   local-mac-address must be a 6-byte field.
	   The RPA doc specifies that the first byte must be 10b, so
	   we'll just look for it to solve this 8 vs. 6 byte field issue */

	if ((*mac_addr_p & 0x3) != 0x02)
		mac_addr_p += 2;

	adapter->mac_addr = 0;
	memcpy(&adapter->mac_addr, mac_addr_p, 6);

	adapter->liobn = dev->iommu_table->it_index;

	netdev->irq = dev->irq;
	netdev->open = ibmveth_open;
	netdev->poll = ibmveth_poll;
	netdev->weight = 16;
	netdev->stop = ibmveth_close;
	netdev->hard_start_xmit = ibmveth_start_xmit;
	netdev->get_stats = ibmveth_get_stats;
	netdev->set_multicast_list = ibmveth_set_multicast_list;
	netdev->do_ioctl = ibmveth_ioctl;
	netdev->ethtool_ops = &netdev_ethtool_ops;
	netdev->change_mtu = ibmveth_change_mtu;
	SET_NETDEV_DEV(netdev, &dev->dev);
	netdev->features |= NETIF_F_LLTX;
	spin_lock_init(&adapter->stats_lock);

	memcpy(&netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);

	for(i = 0; i < IbmVethNumBufferPools; i++)
		ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
					 pool_count[i], pool_size[i]);

	ibmveth_debug_printk("adapter @ 0x%p\n", adapter);

	adapter->buffer_list_dma = DMA_ERROR_CODE;
	adapter->filter_list_dma = DMA_ERROR_CODE;
	adapter->rx_queue.queue_dma = DMA_ERROR_CODE;

	ibmveth_debug_printk("registering netdev...\n");

	rc = register_netdev(netdev);

	if(rc) {
		ibmveth_debug_printk("failed to register netdev rc=%d\n", rc);
		free_netdev(netdev);
		return rc;
	}

	ibmveth_debug_printk("registered\n");

	ibmveth_proc_register_adapter(adapter);

	return 0;
}

static int __devexit ibmveth_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev->dev.driver_data;
	struct ibmveth_adapter *adapter = netdev->priv;

	unregister_netdev(netdev);

	ibmveth_proc_unregister_adapter(adapter);

	free_netdev(netdev);
	return 0;
}

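/* /proc/net/ibmveth: one read-only seq_file entry per adapter showing the
 * unit address, LIOBN, current and firmware MAC addresses, and the TX/RX
 * statistics counters */
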
#ifdef CONFIG_PROC_FS
static void ibmveth_proc_register_driver(void)
{
	ibmveth_proc_dir = proc_mkdir(IBMVETH_PROC_DIR, NULL);
	if (ibmveth_proc_dir) {
		SET_MODULE_OWNER(ibmveth_proc_dir);
	}
}

static void ibmveth_proc_unregister_driver(void)
{
	remove_proc_entry(IBMVETH_PROC_DIR, NULL);
}

static void *ibmveth_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0) {
		return (void *)1;
	} else {
		return NULL;
	}
}

static void *ibmveth_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void ibmveth_seq_stop(struct seq_file *seq, void *v)
{
}

static int ibmveth_seq_show(struct seq_file *seq, void *v)
{
	struct ibmveth_adapter *adapter = seq->private;
	char *current_mac = ((char*) &adapter->netdev->dev_addr);
	char *firmware_mac = ((char*) &adapter->mac_addr);

	seq_printf(seq, "%s %s\n\n", ibmveth_driver_string, ibmveth_driver_version);

	seq_printf(seq, "Unit Address:    0x%x\n", adapter->vdev->unit_address);
	seq_printf(seq, "LIOBN:           0x%lx\n", adapter->liobn);
	seq_printf(seq, "Current MAC:     %02X:%02X:%02X:%02X:%02X:%02X\n",
		   current_mac[0], current_mac[1], current_mac[2],
		   current_mac[3], current_mac[4], current_mac[5]);
	seq_printf(seq, "Firmware MAC:    %02X:%02X:%02X:%02X:%02X:%02X\n",
		   firmware_mac[0], firmware_mac[1], firmware_mac[2],
		   firmware_mac[3], firmware_mac[4], firmware_mac[5]);

	seq_printf(seq, "\nAdapter Statistics:\n");
	seq_printf(seq, "  TX:  skbuffs linearized:     %ld\n", adapter->tx_linearized);
	seq_printf(seq, "       multi-descriptor sends: %ld\n", adapter->tx_multidesc_send);
	seq_printf(seq, "       skb_linearize failures: %ld\n", adapter->tx_linearize_failed);
	seq_printf(seq, "       vio_map_single failures: %ld\n", adapter->tx_map_failed);
	seq_printf(seq, "       send failures:          %ld\n", adapter->tx_send_failed);
	seq_printf(seq, "  RX:  replenish task cycles:  %ld\n", adapter->replenish_task_cycles);
	seq_printf(seq, "       alloc_skb_failures:     %ld\n", adapter->replenish_no_mem);
	seq_printf(seq, "       add buffer failures:    %ld\n", adapter->replenish_add_buff_failure);
	seq_printf(seq, "       invalid buffers:        %ld\n", adapter->rx_invalid_buffer);
	seq_printf(seq, "       no buffers:             %ld\n", adapter->rx_no_buffer);

	return 0;
}

static struct seq_operations ibmveth_seq_ops = {
	.start = ibmveth_seq_start,
	.next  = ibmveth_seq_next,
	.stop  = ibmveth_seq_stop,
	.show  = ibmveth_seq_show,
};

static int ibmveth_proc_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct proc_dir_entry *proc;
	int rc;

	rc = seq_open(file, &ibmveth_seq_ops);
	if (!rc) {
		/* recover the pointer buried in proc_dir_entry data */
		seq = file->private_data;
		proc = PDE(inode);
		seq->private = proc->data;
	}
	return rc;
}

static struct file_operations ibmveth_proc_fops = {
	.owner	 = THIS_MODULE,
	.open    = ibmveth_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
{
	struct proc_dir_entry *entry;
	if (ibmveth_proc_dir) {
		entry = create_proc_entry(adapter->netdev->name, S_IFREG, ibmveth_proc_dir);
		if (!entry) {
			ibmveth_error_printk("Cannot create adapter proc entry");
		} else {
			entry->data = (void *) adapter;
			entry->proc_fops = &ibmveth_proc_fops;
			SET_MODULE_OWNER(entry);
		}
	}
	return;
}

static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
{
	if (ibmveth_proc_dir) {
		remove_proc_entry(adapter->netdev->name, ibmveth_proc_dir);
	}
}

#else /* CONFIG_PROC_FS */
static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
{
}

static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
{
}

static void ibmveth_proc_register_driver(void)
{
}

static void ibmveth_proc_unregister_driver(void)
{
}
#endif /* CONFIG_PROC_FS */

static struct vio_device_id ibmveth_device_table[] __devinitdata = {
	{ "network", "IBM,l-lan"},
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmveth_device_table);

static struct vio_driver ibmveth_driver = {
	.id_table	= ibmveth_device_table,
	.probe		= ibmveth_probe,
	.remove		= ibmveth_remove,
	.driver		= {
		.name	= ibmveth_driver_name,
		.owner	= THIS_MODULE,
	}
};

static int __init ibmveth_module_init(void)
{
	ibmveth_printk("%s: %s %s\n", ibmveth_driver_name, ibmveth_driver_string, ibmveth_driver_version);

	ibmveth_proc_register_driver();

	return vio_register_driver(&ibmveth_driver);
}

static void __exit ibmveth_module_exit(void)
{
	vio_unregister_driver(&ibmveth_driver);
	ibmveth_proc_unregister_driver();
}

module_init(ibmveth_module_init);
module_exit(ibmveth_module_exit);