/*
 * Linux kernel mirror: git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
 * at 77b2555b52a894a2e39a42e43d993df875c46a6a (1175 lines, 36 kB)
 */
/**************************************************************************/
/*                                                                        */
/* IBM eServer i/pSeries Virtual Ethernet Device Driver                   */
/* Copyright (C) 2003 IBM Corp.                                           */
/* Originally written by Dave Larson (larson1@us.ibm.com)                 */
/* Maintained by Santiago Leon (santil@us.ibm.com)                        */
/*                                                                        */
/* This program is free software; you can redistribute it and/or modify   */
/* it under the terms of the GNU General Public License as published by   */
/* the Free Software Foundation; either version 2 of the License, or      */
/* (at your option) any later version.                                    */
/*                                                                        */
/* This program is distributed in the hope that it will be useful,        */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of         */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the           */
/* GNU General Public License for more details.                           */
/*                                                                        */
/* You should have received a copy of the GNU General Public License      */
/* along with this program; if not, write to the Free Software            */
/* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307    */
/* USA                                                                    */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device   */
/* for use with IBM i/pSeries LPAR Linux. It utilizes the logical LAN     */
/* option of the RS/6000 Platform Architecture to interface with virtual  */
/* ethernet NICs that are presented to the partition by the hypervisor.   */
/*                                                                        */
/**************************************************************************/
/*
  TODO:
  - remove frag processing code - no longer needed
  - add support for sysfs
  - possibly remove procfs support
*/

#include <linux/config.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <asm/semaphore.h>
#include <asm/hvcall.h>
#include <asm/atomic.h>
#include <asm/iommu.h>
#include <asm/vio.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>

#include "ibmveth.h"

#define DEBUG 1

#define ibmveth_printk(fmt, args...) \
	printk(KERN_INFO "%s: " fmt, __FILE__, ## args)

#define ibmveth_error_printk(fmt, args...) \
	printk(KERN_ERR "(%s:%3.3d ua:%x) ERROR: " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args)

#ifdef DEBUG
#define ibmveth_debug_printk_no_adapter(fmt, args...) \
	printk(KERN_DEBUG "(%s:%3.3d): " fmt, __FILE__, __LINE__ , ## args)
#define ibmveth_debug_printk(fmt, args...) \
	printk(KERN_DEBUG "(%s:%3.3d ua:%x): " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args)
#define ibmveth_assert(expr) \
	if(!(expr)) { \
		printk(KERN_DEBUG "assertion failed (%s:%3.3d ua:%x): %s\n", __FILE__, __LINE__, adapter->vdev->unit_address, #expr); \
		BUG(); \
	}
#else
#define ibmveth_debug_printk_no_adapter(fmt, args...)
#define ibmveth_debug_printk(fmt, args...)
#define ibmveth_assert(expr)
#endif

static int ibmveth_open(struct net_device *dev);
static int ibmveth_close(struct net_device *dev);
static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static int ibmveth_poll(struct net_device *dev, int *budget);
static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *dev);
static struct net_device_stats *ibmveth_get_stats(struct net_device *dev);
static void ibmveth_set_multicast_list(struct net_device *dev);
static int ibmveth_change_mtu(struct net_device *dev, int new_mtu);
static void ibmveth_proc_register_driver(void);
static void ibmveth_proc_unregister_driver(void);
static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter);
static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
static inline void ibmveth_schedule_replenishing(struct ibmveth_adapter*);

#ifdef CONFIG_PROC_FS
#define IBMVETH_PROC_DIR "ibmveth"
static struct proc_dir_entry *ibmveth_proc_dir;
#endif

static const char ibmveth_driver_name[] = "ibmveth";
static const char ibmveth_driver_string[] = "IBM i/pSeries Virtual Ethernet Driver";
#define ibmveth_driver_version "1.03"

MODULE_AUTHOR("Santiago Leon <santil@us.ibm.com>");
MODULE_DESCRIPTION("IBM i/pSeries Virtual Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(ibmveth_driver_version);

/* simple methods of getting data from the current rxq entry */
static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
{
	return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].toggle == adapter->rx_queue.toggle);
}

static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
{
	return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].valid);
}

static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
{
	return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].offset);
}

static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
{
	return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
}

/* setup the initial settings for a buffer pool */
static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_index, u32 pool_size, u32 buff_size)
{
	pool->size = pool_size;
	pool->index = pool_index;
	pool->buff_size = buff_size;
	pool->threshold = pool_size / 2;
}

/* allocate and setup a buffer pool - called during open */
static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
{
	int i;

	pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);

	if(!pool->free_map) {
		return -1;
	}

	pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
	if(!pool->dma_addr) {
		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	pool->skbuff = kmalloc(sizeof(void*) * pool->size, GFP_KERNEL);

	if(!pool->skbuff) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;

		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	memset(pool->skbuff, 0, sizeof(void*) * pool->size);
	memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);

	for(i = 0; i < pool->size; ++i) {
		pool->free_map[i] = i;
	}

	atomic_set(&pool->available, 0);
	pool->producer_index = 0;
	pool->consumer_index = 0;

	return 0;
}

/* replenish the buffers for a pool. note that we don't need to
 * skb_reserve these since they are used for incoming...
 */
static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool)
{
	u32 i;
	u32 count = pool->size - atomic_read(&pool->available);
	u32 buffers_added = 0;

	mb();

	for(i = 0; i < count; ++i) {
		struct sk_buff *skb;
		unsigned int free_index, index;
		u64 correlator;
		union ibmveth_buf_desc desc;
		unsigned long lpar_rc;
		dma_addr_t dma_addr;

		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);

		if(!skb) {
			ibmveth_debug_printk("replenish: unable to allocate skb\n");
			adapter->replenish_no_mem++;
			break;
		}

		free_index = pool->consumer_index++ % pool->size;
		index = pool->free_map[free_index];

		ibmveth_assert(index != IBM_VETH_INVALID_MAP);
		ibmveth_assert(pool->skbuff[index] == NULL);

		dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
				pool->buff_size, DMA_FROM_DEVICE);

		pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
		pool->dma_addr[index] = dma_addr;
		pool->skbuff[index] = skb;

		correlator = ((u64)pool->index << 32) | index;
		*(u64*)skb->data = correlator;

		desc.desc = 0;
		desc.fields.valid = 1;
		desc.fields.length = pool->buff_size;
		desc.fields.address = dma_addr;

		lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

		if(lpar_rc != H_Success) {
			pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
			pool->skbuff[index] = NULL;
			pool->consumer_index--;
			dma_unmap_single(&adapter->vdev->dev,
					pool->dma_addr[index], pool->buff_size,
					DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			adapter->replenish_add_buff_failure++;
			break;
		} else {
			buffers_added++;
			adapter->replenish_add_buff_success++;
		}
	}

	mb();
	atomic_add(buffers_added, &(pool->available));
}

/* check if replenishing is needed. */
static inline int ibmveth_is_replenishing_needed(struct ibmveth_adapter *adapter)
{
	return ((atomic_read(&adapter->rx_buff_pool[0].available) < adapter->rx_buff_pool[0].threshold) ||
		(atomic_read(&adapter->rx_buff_pool[1].available) < adapter->rx_buff_pool[1].threshold) ||
		(atomic_read(&adapter->rx_buff_pool[2].available) < adapter->rx_buff_pool[2].threshold));
}

/* kick the replenish tasklet if we need replenishing and it isn't already running */
static inline void ibmveth_schedule_replenishing(struct ibmveth_adapter *adapter)
{
	if(ibmveth_is_replenishing_needed(adapter) &&
	   (atomic_dec_if_positive(&adapter->not_replenishing) == 0)) {
		schedule_work(&adapter->replenish_task);
	}
}

/* replenish tasklet routine */
static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
{
	adapter->replenish_task_cycles++;

	ibmveth_replenish_buffer_pool(adapter, &adapter->rx_buff_pool[0]);
	ibmveth_replenish_buffer_pool(adapter, &adapter->rx_buff_pool[1]);
	ibmveth_replenish_buffer_pool(adapter, &adapter->rx_buff_pool[2]);

	adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);

	atomic_inc(&adapter->not_replenishing);

	ibmveth_schedule_replenishing(adapter);
}

/* empty and free a buffer pool - also used to do cleanup in error paths */
static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool)
{
	int i;

	if(pool->free_map) {
		kfree(pool->free_map);
		pool->free_map = NULL;
	}

	if(pool->skbuff && pool->dma_addr) {
		for(i = 0; i < pool->size; ++i) {
			struct sk_buff *skb = pool->skbuff[i];
			if(skb) {
				dma_unmap_single(&adapter->vdev->dev,
						pool->dma_addr[i],
						pool->buff_size,
						DMA_FROM_DEVICE);
				dev_kfree_skb_any(skb);
				pool->skbuff[i] = NULL;
			}
		}
	}

	if(pool->dma_addr) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;
	}

	if(pool->skbuff) {
		kfree(pool->skbuff);
		pool->skbuff = NULL;
	}
}

/* remove a buffer from a pool */
static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, u64 correlator)
{
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	unsigned int free_index;
	struct sk_buff *skb;

	ibmveth_assert(pool < IbmVethNumBufferPools);
	ibmveth_assert(index < adapter->rx_buff_pool[pool].size);

	skb = adapter->rx_buff_pool[pool].skbuff[index];

	ibmveth_assert(skb != NULL);

	adapter->rx_buff_pool[pool].skbuff[index] = NULL;

	dma_unmap_single(&adapter->vdev->dev,
			adapter->rx_buff_pool[pool].dma_addr[index],
			adapter->rx_buff_pool[pool].buff_size,
			DMA_FROM_DEVICE);

	free_index = adapter->rx_buff_pool[pool].producer_index++ % adapter->rx_buff_pool[pool].size;
	adapter->rx_buff_pool[pool].free_map[free_index] = index;

	mb();

	atomic_dec(&(adapter->rx_buff_pool[pool].available));
}

/* get the current buffer on the rx queue */
static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
{
	u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;

	ibmveth_assert(pool < IbmVethNumBufferPools);
	ibmveth_assert(index < adapter->rx_buff_pool[pool].size);

	return adapter->rx_buff_pool[pool].skbuff[index];
}

/* recycle the current buffer on the rx queue */
static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
{
	u32 q_index = adapter->rx_queue.index;
	u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	union ibmveth_buf_desc desc;
	unsigned long lpar_rc;

	ibmveth_assert(pool < IbmVethNumBufferPools);
	ibmveth_assert(index < adapter->rx_buff_pool[pool].size);

	desc.desc = 0;
	desc.fields.valid = 1;
	desc.fields.length = adapter->rx_buff_pool[pool].buff_size;
	desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];

	lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

	if(lpar_rc != H_Success) {
		ibmveth_debug_printk("h_add_logical_lan_buffer failed during recycle rc=%ld", lpar_rc);
		ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
	}

	if(++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}
}

static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
{
	ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);

	if(++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}
}

/* unmap and free the buffer list, filter list, rx queue and buffer pools */
static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
{
	if(adapter->buffer_list_addr != NULL) {
		if(!dma_mapping_error(adapter->buffer_list_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					adapter->buffer_list_dma, 4096,
					DMA_BIDIRECTIONAL);
			adapter->buffer_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->buffer_list_addr);
		adapter->buffer_list_addr = NULL;
	}

	if(adapter->filter_list_addr != NULL) {
		if(!dma_mapping_error(adapter->filter_list_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					adapter->filter_list_dma, 4096,
					DMA_BIDIRECTIONAL);
			adapter->filter_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->filter_list_addr);
		adapter->filter_list_addr = NULL;
	}

	if(adapter->rx_queue.queue_addr != NULL) {
		if(!dma_mapping_error(adapter->rx_queue.queue_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					adapter->rx_queue.queue_dma,
					adapter->rx_queue.queue_len,
					DMA_BIDIRECTIONAL);
			adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
		}
		kfree(adapter->rx_queue.queue_addr);
		adapter->rx_queue.queue_addr = NULL;
	}

	ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[0]);
	ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[1]);
	ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[2]);
}

/* open: allocate the buffer list, filter list and rx queue, then register
 * the logical LAN with the hypervisor */
static int ibmveth_open(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev->priv;
	u64 mac_address = 0;
	int rxq_entries;
	unsigned long lpar_rc;
	int rc;
	union ibmveth_buf_desc rxq_desc;

	ibmveth_debug_printk("open starting\n");

	rxq_entries =
		adapter->rx_buff_pool[0].size +
		adapter->rx_buff_pool[1].size +
		adapter->rx_buff_pool[2].size + 1;

	adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
	adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);

	if(!adapter->buffer_list_addr || !adapter->filter_list_addr) {
		ibmveth_error_printk("unable to allocate filter or buffer list pages\n");
		ibmveth_cleanup(adapter);
		return -ENOMEM;
	}

	adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) * rxq_entries;
	adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len, GFP_KERNEL);

	if(!adapter->rx_queue.queue_addr) {
		ibmveth_error_printk("unable to allocate rx queue pages\n");
		ibmveth_cleanup(adapter);
		return -ENOMEM;
	}

	adapter->buffer_list_dma = dma_map_single(&adapter->vdev->dev,
			adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->filter_list_dma = dma_map_single(&adapter->vdev->dev,
			adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->rx_queue.queue_dma = dma_map_single(&adapter->vdev->dev,
			adapter->rx_queue.queue_addr,
			adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);

	if((dma_mapping_error(adapter->buffer_list_dma)) ||
	   (dma_mapping_error(adapter->filter_list_dma)) ||
	   (dma_mapping_error(adapter->rx_queue.queue_dma))) {
		ibmveth_error_printk("unable to map filter or buffer list pages\n");
		ibmveth_cleanup(adapter);
		return -ENOMEM;
	}

	adapter->rx_queue.index = 0;
	adapter->rx_queue.num_slots = rxq_entries;
	adapter->rx_queue.toggle = 1;

	if(ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[0]) ||
	   ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[1]) ||
	   ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[2]))
	{
		ibmveth_error_printk("unable to allocate buffer pools\n");
		ibmveth_cleanup(adapter);
		return -ENOMEM;
	}

	memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
	mac_address = mac_address >> 16;

	rxq_desc.desc = 0;
	rxq_desc.fields.valid = 1;
	rxq_desc.fields.length = adapter->rx_queue.queue_len;
	rxq_desc.fields.address = adapter->rx_queue.queue_dma;

	ibmveth_debug_printk("buffer list @ 0x%p\n", adapter->buffer_list_addr);
	ibmveth_debug_printk("filter list @ 0x%p\n", adapter->filter_list_addr);
	ibmveth_debug_printk("receive q @ 0x%p\n", adapter->rx_queue.queue_addr);

	lpar_rc = h_register_logical_lan(adapter->vdev->unit_address,
					 adapter->buffer_list_dma,
					 rxq_desc.desc,
					 adapter->filter_list_dma,
					 mac_address);

	if(lpar_rc != H_Success) {
		ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc);
		ibmveth_error_printk("buffer TCE:0x%x filter TCE:0x%x rxq desc:0x%lx MAC:0x%lx\n",
				     adapter->buffer_list_dma,
				     adapter->filter_list_dma,
				     rxq_desc.desc,
				     mac_address);
		ibmveth_cleanup(adapter);
		return -ENONET;
	}

	ibmveth_debug_printk("registering irq 0x%x\n", netdev->irq);
	if((rc = request_irq(netdev->irq, &ibmveth_interrupt, 0, netdev->name, netdev)) != 0) {
		ibmveth_error_printk("unable to request irq 0x%x, rc %d\n", netdev->irq, rc);
		do {
			rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_isLongBusy(rc) || (rc == H_Busy));

		ibmveth_cleanup(adapter);
		return rc;
	}

	netif_start_queue(netdev);

	ibmveth_debug_printk("scheduling initial replenish cycle\n");
	ibmveth_schedule_replenishing(adapter);

	ibmveth_debug_printk("open complete\n");

	return 0;
}

/* close: quiesce the device and release the logical LAN */
static int ibmveth_close(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev->priv;
	long lpar_rc;

	ibmveth_debug_printk("close starting\n");

	netif_stop_queue(netdev);

	free_irq(netdev->irq, netdev);

	cancel_delayed_work(&adapter->replenish_task);
	flush_scheduled_work();

	do {
		lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
	} while (H_isLongBusy(lpar_rc) || (lpar_rc == H_Busy));

	if(lpar_rc != H_Success)
	{
		ibmveth_error_printk("h_free_logical_lan failed with %lx, continuing with close\n",
				     lpar_rc);
	}

	adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);

	ibmveth_cleanup(adapter);

	ibmveth_debug_printk("close complete\n");

	return 0;
}

static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) {
	cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE);
	cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | ADVERTISED_FIBRE);
	cmd->speed = SPEED_1000;
	cmd->duplex = DUPLEX_FULL;
	cmd->port = PORT_FIBRE;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_ENABLE;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 1;
	return 0;
}

static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info) {
	strncpy(info->driver, ibmveth_driver_name, sizeof(info->driver) - 1);
	strncpy(info->version, ibmveth_driver_version, sizeof(info->version) - 1);
}

static u32 netdev_get_link(struct net_device *dev) {
	return 1;
}

static struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo = netdev_get_drvinfo,
	.get_settings = netdev_get_settings,
	.get_link = netdev_get_link,
	.get_sg = ethtool_op_get_sg,
	.get_tx_csum = ethtool_op_get_tx_csum,
};

static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	return -EOPNOTSUPP;
}

#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))

/* transmit path: map the skb fragments and hand them to the hypervisor */
static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev->priv;
	union ibmveth_buf_desc desc[IbmVethMaxSendFrags];
	unsigned long lpar_rc;
	int nfrags = 0, curfrag;
	unsigned long correlator;
	unsigned int retry_count;

	if ((skb_shinfo(skb)->nr_frags + 1) > IbmVethMaxSendFrags) {
		adapter->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return 0;
	}

	memset(&desc, 0, sizeof(desc));

	/* nfrags = number of frags after the initial fragment */
	nfrags = skb_shinfo(skb)->nr_frags;

	if(nfrags)
		adapter->tx_multidesc_send++;

	/* map the initial fragment */
	desc[0].fields.length = nfrags ? skb->len - skb->data_len : skb->len;
	desc[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
					desc[0].fields.length, DMA_TO_DEVICE);
	desc[0].fields.valid = 1;

	if(dma_mapping_error(desc[0].fields.address)) {
		ibmveth_error_printk("tx: unable to map initial fragment\n");
		adapter->tx_map_failed++;
		adapter->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return 0;
	}

	curfrag = nfrags;

	/* map fragments past the initial portion if there are any */
	while(curfrag--) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[curfrag];
		desc[curfrag+1].fields.address
			= dma_map_single(&adapter->vdev->dev,
				page_address(frag->page) + frag->page_offset,
				frag->size, DMA_TO_DEVICE);
		desc[curfrag+1].fields.length = frag->size;
		desc[curfrag+1].fields.valid = 1;

		if(dma_mapping_error(desc[curfrag+1].fields.address)) {
			ibmveth_error_printk("tx: unable to map fragment %d\n", curfrag);
			adapter->tx_map_failed++;
			adapter->stats.tx_dropped++;
			/* Free all the mappings we just created */
			while(curfrag < nfrags) {
				dma_unmap_single(&adapter->vdev->dev,
						 desc[curfrag+1].fields.address,
						 desc[curfrag+1].fields.length,
						 DMA_TO_DEVICE);
				curfrag++;
			}
			dev_kfree_skb(skb);
			return 0;
		}
	}

	/* send the frame. Arbitrarily set retrycount to 1024 */
	correlator = 0;
	retry_count = 1024;
	do {
		lpar_rc = h_send_logical_lan(adapter->vdev->unit_address,
					     desc[0].desc,
					     desc[1].desc,
					     desc[2].desc,
					     desc[3].desc,
					     desc[4].desc,
					     desc[5].desc,
					     correlator);
	} while ((lpar_rc == H_Busy) && (retry_count--));

	if(lpar_rc != H_Success && lpar_rc != H_Dropped) {
		int i;
		ibmveth_error_printk("tx: h_send_logical_lan failed with rc=%ld\n", lpar_rc);
		for(i = 0; i < 6; i++) {
			ibmveth_error_printk("tx: desc[%i] valid=%d, len=%d, address=0x%d\n", i,
					     desc[i].fields.valid, desc[i].fields.length, desc[i].fields.address);
		}
		adapter->tx_send_failed++;
		adapter->stats.tx_dropped++;
	} else {
		adapter->stats.tx_packets++;
		adapter->stats.tx_bytes += skb->len;
	}

	do {
		dma_unmap_single(&adapter->vdev->dev,
				desc[nfrags].fields.address,
				desc[nfrags].fields.length, DMA_TO_DEVICE);
	} while(--nfrags >= 0);

	dev_kfree_skb(skb);
	return 0;
}

/* NAPI poll routine: harvest received frames from the rx queue */
static int ibmveth_poll(struct net_device *netdev, int *budget)
{
	struct ibmveth_adapter *adapter = netdev->priv;
	int max_frames_to_process = netdev->quota;
	int frames_processed = 0;
	int more_work = 1;
	unsigned long lpar_rc;

 restart_poll:
	do {
		struct net_device *netdev = adapter->netdev;

		if(ibmveth_rxq_pending_buffer(adapter)) {
			struct sk_buff *skb;

			rmb();

			if(!ibmveth_rxq_buffer_valid(adapter)) {
				wmb(); /* suggested by larson1 */
				adapter->rx_invalid_buffer++;
				ibmveth_debug_printk("recycling invalid buffer\n");
				ibmveth_rxq_recycle_buffer(adapter);
			} else {
				int length = ibmveth_rxq_frame_length(adapter);
				int offset = ibmveth_rxq_frame_offset(adapter);
				skb = ibmveth_rxq_get_buffer(adapter);

				ibmveth_rxq_harvest_buffer(adapter);

				skb_reserve(skb, offset);
				skb_put(skb, length);
				skb->dev = netdev;
				skb->protocol = eth_type_trans(skb, netdev);

				netif_receive_skb(skb); /* send it up */

				adapter->stats.rx_packets++;
				adapter->stats.rx_bytes += length;
				frames_processed++;
			}
		} else {
			more_work = 0;
		}
	} while(more_work && (frames_processed < max_frames_to_process));

	ibmveth_schedule_replenishing(adapter);

	if(more_work) {
		/* more work to do - return that we are not done yet */
		netdev->quota -= frames_processed;
		*budget -= frames_processed;
		return 1;
	}

	/* we think we are done - reenable interrupts, then check once more to make sure we are done */
	lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_ENABLE);

	ibmveth_assert(lpar_rc == H_Success);

	netif_rx_complete(netdev);

	if(ibmveth_rxq_pending_buffer(adapter) && netif_rx_reschedule(netdev, frames_processed))
	{
		lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
		ibmveth_assert(lpar_rc == H_Success);
		more_work = 1;
		goto restart_poll;
	}

	netdev->quota -= frames_processed;
	*budget -= frames_processed;

	/* we really are done */
	return 0;
}

/* interrupt handler: disable further interrupts and schedule NAPI polling */
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *netdev = dev_instance;
	struct ibmveth_adapter *adapter = netdev->priv;
	unsigned long lpar_rc;

	if(netif_rx_schedule_prep(netdev)) {
		lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
		ibmveth_assert(lpar_rc == H_Success);
		__netif_rx_schedule(netdev);
	}
	return IRQ_HANDLED;
}

static struct net_device_stats *ibmveth_get_stats(struct net_device *dev)
{
	struct ibmveth_adapter *adapter = dev->priv;
	return &adapter->stats;
}

static void ibmveth_set_multicast_list(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev->priv;
	unsigned long lpar_rc;

	if((netdev->flags & IFF_PROMISC) || (netdev->mc_count > adapter->mcastFilterSize)) {
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering,
					   0);
		if(lpar_rc != H_Success) {
			ibmveth_error_printk("h_multicast_ctrl rc=%ld when entering promisc mode\n", lpar_rc);
		}
	} else {
		struct dev_mc_list *mclist = netdev->mc_list;
		int i;
		/* clear the filter table & disable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering |
					   IbmVethMcastClearFilterTable,
					   0);
		if(lpar_rc != H_Success) {
			ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc);
		}
		/* add the addresses to the filter table */
		for(i = 0; i < netdev->mc_count; ++i, mclist = mclist->next) {
			// add the multicast address to the filter table
			unsigned long mcast_addr = 0;
			memcpy(((char *)&mcast_addr)+2, mclist->dmi_addr, 6);
			lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
						   IbmVethMcastAddFilter,
						   mcast_addr);
			if(lpar_rc != H_Success) {
				ibmveth_error_printk("h_multicast_ctrl rc=%ld when adding an entry to the filter table\n", lpar_rc);
			}
		}

		/* re-enable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableFiltering,
					   0);
		if(lpar_rc != H_Success) {
			ibmveth_error_printk("h_multicast_ctrl rc=%ld when enabling filtering\n", lpar_rc);
		}
	}
}

static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < 68) || (new_mtu > (1<<20)))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

/* probe: read the firmware device-tree attributes and register the netdev */
static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	int rc;
	struct net_device *netdev;
	struct ibmveth_adapter *adapter;

	unsigned char *mac_addr_p;
	unsigned int *mcastFilterSize_p;

	ibmveth_debug_printk_no_adapter("entering ibmveth_probe for UA 0x%x\n",
					dev->unit_address);

	mac_addr_p = (unsigned char *) vio_get_attribute(dev, VETH_MAC_ADDR, 0);
	if(!mac_addr_p) {
		printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find VETH_MAC_ADDR "
				"attribute\n", __FILE__, __LINE__);
		return 0;
	}

	mcastFilterSize_p = (unsigned int *) vio_get_attribute(dev, VETH_MCAST_FILTER_SIZE, 0);
	if(!mcastFilterSize_p) {
		printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find "
				"VETH_MCAST_FILTER_SIZE attribute\n",
				__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));

	if(!netdev)
		return -ENOMEM;

	SET_MODULE_OWNER(netdev);

	adapter = netdev->priv;
	memset(adapter, 0, sizeof(*adapter));
	dev->dev.driver_data = netdev;

	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->mcastFilterSize = *mcastFilterSize_p;

	/* Some older boxes running PHYP non-natively have an OF that
	   returns an 8-byte local-mac-address field (and the first
	   2 bytes have to be ignored) while newer boxes' OF return
	   a 6-byte field. Note that IEEE 1275 specifies that
	   local-mac-address must be a 6-byte field.
	   The RPA doc specifies that the first byte must be 10b, so
	   we'll just look for it to solve this 8 vs. 6 byte field issue */

	if ((*mac_addr_p & 0x3) != 0x02)
		mac_addr_p += 2;

	adapter->mac_addr = 0;
	memcpy(&adapter->mac_addr, mac_addr_p, 6);

	adapter->liobn = dev->iommu_table->it_index;

	netdev->irq = dev->irq;
	netdev->open = ibmveth_open;
	netdev->poll = ibmveth_poll;
	netdev->weight = 16;
	netdev->stop = ibmveth_close;
	netdev->hard_start_xmit = ibmveth_start_xmit;
	netdev->get_stats = ibmveth_get_stats;
	netdev->set_multicast_list = ibmveth_set_multicast_list;
	netdev->do_ioctl = ibmveth_ioctl;
	netdev->ethtool_ops = &netdev_ethtool_ops;
	netdev->change_mtu = ibmveth_change_mtu;
	SET_NETDEV_DEV(netdev, &dev->dev);

	memcpy(&netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);

	ibmveth_init_buffer_pool(&adapter->rx_buff_pool[0], 0, IbmVethPool0DftCnt, IbmVethPool0DftSize);
	ibmveth_init_buffer_pool(&adapter->rx_buff_pool[1], 1, IbmVethPool1DftCnt, IbmVethPool1DftSize);
	ibmveth_init_buffer_pool(&adapter->rx_buff_pool[2], 2, IbmVethPool2DftCnt, IbmVethPool2DftSize);

	ibmveth_debug_printk("adapter @ 0x%p\n", adapter);

	INIT_WORK(&adapter->replenish_task, (void*)ibmveth_replenish_task, (void*)adapter);

	adapter->buffer_list_dma = DMA_ERROR_CODE;
	adapter->filter_list_dma = DMA_ERROR_CODE;
	adapter->rx_queue.queue_dma = DMA_ERROR_CODE;

	atomic_set(&adapter->not_replenishing, 1);

	ibmveth_debug_printk("registering netdev...\n");

	rc = register_netdev(netdev);

	if(rc) {
		ibmveth_debug_printk("failed to register netdev rc=%d\n", rc);
		free_netdev(netdev);
		return rc;
	}

	ibmveth_debug_printk("registered\n");

	ibmveth_proc_register_adapter(adapter);

	return 0;
}

static int __devexit ibmveth_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev->dev.driver_data;
	struct ibmveth_adapter *adapter = netdev->priv;

	unregister_netdev(netdev);

	ibmveth_proc_unregister_adapter(adapter);

	free_netdev(netdev);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void ibmveth_proc_register_driver(void)
{
	ibmveth_proc_dir = create_proc_entry(IBMVETH_PROC_DIR, S_IFDIR, proc_net);
	if (ibmveth_proc_dir) {
		SET_MODULE_OWNER(ibmveth_proc_dir);
	}
}

static void ibmveth_proc_unregister_driver(void)
{
	remove_proc_entry(IBMVETH_PROC_DIR, proc_net);
}

static void *ibmveth_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0) {
		return (void *)1;
	} else {
		return NULL;
	}
}

static void *ibmveth_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void ibmveth_seq_stop(struct seq_file *seq, void *v)
{
}

static int ibmveth_seq_show(struct seq_file *seq, void *v)
{
	struct ibmveth_adapter *adapter = seq->private;
	char *current_mac = ((char*) &adapter->netdev->dev_addr);
	char *firmware_mac = ((char*) &adapter->mac_addr);

	seq_printf(seq, "%s %s\n\n", ibmveth_driver_string, ibmveth_driver_version);

	seq_printf(seq, "Unit Address: 0x%x\n", adapter->vdev->unit_address);
	seq_printf(seq, "LIOBN: 0x%lx\n", adapter->liobn);
	seq_printf(seq, "Current MAC: %02X:%02X:%02X:%02X:%02X:%02X\n",
		   current_mac[0], current_mac[1], current_mac[2],
		   current_mac[3], current_mac[4], current_mac[5]);
	seq_printf(seq, "Firmware MAC: %02X:%02X:%02X:%02X:%02X:%02X\n",
		   firmware_mac[0], firmware_mac[1], firmware_mac[2],
		   firmware_mac[3], firmware_mac[4], firmware_mac[5]);

	seq_printf(seq, "\nAdapter Statistics:\n");
	seq_printf(seq, " TX: skbuffs linearized: %ld\n", adapter->tx_linearized);
	seq_printf(seq, "     multi-descriptor sends: %ld\n", adapter->tx_multidesc_send);
	seq_printf(seq, "     skb_linearize failures: %ld\n", adapter->tx_linearize_failed);
	seq_printf(seq, "     vio_map_single failures: %ld\n", adapter->tx_map_failed);
	seq_printf(seq, "     send failures: %ld\n", adapter->tx_send_failed);
	seq_printf(seq, " RX: replenish task cycles: %ld\n", adapter->replenish_task_cycles);
	seq_printf(seq, "     alloc_skb_failures: %ld\n", adapter->replenish_no_mem);
	seq_printf(seq, "     add buffer failures: %ld\n", adapter->replenish_add_buff_failure);
	seq_printf(seq, "     invalid buffers: %ld\n", adapter->rx_invalid_buffer);
	seq_printf(seq, "     no buffers: %ld\n", adapter->rx_no_buffer);

	return 0;
}
static struct seq_operations ibmveth_seq_ops = {
	.start = ibmveth_seq_start,
	.next  = ibmveth_seq_next,
	.stop  = ibmveth_seq_stop,
	.show  = ibmveth_seq_show,
};

static int ibmveth_proc_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct proc_dir_entry *proc;
	int rc;

	rc = seq_open(file, &ibmveth_seq_ops);
	if (!rc) {
		/* recover the pointer buried in proc_dir_entry data */
		seq = file->private_data;
		proc = PDE(inode);
		seq->private = proc->data;
	}
	return rc;
}

static struct file_operations ibmveth_proc_fops = {
	.owner   = THIS_MODULE,
	.open    = ibmveth_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
{
	struct proc_dir_entry *entry;
	if (ibmveth_proc_dir) {
		entry = create_proc_entry(adapter->netdev->name, S_IFREG, ibmveth_proc_dir);
		if (!entry) {
			ibmveth_error_printk("Cannot create adapter proc entry");
		} else {
			entry->data = (void *) adapter;
			entry->proc_fops = &ibmveth_proc_fops;
			SET_MODULE_OWNER(entry);
		}
	}
	return;
}

static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
{
	if (ibmveth_proc_dir) {
		remove_proc_entry(adapter->netdev->name, ibmveth_proc_dir);
	}
}

#else /* CONFIG_PROC_FS */
static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
{
}

static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
{
}
static void ibmveth_proc_register_driver(void)
{
}

static void ibmveth_proc_unregister_driver(void)
{
}
#endif /* CONFIG_PROC_FS */

static struct vio_device_id ibmveth_device_table[] __devinitdata = {
	{ "network", "IBM,l-lan"},
	{ "", "" }
};

MODULE_DEVICE_TABLE(vio, ibmveth_device_table);

static struct vio_driver ibmveth_driver = {
	.name     = (char *)ibmveth_driver_name,
	.id_table = ibmveth_device_table,
	.probe    = ibmveth_probe,
	.remove   = ibmveth_remove
};

static int __init ibmveth_module_init(void)
{
	ibmveth_printk("%s: %s %s\n", ibmveth_driver_name, ibmveth_driver_string, ibmveth_driver_version);

	ibmveth_proc_register_driver();

	return vio_register_driver(&ibmveth_driver);
}

static void __exit ibmveth_module_exit(void)
{
	vio_unregister_driver(&ibmveth_driver);
	ibmveth_proc_unregister_driver();
}

module_init(ibmveth_module_init);
module_exit(ibmveth_module_exit);