Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v4.4-rc7 (2283 lines, 65 kB)
/*
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/kernel.h>	/* printk() */
#include <linux/slab.h>		/* kmalloc() */
#include <linux/errno.h>	/* error codes */
#include <linux/types.h>	/* size_t */
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/irq.h>
#include <linux/netdevice.h>	/* struct device, and other headers */
#include <linux/etherdevice.h>	/* eth_type_trans */
#include <linux/skbuff.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/hugetlb.h>
#include <linux/in6.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/io.h>
#include <linux/ctype.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/tick.h>

#include <asm/checksum.h>
#include <asm/homecache.h>
#include <gxio/mpipe.h>
#include <arch/sim.h>

/* Default transmit lockup timeout period, in jiffies. */
#define TILE_NET_TIMEOUT (5 * HZ)

/* The maximum number of distinct channels (idesc.channel is 5 bits). */
#define TILE_NET_CHANNELS 32

/* Maximum number of idescs to handle per "poll". */
#define TILE_NET_BATCH 128

/* Maximum number of packets to handle per "poll". */
#define TILE_NET_WEIGHT 64

/* Number of entries in each iqueue. */
#define IQUEUE_ENTRIES 512

/* Number of entries in each equeue. */
#define EQUEUE_ENTRIES 2048

/* Total header bytes per equeue slot.  Must be big enough for 2 bytes
 * of NET_IP_ALIGN alignment, plus 14 bytes (?) of L2 header, plus up to
 * 60 bytes of actual TCP header.  We round up to align to cache lines.
 */
#define HEADER_BYTES 128

/* Maximum completions per cpu per device (must be a power of two).
 * ISSUE: What is the right number here?  If this is too small, then
 * egress might block waiting for free space in a completions array.
 * ISSUE: At the least, allocate these only for initialized echannels.
 */
#define TILE_NET_MAX_COMPS 64

#define MAX_FRAGS (MAX_SKB_FRAGS + 1)

/* The "kinds" of buffer stacks (small/large/jumbo). */
#define MAX_KINDS 3

/* Size of completions data to allocate.
 * ISSUE: Probably more than needed since we don't use all the channels.
 */
#define COMPS_SIZE (TILE_NET_CHANNELS * sizeof(struct tile_net_comps))

/* Size of NotifRing data to allocate. */
#define NOTIF_RING_SIZE (IQUEUE_ENTRIES * sizeof(gxio_mpipe_idesc_t))

/* Timeout to wake the per-device TX timer after we stop the queue.
 * We don't want the timeout too short (adds overhead, and might end
 * up causing stop/wake/stop/wake cycles) or too long (affects performance).
 * For the 10 Gb NIC, 30 usec means roughly 30+ 1500-byte packets.
 */
#define TX_TIMER_DELAY_USEC 30

/* Timeout to wake the per-cpu egress timer to free completions. */
#define EGRESS_TIMER_DELAY_USEC 1000

MODULE_AUTHOR("Tilera Corporation");
MODULE_LICENSE("GPL");

/* A "packet fragment" (a chunk of memory). */
struct frag {
	void *buf;
	size_t length;
};

/* A single completion. */
struct tile_net_comp {
	/* The "complete_count" when the completion will be complete. */
	s64 when;
	/* The buffer to be freed when the completion is complete. */
	struct sk_buff *skb;
};

/* The completions for a given cpu and echannel. */
struct tile_net_comps {
	/* The completions. */
	struct tile_net_comp comp_queue[TILE_NET_MAX_COMPS];
	/* The number of completions used. */
	unsigned long comp_next;
	/* The number of completions freed. */
	unsigned long comp_last;
};

/* The transmit wake timer for a given cpu and echannel. */
struct tile_net_tx_wake {
	int tx_queue_idx;
	struct hrtimer timer;
	struct net_device *dev;
};

/* Info for a specific cpu. */
struct tile_net_info {
	/* Our cpu. */
	int my_cpu;
	/* A timer for handling egress completions. */
	struct hrtimer egress_timer;
	/* True if "egress_timer" is scheduled. */
	bool egress_timer_scheduled;
	struct info_mpipe {
		/* Packet queue. */
		gxio_mpipe_iqueue_t iqueue;
		/* The NAPI struct. */
		struct napi_struct napi;
		/* Number of buffers (by kind) which must still be provided. */
		unsigned int num_needed_buffers[MAX_KINDS];
		/* instance id. */
		int instance;
		/* True if iqueue is valid. */
		bool has_iqueue;
		/* NAPI flags. */
		bool napi_added;
		bool napi_enabled;
		/* Comps for each egress channel. */
		struct tile_net_comps *comps_for_echannel[TILE_NET_CHANNELS];
		/* Transmit wake timer for each egress channel. */
		struct tile_net_tx_wake tx_wake[TILE_NET_CHANNELS];
	} mpipe[NR_MPIPE_MAX];
};

/* Info for egress on a particular egress channel. */
struct tile_net_egress {
	/* The "equeue". */
	gxio_mpipe_equeue_t *equeue;
	/* The headers for TSO. */
	unsigned char *headers;
};

/* Info for a specific device. */
struct tile_net_priv {
	/* Our network device. */
	struct net_device *dev;
	/* The primary link. */
	gxio_mpipe_link_t link;
	/* The primary channel, if open, else -1. */
	int channel;
	/* The "loopify" egress link, if needed. */
	gxio_mpipe_link_t loopify_link;
	/* The "loopify" egress channel, if open, else -1. */
	int loopify_channel;
	/* The egress channel (channel or loopify_channel). */
	int echannel;
	/* mPIPE instance, 0 or 1. */
	int instance;
	/* The timestamp config. */
	struct hwtstamp_config stamp_cfg;
};

static struct mpipe_data {
	/* The ingress irq. */
	int ingress_irq;

	/* The "context" for all devices. */
	gxio_mpipe_context_t context;

	/* Egress info, indexed by "priv->echannel"
	 * (lazily created as needed).
	 */
	struct tile_net_egress
	egress_for_echannel[TILE_NET_CHANNELS];

	/* Devices currently associated with each channel.
	 * NOTE: The array entry can become NULL after ifconfig down, but
	 * we do not free the underlying net_device structures, so it is
	 * safe to use a pointer after reading it from this array.
	 */
	struct net_device
	*tile_net_devs_for_channel[TILE_NET_CHANNELS];

	/* The actual memory allocated for the buffer stacks. */
	void *buffer_stack_vas[MAX_KINDS];

	/* The amount of memory allocated for each buffer stack. */
	size_t buffer_stack_bytes[MAX_KINDS];

	/* The first buffer stack index
	 * (small = +0, large = +1, jumbo = +2).
	 */
	int first_buffer_stack;

	/* The buckets. */
	int first_bucket;
	int num_buckets;

	/* PTP-specific data. */
	struct ptp_clock *ptp_clock;
	struct ptp_clock_info caps;

	/* Lock for ptp accessors. */
	struct mutex ptp_lock;

} mpipe_data[NR_MPIPE_MAX] = {
	[0 ... (NR_MPIPE_MAX - 1)] {
		.ingress_irq = -1,
		.first_buffer_stack = -1,
		.first_bucket = -1,
		.num_buckets = 1
	}
};

/* A mutex for "tile_net_devs_for_channel". */
static DEFINE_MUTEX(tile_net_devs_for_channel_mutex);

/* The per-cpu info. */
static DEFINE_PER_CPU(struct tile_net_info, per_cpu_info);


/* The buffer size enums for each buffer stack.
 * See arch/tile/include/gxio/mpipe.h for the set of possible values.
 * We avoid the "10384" size because it can induce "false chaining"
 * on "cut-through" jumbo packets.
 */
static gxio_mpipe_buffer_size_enum_t buffer_size_enums[MAX_KINDS] = {
	GXIO_MPIPE_BUFFER_SIZE_128,
	GXIO_MPIPE_BUFFER_SIZE_1664,
	GXIO_MPIPE_BUFFER_SIZE_16384
};

/* Text value of tile_net.cpus if passed as a module parameter. */
static char *network_cpus_string;

/* The actual cpus in "network_cpus". */
static struct cpumask network_cpus_map;

/* If "tile_net.loopify=LINK" was specified, this is "LINK". */
static char *loopify_link_name;

/* If "tile_net.custom" was specified, this is true. */
static bool custom_flag;

/* If "tile_net.jumbo=NUM" was specified, this is "NUM". */
static uint jumbo_num;

/* Obtain mpipe instance from struct tile_net_priv given struct net_device. */
static inline int mpipe_instance(struct net_device *dev)
{
	struct tile_net_priv *priv = netdev_priv(dev);
	return priv->instance;
}

/* The "tile_net.cpus" argument specifies the cpus that are dedicated
 * to handle ingress packets.
 *
 * The parameter should be in the form "tile_net.cpus=m-n[,x-y]", where
 * m, n, x, y are integer numbers that represent the cpus that can be
 * neither a dedicated cpu nor a dataplane cpu.
 */
static bool network_cpus_init(void)
{
	int rc;

	if (network_cpus_string == NULL)
		return false;

	rc = cpulist_parse_crop(network_cpus_string, &network_cpus_map);
	if (rc != 0) {
		pr_warn("tile_net.cpus=%s: malformed cpu list\n",
			network_cpus_string);
		return false;
	}

	/* Remove dedicated cpus. */
	cpumask_and(&network_cpus_map, &network_cpus_map, cpu_possible_mask);

	if (cpumask_empty(&network_cpus_map)) {
		pr_warn("Ignoring empty tile_net.cpus='%s'.\n",
			network_cpus_string);
		return false;
	}

	pr_info("Linux network CPUs: %*pbl\n",
		cpumask_pr_args(&network_cpus_map));
	return true;
}

module_param_named(cpus, network_cpus_string, charp, 0444);
MODULE_PARM_DESC(cpus, "cpulist of cores that handle network interrupts");

/* The "tile_net.loopify=LINK" argument causes the named device to
 * actually use "loop0" for ingress, and "loop1" for egress.
This 327 * allows an app to sit between the actual link and linux, passing 328 * (some) packets along to linux, and forwarding (some) packets sent 329 * out by linux. 330 */ 331module_param_named(loopify, loopify_link_name, charp, 0444); 332MODULE_PARM_DESC(loopify, "name the device to use loop0/1 for ingress/egress"); 333 334/* The "tile_net.custom" argument causes us to ignore the "conventional" 335 * classifier metadata, in particular, the "l2_offset". 336 */ 337module_param_named(custom, custom_flag, bool, 0444); 338MODULE_PARM_DESC(custom, "indicates a (heavily) customized classifier"); 339 340/* The "tile_net.jumbo" argument causes us to support "jumbo" packets, 341 * and to allocate the given number of "jumbo" buffers. 342 */ 343module_param_named(jumbo, jumbo_num, uint, 0444); 344MODULE_PARM_DESC(jumbo, "the number of buffers to support jumbo packets"); 345 346/* Atomically update a statistics field. 347 * Note that on TILE-Gx, this operation is fire-and-forget on the 348 * issuing core (single-cycle dispatch) and takes only a few cycles 349 * longer than a regular store when the request reaches the home cache. 350 * No expensive bus management overhead is required. 351 */ 352static void tile_net_stats_add(unsigned long value, unsigned long *field) 353{ 354 BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(unsigned long)); 355 atomic_long_add(value, (atomic_long_t *)field); 356} 357 358/* Allocate and push a buffer. */ 359static bool tile_net_provide_buffer(int instance, int kind) 360{ 361 struct mpipe_data *md = &mpipe_data[instance]; 362 gxio_mpipe_buffer_size_enum_t bse = buffer_size_enums[kind]; 363 size_t bs = gxio_mpipe_buffer_size_enum_to_buffer_size(bse); 364 const unsigned long buffer_alignment = 128; 365 struct sk_buff *skb; 366 int len; 367 368 len = sizeof(struct sk_buff **) + buffer_alignment + bs; 369 skb = dev_alloc_skb(len); 370 if (skb == NULL) 371 return false; 372 373 /* Make room for a back-pointer to 'skb' and guarantee alignment. */ 374 skb_reserve(skb, sizeof(struct sk_buff **)); 375 skb_reserve(skb, -(long)skb->data & (buffer_alignment - 1)); 376 377 /* Save a back-pointer to 'skb'. */ 378 *(struct sk_buff **)(skb->data - sizeof(struct sk_buff **)) = skb; 379 380 /* Make sure "skb" and the back-pointer have been flushed. */ 381 wmb(); 382 383 gxio_mpipe_push_buffer(&md->context, md->first_buffer_stack + kind, 384 (void *)va_to_tile_io_addr(skb->data)); 385 386 return true; 387} 388 389/* Convert a raw mpipe buffer to its matching skb pointer. */ 390static struct sk_buff *mpipe_buf_to_skb(void *va) 391{ 392 /* Acquire the associated "skb". */ 393 struct sk_buff **skb_ptr = va - sizeof(*skb_ptr); 394 struct sk_buff *skb = *skb_ptr; 395 396 /* Paranoia. */ 397 if (skb->data != va) { 398 /* Panic here since there's a reasonable chance 399 * that corrupt buffers means generic memory 400 * corruption, with unpredictable system effects. 401 */ 402 panic("Corrupt linux buffer! va=%p, skb=%p, skb->data=%p", 403 va, skb, skb->data); 404 } 405 406 return skb; 407} 408 409static void tile_net_pop_all_buffers(int instance, int stack) 410{ 411 struct mpipe_data *md = &mpipe_data[instance]; 412 413 for (;;) { 414 tile_io_addr_t addr = 415 (tile_io_addr_t)gxio_mpipe_pop_buffer(&md->context, 416 stack); 417 if (addr == 0) 418 break; 419 dev_kfree_skb_irq(mpipe_buf_to_skb(tile_io_addr_to_va(addr))); 420 } 421} 422 423/* Provide linux buffers to mPIPE. 
*/ 424static void tile_net_provide_needed_buffers(void) 425{ 426 struct tile_net_info *info = this_cpu_ptr(&per_cpu_info); 427 int instance, kind; 428 for (instance = 0; instance < NR_MPIPE_MAX && 429 info->mpipe[instance].has_iqueue; instance++) { 430 for (kind = 0; kind < MAX_KINDS; kind++) { 431 while (info->mpipe[instance].num_needed_buffers[kind] 432 != 0) { 433 if (!tile_net_provide_buffer(instance, kind)) { 434 pr_notice("Tile %d still needs" 435 " some buffers\n", 436 info->my_cpu); 437 return; 438 } 439 info->mpipe[instance]. 440 num_needed_buffers[kind]--; 441 } 442 } 443 } 444} 445 446/* Get RX timestamp, and store it in the skb. */ 447static void tile_rx_timestamp(struct tile_net_priv *priv, struct sk_buff *skb, 448 gxio_mpipe_idesc_t *idesc) 449{ 450 if (unlikely(priv->stamp_cfg.rx_filter != HWTSTAMP_FILTER_NONE)) { 451 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); 452 memset(shhwtstamps, 0, sizeof(*shhwtstamps)); 453 shhwtstamps->hwtstamp = ktime_set(idesc->time_stamp_sec, 454 idesc->time_stamp_ns); 455 } 456} 457 458/* Get TX timestamp, and store it in the skb. */ 459static void tile_tx_timestamp(struct sk_buff *skb, int instance) 460{ 461 struct skb_shared_info *shtx = skb_shinfo(skb); 462 if (unlikely((shtx->tx_flags & SKBTX_HW_TSTAMP) != 0)) { 463 struct mpipe_data *md = &mpipe_data[instance]; 464 struct skb_shared_hwtstamps shhwtstamps; 465 struct timespec ts; 466 467 shtx->tx_flags |= SKBTX_IN_PROGRESS; 468 gxio_mpipe_get_timestamp(&md->context, &ts); 469 memset(&shhwtstamps, 0, sizeof(shhwtstamps)); 470 shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec); 471 skb_tstamp_tx(skb, &shhwtstamps); 472 } 473} 474 475/* Use ioctl() to enable or disable TX or RX timestamping. */ 476static int tile_hwtstamp_set(struct net_device *dev, struct ifreq *rq) 477{ 478 struct hwtstamp_config config; 479 struct tile_net_priv *priv = netdev_priv(dev); 480 481 if (copy_from_user(&config, rq->ifr_data, sizeof(config))) 482 return -EFAULT; 483 484 if (config.flags) /* reserved for future extensions */ 485 return -EINVAL; 486 487 switch (config.tx_type) { 488 case HWTSTAMP_TX_OFF: 489 case HWTSTAMP_TX_ON: 490 break; 491 default: 492 return -ERANGE; 493 } 494 495 switch (config.rx_filter) { 496 case HWTSTAMP_FILTER_NONE: 497 break; 498 case HWTSTAMP_FILTER_ALL: 499 case HWTSTAMP_FILTER_SOME: 500 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 501 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 502 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 503 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 504 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 505 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 506 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 507 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 508 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 509 case HWTSTAMP_FILTER_PTP_V2_EVENT: 510 case HWTSTAMP_FILTER_PTP_V2_SYNC: 511 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 512 config.rx_filter = HWTSTAMP_FILTER_ALL; 513 break; 514 default: 515 return -ERANGE; 516 } 517 518 if (copy_to_user(rq->ifr_data, &config, sizeof(config))) 519 return -EFAULT; 520 521 priv->stamp_cfg = config; 522 return 0; 523} 524 525static int tile_hwtstamp_get(struct net_device *dev, struct ifreq *rq) 526{ 527 struct tile_net_priv *priv = netdev_priv(dev); 528 529 if (copy_to_user(rq->ifr_data, &priv->stamp_cfg, 530 sizeof(priv->stamp_cfg))) 531 return -EFAULT; 532 533 return 0; 534} 535 536static inline bool filter_packet(struct net_device *dev, void *buf) 537{ 538 /* Filter packets received before we're up. 
*/ 539 if (dev == NULL || !(dev->flags & IFF_UP)) 540 return true; 541 542 /* Filter out packets that aren't for us. */ 543 if (!(dev->flags & IFF_PROMISC) && 544 !is_multicast_ether_addr(buf) && 545 !ether_addr_equal(dev->dev_addr, buf)) 546 return true; 547 548 return false; 549} 550 551static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb, 552 gxio_mpipe_idesc_t *idesc, unsigned long len) 553{ 554 struct tile_net_info *info = this_cpu_ptr(&per_cpu_info); 555 struct tile_net_priv *priv = netdev_priv(dev); 556 int instance = priv->instance; 557 558 /* Encode the actual packet length. */ 559 skb_put(skb, len); 560 561 skb->protocol = eth_type_trans(skb, dev); 562 563 /* Acknowledge "good" hardware checksums. */ 564 if (idesc->cs && idesc->csum_seed_val == 0xFFFF) 565 skb->ip_summed = CHECKSUM_UNNECESSARY; 566 567 /* Get RX timestamp from idesc. */ 568 tile_rx_timestamp(priv, skb, idesc); 569 570 napi_gro_receive(&info->mpipe[instance].napi, skb); 571 572 /* Update stats. */ 573 tile_net_stats_add(1, &dev->stats.rx_packets); 574 tile_net_stats_add(len, &dev->stats.rx_bytes); 575 576 /* Need a new buffer. */ 577 if (idesc->size == buffer_size_enums[0]) 578 info->mpipe[instance].num_needed_buffers[0]++; 579 else if (idesc->size == buffer_size_enums[1]) 580 info->mpipe[instance].num_needed_buffers[1]++; 581 else 582 info->mpipe[instance].num_needed_buffers[2]++; 583} 584 585/* Handle a packet. Return true if "processed", false if "filtered". */ 586static bool tile_net_handle_packet(int instance, gxio_mpipe_idesc_t *idesc) 587{ 588 struct tile_net_info *info = this_cpu_ptr(&per_cpu_info); 589 struct mpipe_data *md = &mpipe_data[instance]; 590 struct net_device *dev = md->tile_net_devs_for_channel[idesc->channel]; 591 uint8_t l2_offset; 592 void *va; 593 void *buf; 594 unsigned long len; 595 bool filter; 596 597 /* Drop packets for which no buffer was available (which can 598 * happen under heavy load), or for which the me/tr/ce flags 599 * are set (which can happen for jumbo cut-through packets, 600 * or with a customized classifier). 601 */ 602 if (idesc->be || idesc->me || idesc->tr || idesc->ce) { 603 if (dev) 604 tile_net_stats_add(1, &dev->stats.rx_errors); 605 goto drop; 606 } 607 608 /* Get the "l2_offset", if allowed. */ 609 l2_offset = custom_flag ? 0 : gxio_mpipe_idesc_get_l2_offset(idesc); 610 611 /* Get the VA (including NET_IP_ALIGN bytes of "headroom"). */ 612 va = tile_io_addr_to_va((unsigned long)idesc->va); 613 614 /* Get the actual packet start/length. */ 615 buf = va + l2_offset; 616 len = idesc->l2_size - l2_offset; 617 618 /* Point "va" at the raw buffer. */ 619 va -= NET_IP_ALIGN; 620 621 filter = filter_packet(dev, buf); 622 if (filter) { 623 if (dev) 624 tile_net_stats_add(1, &dev->stats.rx_dropped); 625drop: 626 gxio_mpipe_iqueue_drop(&info->mpipe[instance].iqueue, idesc); 627 } else { 628 struct sk_buff *skb = mpipe_buf_to_skb(va); 629 630 /* Skip headroom, and any custom header. */ 631 skb_reserve(skb, NET_IP_ALIGN + l2_offset); 632 633 tile_net_receive_skb(dev, skb, idesc, len); 634 } 635 636 gxio_mpipe_iqueue_consume(&info->mpipe[instance].iqueue, idesc); 637 return !filter; 638} 639 640/* Handle some packets for the current CPU. 641 * 642 * This function handles up to TILE_NET_BATCH idescs per call. 643 * 644 * ISSUE: Since we do not provide new buffers until this function is 645 * complete, we must initially provide enough buffers for each network 646 * cpu to fill its iqueue and also its batched idescs. 
647 * 648 * ISSUE: The "rotting packet" race condition occurs if a packet 649 * arrives after the queue appears to be empty, and before the 650 * hypervisor interrupt is re-enabled. 651 */ 652static int tile_net_poll(struct napi_struct *napi, int budget) 653{ 654 struct tile_net_info *info = this_cpu_ptr(&per_cpu_info); 655 unsigned int work = 0; 656 gxio_mpipe_idesc_t *idesc; 657 int instance, i, n; 658 struct mpipe_data *md; 659 struct info_mpipe *info_mpipe = 660 container_of(napi, struct info_mpipe, napi); 661 662 if (budget <= 0) 663 goto done; 664 665 instance = info_mpipe->instance; 666 while ((n = gxio_mpipe_iqueue_try_peek( 667 &info_mpipe->iqueue, 668 &idesc)) > 0) { 669 for (i = 0; i < n; i++) { 670 if (i == TILE_NET_BATCH) 671 goto done; 672 if (tile_net_handle_packet(instance, 673 idesc + i)) { 674 if (++work >= budget) 675 goto done; 676 } 677 } 678 } 679 680 /* There are no packets left. */ 681 napi_complete(&info_mpipe->napi); 682 683 md = &mpipe_data[instance]; 684 /* Re-enable hypervisor interrupts. */ 685 gxio_mpipe_enable_notif_ring_interrupt( 686 &md->context, info->mpipe[instance].iqueue.ring); 687 688 /* HACK: Avoid the "rotting packet" problem. */ 689 if (gxio_mpipe_iqueue_try_peek(&info_mpipe->iqueue, &idesc) > 0) 690 napi_schedule(&info_mpipe->napi); 691 692 /* ISSUE: Handle completions? */ 693 694done: 695 tile_net_provide_needed_buffers(); 696 697 return work; 698} 699 700/* Handle an ingress interrupt from an instance on the current cpu. */ 701static irqreturn_t tile_net_handle_ingress_irq(int irq, void *id) 702{ 703 struct tile_net_info *info = this_cpu_ptr(&per_cpu_info); 704 napi_schedule(&info->mpipe[(uint64_t)id].napi); 705 return IRQ_HANDLED; 706} 707 708/* Free some completions. This must be called with interrupts blocked. */ 709static int tile_net_free_comps(gxio_mpipe_equeue_t *equeue, 710 struct tile_net_comps *comps, 711 int limit, bool force_update) 712{ 713 int n = 0; 714 while (comps->comp_last < comps->comp_next) { 715 unsigned int cid = comps->comp_last % TILE_NET_MAX_COMPS; 716 struct tile_net_comp *comp = &comps->comp_queue[cid]; 717 if (!gxio_mpipe_equeue_is_complete(equeue, comp->when, 718 force_update || n == 0)) 719 break; 720 dev_kfree_skb_irq(comp->skb); 721 comps->comp_last++; 722 if (++n == limit) 723 break; 724 } 725 return n; 726} 727 728/* Add a completion. This must be called with interrupts blocked. 729 * tile_net_equeue_try_reserve() will have ensured a free completion entry. 
730 */ 731static void add_comp(gxio_mpipe_equeue_t *equeue, 732 struct tile_net_comps *comps, 733 uint64_t when, struct sk_buff *skb) 734{ 735 int cid = comps->comp_next % TILE_NET_MAX_COMPS; 736 comps->comp_queue[cid].when = when; 737 comps->comp_queue[cid].skb = skb; 738 comps->comp_next++; 739} 740 741static void tile_net_schedule_tx_wake_timer(struct net_device *dev, 742 int tx_queue_idx) 743{ 744 struct tile_net_info *info = &per_cpu(per_cpu_info, tx_queue_idx); 745 struct tile_net_priv *priv = netdev_priv(dev); 746 int instance = priv->instance; 747 struct tile_net_tx_wake *tx_wake = 748 &info->mpipe[instance].tx_wake[priv->echannel]; 749 750 hrtimer_start(&tx_wake->timer, 751 ktime_set(0, TX_TIMER_DELAY_USEC * 1000UL), 752 HRTIMER_MODE_REL_PINNED); 753} 754 755static enum hrtimer_restart tile_net_handle_tx_wake_timer(struct hrtimer *t) 756{ 757 struct tile_net_tx_wake *tx_wake = 758 container_of(t, struct tile_net_tx_wake, timer); 759 netif_wake_subqueue(tx_wake->dev, tx_wake->tx_queue_idx); 760 return HRTIMER_NORESTART; 761} 762 763/* Make sure the egress timer is scheduled. */ 764static void tile_net_schedule_egress_timer(void) 765{ 766 struct tile_net_info *info = this_cpu_ptr(&per_cpu_info); 767 768 if (!info->egress_timer_scheduled) { 769 hrtimer_start(&info->egress_timer, 770 ktime_set(0, EGRESS_TIMER_DELAY_USEC * 1000UL), 771 HRTIMER_MODE_REL_PINNED); 772 info->egress_timer_scheduled = true; 773 } 774} 775 776/* The "function" for "info->egress_timer". 777 * 778 * This timer will reschedule itself as long as there are any pending 779 * completions expected for this tile. 780 */ 781static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t) 782{ 783 struct tile_net_info *info = this_cpu_ptr(&per_cpu_info); 784 unsigned long irqflags; 785 bool pending = false; 786 int i, instance; 787 788 local_irq_save(irqflags); 789 790 /* The timer is no longer scheduled. */ 791 info->egress_timer_scheduled = false; 792 793 /* Free all possible comps for this tile. */ 794 for (instance = 0; instance < NR_MPIPE_MAX && 795 info->mpipe[instance].has_iqueue; instance++) { 796 for (i = 0; i < TILE_NET_CHANNELS; i++) { 797 struct tile_net_egress *egress = 798 &mpipe_data[instance].egress_for_echannel[i]; 799 struct tile_net_comps *comps = 800 info->mpipe[instance].comps_for_echannel[i]; 801 if (!egress || comps->comp_last >= comps->comp_next) 802 continue; 803 tile_net_free_comps(egress->equeue, comps, -1, true); 804 pending = pending || 805 (comps->comp_last < comps->comp_next); 806 } 807 } 808 809 /* Reschedule timer if needed. */ 810 if (pending) 811 tile_net_schedule_egress_timer(); 812 813 local_irq_restore(irqflags); 814 815 return HRTIMER_NORESTART; 816} 817 818/* PTP clock operations. 
*/ 819 820static int ptp_mpipe_adjfreq(struct ptp_clock_info *ptp, s32 ppb) 821{ 822 int ret = 0; 823 struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps); 824 mutex_lock(&md->ptp_lock); 825 if (gxio_mpipe_adjust_timestamp_freq(&md->context, ppb)) 826 ret = -EINVAL; 827 mutex_unlock(&md->ptp_lock); 828 return ret; 829} 830 831static int ptp_mpipe_adjtime(struct ptp_clock_info *ptp, s64 delta) 832{ 833 int ret = 0; 834 struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps); 835 mutex_lock(&md->ptp_lock); 836 if (gxio_mpipe_adjust_timestamp(&md->context, delta)) 837 ret = -EBUSY; 838 mutex_unlock(&md->ptp_lock); 839 return ret; 840} 841 842static int ptp_mpipe_gettime(struct ptp_clock_info *ptp, 843 struct timespec64 *ts) 844{ 845 int ret = 0; 846 struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps); 847 mutex_lock(&md->ptp_lock); 848 if (gxio_mpipe_get_timestamp(&md->context, ts)) 849 ret = -EBUSY; 850 mutex_unlock(&md->ptp_lock); 851 return ret; 852} 853 854static int ptp_mpipe_settime(struct ptp_clock_info *ptp, 855 const struct timespec64 *ts) 856{ 857 int ret = 0; 858 struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps); 859 mutex_lock(&md->ptp_lock); 860 if (gxio_mpipe_set_timestamp(&md->context, ts)) 861 ret = -EBUSY; 862 mutex_unlock(&md->ptp_lock); 863 return ret; 864} 865 866static int ptp_mpipe_enable(struct ptp_clock_info *ptp, 867 struct ptp_clock_request *request, int on) 868{ 869 return -EOPNOTSUPP; 870} 871 872static struct ptp_clock_info ptp_mpipe_caps = { 873 .owner = THIS_MODULE, 874 .name = "mPIPE clock", 875 .max_adj = 999999999, 876 .n_ext_ts = 0, 877 .n_pins = 0, 878 .pps = 0, 879 .adjfreq = ptp_mpipe_adjfreq, 880 .adjtime = ptp_mpipe_adjtime, 881 .gettime64 = ptp_mpipe_gettime, 882 .settime64 = ptp_mpipe_settime, 883 .enable = ptp_mpipe_enable, 884}; 885 886/* Sync mPIPE's timestamp up with Linux system time and register PTP clock. */ 887static void register_ptp_clock(struct net_device *dev, struct mpipe_data *md) 888{ 889 struct timespec ts; 890 891 getnstimeofday(&ts); 892 gxio_mpipe_set_timestamp(&md->context, &ts); 893 894 mutex_init(&md->ptp_lock); 895 md->caps = ptp_mpipe_caps; 896 md->ptp_clock = ptp_clock_register(&md->caps, NULL); 897 if (IS_ERR(md->ptp_clock)) 898 netdev_err(dev, "ptp_clock_register failed %ld\n", 899 PTR_ERR(md->ptp_clock)); 900} 901 902/* Initialize PTP fields in a new device. */ 903static void init_ptp_dev(struct tile_net_priv *priv) 904{ 905 priv->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE; 906 priv->stamp_cfg.tx_type = HWTSTAMP_TX_OFF; 907} 908 909/* Helper functions for "tile_net_update()". */ 910static void enable_ingress_irq(void *irq) 911{ 912 enable_percpu_irq((long)irq, 0); 913} 914 915static void disable_ingress_irq(void *irq) 916{ 917 disable_percpu_irq((long)irq); 918} 919 920/* Helper function for tile_net_open() and tile_net_stop(). 921 * Always called under tile_net_devs_for_channel_mutex. 
922 */ 923static int tile_net_update(struct net_device *dev) 924{ 925 static gxio_mpipe_rules_t rules; /* too big to fit on the stack */ 926 bool saw_channel = false; 927 int instance = mpipe_instance(dev); 928 struct mpipe_data *md = &mpipe_data[instance]; 929 int channel; 930 int rc; 931 int cpu; 932 933 saw_channel = false; 934 gxio_mpipe_rules_init(&rules, &md->context); 935 936 for (channel = 0; channel < TILE_NET_CHANNELS; channel++) { 937 if (md->tile_net_devs_for_channel[channel] == NULL) 938 continue; 939 if (!saw_channel) { 940 saw_channel = true; 941 gxio_mpipe_rules_begin(&rules, md->first_bucket, 942 md->num_buckets, NULL); 943 gxio_mpipe_rules_set_headroom(&rules, NET_IP_ALIGN); 944 } 945 gxio_mpipe_rules_add_channel(&rules, channel); 946 } 947 948 /* NOTE: This can fail if there is no classifier. 949 * ISSUE: Can anything else cause it to fail? 950 */ 951 rc = gxio_mpipe_rules_commit(&rules); 952 if (rc != 0) { 953 netdev_warn(dev, "gxio_mpipe_rules_commit: mpipe[%d] %d\n", 954 instance, rc); 955 return -EIO; 956 } 957 958 /* Update all cpus, sequentially (to protect "netif_napi_add()"). 959 * We use on_each_cpu to handle the IPI mask or unmask. 960 */ 961 if (!saw_channel) 962 on_each_cpu(disable_ingress_irq, 963 (void *)(long)(md->ingress_irq), 1); 964 for_each_online_cpu(cpu) { 965 struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); 966 967 if (!info->mpipe[instance].has_iqueue) 968 continue; 969 if (saw_channel) { 970 if (!info->mpipe[instance].napi_added) { 971 netif_napi_add(dev, &info->mpipe[instance].napi, 972 tile_net_poll, TILE_NET_WEIGHT); 973 info->mpipe[instance].napi_added = true; 974 } 975 if (!info->mpipe[instance].napi_enabled) { 976 napi_enable(&info->mpipe[instance].napi); 977 info->mpipe[instance].napi_enabled = true; 978 } 979 } else { 980 if (info->mpipe[instance].napi_enabled) { 981 napi_disable(&info->mpipe[instance].napi); 982 info->mpipe[instance].napi_enabled = false; 983 } 984 /* FIXME: Drain the iqueue. */ 985 } 986 } 987 if (saw_channel) 988 on_each_cpu(enable_ingress_irq, 989 (void *)(long)(md->ingress_irq), 1); 990 991 /* HACK: Allow packets to flow in the simulator. */ 992 if (saw_channel) 993 sim_enable_mpipe_links(instance, -1); 994 995 return 0; 996} 997 998/* Initialize a buffer stack. */ 999static int create_buffer_stack(struct net_device *dev, 1000 int kind, size_t num_buffers) 1001{ 1002 pte_t hash_pte = pte_set_home((pte_t) { 0 }, PAGE_HOME_HASH); 1003 int instance = mpipe_instance(dev); 1004 struct mpipe_data *md = &mpipe_data[instance]; 1005 size_t needed = gxio_mpipe_calc_buffer_stack_bytes(num_buffers); 1006 int stack_idx = md->first_buffer_stack + kind; 1007 void *va; 1008 int i, rc; 1009 1010 /* Round up to 64KB and then use alloc_pages() so we get the 1011 * required 64KB alignment. 1012 */ 1013 md->buffer_stack_bytes[kind] = 1014 ALIGN(needed, 64 * 1024); 1015 1016 va = alloc_pages_exact(md->buffer_stack_bytes[kind], GFP_KERNEL); 1017 if (va == NULL) { 1018 netdev_err(dev, 1019 "Could not alloc %zd bytes for buffer stack %d\n", 1020 md->buffer_stack_bytes[kind], kind); 1021 return -ENOMEM; 1022 } 1023 1024 /* Initialize the buffer stack. 
*/ 1025 rc = gxio_mpipe_init_buffer_stack(&md->context, stack_idx, 1026 buffer_size_enums[kind], va, 1027 md->buffer_stack_bytes[kind], 0); 1028 if (rc != 0) { 1029 netdev_err(dev, "gxio_mpipe_init_buffer_stack: mpipe[%d] %d\n", 1030 instance, rc); 1031 free_pages_exact(va, md->buffer_stack_bytes[kind]); 1032 return rc; 1033 } 1034 1035 md->buffer_stack_vas[kind] = va; 1036 1037 rc = gxio_mpipe_register_client_memory(&md->context, stack_idx, 1038 hash_pte, 0); 1039 if (rc != 0) { 1040 netdev_err(dev, 1041 "gxio_mpipe_register_client_memory: mpipe[%d] %d\n", 1042 instance, rc); 1043 return rc; 1044 } 1045 1046 /* Provide initial buffers. */ 1047 for (i = 0; i < num_buffers; i++) { 1048 if (!tile_net_provide_buffer(instance, kind)) { 1049 netdev_err(dev, "Cannot allocate initial sk_bufs!\n"); 1050 return -ENOMEM; 1051 } 1052 } 1053 1054 return 0; 1055} 1056 1057/* Allocate and initialize mpipe buffer stacks, and register them in 1058 * the mPIPE TLBs, for small, large, and (possibly) jumbo packet sizes. 1059 * This routine supports tile_net_init_mpipe(), below. 1060 */ 1061static int init_buffer_stacks(struct net_device *dev, 1062 int network_cpus_count) 1063{ 1064 int num_kinds = MAX_KINDS - (jumbo_num == 0); 1065 size_t num_buffers; 1066 int rc; 1067 int instance = mpipe_instance(dev); 1068 struct mpipe_data *md = &mpipe_data[instance]; 1069 1070 /* Allocate the buffer stacks. */ 1071 rc = gxio_mpipe_alloc_buffer_stacks(&md->context, num_kinds, 0, 0); 1072 if (rc < 0) { 1073 netdev_err(dev, 1074 "gxio_mpipe_alloc_buffer_stacks: mpipe[%d] %d\n", 1075 instance, rc); 1076 return rc; 1077 } 1078 md->first_buffer_stack = rc; 1079 1080 /* Enough small/large buffers to (normally) avoid buffer errors. */ 1081 num_buffers = 1082 network_cpus_count * (IQUEUE_ENTRIES + TILE_NET_BATCH); 1083 1084 /* Allocate the small memory stack. */ 1085 if (rc >= 0) 1086 rc = create_buffer_stack(dev, 0, num_buffers); 1087 1088 /* Allocate the large buffer stack. */ 1089 if (rc >= 0) 1090 rc = create_buffer_stack(dev, 1, num_buffers); 1091 1092 /* Allocate the jumbo buffer stack if needed. */ 1093 if (rc >= 0 && jumbo_num != 0) 1094 rc = create_buffer_stack(dev, 2, jumbo_num); 1095 1096 return rc; 1097} 1098 1099/* Allocate per-cpu resources (memory for completions and idescs). 1100 * This routine supports tile_net_init_mpipe(), below. 1101 */ 1102static int alloc_percpu_mpipe_resources(struct net_device *dev, 1103 int cpu, int ring) 1104{ 1105 struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); 1106 int order, i, rc; 1107 int instance = mpipe_instance(dev); 1108 struct mpipe_data *md = &mpipe_data[instance]; 1109 struct page *page; 1110 void *addr; 1111 1112 /* Allocate the "comps". */ 1113 order = get_order(COMPS_SIZE); 1114 page = homecache_alloc_pages(GFP_KERNEL, order, cpu); 1115 if (page == NULL) { 1116 netdev_err(dev, "Failed to alloc %zd bytes comps memory\n", 1117 COMPS_SIZE); 1118 return -ENOMEM; 1119 } 1120 addr = pfn_to_kaddr(page_to_pfn(page)); 1121 memset(addr, 0, COMPS_SIZE); 1122 for (i = 0; i < TILE_NET_CHANNELS; i++) 1123 info->mpipe[instance].comps_for_echannel[i] = 1124 addr + i * sizeof(struct tile_net_comps); 1125 1126 /* If this is a network cpu, create an iqueue. 
*/ 1127 if (cpumask_test_cpu(cpu, &network_cpus_map)) { 1128 order = get_order(NOTIF_RING_SIZE); 1129 page = homecache_alloc_pages(GFP_KERNEL, order, cpu); 1130 if (page == NULL) { 1131 netdev_err(dev, 1132 "Failed to alloc %zd bytes iqueue memory\n", 1133 NOTIF_RING_SIZE); 1134 return -ENOMEM; 1135 } 1136 addr = pfn_to_kaddr(page_to_pfn(page)); 1137 rc = gxio_mpipe_iqueue_init(&info->mpipe[instance].iqueue, 1138 &md->context, ring++, addr, 1139 NOTIF_RING_SIZE, 0); 1140 if (rc < 0) { 1141 netdev_err(dev, 1142 "gxio_mpipe_iqueue_init failed: %d\n", rc); 1143 return rc; 1144 } 1145 info->mpipe[instance].has_iqueue = true; 1146 } 1147 1148 return ring; 1149} 1150 1151/* Initialize NotifGroup and buckets. 1152 * This routine supports tile_net_init_mpipe(), below. 1153 */ 1154static int init_notif_group_and_buckets(struct net_device *dev, 1155 int ring, int network_cpus_count) 1156{ 1157 int group, rc; 1158 int instance = mpipe_instance(dev); 1159 struct mpipe_data *md = &mpipe_data[instance]; 1160 1161 /* Allocate one NotifGroup. */ 1162 rc = gxio_mpipe_alloc_notif_groups(&md->context, 1, 0, 0); 1163 if (rc < 0) { 1164 netdev_err(dev, "gxio_mpipe_alloc_notif_groups: mpipe[%d] %d\n", 1165 instance, rc); 1166 return rc; 1167 } 1168 group = rc; 1169 1170 /* Initialize global num_buckets value. */ 1171 if (network_cpus_count > 4) 1172 md->num_buckets = 256; 1173 else if (network_cpus_count > 1) 1174 md->num_buckets = 16; 1175 1176 /* Allocate some buckets, and set global first_bucket value. */ 1177 rc = gxio_mpipe_alloc_buckets(&md->context, md->num_buckets, 0, 0); 1178 if (rc < 0) { 1179 netdev_err(dev, "gxio_mpipe_alloc_buckets: mpipe[%d] %d\n", 1180 instance, rc); 1181 return rc; 1182 } 1183 md->first_bucket = rc; 1184 1185 /* Init group and buckets. */ 1186 rc = gxio_mpipe_init_notif_group_and_buckets( 1187 &md->context, group, ring, network_cpus_count, 1188 md->first_bucket, md->num_buckets, 1189 GXIO_MPIPE_BUCKET_STICKY_FLOW_LOCALITY); 1190 if (rc != 0) { 1191 netdev_err(dev, "gxio_mpipe_init_notif_group_and_buckets: " 1192 "mpipe[%d] %d\n", instance, rc); 1193 return rc; 1194 } 1195 1196 return 0; 1197} 1198 1199/* Create an irq and register it, then activate the irq and request 1200 * interrupts on all cores. Note that "ingress_irq" being initialized 1201 * is how we know not to call tile_net_init_mpipe() again. 1202 * This routine supports tile_net_init_mpipe(), below. 
1203 */ 1204static int tile_net_setup_interrupts(struct net_device *dev) 1205{ 1206 int cpu, rc, irq; 1207 int instance = mpipe_instance(dev); 1208 struct mpipe_data *md = &mpipe_data[instance]; 1209 1210 irq = md->ingress_irq; 1211 if (irq < 0) { 1212 irq = irq_alloc_hwirq(-1); 1213 if (!irq) { 1214 netdev_err(dev, 1215 "create_irq failed: mpipe[%d] %d\n", 1216 instance, irq); 1217 return irq; 1218 } 1219 tile_irq_activate(irq, TILE_IRQ_PERCPU); 1220 1221 rc = request_irq(irq, tile_net_handle_ingress_irq, 1222 0, "tile_net", (void *)((uint64_t)instance)); 1223 1224 if (rc != 0) { 1225 netdev_err(dev, "request_irq failed: mpipe[%d] %d\n", 1226 instance, rc); 1227 irq_free_hwirq(irq); 1228 return rc; 1229 } 1230 md->ingress_irq = irq; 1231 } 1232 1233 for_each_online_cpu(cpu) { 1234 struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); 1235 if (info->mpipe[instance].has_iqueue) { 1236 gxio_mpipe_request_notif_ring_interrupt(&md->context, 1237 cpu_x(cpu), cpu_y(cpu), KERNEL_PL, irq, 1238 info->mpipe[instance].iqueue.ring); 1239 } 1240 } 1241 1242 return 0; 1243} 1244 1245/* Undo any state set up partially by a failed call to tile_net_init_mpipe. */ 1246static void tile_net_init_mpipe_fail(int instance) 1247{ 1248 int kind, cpu; 1249 struct mpipe_data *md = &mpipe_data[instance]; 1250 1251 /* Do cleanups that require the mpipe context first. */ 1252 for (kind = 0; kind < MAX_KINDS; kind++) { 1253 if (md->buffer_stack_vas[kind] != NULL) { 1254 tile_net_pop_all_buffers(instance, 1255 md->first_buffer_stack + 1256 kind); 1257 } 1258 } 1259 1260 /* Destroy mpipe context so the hardware no longer owns any memory. */ 1261 gxio_mpipe_destroy(&md->context); 1262 1263 for_each_online_cpu(cpu) { 1264 struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); 1265 free_pages( 1266 (unsigned long)( 1267 info->mpipe[instance].comps_for_echannel[0]), 1268 get_order(COMPS_SIZE)); 1269 info->mpipe[instance].comps_for_echannel[0] = NULL; 1270 free_pages((unsigned long)(info->mpipe[instance].iqueue.idescs), 1271 get_order(NOTIF_RING_SIZE)); 1272 info->mpipe[instance].iqueue.idescs = NULL; 1273 } 1274 1275 for (kind = 0; kind < MAX_KINDS; kind++) { 1276 if (md->buffer_stack_vas[kind] != NULL) { 1277 free_pages_exact(md->buffer_stack_vas[kind], 1278 md->buffer_stack_bytes[kind]); 1279 md->buffer_stack_vas[kind] = NULL; 1280 } 1281 } 1282 1283 md->first_buffer_stack = -1; 1284 md->first_bucket = -1; 1285} 1286 1287/* The first time any tilegx network device is opened, we initialize 1288 * the global mpipe state. If this step fails, we fail to open the 1289 * device, but if it succeeds, we never need to do it again, and since 1290 * tile_net can't be unloaded, we never undo it. 1291 * 1292 * Note that some resources in this path (buffer stack indices, 1293 * bindings from init_buffer_stack, etc.) are hypervisor resources 1294 * that are freed implicitly by gxio_mpipe_destroy(). 1295 */ 1296static int tile_net_init_mpipe(struct net_device *dev) 1297{ 1298 int rc; 1299 int cpu; 1300 int first_ring, ring; 1301 int instance = mpipe_instance(dev); 1302 struct mpipe_data *md = &mpipe_data[instance]; 1303 int network_cpus_count = cpumask_weight(&network_cpus_map); 1304 1305 if (!hash_default) { 1306 netdev_err(dev, "Networking requires hash_default!\n"); 1307 return -EIO; 1308 } 1309 1310 rc = gxio_mpipe_init(&md->context, instance); 1311 if (rc != 0) { 1312 netdev_err(dev, "gxio_mpipe_init: mpipe[%d] %d\n", 1313 instance, rc); 1314 return -EIO; 1315 } 1316 1317 /* Set up the buffer stacks. 
*/ 1318 rc = init_buffer_stacks(dev, network_cpus_count); 1319 if (rc != 0) 1320 goto fail; 1321 1322 /* Allocate one NotifRing for each network cpu. */ 1323 rc = gxio_mpipe_alloc_notif_rings(&md->context, 1324 network_cpus_count, 0, 0); 1325 if (rc < 0) { 1326 netdev_err(dev, "gxio_mpipe_alloc_notif_rings failed %d\n", 1327 rc); 1328 goto fail; 1329 } 1330 1331 /* Init NotifRings per-cpu. */ 1332 first_ring = rc; 1333 ring = first_ring; 1334 for_each_online_cpu(cpu) { 1335 rc = alloc_percpu_mpipe_resources(dev, cpu, ring); 1336 if (rc < 0) 1337 goto fail; 1338 ring = rc; 1339 } 1340 1341 /* Initialize NotifGroup and buckets. */ 1342 rc = init_notif_group_and_buckets(dev, first_ring, network_cpus_count); 1343 if (rc != 0) 1344 goto fail; 1345 1346 /* Create and enable interrupts. */ 1347 rc = tile_net_setup_interrupts(dev); 1348 if (rc != 0) 1349 goto fail; 1350 1351 /* Register PTP clock and set mPIPE timestamp, if configured. */ 1352 register_ptp_clock(dev, md); 1353 1354 return 0; 1355 1356fail: 1357 tile_net_init_mpipe_fail(instance); 1358 return rc; 1359} 1360 1361/* Create persistent egress info for a given egress channel. 1362 * Note that this may be shared between, say, "gbe0" and "xgbe0". 1363 * ISSUE: Defer header allocation until TSO is actually needed? 1364 */ 1365static int tile_net_init_egress(struct net_device *dev, int echannel) 1366{ 1367 static int ering = -1; 1368 struct page *headers_page, *edescs_page, *equeue_page; 1369 gxio_mpipe_edesc_t *edescs; 1370 gxio_mpipe_equeue_t *equeue; 1371 unsigned char *headers; 1372 int headers_order, edescs_order, equeue_order; 1373 size_t edescs_size; 1374 int rc = -ENOMEM; 1375 int instance = mpipe_instance(dev); 1376 struct mpipe_data *md = &mpipe_data[instance]; 1377 1378 /* Only initialize once. */ 1379 if (md->egress_for_echannel[echannel].equeue != NULL) 1380 return 0; 1381 1382 /* Allocate memory for the "headers". */ 1383 headers_order = get_order(EQUEUE_ENTRIES * HEADER_BYTES); 1384 headers_page = alloc_pages(GFP_KERNEL, headers_order); 1385 if (headers_page == NULL) { 1386 netdev_warn(dev, 1387 "Could not alloc %zd bytes for TSO headers.\n", 1388 PAGE_SIZE << headers_order); 1389 goto fail; 1390 } 1391 headers = pfn_to_kaddr(page_to_pfn(headers_page)); 1392 1393 /* Allocate memory for the "edescs". */ 1394 edescs_size = EQUEUE_ENTRIES * sizeof(*edescs); 1395 edescs_order = get_order(edescs_size); 1396 edescs_page = alloc_pages(GFP_KERNEL, edescs_order); 1397 if (edescs_page == NULL) { 1398 netdev_warn(dev, 1399 "Could not alloc %zd bytes for eDMA ring.\n", 1400 edescs_size); 1401 goto fail_headers; 1402 } 1403 edescs = pfn_to_kaddr(page_to_pfn(edescs_page)); 1404 1405 /* Allocate memory for the "equeue". */ 1406 equeue_order = get_order(sizeof(*equeue)); 1407 equeue_page = alloc_pages(GFP_KERNEL, equeue_order); 1408 if (equeue_page == NULL) { 1409 netdev_warn(dev, 1410 "Could not alloc %zd bytes for equeue info.\n", 1411 PAGE_SIZE << equeue_order); 1412 goto fail_edescs; 1413 } 1414 equeue = pfn_to_kaddr(page_to_pfn(equeue_page)); 1415 1416 /* Allocate an edma ring (using a one entry "free list"). */ 1417 if (ering < 0) { 1418 rc = gxio_mpipe_alloc_edma_rings(&md->context, 1, 0, 0); 1419 if (rc < 0) { 1420 netdev_warn(dev, "gxio_mpipe_alloc_edma_rings: " 1421 "mpipe[%d] %d\n", instance, rc); 1422 goto fail_equeue; 1423 } 1424 ering = rc; 1425 } 1426 1427 /* Initialize the equeue. 
*/ 1428 rc = gxio_mpipe_equeue_init(equeue, &md->context, ering, echannel, 1429 edescs, edescs_size, 0); 1430 if (rc != 0) { 1431 netdev_err(dev, "gxio_mpipe_equeue_init: mpipe[%d] %d\n", 1432 instance, rc); 1433 goto fail_equeue; 1434 } 1435 1436 /* Don't reuse the ering later. */ 1437 ering = -1; 1438 1439 if (jumbo_num != 0) { 1440 /* Make sure "jumbo" packets can be egressed safely. */ 1441 if (gxio_mpipe_equeue_set_snf_size(equeue, 10368) < 0) { 1442 /* ISSUE: There is no "gxio_mpipe_equeue_destroy()". */ 1443 netdev_warn(dev, "Jumbo packets may not be egressed" 1444 " properly on channel %d\n", echannel); 1445 } 1446 } 1447 1448 /* Done. */ 1449 md->egress_for_echannel[echannel].equeue = equeue; 1450 md->egress_for_echannel[echannel].headers = headers; 1451 return 0; 1452 1453fail_equeue: 1454 __free_pages(equeue_page, equeue_order); 1455 1456fail_edescs: 1457 __free_pages(edescs_page, edescs_order); 1458 1459fail_headers: 1460 __free_pages(headers_page, headers_order); 1461 1462fail: 1463 return rc; 1464} 1465 1466/* Return channel number for a newly-opened link. */ 1467static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link, 1468 const char *link_name) 1469{ 1470 int instance = mpipe_instance(dev); 1471 struct mpipe_data *md = &mpipe_data[instance]; 1472 int rc = gxio_mpipe_link_open(link, &md->context, link_name, 0); 1473 if (rc < 0) { 1474 netdev_err(dev, "Failed to open '%s', mpipe[%d], %d\n", 1475 link_name, instance, rc); 1476 return rc; 1477 } 1478 if (jumbo_num != 0) { 1479 u32 attr = GXIO_MPIPE_LINK_RECEIVE_JUMBO; 1480 rc = gxio_mpipe_link_set_attr(link, attr, 1); 1481 if (rc != 0) { 1482 netdev_err(dev, 1483 "Cannot receive jumbo packets on '%s'\n", 1484 link_name); 1485 gxio_mpipe_link_close(link); 1486 return rc; 1487 } 1488 } 1489 rc = gxio_mpipe_link_channel(link); 1490 if (rc < 0 || rc >= TILE_NET_CHANNELS) { 1491 netdev_err(dev, "gxio_mpipe_link_channel bad value: %d\n", rc); 1492 gxio_mpipe_link_close(link); 1493 return -EINVAL; 1494 } 1495 return rc; 1496} 1497 1498/* Help the kernel activate the given network interface. */ 1499static int tile_net_open(struct net_device *dev) 1500{ 1501 struct tile_net_priv *priv = netdev_priv(dev); 1502 int cpu, rc, instance; 1503 1504 mutex_lock(&tile_net_devs_for_channel_mutex); 1505 1506 /* Get the instance info. */ 1507 rc = gxio_mpipe_link_instance(dev->name); 1508 if (rc < 0 || rc >= NR_MPIPE_MAX) { 1509 mutex_unlock(&tile_net_devs_for_channel_mutex); 1510 return -EIO; 1511 } 1512 1513 priv->instance = rc; 1514 instance = rc; 1515 if (!mpipe_data[rc].context.mmio_fast_base) { 1516 /* Do one-time initialization per instance the first time 1517 * any device is opened. 1518 */ 1519 rc = tile_net_init_mpipe(dev); 1520 if (rc != 0) 1521 goto fail; 1522 } 1523 1524 /* Determine if this is the "loopify" device. */ 1525 if (unlikely((loopify_link_name != NULL) && 1526 !strcmp(dev->name, loopify_link_name))) { 1527 rc = tile_net_link_open(dev, &priv->link, "loop0"); 1528 if (rc < 0) 1529 goto fail; 1530 priv->channel = rc; 1531 rc = tile_net_link_open(dev, &priv->loopify_link, "loop1"); 1532 if (rc < 0) 1533 goto fail; 1534 priv->loopify_channel = rc; 1535 priv->echannel = rc; 1536 } else { 1537 rc = tile_net_link_open(dev, &priv->link, dev->name); 1538 if (rc < 0) 1539 goto fail; 1540 priv->channel = rc; 1541 priv->echannel = rc; 1542 } 1543 1544 /* Initialize egress info (if needed). Once ever, per echannel. 
*/ 1545 rc = tile_net_init_egress(dev, priv->echannel); 1546 if (rc != 0) 1547 goto fail; 1548 1549 mpipe_data[instance].tile_net_devs_for_channel[priv->channel] = dev; 1550 1551 rc = tile_net_update(dev); 1552 if (rc != 0) 1553 goto fail; 1554 1555 mutex_unlock(&tile_net_devs_for_channel_mutex); 1556 1557 /* Initialize the transmit wake timer for this device for each cpu. */ 1558 for_each_online_cpu(cpu) { 1559 struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); 1560 struct tile_net_tx_wake *tx_wake = 1561 &info->mpipe[instance].tx_wake[priv->echannel]; 1562 1563 hrtimer_init(&tx_wake->timer, CLOCK_MONOTONIC, 1564 HRTIMER_MODE_REL); 1565 tx_wake->tx_queue_idx = cpu; 1566 tx_wake->timer.function = tile_net_handle_tx_wake_timer; 1567 tx_wake->dev = dev; 1568 } 1569 1570 for_each_online_cpu(cpu) 1571 netif_start_subqueue(dev, cpu); 1572 netif_carrier_on(dev); 1573 return 0; 1574 1575fail: 1576 if (priv->loopify_channel >= 0) { 1577 if (gxio_mpipe_link_close(&priv->loopify_link) != 0) 1578 netdev_warn(dev, "Failed to close loopify link!\n"); 1579 priv->loopify_channel = -1; 1580 } 1581 if (priv->channel >= 0) { 1582 if (gxio_mpipe_link_close(&priv->link) != 0) 1583 netdev_warn(dev, "Failed to close link!\n"); 1584 priv->channel = -1; 1585 } 1586 priv->echannel = -1; 1587 mpipe_data[instance].tile_net_devs_for_channel[priv->channel] = NULL; 1588 mutex_unlock(&tile_net_devs_for_channel_mutex); 1589 1590 /* Don't return raw gxio error codes to generic Linux. */ 1591 return (rc > -512) ? rc : -EIO; 1592} 1593 1594/* Help the kernel deactivate the given network interface. */ 1595static int tile_net_stop(struct net_device *dev) 1596{ 1597 struct tile_net_priv *priv = netdev_priv(dev); 1598 int cpu; 1599 int instance = priv->instance; 1600 struct mpipe_data *md = &mpipe_data[instance]; 1601 1602 for_each_online_cpu(cpu) { 1603 struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); 1604 struct tile_net_tx_wake *tx_wake = 1605 &info->mpipe[instance].tx_wake[priv->echannel]; 1606 1607 hrtimer_cancel(&tx_wake->timer); 1608 netif_stop_subqueue(dev, cpu); 1609 } 1610 1611 mutex_lock(&tile_net_devs_for_channel_mutex); 1612 md->tile_net_devs_for_channel[priv->channel] = NULL; 1613 (void)tile_net_update(dev); 1614 if (priv->loopify_channel >= 0) { 1615 if (gxio_mpipe_link_close(&priv->loopify_link) != 0) 1616 netdev_warn(dev, "Failed to close loopify link!\n"); 1617 priv->loopify_channel = -1; 1618 } 1619 if (priv->channel >= 0) { 1620 if (gxio_mpipe_link_close(&priv->link) != 0) 1621 netdev_warn(dev, "Failed to close link!\n"); 1622 priv->channel = -1; 1623 } 1624 priv->echannel = -1; 1625 mutex_unlock(&tile_net_devs_for_channel_mutex); 1626 1627 return 0; 1628} 1629 1630/* Determine the VA for a fragment. */ 1631static inline void *tile_net_frag_buf(skb_frag_t *f) 1632{ 1633 unsigned long pfn = page_to_pfn(skb_frag_page(f)); 1634 return pfn_to_kaddr(pfn) + f->page_offset; 1635} 1636 1637/* Acquire a completion entry and an egress slot, or if we can't, 1638 * stop the queue and schedule the tx_wake timer. 1639 */ 1640static s64 tile_net_equeue_try_reserve(struct net_device *dev, 1641 int tx_queue_idx, 1642 struct tile_net_comps *comps, 1643 gxio_mpipe_equeue_t *equeue, 1644 int num_edescs) 1645{ 1646 /* Try to acquire a completion entry. */ 1647 if (comps->comp_next - comps->comp_last < TILE_NET_MAX_COMPS - 1 || 1648 tile_net_free_comps(equeue, comps, 32, false) != 0) { 1649 1650 /* Try to acquire an egress slot. 
*/ 1651 s64 slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs); 1652 if (slot >= 0) 1653 return slot; 1654 1655 /* Freeing some completions gives the equeue time to drain. */ 1656 tile_net_free_comps(equeue, comps, TILE_NET_MAX_COMPS, false); 1657 1658 slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs); 1659 if (slot >= 0) 1660 return slot; 1661 } 1662 1663 /* Still nothing; give up and stop the queue for a short while. */ 1664 netif_stop_subqueue(dev, tx_queue_idx); 1665 tile_net_schedule_tx_wake_timer(dev, tx_queue_idx); 1666 return -1; 1667} 1668 1669/* Determine how many edesc's are needed for TSO. 1670 * 1671 * Sometimes, if "sendfile()" requires copying, we will be called with 1672 * "data" containing the header and payload, with "frags" being empty. 1673 * Sometimes, for example when using NFS over TCP, a single segment can 1674 * span 3 fragments. This requires special care. 1675 */ 1676static int tso_count_edescs(struct sk_buff *skb) 1677{ 1678 struct skb_shared_info *sh = skb_shinfo(skb); 1679 unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 1680 unsigned int data_len = skb->len - sh_len; 1681 unsigned int p_len = sh->gso_size; 1682 long f_id = -1; /* id of the current fragment */ 1683 long f_size = skb_headlen(skb) - sh_len; /* current fragment size */ 1684 long f_used = 0; /* bytes used from the current fragment */ 1685 long n; /* size of the current piece of payload */ 1686 int num_edescs = 0; 1687 int segment; 1688 1689 for (segment = 0; segment < sh->gso_segs; segment++) { 1690 1691 unsigned int p_used = 0; 1692 1693 /* One edesc for header and for each piece of the payload. */ 1694 for (num_edescs++; p_used < p_len; num_edescs++) { 1695 1696 /* Advance as needed. */ 1697 while (f_used >= f_size) { 1698 f_id++; 1699 f_size = skb_frag_size(&sh->frags[f_id]); 1700 f_used = 0; 1701 } 1702 1703 /* Use bytes from the current fragment. */ 1704 n = p_len - p_used; 1705 if (n > f_size - f_used) 1706 n = f_size - f_used; 1707 f_used += n; 1708 p_used += n; 1709 } 1710 1711 /* The last segment may be less than gso_size. */ 1712 data_len -= p_len; 1713 if (data_len < p_len) 1714 p_len = data_len; 1715 } 1716 1717 return num_edescs; 1718} 1719 1720/* Prepare modified copies of the skbuff headers. */ 1721static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers, 1722 s64 slot) 1723{ 1724 struct skb_shared_info *sh = skb_shinfo(skb); 1725 struct iphdr *ih; 1726 struct ipv6hdr *ih6; 1727 struct tcphdr *th; 1728 unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 1729 unsigned int data_len = skb->len - sh_len; 1730 unsigned char *data = skb->data; 1731 unsigned int ih_off, th_off, p_len; 1732 unsigned int isum_seed, tsum_seed, seq; 1733 unsigned int uninitialized_var(id); 1734 int is_ipv6; 1735 long f_id = -1; /* id of the current fragment */ 1736 long f_size = skb_headlen(skb) - sh_len; /* current fragment size */ 1737 long f_used = 0; /* bytes used from the current fragment */ 1738 long n; /* size of the current piece of payload */ 1739 int segment; 1740 1741 /* Locate original headers and compute various lengths. 
*/ 1742 is_ipv6 = skb_is_gso_v6(skb); 1743 if (is_ipv6) { 1744 ih6 = ipv6_hdr(skb); 1745 ih_off = skb_network_offset(skb); 1746 } else { 1747 ih = ip_hdr(skb); 1748 ih_off = skb_network_offset(skb); 1749 isum_seed = ((0xFFFF - ih->check) + 1750 (0xFFFF - ih->tot_len) + 1751 (0xFFFF - ih->id)); 1752 id = ntohs(ih->id); 1753 } 1754 1755 th = tcp_hdr(skb); 1756 th_off = skb_transport_offset(skb); 1757 p_len = sh->gso_size; 1758 1759 tsum_seed = th->check + (0xFFFF ^ htons(skb->len)); 1760 seq = ntohl(th->seq); 1761 1762 /* Prepare all the headers. */ 1763 for (segment = 0; segment < sh->gso_segs; segment++) { 1764 unsigned char *buf; 1765 unsigned int p_used = 0; 1766 1767 /* Copy to the header memory for this segment. */ 1768 buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES + 1769 NET_IP_ALIGN; 1770 memcpy(buf, data, sh_len); 1771 1772 /* Update copied ip header. */ 1773 if (is_ipv6) { 1774 ih6 = (struct ipv6hdr *)(buf + ih_off); 1775 ih6->payload_len = htons(sh_len + p_len - ih_off - 1776 sizeof(*ih6)); 1777 } else { 1778 ih = (struct iphdr *)(buf + ih_off); 1779 ih->tot_len = htons(sh_len + p_len - ih_off); 1780 ih->id = htons(id++); 1781 ih->check = csum_long(isum_seed + ih->tot_len + 1782 ih->id) ^ 0xffff; 1783 } 1784 1785 /* Update copied tcp header. */ 1786 th = (struct tcphdr *)(buf + th_off); 1787 th->seq = htonl(seq); 1788 th->check = csum_long(tsum_seed + htons(sh_len + p_len)); 1789 if (segment != sh->gso_segs - 1) { 1790 th->fin = 0; 1791 th->psh = 0; 1792 } 1793 1794 /* Skip past the header. */ 1795 slot++; 1796 1797 /* Skip past the payload. */ 1798 while (p_used < p_len) { 1799 1800 /* Advance as needed. */ 1801 while (f_used >= f_size) { 1802 f_id++; 1803 f_size = skb_frag_size(&sh->frags[f_id]); 1804 f_used = 0; 1805 } 1806 1807 /* Use bytes from the current fragment. */ 1808 n = p_len - p_used; 1809 if (n > f_size - f_used) 1810 n = f_size - f_used; 1811 f_used += n; 1812 p_used += n; 1813 1814 slot++; 1815 } 1816 1817 seq += p_len; 1818 1819 /* The last segment may be less than gso_size. */ 1820 data_len -= p_len; 1821 if (data_len < p_len) 1822 p_len = data_len; 1823 } 1824 1825 /* Flush the headers so they are ready for hardware DMA. */ 1826 wmb(); 1827} 1828 1829/* Pass all the data to mpipe for egress. */ 1830static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue, 1831 struct sk_buff *skb, unsigned char *headers, s64 slot) 1832{ 1833 struct skb_shared_info *sh = skb_shinfo(skb); 1834 int instance = mpipe_instance(dev); 1835 struct mpipe_data *md = &mpipe_data[instance]; 1836 unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 1837 unsigned int data_len = skb->len - sh_len; 1838 unsigned int p_len = sh->gso_size; 1839 gxio_mpipe_edesc_t edesc_head = { { 0 } }; 1840 gxio_mpipe_edesc_t edesc_body = { { 0 } }; 1841 long f_id = -1; /* id of the current fragment */ 1842 long f_size = skb_headlen(skb) - sh_len; /* current fragment size */ 1843 long f_used = 0; /* bytes used from the current fragment */ 1844 void *f_data = skb->data + sh_len; 1845 long n; /* size of the current piece of payload */ 1846 unsigned long tx_packets = 0, tx_bytes = 0; 1847 unsigned int csum_start; 1848 int segment; 1849 1850 /* Prepare to egress the headers: set up header edesc. */ 1851 csum_start = skb_checksum_start_offset(skb); 1852 edesc_head.csum = 1; 1853 edesc_head.csum_start = csum_start; 1854 edesc_head.csum_dest = csum_start + skb->csum_offset; 1855 edesc_head.xfer_size = sh_len; 1856 1857 /* This is only used to specify the TLB. 
/* Pass all the data to mpipe for egress. */
static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
                       struct sk_buff *skb, unsigned char *headers, s64 slot)
{
        struct skb_shared_info *sh = skb_shinfo(skb);
        int instance = mpipe_instance(dev);
        struct mpipe_data *md = &mpipe_data[instance];
        unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
        unsigned int data_len = skb->len - sh_len;
        unsigned int p_len = sh->gso_size;
        gxio_mpipe_edesc_t edesc_head = { { 0 } };
        gxio_mpipe_edesc_t edesc_body = { { 0 } };
        long f_id = -1;    /* id of the current fragment */
        long f_size = skb_headlen(skb) - sh_len;  /* current fragment size */
        long f_used = 0;   /* bytes used from the current fragment */
        void *f_data = skb->data + sh_len;
        long n;            /* size of the current piece of payload */
        unsigned long tx_packets = 0, tx_bytes = 0;
        unsigned int csum_start;
        int segment;

        /* Prepare to egress the headers: set up header edesc. */
        csum_start = skb_checksum_start_offset(skb);
        edesc_head.csum = 1;
        edesc_head.csum_start = csum_start;
        edesc_head.csum_dest = csum_start + skb->csum_offset;
        edesc_head.xfer_size = sh_len;

        /* This is only used to specify the TLB. */
        edesc_head.stack_idx = md->first_buffer_stack;
        edesc_body.stack_idx = md->first_buffer_stack;

        /* Egress all the edescs. */
        for (segment = 0; segment < sh->gso_segs; segment++) {
                unsigned char *buf;
                unsigned int p_used = 0;

                /* Egress the header. */
                buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES +
                        NET_IP_ALIGN;
                edesc_head.va = va_to_tile_io_addr(buf);
                gxio_mpipe_equeue_put_at(equeue, edesc_head, slot);
                slot++;

                /* Egress the payload. */
                while (p_used < p_len) {
                        void *va;

                        /* Advance as needed. */
                        while (f_used >= f_size) {
                                f_id++;
                                f_size = skb_frag_size(&sh->frags[f_id]);
                                f_data = tile_net_frag_buf(&sh->frags[f_id]);
                                f_used = 0;
                        }

                        va = f_data + f_used;

                        /* Use bytes from the current fragment. */
                        n = p_len - p_used;
                        if (n > f_size - f_used)
                                n = f_size - f_used;
                        f_used += n;
                        p_used += n;

                        /* Egress a piece of the payload. */
                        edesc_body.va = va_to_tile_io_addr(va);
                        edesc_body.xfer_size = n;
                        edesc_body.bound = !(p_used < p_len);
                        gxio_mpipe_equeue_put_at(equeue, edesc_body, slot);
                        slot++;
                }

                tx_packets++;
                tx_bytes += sh_len + p_len;

                /* The last segment may be less than gso_size. */
                data_len -= p_len;
                if (data_len < p_len)
                        p_len = data_len;
        }

        /* Update stats. */
        tile_net_stats_add(tx_packets, &dev->stats.tx_packets);
        tile_net_stats_add(tx_bytes, &dev->stats.tx_bytes);
}
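/* Editorial illustration of the resulting equeue layout, using the same
 * hypothetical skb as the example after tso_count_edescs() (gso_size
 * 1448, 1000 payload bytes in the linear data, one 1896-byte fragment):
 * slot + 0 holds the segment-0 header, slot + 1 holds 1000 payload bytes,
 * slot + 2 holds 448 payload bytes with "bound" set, slot + 3 holds the
 * segment-1 header, and slot + 4 holds 1448 payload bytes with "bound"
 * set.  The caller, tile_net_tx_tso(), then records a single completion
 * at slot + num_edescs - 1.
 */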
/* Do "TSO" handling for egress.
 *
 * Normally drivers set NETIF_F_TSO only to support hardware TSO;
 * otherwise the stack uses scatter-gather to implement GSO in software.
 * In our testing, enabling GSO support (via NETIF_F_SG) drops network
 * performance to around 7.5 Gbps on the 10G interfaces, although it
 * also drops cpu utilization way down, to under 8%.  But implementing
 * "TSO" in the driver brings performance back up to line rate, while
 * dropping cpu usage even further, to less than 4%.  In practice,
 * profiling of GSO shows that skb_segment() is what causes the
 * performance overheads; we benefit in the driver from using
 * preallocated memory to duplicate the TCP/IP headers.
 */
static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
{
        struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
        struct tile_net_priv *priv = netdev_priv(dev);
        int channel = priv->echannel;
        int instance = priv->instance;
        struct mpipe_data *md = &mpipe_data[instance];
        struct tile_net_egress *egress = &md->egress_for_echannel[channel];
        struct tile_net_comps *comps =
                info->mpipe[instance].comps_for_echannel[channel];
        gxio_mpipe_equeue_t *equeue = egress->equeue;
        unsigned long irqflags;
        int num_edescs;
        s64 slot;

        /* Determine how many mpipe edesc's are needed. */
        num_edescs = tso_count_edescs(skb);

        local_irq_save(irqflags);

        /* Try to acquire a completion entry and an egress slot. */
        slot = tile_net_equeue_try_reserve(dev, skb->queue_mapping, comps,
                                           equeue, num_edescs);
        if (slot < 0) {
                local_irq_restore(irqflags);
                return NETDEV_TX_BUSY;
        }

        /* Set up copies of header data properly. */
        tso_headers_prepare(skb, egress->headers, slot);

        /* Actually pass the data to the network hardware. */
        tso_egress(dev, equeue, skb, egress->headers, slot);

        /* Add a completion record. */
        add_comp(equeue, comps, slot + num_edescs - 1, skb);

        local_irq_restore(irqflags);

        /* Make sure the egress timer is scheduled. */
        tile_net_schedule_egress_timer();

        return NETDEV_TX_OK;
}

/* Analyze the body and frags for a transmit request. */
static unsigned int tile_net_tx_frags(struct frag *frags,
                                      struct sk_buff *skb,
                                      void *b_data, unsigned int b_len)
{
        unsigned int i, n = 0;

        struct skb_shared_info *sh = skb_shinfo(skb);

        if (b_len != 0) {
                frags[n].buf = b_data;
                frags[n++].length = b_len;
        }

        for (i = 0; i < sh->nr_frags; i++) {
                skb_frag_t *f = &sh->frags[i];
                frags[n].buf = tile_net_frag_buf(f);
                frags[n++].length = skb_frag_size(f);
        }

        return n;
}
/* Help the kernel transmit a packet. */
static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
{
        struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
        struct tile_net_priv *priv = netdev_priv(dev);
        int instance = priv->instance;
        struct mpipe_data *md = &mpipe_data[instance];
        struct tile_net_egress *egress =
                &md->egress_for_echannel[priv->echannel];
        gxio_mpipe_equeue_t *equeue = egress->equeue;
        struct tile_net_comps *comps =
                info->mpipe[instance].comps_for_echannel[priv->echannel];
        unsigned int len = skb->len;
        unsigned char *data = skb->data;
        unsigned int num_edescs;
        struct frag frags[MAX_FRAGS];
        gxio_mpipe_edesc_t edescs[MAX_FRAGS];
        unsigned long irqflags;
        gxio_mpipe_edesc_t edesc = { { 0 } };
        unsigned int i;
        s64 slot;

        if (skb_is_gso(skb))
                return tile_net_tx_tso(skb, dev);

        num_edescs = tile_net_tx_frags(frags, skb, data, skb_headlen(skb));

        /* This is only used to specify the TLB. */
        edesc.stack_idx = md->first_buffer_stack;

        /* Prepare the edescs. */
        for (i = 0; i < num_edescs; i++) {
                edesc.xfer_size = frags[i].length;
                edesc.va = va_to_tile_io_addr(frags[i].buf);
                edescs[i] = edesc;
        }

        /* Mark the final edesc. */
        edescs[num_edescs - 1].bound = 1;

        /* Add checksum info to the initial edesc, if needed. */
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                unsigned int csum_start = skb_checksum_start_offset(skb);
                edescs[0].csum = 1;
                edescs[0].csum_start = csum_start;
                edescs[0].csum_dest = csum_start + skb->csum_offset;
        }

        local_irq_save(irqflags);

        /* Try to acquire a completion entry and an egress slot. */
        slot = tile_net_equeue_try_reserve(dev, skb->queue_mapping, comps,
                                           equeue, num_edescs);
        if (slot < 0) {
                local_irq_restore(irqflags);
                return NETDEV_TX_BUSY;
        }

        for (i = 0; i < num_edescs; i++)
                gxio_mpipe_equeue_put_at(equeue, edescs[i], slot++);

        /* Store TX timestamp if needed. */
        tile_tx_timestamp(skb, instance);

        /* Add a completion record. */
        add_comp(equeue, comps, slot - 1, skb);

        /* NOTE: Use ETH_ZLEN for short packets (e.g. 42 < 60). */
        tile_net_stats_add(1, &dev->stats.tx_packets);
        tile_net_stats_add(max_t(unsigned int, len, ETH_ZLEN),
                           &dev->stats.tx_bytes);

        local_irq_restore(irqflags);

        /* Make sure the egress timer is scheduled. */
        tile_net_schedule_egress_timer();

        return NETDEV_TX_OK;
}

/* Return subqueue id on this core (one per core). */
static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb,
                                 void *accel_priv,
                                 select_queue_fallback_t fallback)
{
        return smp_processor_id();
}

/* Deal with a transmit timeout. */
static void tile_net_tx_timeout(struct net_device *dev)
{
        int cpu;

        for_each_online_cpu(cpu)
                netif_wake_subqueue(dev, cpu);
}

/* Ioctl commands. */
static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        if (cmd == SIOCSHWTSTAMP)
                return tile_hwtstamp_set(dev, rq);
        if (cmd == SIOCGHWTSTAMP)
                return tile_hwtstamp_get(dev, rq);

        return -EOPNOTSUPP;
}

/* Change the MTU. */
static int tile_net_change_mtu(struct net_device *dev, int new_mtu)
{
        if (new_mtu < 68)
                return -EINVAL;
        if (new_mtu > ((jumbo_num != 0) ? 9000 : 1500))
                return -EINVAL;
        dev->mtu = new_mtu;
        return 0;
}

/* Change the Ethernet address of the NIC.
 *
 * The hypervisor driver does not support changing MAC address.  However,
 * the hardware does not do anything with the MAC address, so the address
 * which gets used on outgoing packets, and which is accepted on incoming
 * packets, is completely up to us.
 *
 * Returns 0 on success, negative on failure.
 */
static int tile_net_set_mac_address(struct net_device *dev, void *p)
{
        struct sockaddr *addr = p;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EINVAL;
        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
        return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts.  It's not called while
 * the interrupt routine is executing.
 */
static void tile_net_netpoll(struct net_device *dev)
{
        int instance = mpipe_instance(dev);
        struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
        struct mpipe_data *md = &mpipe_data[instance];

        disable_percpu_irq(md->ingress_irq);
        napi_schedule(&info->mpipe[instance].napi);
        enable_percpu_irq(md->ingress_irq, 0);
}
#endif

static const struct net_device_ops tile_net_ops = {
        .ndo_open = tile_net_open,
        .ndo_stop = tile_net_stop,
        .ndo_start_xmit = tile_net_tx,
        .ndo_select_queue = tile_net_select_queue,
        .ndo_do_ioctl = tile_net_ioctl,
        .ndo_change_mtu = tile_net_change_mtu,
        .ndo_tx_timeout = tile_net_tx_timeout,
        .ndo_set_mac_address = tile_net_set_mac_address,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = tile_net_netpoll,
#endif
};
/* The setup function.
 *
 * This uses ether_setup() to assign various fields in dev, including
 * setting IFF_BROADCAST and IFF_MULTICAST, then sets some extra fields.
 */
static void tile_net_setup(struct net_device *dev)
{
        netdev_features_t features = 0;

        ether_setup(dev);
        dev->netdev_ops = &tile_net_ops;
        dev->watchdog_timeo = TILE_NET_TIMEOUT;
        dev->mtu = 1500;

        features |= NETIF_F_HW_CSUM;
        features |= NETIF_F_SG;
        features |= NETIF_F_TSO;
        features |= NETIF_F_TSO6;

        dev->hw_features |= features;
        dev->vlan_features |= features;
        dev->features |= features;
}

/* Allocate the device structure, register the device, and obtain the
 * MAC address from the hypervisor.
 */
static void tile_net_dev_init(const char *name, const uint8_t *mac)
{
        int ret;
        struct net_device *dev;
        struct tile_net_priv *priv;

        /* HACK: Ignore "loop" links. */
        if (strncmp(name, "loop", 4) == 0)
                return;

        /* Allocate the device structure.  Normally, "name" is a
         * template, instantiated by register_netdev(), but not for us.
         */
        dev = alloc_netdev_mqs(sizeof(*priv), name, NET_NAME_UNKNOWN,
                               tile_net_setup, NR_CPUS, 1);
        if (!dev) {
                pr_err("alloc_netdev_mqs(%s) failed\n", name);
                return;
        }

        /* Initialize "priv". */
        priv = netdev_priv(dev);
        priv->dev = dev;
        priv->channel = -1;
        priv->loopify_channel = -1;
        priv->echannel = -1;
        init_ptp_dev(priv);

        /* Get the MAC address and set it in the device struct; this must
         * be done before the device is opened.  If the MAC is all zeroes,
         * we use a random address, since we're probably on the simulator.
         */
        if (!is_zero_ether_addr(mac))
                ether_addr_copy(dev->dev_addr, mac);
        else
                eth_hw_addr_random(dev);

        /* Register the network device. */
        ret = register_netdev(dev);
        if (ret) {
                netdev_err(dev, "register_netdev failed %d\n", ret);
                free_netdev(dev);
                return;
        }
}

/* Per-cpu module initialization. */
static void tile_net_init_module_percpu(void *unused)
{
        struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
        int my_cpu = smp_processor_id();
        int instance;

        for (instance = 0; instance < NR_MPIPE_MAX; instance++) {
                info->mpipe[instance].has_iqueue = false;
                info->mpipe[instance].instance = instance;
        }
        info->my_cpu = my_cpu;

        /* Initialize the egress timer. */
        hrtimer_init(&info->egress_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        info->egress_timer.function = tile_net_handle_egress_timer;
}

/* Module initialization. */
static int __init tile_net_init_module(void)
{
        int i;
        char name[GXIO_MPIPE_LINK_NAME_LEN];
        uint8_t mac[6];

        pr_info("Tilera Network Driver\n");

        BUILD_BUG_ON(NR_MPIPE_MAX != 2);

        mutex_init(&tile_net_devs_for_channel_mutex);

        /* Initialize each CPU. */
        on_each_cpu(tile_net_init_module_percpu, NULL, 1);

        /* Find out what devices we have, and initialize them. */
        for (i = 0; gxio_mpipe_link_enumerate_mac(i, name, mac) >= 0; i++)
                tile_net_dev_init(name, mac);

        if (!network_cpus_init())
                cpumask_and(&network_cpus_map, housekeeping_cpumask(),
                            cpu_online_mask);

        return 0;
}

module_init(tile_net_init_module);