Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v5.7, 2987 lines, 90 kB
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/crc32.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "rx_common.h"
#include "nic.h"
#include "farch_regs.h"
#include "sriov.h"
#include "siena_sriov.h"
#include "io.h"
#include "workarounds.h"

/* Falcon-architecture (SFC9000-family) support */

/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

/* This is set to 16 for a good reason. In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding). This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3

/* If EFX_MAX_INT_ERRORS internal errors occur within
 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EFX_INT_ERROR_EXPIRE 3600
#define EFX_MAX_INT_ERRORS 5

/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4

/* Driver generated events */
#define _EFX_CHANNEL_MAGIC_TEST		0x000101
#define _EFX_CHANNEL_MAGIC_FILL		0x000102
#define _EFX_CHANNEL_MAGIC_RX_DRAIN	0x000103
#define _EFX_CHANNEL_MAGIC_TX_DRAIN	0x000104

#define _EFX_CHANNEL_MAGIC(_code, _data)	((_code) << 8 | (_data))
#define _EFX_CHANNEL_MAGIC_CODE(_magic)		((_magic) >> 8)

#define EFX_CHANNEL_MAGIC_TEST(_channel)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
#define EFX_CHANNEL_MAGIC_FILL(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN,			\
			   (_tx_queue)->queue)
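
/* For example, EFX_CHANNEL_MAGIC_TEST() on channel 3 expands to
 * (0x000101 << 8) | 3 == 0x00010103: the upper bits carry the
 * _EFX_CHANNEL_MAGIC_* code (recovered by _EFX_CHANNEL_MAGIC_CODE())
 * and the low 8 bits carry the channel or queue index.
 */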

static void efx_farch_magic_event(struct efx_channel *channel, u32 magic);

/**************************************************************************
 *
 * Hardware access
 *
 **************************************************************************/

static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
				     unsigned int index)
{
	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}

static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
				     const efx_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}

int efx_farch_test_registers(struct efx_nic *efx,
			     const struct efx_farch_register_test *regs,
			     size_t n_regs)
{
	unsigned address = 0;
	int i, j;
	efx_oword_t mask, imask, original, reg, buf;

	for (i = 0; i < n_regs; ++i) {
		address = regs[i].address;
		mask = imask = regs[i].mask;
		EFX_INVERT_OWORD(imask);

		efx_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EFX_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EFX_AND_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 1);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EFX_OR_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 0);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		efx_writeo(efx, &original, address);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev,
		  "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
		  " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
		  EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
	return -EIO;
}

/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/

/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * efx_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_qword_t buf_desc;
	unsigned int index;
	dma_addr_t dma_addr;
	int i;

	EFX_WARN_ON_PARANOID(!buffer->buf.addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->buf.dma_addr + (i * EFX_BUF_SIZE);
		netif_dbg(efx, probe, efx->net_dev,
			  "mapping special buffer %d at %llx\n",
			  index, (unsigned long long)dma_addr);
		EFX_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		efx_write_buf_tbl(efx, &buf_desc, index);
	}
}

/* Unmaps a buffer and clears the buffer table entries */
static void
efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
		  buffer->index, buffer->index + buffer->entries - 1);

	EFX_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}
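
/* Illustrative sizing note: each buffer table entry maps one 4KB
 * (EFX_BUF_SIZE) region - the FBUF field above holds dma_addr >> 12 -
 * so a 1024-entry ring of 8-byte efx_qword_t descriptors (8KB in all)
 * occupies two consecutive buffer table entries.
 */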

/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range. It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
static int efx_alloc_special_buffer(struct efx_nic *efx,
				    struct efx_special_buffer *buffer,
				    unsigned int len)
{
#ifdef CONFIG_SFC_SRIOV
	struct siena_nic_data *nic_data = efx->nic_data;
#endif
	len = ALIGN(len, EFX_BUF_SIZE);

	if (efx_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
		return -ENOMEM;
	buffer->entries = len / EFX_BUF_SIZE;
	BUG_ON(buffer->buf.dma_addr & (EFX_BUF_SIZE - 1));

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;
#ifdef CONFIG_SFC_SRIOV
	BUG_ON(efx_siena_sriov_enabled(efx) &&
	       nic_data->vf_buftbl_base < efx->next_buffer_table);
#endif

	netif_dbg(efx, probe, efx->net_dev,
		  "allocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->buf.dma_addr, len,
		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));

	return 0;
}

static void
efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	if (!buffer->buf.addr)
		return;

	netif_dbg(efx, hw, efx->net_dev,
		  "deallocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->buf.dma_addr, buffer->buf.len,
		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));

	efx_nic_free_buffer(efx, &buffer->buf);
	buffer->entries = 0;
}

/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_farch_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}

/* Write pointer and first descriptor for TX descriptor ring */
static inline void efx_farch_push_tx_desc(struct efx_tx_queue *tx_queue,
					  const efx_qword_t *txd)
{
	unsigned write_ptr;
	efx_oword_t reg;

	BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
	BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
			     FRF_AZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	efx_writeo_page(tx_queue->efx, &reg,
			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
}
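
/* Note: efx_farch_notify_tx_desc() rings the doorbell with just the new
 * write pointer, leaving the NIC to fetch the descriptors from host memory,
 * whereas efx_farch_push_tx_desc() also carries the first descriptor in the
 * doorbell write (FRF_AZ_TX_DESC_PUSH_CMD) so the NIC can start on it
 * without a separate fetch. efx_farch_tx_write() below chooses between them
 * via efx_nic_may_push_tx_desc().
 */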

/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;
	unsigned old_write_count = tx_queue->write_count;

	tx_queue->xmit_more_available = false;
	if (unlikely(tx_queue->write_count == tx_queue->insert_count))
		return;

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		EFX_WARN_ON_ONCE_PARANOID(buffer->flags & EFX_TX_BUF_OPTION);

		/* Create TX descriptor ring entry */
		BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
		EFX_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT,
				     buffer->flags & EFX_TX_BUF_CONT,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = efx_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		efx_farch_push_tx_desc(tx_queue, txd);
		++tx_queue->pushes;
	} else {
		efx_farch_notify_tx_desc(tx_queue);
	}
}

unsigned int efx_farch_tx_limit_len(struct efx_tx_queue *tx_queue,
				    dma_addr_t dma_addr, unsigned int len)
{
	/* Don't cross 4K boundaries with descriptors. */
	unsigned int limit = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1;

	len = min(limit, len);

	return len;
}


/* Allocate hardware resources for a TX queue */
int efx_farch_tx_probe(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned entries;

	entries = tx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &tx_queue->txd,
					entries * sizeof(efx_qword_t));
}

void efx_farch_tx_init(struct efx_tx_queue *tx_queue)
{
	int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t reg;

	/* Pin TX descriptor ring */
	efx_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EFX_POPULATE_OWORD_10(reg,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
	EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS, !csum);

	efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	EFX_POPULATE_OWORD_1(reg,
			     FRF_BZ_TX_PACE,
			     (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
413 FFE_BZ_TX_PACE_OFF : 414 FFE_BZ_TX_PACE_RESERVED); 415 efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL, tx_queue->queue); 416} 417 418static void efx_farch_flush_tx_queue(struct efx_tx_queue *tx_queue) 419{ 420 struct efx_nic *efx = tx_queue->efx; 421 efx_oword_t tx_flush_descq; 422 423 WARN_ON(atomic_read(&tx_queue->flush_outstanding)); 424 atomic_set(&tx_queue->flush_outstanding, 1); 425 426 EFX_POPULATE_OWORD_2(tx_flush_descq, 427 FRF_AZ_TX_FLUSH_DESCQ_CMD, 1, 428 FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue); 429 efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ); 430} 431 432void efx_farch_tx_fini(struct efx_tx_queue *tx_queue) 433{ 434 struct efx_nic *efx = tx_queue->efx; 435 efx_oword_t tx_desc_ptr; 436 437 /* Remove TX descriptor ring from card */ 438 EFX_ZERO_OWORD(tx_desc_ptr); 439 efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, 440 tx_queue->queue); 441 442 /* Unpin TX descriptor ring */ 443 efx_fini_special_buffer(efx, &tx_queue->txd); 444} 445 446/* Free buffers backing TX queue */ 447void efx_farch_tx_remove(struct efx_tx_queue *tx_queue) 448{ 449 efx_free_special_buffer(tx_queue->efx, &tx_queue->txd); 450} 451 452/************************************************************************** 453 * 454 * RX path 455 * 456 **************************************************************************/ 457 458/* This creates an entry in the RX descriptor queue */ 459static inline void 460efx_farch_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index) 461{ 462 struct efx_rx_buffer *rx_buf; 463 efx_qword_t *rxd; 464 465 rxd = efx_rx_desc(rx_queue, index); 466 rx_buf = efx_rx_buffer(rx_queue, index); 467 EFX_POPULATE_QWORD_3(*rxd, 468 FSF_AZ_RX_KER_BUF_SIZE, 469 rx_buf->len - 470 rx_queue->efx->type->rx_buffer_padding, 471 FSF_AZ_RX_KER_BUF_REGION, 0, 472 FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr); 473} 474 475/* This writes to the RX_DESC_WPTR register for the specified receive 476 * descriptor ring. 477 */ 478void efx_farch_rx_write(struct efx_rx_queue *rx_queue) 479{ 480 struct efx_nic *efx = rx_queue->efx; 481 efx_dword_t reg; 482 unsigned write_ptr; 483 484 while (rx_queue->notified_count != rx_queue->added_count) { 485 efx_farch_build_rx_desc( 486 rx_queue, 487 rx_queue->notified_count & rx_queue->ptr_mask); 488 ++rx_queue->notified_count; 489 } 490 491 wmb(); 492 write_ptr = rx_queue->added_count & rx_queue->ptr_mask; 493 EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr); 494 efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0, 495 efx_rx_queue_index(rx_queue)); 496} 497 498int efx_farch_rx_probe(struct efx_rx_queue *rx_queue) 499{ 500 struct efx_nic *efx = rx_queue->efx; 501 unsigned entries; 502 503 entries = rx_queue->ptr_mask + 1; 504 return efx_alloc_special_buffer(efx, &rx_queue->rxd, 505 entries * sizeof(efx_qword_t)); 506} 507 508void efx_farch_rx_init(struct efx_rx_queue *rx_queue) 509{ 510 efx_oword_t rx_desc_ptr; 511 struct efx_nic *efx = rx_queue->efx; 512 bool jumbo_en; 513 514 /* For kernel-mode queues in Siena, the JUMBO flag enables scatter. 
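	 * Each buffer of a scattered packet then completes with its own RX
	 * event carrying FSF_AZ_RX_EV_JUMBO_CONT; efx_farch_handle_rx_event()
	 * accumulates those fragments in rx_queue->scatter_n until the final
	 * completion arrives.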
*/ 515 jumbo_en = efx->rx_scatter; 516 517 netif_dbg(efx, hw, efx->net_dev, 518 "RX queue %d ring in special buffers %d-%d\n", 519 efx_rx_queue_index(rx_queue), rx_queue->rxd.index, 520 rx_queue->rxd.index + rx_queue->rxd.entries - 1); 521 522 rx_queue->scatter_n = 0; 523 524 /* Pin RX descriptor ring */ 525 efx_init_special_buffer(efx, &rx_queue->rxd); 526 527 /* Push RX descriptor ring to card */ 528 EFX_POPULATE_OWORD_10(rx_desc_ptr, 529 FRF_AZ_RX_ISCSI_DDIG_EN, true, 530 FRF_AZ_RX_ISCSI_HDIG_EN, true, 531 FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index, 532 FRF_AZ_RX_DESCQ_EVQ_ID, 533 efx_rx_queue_channel(rx_queue)->channel, 534 FRF_AZ_RX_DESCQ_OWNER_ID, 0, 535 FRF_AZ_RX_DESCQ_LABEL, 536 efx_rx_queue_index(rx_queue), 537 FRF_AZ_RX_DESCQ_SIZE, 538 __ffs(rx_queue->rxd.entries), 539 FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ , 540 FRF_AZ_RX_DESCQ_JUMBO, jumbo_en, 541 FRF_AZ_RX_DESCQ_EN, 1); 542 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, 543 efx_rx_queue_index(rx_queue)); 544} 545 546static void efx_farch_flush_rx_queue(struct efx_rx_queue *rx_queue) 547{ 548 struct efx_nic *efx = rx_queue->efx; 549 efx_oword_t rx_flush_descq; 550 551 EFX_POPULATE_OWORD_2(rx_flush_descq, 552 FRF_AZ_RX_FLUSH_DESCQ_CMD, 1, 553 FRF_AZ_RX_FLUSH_DESCQ, 554 efx_rx_queue_index(rx_queue)); 555 efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ); 556} 557 558void efx_farch_rx_fini(struct efx_rx_queue *rx_queue) 559{ 560 efx_oword_t rx_desc_ptr; 561 struct efx_nic *efx = rx_queue->efx; 562 563 /* Remove RX descriptor ring from card */ 564 EFX_ZERO_OWORD(rx_desc_ptr); 565 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, 566 efx_rx_queue_index(rx_queue)); 567 568 /* Unpin RX descriptor ring */ 569 efx_fini_special_buffer(efx, &rx_queue->rxd); 570} 571 572/* Free buffers backing RX queue */ 573void efx_farch_rx_remove(struct efx_rx_queue *rx_queue) 574{ 575 efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd); 576} 577 578/************************************************************************** 579 * 580 * Flush handling 581 * 582 **************************************************************************/ 583 584/* efx_farch_flush_queues() must be woken up when all flushes are completed, 585 * or more RX flushes can be kicked off. 
586 */ 587static bool efx_farch_flush_wake(struct efx_nic *efx) 588{ 589 /* Ensure that all updates are visible to efx_farch_flush_queues() */ 590 smp_mb(); 591 592 return (atomic_read(&efx->active_queues) == 0 || 593 (atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT 594 && atomic_read(&efx->rxq_flush_pending) > 0)); 595} 596 597static bool efx_check_tx_flush_complete(struct efx_nic *efx) 598{ 599 bool i = true; 600 efx_oword_t txd_ptr_tbl; 601 struct efx_channel *channel; 602 struct efx_tx_queue *tx_queue; 603 604 efx_for_each_channel(channel, efx) { 605 efx_for_each_channel_tx_queue(tx_queue, channel) { 606 efx_reado_table(efx, &txd_ptr_tbl, 607 FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue); 608 if (EFX_OWORD_FIELD(txd_ptr_tbl, 609 FRF_AZ_TX_DESCQ_FLUSH) || 610 EFX_OWORD_FIELD(txd_ptr_tbl, 611 FRF_AZ_TX_DESCQ_EN)) { 612 netif_dbg(efx, hw, efx->net_dev, 613 "flush did not complete on TXQ %d\n", 614 tx_queue->queue); 615 i = false; 616 } else if (atomic_cmpxchg(&tx_queue->flush_outstanding, 617 1, 0)) { 618 /* The flush is complete, but we didn't 619 * receive a flush completion event 620 */ 621 netif_dbg(efx, hw, efx->net_dev, 622 "flush complete on TXQ %d, so drain " 623 "the queue\n", tx_queue->queue); 624 /* Don't need to increment active_queues as it 625 * has already been incremented for the queues 626 * which did not drain 627 */ 628 efx_farch_magic_event(channel, 629 EFX_CHANNEL_MAGIC_TX_DRAIN( 630 tx_queue)); 631 } 632 } 633 } 634 635 return i; 636} 637 638/* Flush all the transmit queues, and continue flushing receive queues until 639 * they're all flushed. Wait for the DRAIN events to be received so that there 640 * are no more RX and TX events left on any channel. */ 641static int efx_farch_do_flush(struct efx_nic *efx) 642{ 643 unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */ 644 struct efx_channel *channel; 645 struct efx_rx_queue *rx_queue; 646 struct efx_tx_queue *tx_queue; 647 int rc = 0; 648 649 efx_for_each_channel(channel, efx) { 650 efx_for_each_channel_tx_queue(tx_queue, channel) { 651 efx_farch_flush_tx_queue(tx_queue); 652 } 653 efx_for_each_channel_rx_queue(rx_queue, channel) { 654 rx_queue->flush_pending = true; 655 atomic_inc(&efx->rxq_flush_pending); 656 } 657 } 658 659 while (timeout && atomic_read(&efx->active_queues) > 0) { 660 /* If SRIOV is enabled, then offload receive queue flushing to 661 * the firmware (though we will still have to poll for 662 * completion). If that fails, fall back to the old scheme. 
663 */ 664 if (efx_siena_sriov_enabled(efx)) { 665 rc = efx_mcdi_flush_rxqs(efx); 666 if (!rc) 667 goto wait; 668 } 669 670 /* The hardware supports four concurrent rx flushes, each of 671 * which may need to be retried if there is an outstanding 672 * descriptor fetch 673 */ 674 efx_for_each_channel(channel, efx) { 675 efx_for_each_channel_rx_queue(rx_queue, channel) { 676 if (atomic_read(&efx->rxq_flush_outstanding) >= 677 EFX_RX_FLUSH_COUNT) 678 break; 679 680 if (rx_queue->flush_pending) { 681 rx_queue->flush_pending = false; 682 atomic_dec(&efx->rxq_flush_pending); 683 atomic_inc(&efx->rxq_flush_outstanding); 684 efx_farch_flush_rx_queue(rx_queue); 685 } 686 } 687 } 688 689 wait: 690 timeout = wait_event_timeout(efx->flush_wq, 691 efx_farch_flush_wake(efx), 692 timeout); 693 } 694 695 if (atomic_read(&efx->active_queues) && 696 !efx_check_tx_flush_complete(efx)) { 697 netif_err(efx, hw, efx->net_dev, "failed to flush %d queues " 698 "(rx %d+%d)\n", atomic_read(&efx->active_queues), 699 atomic_read(&efx->rxq_flush_outstanding), 700 atomic_read(&efx->rxq_flush_pending)); 701 rc = -ETIMEDOUT; 702 703 atomic_set(&efx->active_queues, 0); 704 atomic_set(&efx->rxq_flush_pending, 0); 705 atomic_set(&efx->rxq_flush_outstanding, 0); 706 } 707 708 return rc; 709} 710 711int efx_farch_fini_dmaq(struct efx_nic *efx) 712{ 713 struct efx_channel *channel; 714 struct efx_tx_queue *tx_queue; 715 struct efx_rx_queue *rx_queue; 716 int rc = 0; 717 718 /* Do not attempt to write to the NIC during EEH recovery */ 719 if (efx->state != STATE_RECOVERY) { 720 /* Only perform flush if DMA is enabled */ 721 if (efx->pci_dev->is_busmaster) { 722 efx->type->prepare_flush(efx); 723 rc = efx_farch_do_flush(efx); 724 efx->type->finish_flush(efx); 725 } 726 727 efx_for_each_channel(channel, efx) { 728 efx_for_each_channel_rx_queue(rx_queue, channel) 729 efx_farch_rx_fini(rx_queue); 730 efx_for_each_channel_tx_queue(tx_queue, channel) 731 efx_farch_tx_fini(tx_queue); 732 } 733 } 734 735 return rc; 736} 737 738/* Reset queue and flush accounting after FLR 739 * 740 * One possible cause of FLR recovery is that DMA may be failing (eg. if bus 741 * mastering was disabled), in which case we don't receive (RXQ) flush 742 * completion events. This means that efx->rxq_flush_outstanding remained at 4 743 * after the FLR; also, efx->active_queues was non-zero (as no flush completion 744 * events were received, and we didn't go through efx_check_tx_flush_complete()) 745 * If we don't fix this up, on the next call to efx_realloc_channels() we won't 746 * flush any RX queues because efx->rxq_flush_outstanding is at the limit of 4 747 * for batched flush requests; and the efx->active_queues gets messed up because 748 * we keep incrementing for the newly initialised queues, but it never went to 749 * zero previously. Then we get a timeout every time we try to restart the 750 * queues, as it doesn't go back to zero when we should be flushing the queues. 751 */ 752void efx_farch_finish_flr(struct efx_nic *efx) 753{ 754 atomic_set(&efx->rxq_flush_pending, 0); 755 atomic_set(&efx->rxq_flush_outstanding, 0); 756 atomic_set(&efx->active_queues, 0); 757} 758 759 760/************************************************************************** 761 * 762 * Event queue processing 763 * Event queues are processed by per-channel tasklets. 
764 * 765 **************************************************************************/ 766 767/* Update a channel's event queue's read pointer (RPTR) register 768 * 769 * This writes the EVQ_RPTR_REG register for the specified channel's 770 * event queue. 771 */ 772void efx_farch_ev_read_ack(struct efx_channel *channel) 773{ 774 efx_dword_t reg; 775 struct efx_nic *efx = channel->efx; 776 777 EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, 778 channel->eventq_read_ptr & channel->eventq_mask); 779 780 /* For Falcon A1, EVQ_RPTR_KER is documented as having a step size 781 * of 4 bytes, but it is really 16 bytes just like later revisions. 782 */ 783 efx_writed(efx, &reg, 784 efx->type->evq_rptr_tbl_base + 785 FR_BZ_EVQ_RPTR_STEP * channel->channel); 786} 787 788/* Use HW to insert a SW defined event */ 789void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq, 790 efx_qword_t *event) 791{ 792 efx_oword_t drv_ev_reg; 793 794 BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 || 795 FRF_AZ_DRV_EV_DATA_WIDTH != 64); 796 drv_ev_reg.u32[0] = event->u32[0]; 797 drv_ev_reg.u32[1] = event->u32[1]; 798 drv_ev_reg.u32[2] = 0; 799 drv_ev_reg.u32[3] = 0; 800 EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq); 801 efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV); 802} 803 804static void efx_farch_magic_event(struct efx_channel *channel, u32 magic) 805{ 806 efx_qword_t event; 807 808 EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE, 809 FSE_AZ_EV_CODE_DRV_GEN_EV, 810 FSF_AZ_DRV_GEN_EV_MAGIC, magic); 811 efx_farch_generate_event(channel->efx, channel->channel, &event); 812} 813 814/* Handle a transmit completion event 815 * 816 * The NIC batches TX completion events; the message we receive is of 817 * the form "complete all TX events up to this index". 818 */ 819static void 820efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) 821{ 822 unsigned int tx_ev_desc_ptr; 823 unsigned int tx_ev_q_label; 824 struct efx_tx_queue *tx_queue; 825 struct efx_nic *efx = channel->efx; 826 827 if (unlikely(READ_ONCE(efx->reset_pending))) 828 return; 829 830 if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { 831 /* Transmit completion */ 832 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR); 833 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); 834 tx_queue = efx_channel_get_tx_queue( 835 channel, tx_ev_q_label % EFX_TXQ_TYPES); 836 efx_xmit_done(tx_queue, tx_ev_desc_ptr); 837 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) { 838 /* Rewrite the FIFO write pointer */ 839 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); 840 tx_queue = efx_channel_get_tx_queue( 841 channel, tx_ev_q_label % EFX_TXQ_TYPES); 842 843 netif_tx_lock(efx->net_dev); 844 efx_farch_notify_tx_desc(tx_queue); 845 netif_tx_unlock(efx->net_dev); 846 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) { 847 efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR); 848 } else { 849 netif_err(efx, tx_err, efx->net_dev, 850 "channel %d unexpected TX event " 851 EFX_QWORD_FMT"\n", channel->channel, 852 EFX_QWORD_VAL(*event)); 853 } 854} 855 856/* Detect errors included in the rx_evt_pkt_ok bit. 
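 * Returns EFX_RX_PKT_DISCARD when the frame must be dropped (CRC error
 * unless NETIF_F_RXALL is set, truncation, TOBE_DISC or a pause frame);
 * checksum failures are only counted, not discarded.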
*/ 857static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue, 858 const efx_qword_t *event) 859{ 860 struct efx_channel *channel = efx_rx_queue_channel(rx_queue); 861 struct efx_nic *efx = rx_queue->efx; 862 bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err; 863 bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err; 864 bool rx_ev_frm_trunc, rx_ev_tobe_disc; 865 bool rx_ev_other_err, rx_ev_pause_frm; 866 bool rx_ev_hdr_type, rx_ev_mcast_pkt; 867 unsigned rx_ev_pkt_type; 868 869 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); 870 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); 871 rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC); 872 rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE); 873 rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event, 874 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR); 875 rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event, 876 FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR); 877 rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event, 878 FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR); 879 rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR); 880 rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC); 881 rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR); 882 883 /* Every error apart from tobe_disc and pause_frm */ 884 rx_ev_other_err = (rx_ev_tcp_udp_chksum_err | 885 rx_ev_buf_owner_id_err | rx_ev_eth_crc_err | 886 rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err); 887 888 /* Count errors that are not in MAC stats. Ignore expected 889 * checksum errors during self-test. */ 890 if (rx_ev_frm_trunc) 891 ++channel->n_rx_frm_trunc; 892 else if (rx_ev_tobe_disc) 893 ++channel->n_rx_tobe_disc; 894 else if (!efx->loopback_selftest) { 895 if (rx_ev_ip_hdr_chksum_err) 896 ++channel->n_rx_ip_hdr_chksum_err; 897 else if (rx_ev_tcp_udp_chksum_err) 898 ++channel->n_rx_tcp_udp_chksum_err; 899 } 900 901 /* TOBE_DISC is expected on unicast mismatches; don't print out an 902 * error message. FRM_TRUNC indicates RXDP dropped the packet due 903 * to a FIFO overflow. 904 */ 905#ifdef DEBUG 906 if (rx_ev_other_err && net_ratelimit()) { 907 netif_dbg(efx, rx_err, efx->net_dev, 908 " RX queue %d unexpected RX event " 909 EFX_QWORD_FMT "%s%s%s%s%s%s%s\n", 910 efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event), 911 rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "", 912 rx_ev_ip_hdr_chksum_err ? 913 " [IP_HDR_CHKSUM_ERR]" : "", 914 rx_ev_tcp_udp_chksum_err ? 915 " [TCP_UDP_CHKSUM_ERR]" : "", 916 rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "", 917 rx_ev_frm_trunc ? " [FRM_TRUNC]" : "", 918 rx_ev_tobe_disc ? " [TOBE_DISC]" : "", 919 rx_ev_pause_frm ? " [PAUSE]" : ""); 920 } 921#endif 922 923 if (efx->net_dev->features & NETIF_F_RXALL) 924 /* don't discard frame for CRC error */ 925 rx_ev_eth_crc_err = false; 926 927 /* The frame must be discarded if any of these are true. */ 928 return (rx_ev_eth_crc_err | rx_ev_frm_trunc | 929 rx_ev_tobe_disc | rx_ev_pause_frm) ? 930 EFX_RX_PKT_DISCARD : 0; 931} 932 933/* Handle receive events that are not in-order. Return true if this 934 * can be handled as a partial packet discard, false if it's more 935 * serious. 
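 * (The recoverable case is an index that lands on the end of the current
 * scatter sequence: the hardware truncated a packet for lack of descriptors,
 * counted in n_rx_nodesc_trunc. Any other mismatch schedules a
 * RESET_TYPE_DISABLE reset.)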
936 */ 937static bool 938efx_farch_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index) 939{ 940 struct efx_channel *channel = efx_rx_queue_channel(rx_queue); 941 struct efx_nic *efx = rx_queue->efx; 942 unsigned expected, dropped; 943 944 if (rx_queue->scatter_n && 945 index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) & 946 rx_queue->ptr_mask)) { 947 ++channel->n_rx_nodesc_trunc; 948 return true; 949 } 950 951 expected = rx_queue->removed_count & rx_queue->ptr_mask; 952 dropped = (index - expected) & rx_queue->ptr_mask; 953 netif_info(efx, rx_err, efx->net_dev, 954 "dropped %d events (index=%d expected=%d)\n", 955 dropped, index, expected); 956 957 efx_schedule_reset(efx, RESET_TYPE_DISABLE); 958 return false; 959} 960 961/* Handle a packet received event 962 * 963 * The NIC gives a "discard" flag if it's a unicast packet with the 964 * wrong destination address 965 * Also "is multicast" and "matches multicast filter" flags can be used to 966 * discard non-matching multicast packets. 967 */ 968static void 969efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) 970{ 971 unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt; 972 unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt; 973 unsigned expected_ptr; 974 bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont; 975 u16 flags; 976 struct efx_rx_queue *rx_queue; 977 struct efx_nic *efx = channel->efx; 978 979 if (unlikely(READ_ONCE(efx->reset_pending))) 980 return; 981 982 rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT); 983 rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP); 984 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) != 985 channel->channel); 986 987 rx_queue = efx_channel_get_rx_queue(channel); 988 989 rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR); 990 expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) & 991 rx_queue->ptr_mask); 992 993 /* Check for partial drops and other errors */ 994 if (unlikely(rx_ev_desc_ptr != expected_ptr) || 995 unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) { 996 if (rx_ev_desc_ptr != expected_ptr && 997 !efx_farch_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr)) 998 return; 999 1000 /* Discard all pending fragments */ 1001 if (rx_queue->scatter_n) { 1002 efx_rx_packet( 1003 rx_queue, 1004 rx_queue->removed_count & rx_queue->ptr_mask, 1005 rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD); 1006 rx_queue->removed_count += rx_queue->scatter_n; 1007 rx_queue->scatter_n = 0; 1008 } 1009 1010 /* Return if there is no new fragment */ 1011 if (rx_ev_desc_ptr != expected_ptr) 1012 return; 1013 1014 /* Discard new fragment if not SOP */ 1015 if (!rx_ev_sop) { 1016 efx_rx_packet( 1017 rx_queue, 1018 rx_queue->removed_count & rx_queue->ptr_mask, 1019 1, 0, EFX_RX_PKT_DISCARD); 1020 ++rx_queue->removed_count; 1021 return; 1022 } 1023 } 1024 1025 ++rx_queue->scatter_n; 1026 if (rx_ev_cont) 1027 return; 1028 1029 rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT); 1030 rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK); 1031 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); 1032 1033 if (likely(rx_ev_pkt_ok)) { 1034 /* If packet is marked as OK then we can rely on the 1035 * hardware checksum and classification. 
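		 * The switch below uses deliberate fall-through: a TCP header
		 * type sets both EFX_RX_PKT_TCP and EFX_RX_PKT_CSUMMED, a UDP
		 * type sets only EFX_RX_PKT_CSUMMED, and the other header
		 * types leave the flags at zero.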
1036 */ 1037 flags = 0; 1038 switch (rx_ev_hdr_type) { 1039 case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP: 1040 flags |= EFX_RX_PKT_TCP; 1041 /* fall through */ 1042 case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP: 1043 flags |= EFX_RX_PKT_CSUMMED; 1044 /* fall through */ 1045 case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER: 1046 case FSE_AZ_RX_EV_HDR_TYPE_OTHER: 1047 break; 1048 } 1049 } else { 1050 flags = efx_farch_handle_rx_not_ok(rx_queue, event); 1051 } 1052 1053 /* Detect multicast packets that didn't match the filter */ 1054 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); 1055 if (rx_ev_mcast_pkt) { 1056 unsigned int rx_ev_mcast_hash_match = 1057 EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH); 1058 1059 if (unlikely(!rx_ev_mcast_hash_match)) { 1060 ++channel->n_rx_mcast_mismatch; 1061 flags |= EFX_RX_PKT_DISCARD; 1062 } 1063 } 1064 1065 channel->irq_mod_score += 2; 1066 1067 /* Handle received packet */ 1068 efx_rx_packet(rx_queue, 1069 rx_queue->removed_count & rx_queue->ptr_mask, 1070 rx_queue->scatter_n, rx_ev_byte_cnt, flags); 1071 rx_queue->removed_count += rx_queue->scatter_n; 1072 rx_queue->scatter_n = 0; 1073} 1074 1075/* If this flush done event corresponds to a &struct efx_tx_queue, then 1076 * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue 1077 * of all transmit completions. 1078 */ 1079static void 1080efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event) 1081{ 1082 struct efx_tx_queue *tx_queue; 1083 int qid; 1084 1085 qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); 1086 if (qid < EFX_TXQ_TYPES * (efx->n_tx_channels + efx->n_extra_tx_channels)) { 1087 tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES, 1088 qid % EFX_TXQ_TYPES); 1089 if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) { 1090 efx_farch_magic_event(tx_queue->channel, 1091 EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue)); 1092 } 1093 } 1094} 1095 1096/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush 1097 * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add 1098 * the RX queue back to the mask of RX queues in need of flushing. 
1099 */ 1100static void 1101efx_farch_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event) 1102{ 1103 struct efx_channel *channel; 1104 struct efx_rx_queue *rx_queue; 1105 int qid; 1106 bool failed; 1107 1108 qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID); 1109 failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL); 1110 if (qid >= efx->n_channels) 1111 return; 1112 channel = efx_get_channel(efx, qid); 1113 if (!efx_channel_has_rx_queue(channel)) 1114 return; 1115 rx_queue = efx_channel_get_rx_queue(channel); 1116 1117 if (failed) { 1118 netif_info(efx, hw, efx->net_dev, 1119 "RXQ %d flush retry\n", qid); 1120 rx_queue->flush_pending = true; 1121 atomic_inc(&efx->rxq_flush_pending); 1122 } else { 1123 efx_farch_magic_event(efx_rx_queue_channel(rx_queue), 1124 EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)); 1125 } 1126 atomic_dec(&efx->rxq_flush_outstanding); 1127 if (efx_farch_flush_wake(efx)) 1128 wake_up(&efx->flush_wq); 1129} 1130 1131static void 1132efx_farch_handle_drain_event(struct efx_channel *channel) 1133{ 1134 struct efx_nic *efx = channel->efx; 1135 1136 WARN_ON(atomic_read(&efx->active_queues) == 0); 1137 atomic_dec(&efx->active_queues); 1138 if (efx_farch_flush_wake(efx)) 1139 wake_up(&efx->flush_wq); 1140} 1141 1142static void efx_farch_handle_generated_event(struct efx_channel *channel, 1143 efx_qword_t *event) 1144{ 1145 struct efx_nic *efx = channel->efx; 1146 struct efx_rx_queue *rx_queue = 1147 efx_channel_has_rx_queue(channel) ? 1148 efx_channel_get_rx_queue(channel) : NULL; 1149 unsigned magic, code; 1150 1151 magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC); 1152 code = _EFX_CHANNEL_MAGIC_CODE(magic); 1153 1154 if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) { 1155 channel->event_test_cpu = raw_smp_processor_id(); 1156 } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) { 1157 /* The queue must be empty, so we won't receive any rx 1158 * events, so efx_process_channel() won't refill the 1159 * queue. 
Refill it here */ 1160 efx_fast_push_rx_descriptors(rx_queue, true); 1161 } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) { 1162 efx_farch_handle_drain_event(channel); 1163 } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) { 1164 efx_farch_handle_drain_event(channel); 1165 } else { 1166 netif_dbg(efx, hw, efx->net_dev, "channel %d received " 1167 "generated event "EFX_QWORD_FMT"\n", 1168 channel->channel, EFX_QWORD_VAL(*event)); 1169 } 1170} 1171 1172static void 1173efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) 1174{ 1175 struct efx_nic *efx = channel->efx; 1176 unsigned int ev_sub_code; 1177 unsigned int ev_sub_data; 1178 1179 ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE); 1180 ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); 1181 1182 switch (ev_sub_code) { 1183 case FSE_AZ_TX_DESCQ_FLS_DONE_EV: 1184 netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n", 1185 channel->channel, ev_sub_data); 1186 efx_farch_handle_tx_flush_done(efx, event); 1187#ifdef CONFIG_SFC_SRIOV 1188 efx_siena_sriov_tx_flush_done(efx, event); 1189#endif 1190 break; 1191 case FSE_AZ_RX_DESCQ_FLS_DONE_EV: 1192 netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n", 1193 channel->channel, ev_sub_data); 1194 efx_farch_handle_rx_flush_done(efx, event); 1195#ifdef CONFIG_SFC_SRIOV 1196 efx_siena_sriov_rx_flush_done(efx, event); 1197#endif 1198 break; 1199 case FSE_AZ_EVQ_INIT_DONE_EV: 1200 netif_dbg(efx, hw, efx->net_dev, 1201 "channel %d EVQ %d initialised\n", 1202 channel->channel, ev_sub_data); 1203 break; 1204 case FSE_AZ_SRM_UPD_DONE_EV: 1205 netif_vdbg(efx, hw, efx->net_dev, 1206 "channel %d SRAM update done\n", channel->channel); 1207 break; 1208 case FSE_AZ_WAKE_UP_EV: 1209 netif_vdbg(efx, hw, efx->net_dev, 1210 "channel %d RXQ %d wakeup event\n", 1211 channel->channel, ev_sub_data); 1212 break; 1213 case FSE_AZ_TIMER_EV: 1214 netif_vdbg(efx, hw, efx->net_dev, 1215 "channel %d RX queue %d timer expired\n", 1216 channel->channel, ev_sub_data); 1217 break; 1218 case FSE_AA_RX_RECOVER_EV: 1219 netif_err(efx, rx_err, efx->net_dev, 1220 "channel %d seen DRIVER RX_RESET event. " 1221 "Resetting.\n", channel->channel); 1222 atomic_inc(&efx->rx_reset); 1223 efx_schedule_reset(efx, RESET_TYPE_DISABLE); 1224 break; 1225 case FSE_BZ_RX_DSC_ERROR_EV: 1226 if (ev_sub_data < EFX_VI_BASE) { 1227 netif_err(efx, rx_err, efx->net_dev, 1228 "RX DMA Q %d reports descriptor fetch error." 1229 " RX Q %d is disabled.\n", ev_sub_data, 1230 ev_sub_data); 1231 efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR); 1232 } 1233#ifdef CONFIG_SFC_SRIOV 1234 else 1235 efx_siena_sriov_desc_fetch_err(efx, ev_sub_data); 1236#endif 1237 break; 1238 case FSE_BZ_TX_DSC_ERROR_EV: 1239 if (ev_sub_data < EFX_VI_BASE) { 1240 netif_err(efx, tx_err, efx->net_dev, 1241 "TX DMA Q %d reports descriptor fetch error." 
1242 " TX Q %d is disabled.\n", ev_sub_data, 1243 ev_sub_data); 1244 efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR); 1245 } 1246#ifdef CONFIG_SFC_SRIOV 1247 else 1248 efx_siena_sriov_desc_fetch_err(efx, ev_sub_data); 1249#endif 1250 break; 1251 default: 1252 netif_vdbg(efx, hw, efx->net_dev, 1253 "channel %d unknown driver event code %d " 1254 "data %04x\n", channel->channel, ev_sub_code, 1255 ev_sub_data); 1256 break; 1257 } 1258} 1259 1260int efx_farch_ev_process(struct efx_channel *channel, int budget) 1261{ 1262 struct efx_nic *efx = channel->efx; 1263 unsigned int read_ptr; 1264 efx_qword_t event, *p_event; 1265 int ev_code; 1266 int spent = 0; 1267 1268 if (budget <= 0) 1269 return spent; 1270 1271 read_ptr = channel->eventq_read_ptr; 1272 1273 for (;;) { 1274 p_event = efx_event(channel, read_ptr); 1275 event = *p_event; 1276 1277 if (!efx_event_present(&event)) 1278 /* End of events */ 1279 break; 1280 1281 netif_vdbg(channel->efx, intr, channel->efx->net_dev, 1282 "channel %d event is "EFX_QWORD_FMT"\n", 1283 channel->channel, EFX_QWORD_VAL(event)); 1284 1285 /* Clear this event by marking it all ones */ 1286 EFX_SET_QWORD(*p_event); 1287 1288 ++read_ptr; 1289 1290 ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE); 1291 1292 switch (ev_code) { 1293 case FSE_AZ_EV_CODE_RX_EV: 1294 efx_farch_handle_rx_event(channel, &event); 1295 if (++spent == budget) 1296 goto out; 1297 break; 1298 case FSE_AZ_EV_CODE_TX_EV: 1299 efx_farch_handle_tx_event(channel, &event); 1300 break; 1301 case FSE_AZ_EV_CODE_DRV_GEN_EV: 1302 efx_farch_handle_generated_event(channel, &event); 1303 break; 1304 case FSE_AZ_EV_CODE_DRIVER_EV: 1305 efx_farch_handle_driver_event(channel, &event); 1306 break; 1307#ifdef CONFIG_SFC_SRIOV 1308 case FSE_CZ_EV_CODE_USER_EV: 1309 efx_siena_sriov_event(channel, &event); 1310 break; 1311#endif 1312 case FSE_CZ_EV_CODE_MCDI_EV: 1313 efx_mcdi_process_event(channel, &event); 1314 break; 1315 case FSE_AZ_EV_CODE_GLOBAL_EV: 1316 if (efx->type->handle_global_event && 1317 efx->type->handle_global_event(channel, &event)) 1318 break; 1319 /* else fall through */ 1320 default: 1321 netif_err(channel->efx, hw, channel->efx->net_dev, 1322 "channel %d unknown event type %d (data " 1323 EFX_QWORD_FMT ")\n", channel->channel, 1324 ev_code, EFX_QWORD_VAL(event)); 1325 } 1326 } 1327 1328out: 1329 channel->eventq_read_ptr = read_ptr; 1330 return spent; 1331} 1332 1333/* Allocate buffer table entries for event queue */ 1334int efx_farch_ev_probe(struct efx_channel *channel) 1335{ 1336 struct efx_nic *efx = channel->efx; 1337 unsigned entries; 1338 1339 entries = channel->eventq_mask + 1; 1340 return efx_alloc_special_buffer(efx, &channel->eventq, 1341 entries * sizeof(efx_qword_t)); 1342} 1343 1344int efx_farch_ev_init(struct efx_channel *channel) 1345{ 1346 efx_oword_t reg; 1347 struct efx_nic *efx = channel->efx; 1348 1349 netif_dbg(efx, hw, efx->net_dev, 1350 "channel %d event queue in special buffers %d-%d\n", 1351 channel->channel, channel->eventq.index, 1352 channel->eventq.index + channel->eventq.entries - 1); 1353 1354 EFX_POPULATE_OWORD_3(reg, 1355 FRF_CZ_TIMER_Q_EN, 1, 1356 FRF_CZ_HOST_NOTIFY_MODE, 0, 1357 FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS); 1358 efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel); 1359 1360 /* Pin event queue buffer */ 1361 efx_init_special_buffer(efx, &channel->eventq); 1362 1363 /* Fill event queue with all ones (i.e. 
empty events) */ 1364 memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len); 1365 1366 /* Push event queue to card */ 1367 EFX_POPULATE_OWORD_3(reg, 1368 FRF_AZ_EVQ_EN, 1, 1369 FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries), 1370 FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index); 1371 efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base, 1372 channel->channel); 1373 1374 return 0; 1375} 1376 1377void efx_farch_ev_fini(struct efx_channel *channel) 1378{ 1379 efx_oword_t reg; 1380 struct efx_nic *efx = channel->efx; 1381 1382 /* Remove event queue from card */ 1383 EFX_ZERO_OWORD(reg); 1384 efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base, 1385 channel->channel); 1386 efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel); 1387 1388 /* Unpin event queue */ 1389 efx_fini_special_buffer(efx, &channel->eventq); 1390} 1391 1392/* Free buffers backing event queue */ 1393void efx_farch_ev_remove(struct efx_channel *channel) 1394{ 1395 efx_free_special_buffer(channel->efx, &channel->eventq); 1396} 1397 1398 1399void efx_farch_ev_test_generate(struct efx_channel *channel) 1400{ 1401 efx_farch_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel)); 1402} 1403 1404void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue) 1405{ 1406 efx_farch_magic_event(efx_rx_queue_channel(rx_queue), 1407 EFX_CHANNEL_MAGIC_FILL(rx_queue)); 1408} 1409 1410/************************************************************************** 1411 * 1412 * Hardware interrupts 1413 * The hardware interrupt handler does very little work; all the event 1414 * queue processing is carried out by per-channel tasklets. 1415 * 1416 **************************************************************************/ 1417 1418/* Enable/disable/generate interrupts */ 1419static inline void efx_farch_interrupts(struct efx_nic *efx, 1420 bool enabled, bool force) 1421{ 1422 efx_oword_t int_en_reg_ker; 1423 1424 EFX_POPULATE_OWORD_3(int_en_reg_ker, 1425 FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level, 1426 FRF_AZ_KER_INT_KER, force, 1427 FRF_AZ_DRV_INT_EN_KER, enabled); 1428 efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER); 1429} 1430 1431void efx_farch_irq_enable_master(struct efx_nic *efx) 1432{ 1433 EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr)); 1434 wmb(); /* Ensure interrupt vector is clear before interrupts enabled */ 1435 1436 efx_farch_interrupts(efx, true, false); 1437} 1438 1439void efx_farch_irq_disable_master(struct efx_nic *efx) 1440{ 1441 /* Disable interrupts */ 1442 efx_farch_interrupts(efx, false, false); 1443} 1444 1445/* Generate a test interrupt 1446 * Interrupt must already have been enabled, otherwise nasty things 1447 * may happen. 1448 */ 1449int efx_farch_irq_test_generate(struct efx_nic *efx) 1450{ 1451 efx_farch_interrupts(efx, true, true); 1452 return 0; 1453} 1454 1455/* Process a fatal interrupt 1456 * Disable bus mastering ASAP and schedule a reset 1457 */ 1458irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx) 1459{ 1460 efx_oword_t *int_ker = efx->irq_status.addr; 1461 efx_oword_t fatal_intr; 1462 int error, mem_perr; 1463 1464 efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER); 1465 error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR); 1466 1467 netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status " 1468 EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker), 1469 EFX_OWORD_VAL(fatal_intr), 1470 error ? 
"disabling bus mastering" : "no recognised error"); 1471 1472 /* If this is a memory parity error dump which blocks are offending */ 1473 mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) || 1474 EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER)); 1475 if (mem_perr) { 1476 efx_oword_t reg; 1477 efx_reado(efx, &reg, FR_AZ_MEM_STAT); 1478 netif_err(efx, hw, efx->net_dev, 1479 "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n", 1480 EFX_OWORD_VAL(reg)); 1481 } 1482 1483 /* Disable both devices */ 1484 pci_clear_master(efx->pci_dev); 1485 efx_farch_irq_disable_master(efx); 1486 1487 /* Count errors and reset or disable the NIC accordingly */ 1488 if (efx->int_error_count == 0 || 1489 time_after(jiffies, efx->int_error_expire)) { 1490 efx->int_error_count = 0; 1491 efx->int_error_expire = 1492 jiffies + EFX_INT_ERROR_EXPIRE * HZ; 1493 } 1494 if (++efx->int_error_count < EFX_MAX_INT_ERRORS) { 1495 netif_err(efx, hw, efx->net_dev, 1496 "SYSTEM ERROR - reset scheduled\n"); 1497 efx_schedule_reset(efx, RESET_TYPE_INT_ERROR); 1498 } else { 1499 netif_err(efx, hw, efx->net_dev, 1500 "SYSTEM ERROR - max number of errors seen." 1501 "NIC will be disabled\n"); 1502 efx_schedule_reset(efx, RESET_TYPE_DISABLE); 1503 } 1504 1505 return IRQ_HANDLED; 1506} 1507 1508/* Handle a legacy interrupt 1509 * Acknowledges the interrupt and schedule event queue processing. 1510 */ 1511irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id) 1512{ 1513 struct efx_nic *efx = dev_id; 1514 bool soft_enabled = READ_ONCE(efx->irq_soft_enabled); 1515 efx_oword_t *int_ker = efx->irq_status.addr; 1516 irqreturn_t result = IRQ_NONE; 1517 struct efx_channel *channel; 1518 efx_dword_t reg; 1519 u32 queues; 1520 int syserr; 1521 1522 /* Read the ISR which also ACKs the interrupts */ 1523 efx_readd(efx, &reg, FR_BZ_INT_ISR0); 1524 queues = EFX_EXTRACT_DWORD(reg, 0, 31); 1525 1526 /* Legacy interrupts are disabled too late by the EEH kernel 1527 * code. Disable them earlier. 1528 * If an EEH error occurred, the read will have returned all ones. 1529 */ 1530 if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) && 1531 !efx->eeh_disabled_legacy_irq) { 1532 disable_irq_nosync(efx->legacy_irq); 1533 efx->eeh_disabled_legacy_irq = true; 1534 } 1535 1536 /* Handle non-event-queue sources */ 1537 if (queues & (1U << efx->irq_level) && soft_enabled) { 1538 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); 1539 if (unlikely(syserr)) 1540 return efx_farch_fatal_interrupt(efx); 1541 efx->last_irq_cpu = raw_smp_processor_id(); 1542 } 1543 1544 if (queues != 0) { 1545 efx->irq_zero_count = 0; 1546 1547 /* Schedule processing of any interrupting queues */ 1548 if (likely(soft_enabled)) { 1549 efx_for_each_channel(channel, efx) { 1550 if (queues & 1) 1551 efx_schedule_channel_irq(channel); 1552 queues >>= 1; 1553 } 1554 } 1555 result = IRQ_HANDLED; 1556 1557 } else { 1558 efx_qword_t *event; 1559 1560 /* Legacy ISR read can return zero once (SF bug 15783) */ 1561 1562 /* We can't return IRQ_HANDLED more than once on seeing ISR=0 1563 * because this might be a shared interrupt. 
*/ 1564 if (efx->irq_zero_count++ == 0) 1565 result = IRQ_HANDLED; 1566 1567 /* Ensure we schedule or rearm all event queues */ 1568 if (likely(soft_enabled)) { 1569 efx_for_each_channel(channel, efx) { 1570 event = efx_event(channel, 1571 channel->eventq_read_ptr); 1572 if (efx_event_present(event)) 1573 efx_schedule_channel_irq(channel); 1574 else 1575 efx_farch_ev_read_ack(channel); 1576 } 1577 } 1578 } 1579 1580 if (result == IRQ_HANDLED) 1581 netif_vdbg(efx, intr, efx->net_dev, 1582 "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", 1583 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); 1584 1585 return result; 1586} 1587 1588/* Handle an MSI interrupt 1589 * 1590 * Handle an MSI hardware interrupt. This routine schedules event 1591 * queue processing. No interrupt acknowledgement cycle is necessary. 1592 * Also, we never need to check that the interrupt is for us, since 1593 * MSI interrupts cannot be shared. 1594 */ 1595irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id) 1596{ 1597 struct efx_msi_context *context = dev_id; 1598 struct efx_nic *efx = context->efx; 1599 efx_oword_t *int_ker = efx->irq_status.addr; 1600 int syserr; 1601 1602 netif_vdbg(efx, intr, efx->net_dev, 1603 "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", 1604 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); 1605 1606 if (!likely(READ_ONCE(efx->irq_soft_enabled))) 1607 return IRQ_HANDLED; 1608 1609 /* Handle non-event-queue sources */ 1610 if (context->index == efx->irq_level) { 1611 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); 1612 if (unlikely(syserr)) 1613 return efx_farch_fatal_interrupt(efx); 1614 efx->last_irq_cpu = raw_smp_processor_id(); 1615 } 1616 1617 /* Schedule processing of the channel */ 1618 efx_schedule_channel_irq(efx->channel[context->index]); 1619 1620 return IRQ_HANDLED; 1621} 1622 1623/* Setup RSS indirection table. 1624 * This maps from the hash value of the packet to RXQ 1625 */ 1626void efx_farch_rx_push_indir_table(struct efx_nic *efx) 1627{ 1628 size_t i = 0; 1629 efx_dword_t dword; 1630 1631 BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) != 1632 FR_BZ_RX_INDIRECTION_TBL_ROWS); 1633 1634 for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) { 1635 EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE, 1636 efx->rss_context.rx_indir_table[i]); 1637 efx_writed(efx, &dword, 1638 FR_BZ_RX_INDIRECTION_TBL + 1639 FR_BZ_RX_INDIRECTION_TBL_STEP * i); 1640 } 1641} 1642 1643void efx_farch_rx_pull_indir_table(struct efx_nic *efx) 1644{ 1645 size_t i = 0; 1646 efx_dword_t dword; 1647 1648 BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) != 1649 FR_BZ_RX_INDIRECTION_TBL_ROWS); 1650 1651 for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) { 1652 efx_readd(efx, &dword, 1653 FR_BZ_RX_INDIRECTION_TBL + 1654 FR_BZ_RX_INDIRECTION_TBL_STEP * i); 1655 efx->rss_context.rx_indir_table[i] = EFX_DWORD_FIELD(dword, FRF_BZ_IT_QUEUE); 1656 } 1657} 1658 1659/* Looks at available SRAM resources and works out how many queues we 1660 * can support, and where things like descriptor caches should live. 
1661 * 1662 * SRAM is split up as follows: 1663 * 0 buftbl entries for channels 1664 * efx->vf_buftbl_base buftbl entries for SR-IOV 1665 * efx->rx_dc_base RX descriptor caches 1666 * efx->tx_dc_base TX descriptor caches 1667 */ 1668void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw) 1669{ 1670 unsigned vi_count, buftbl_min, total_tx_channels; 1671 1672#ifdef CONFIG_SFC_SRIOV 1673 struct siena_nic_data *nic_data = efx->nic_data; 1674#endif 1675 1676 total_tx_channels = efx->n_tx_channels + efx->n_extra_tx_channels; 1677 /* Account for the buffer table entries backing the datapath channels 1678 * and the descriptor caches for those channels. 1679 */ 1680 buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE + 1681 total_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE + 1682 efx->n_channels * EFX_MAX_EVQ_SIZE) 1683 * sizeof(efx_qword_t) / EFX_BUF_SIZE); 1684 vi_count = max(efx->n_channels, total_tx_channels * EFX_TXQ_TYPES); 1685 1686#ifdef CONFIG_SFC_SRIOV 1687 if (efx->type->sriov_wanted) { 1688 if (efx->type->sriov_wanted(efx)) { 1689 unsigned vi_dc_entries, buftbl_free; 1690 unsigned entries_per_vf, vf_limit; 1691 1692 nic_data->vf_buftbl_base = buftbl_min; 1693 1694 vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES; 1695 vi_count = max(vi_count, EFX_VI_BASE); 1696 buftbl_free = (sram_lim_qw - buftbl_min - 1697 vi_count * vi_dc_entries); 1698 1699 entries_per_vf = ((vi_dc_entries + 1700 EFX_VF_BUFTBL_PER_VI) * 1701 efx_vf_size(efx)); 1702 vf_limit = min(buftbl_free / entries_per_vf, 1703 (1024U - EFX_VI_BASE) >> efx->vi_scale); 1704 1705 if (efx->vf_count > vf_limit) { 1706 netif_err(efx, probe, efx->net_dev, 1707 "Reducing VF count from from %d to %d\n", 1708 efx->vf_count, vf_limit); 1709 efx->vf_count = vf_limit; 1710 } 1711 vi_count += efx->vf_count * efx_vf_size(efx); 1712 } 1713 } 1714#endif 1715 1716 efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES; 1717 efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES; 1718} 1719 1720u32 efx_farch_fpga_ver(struct efx_nic *efx) 1721{ 1722 efx_oword_t altera_build; 1723 efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD); 1724 return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER); 1725} 1726 1727void efx_farch_init_common(struct efx_nic *efx) 1728{ 1729 efx_oword_t temp; 1730 1731 /* Set positions of descriptor caches in SRAM. */ 1732 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base); 1733 efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG); 1734 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base); 1735 efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG); 1736 1737 /* Set TX descriptor cache size. */ 1738 BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER)); 1739 EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER); 1740 efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG); 1741 1742 /* Set RX descriptor cache size. Set low watermark to size-8, as 1743 * this allows most efficient prefetching. 
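	 * With RX_DC_ENTRIES == 64 this programs FRF_AZ_RX_DC_PF_LWM to 56.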
1744 */ 1745 BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER)); 1746 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER); 1747 efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG); 1748 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8); 1749 efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM); 1750 1751 /* Program INT_KER address */ 1752 EFX_POPULATE_OWORD_2(temp, 1753 FRF_AZ_NORM_INT_VEC_DIS_KER, 1754 EFX_INT_MODE_USE_MSI(efx), 1755 FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr); 1756 efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER); 1757 1758 if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx)) 1759 /* Use an interrupt level unused by event queues */ 1760 efx->irq_level = 0x1f; 1761 else 1762 /* Use a valid MSI-X vector */ 1763 efx->irq_level = 0; 1764 1765 /* Enable all the genuinely fatal interrupts. (They are still 1766 * masked by the overall interrupt mask, controlled by 1767 * falcon_interrupts()). 1768 * 1769 * Note: All other fatal interrupts are enabled 1770 */ 1771 EFX_POPULATE_OWORD_3(temp, 1772 FRF_AZ_ILL_ADR_INT_KER_EN, 1, 1773 FRF_AZ_RBUF_OWN_INT_KER_EN, 1, 1774 FRF_AZ_TBUF_OWN_INT_KER_EN, 1); 1775 EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1); 1776 EFX_INVERT_OWORD(temp); 1777 efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER); 1778 1779 /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be 1780 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q. 1781 */ 1782 efx_reado(efx, &temp, FR_AZ_TX_RESERVED); 1783 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe); 1784 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1); 1785 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1); 1786 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1); 1787 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1); 1788 /* Enable SW_EV to inherit in char driver - assume harmless here */ 1789 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1); 1790 /* Prefetch threshold 2 => fetch when descriptor cache half empty */ 1791 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2); 1792 /* Disable hardware watchdog which can misfire */ 1793 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff); 1794 /* Squash TX of packets of 16 bytes or less */ 1795 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); 1796 efx_writeo(efx, &temp, FR_AZ_TX_RESERVED); 1797 1798 EFX_POPULATE_OWORD_4(temp, 1799 /* Default values */ 1800 FRF_BZ_TX_PACE_SB_NOT_AF, 0x15, 1801 FRF_BZ_TX_PACE_SB_AF, 0xb, 1802 FRF_BZ_TX_PACE_FB_BASE, 0, 1803 /* Allow large pace values in the fast bin. */ 1804 FRF_BZ_TX_PACE_BIN_TH, 1805 FFE_BZ_TX_PACE_RESERVED); 1806 efx_writeo(efx, &temp, FR_BZ_TX_PACE); 1807} 1808 1809/************************************************************************** 1810 * 1811 * Filter tables 1812 * 1813 ************************************************************************** 1814 */ 1815 1816/* "Fudge factors" - difference between programmed value and actual depth. 1817 * Due to pipelined implementation we need to program H/W with a value that 1818 * is larger than the hop limit we want. 1819 */ 1820#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD 3 1821#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL 1 1822 1823/* Hard maximum search limit. Hardware will time-out beyond 200-something. 1824 * We also need to avoid infinite loops in efx_farch_filter_search() when the 1825 * table is full. 1826 */ 1827#define EFX_FARCH_FILTER_CTL_SRCH_MAX 200 1828 1829/* Don't try very hard to find space for performance hints, as this is 1830 * counter-productive. 
*/ 1831#define EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX 5 1832 1833enum efx_farch_filter_type { 1834 EFX_FARCH_FILTER_TCP_FULL = 0, 1835 EFX_FARCH_FILTER_TCP_WILD, 1836 EFX_FARCH_FILTER_UDP_FULL, 1837 EFX_FARCH_FILTER_UDP_WILD, 1838 EFX_FARCH_FILTER_MAC_FULL = 4, 1839 EFX_FARCH_FILTER_MAC_WILD, 1840 EFX_FARCH_FILTER_UC_DEF = 8, 1841 EFX_FARCH_FILTER_MC_DEF, 1842 EFX_FARCH_FILTER_TYPE_COUNT, /* number of specific types */ 1843}; 1844 1845enum efx_farch_filter_table_id { 1846 EFX_FARCH_FILTER_TABLE_RX_IP = 0, 1847 EFX_FARCH_FILTER_TABLE_RX_MAC, 1848 EFX_FARCH_FILTER_TABLE_RX_DEF, 1849 EFX_FARCH_FILTER_TABLE_TX_MAC, 1850 EFX_FARCH_FILTER_TABLE_COUNT, 1851}; 1852 1853enum efx_farch_filter_index { 1854 EFX_FARCH_FILTER_INDEX_UC_DEF, 1855 EFX_FARCH_FILTER_INDEX_MC_DEF, 1856 EFX_FARCH_FILTER_SIZE_RX_DEF, 1857}; 1858 1859struct efx_farch_filter_spec { 1860 u8 type:4; 1861 u8 priority:4; 1862 u8 flags; 1863 u16 dmaq_id; 1864 u32 data[3]; 1865}; 1866 1867struct efx_farch_filter_table { 1868 enum efx_farch_filter_table_id id; 1869 u32 offset; /* address of table relative to BAR */ 1870 unsigned size; /* number of entries */ 1871 unsigned step; /* step between entries */ 1872 unsigned used; /* number currently used */ 1873 unsigned long *used_bitmap; 1874 struct efx_farch_filter_spec *spec; 1875 unsigned search_limit[EFX_FARCH_FILTER_TYPE_COUNT]; 1876}; 1877 1878struct efx_farch_filter_state { 1879 struct rw_semaphore lock; /* Protects table contents */ 1880 struct efx_farch_filter_table table[EFX_FARCH_FILTER_TABLE_COUNT]; 1881}; 1882 1883static void 1884efx_farch_filter_table_clear_entry(struct efx_nic *efx, 1885 struct efx_farch_filter_table *table, 1886 unsigned int filter_idx); 1887 1888/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit 1889 * key derived from the n-tuple. The initial LFSR state is 0xffff. */ 1890static u16 efx_farch_filter_hash(u32 key) 1891{ 1892 u16 tmp; 1893 1894 /* First 16 rounds */ 1895 tmp = 0x1fff ^ key >> 16; 1896 tmp = tmp ^ tmp >> 3 ^ tmp >> 6; 1897 tmp = tmp ^ tmp >> 9; 1898 /* Last 16 rounds */ 1899 tmp = tmp ^ tmp << 13 ^ key; 1900 tmp = tmp ^ tmp >> 3 ^ tmp >> 6; 1901 return tmp ^ tmp >> 9; 1902} 1903 1904/* To allow for hash collisions, filter search continues at these 1905 * increments from the first possible entry selected by the hash. */ 1906static u16 efx_farch_filter_increment(u32 key) 1907{ 1908 return key * 2 - 1; 1909} 1910 1911static enum efx_farch_filter_table_id 1912efx_farch_filter_spec_table_id(const struct efx_farch_filter_spec *spec) 1913{ 1914 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != 1915 (EFX_FARCH_FILTER_TCP_FULL >> 2)); 1916 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != 1917 (EFX_FARCH_FILTER_TCP_WILD >> 2)); 1918 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != 1919 (EFX_FARCH_FILTER_UDP_FULL >> 2)); 1920 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != 1921 (EFX_FARCH_FILTER_UDP_WILD >> 2)); 1922 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC != 1923 (EFX_FARCH_FILTER_MAC_FULL >> 2)); 1924 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC != 1925 (EFX_FARCH_FILTER_MAC_WILD >> 2)); 1926 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_TX_MAC != 1927 EFX_FARCH_FILTER_TABLE_RX_MAC + 2); 1928 return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 
2 : 0); 1929} 1930 1931static void efx_farch_filter_push_rx_config(struct efx_nic *efx) 1932{ 1933 struct efx_farch_filter_state *state = efx->filter_state; 1934 struct efx_farch_filter_table *table; 1935 efx_oword_t filter_ctl; 1936 1937 efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); 1938 1939 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; 1940 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT, 1941 table->search_limit[EFX_FARCH_FILTER_TCP_FULL] + 1942 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); 1943 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT, 1944 table->search_limit[EFX_FARCH_FILTER_TCP_WILD] + 1945 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); 1946 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT, 1947 table->search_limit[EFX_FARCH_FILTER_UDP_FULL] + 1948 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); 1949 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT, 1950 table->search_limit[EFX_FARCH_FILTER_UDP_WILD] + 1951 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); 1952 1953 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC]; 1954 if (table->size) { 1955 EFX_SET_OWORD_FIELD( 1956 filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT, 1957 table->search_limit[EFX_FARCH_FILTER_MAC_FULL] + 1958 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); 1959 EFX_SET_OWORD_FIELD( 1960 filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT, 1961 table->search_limit[EFX_FARCH_FILTER_MAC_WILD] + 1962 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); 1963 } 1964 1965 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF]; 1966 if (table->size) { 1967 EFX_SET_OWORD_FIELD( 1968 filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID, 1969 table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].dmaq_id); 1970 EFX_SET_OWORD_FIELD( 1971 filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED, 1972 !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags & 1973 EFX_FILTER_FLAG_RX_RSS)); 1974 EFX_SET_OWORD_FIELD( 1975 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID, 1976 table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].dmaq_id); 1977 EFX_SET_OWORD_FIELD( 1978 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED, 1979 !!(table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags & 1980 EFX_FILTER_FLAG_RX_RSS)); 1981 1982 /* There is a single bit to enable RX scatter for all 1983 * unmatched packets. Only set it if scatter is 1984 * enabled in both filter specs. 1985 */ 1986 EFX_SET_OWORD_FIELD( 1987 filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q, 1988 !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags & 1989 table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags & 1990 EFX_FILTER_FLAG_RX_SCATTER)); 1991 } else { 1992 /* We don't expose 'default' filters because unmatched 1993 * packets always go to the queue number found in the 1994 * RSS table. But we still need to set the RX scatter 1995 * bit here. 
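		 * (Conceptually, the hardware indexes the indirection table
		 * pushed by efx_farch_rx_push_indir_table() with the low
		 * bits of the RX hash, along the lines of the hypothetical
		 *
		 *	rxq = rx_indir_table[rx_hash % FR_BZ_RX_INDIRECTION_TBL_ROWS];
		 *
		 * so unmatched traffic is already spread across queues even
		 * without a default filter entry.)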
1996 */ 1997 EFX_SET_OWORD_FIELD( 1998 filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q, 1999 efx->rx_scatter); 2000 } 2001 2002 efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); 2003} 2004 2005static void efx_farch_filter_push_tx_limits(struct efx_nic *efx) 2006{ 2007 struct efx_farch_filter_state *state = efx->filter_state; 2008 struct efx_farch_filter_table *table; 2009 efx_oword_t tx_cfg; 2010 2011 efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG); 2012 2013 table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC]; 2014 if (table->size) { 2015 EFX_SET_OWORD_FIELD( 2016 tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE, 2017 table->search_limit[EFX_FARCH_FILTER_MAC_FULL] + 2018 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); 2019 EFX_SET_OWORD_FIELD( 2020 tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE, 2021 table->search_limit[EFX_FARCH_FILTER_MAC_WILD] + 2022 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); 2023 } 2024 2025 efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG); 2026} 2027 2028static int 2029efx_farch_filter_from_gen_spec(struct efx_farch_filter_spec *spec, 2030 const struct efx_filter_spec *gen_spec) 2031{ 2032 bool is_full = false; 2033 2034 if ((gen_spec->flags & EFX_FILTER_FLAG_RX_RSS) && gen_spec->rss_context) 2035 return -EINVAL; 2036 2037 spec->priority = gen_spec->priority; 2038 spec->flags = gen_spec->flags; 2039 spec->dmaq_id = gen_spec->dmaq_id; 2040 2041 switch (gen_spec->match_flags) { 2042 case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | 2043 EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | 2044 EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT): 2045 is_full = true; 2046 /* fall through */ 2047 case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | 2048 EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT): { 2049 __be32 rhost, host1, host2; 2050 __be16 rport, port1, port2; 2051 2052 EFX_WARN_ON_PARANOID(!(gen_spec->flags & EFX_FILTER_FLAG_RX)); 2053 2054 if (gen_spec->ether_type != htons(ETH_P_IP)) 2055 return -EPROTONOSUPPORT; 2056 if (gen_spec->loc_port == 0 || 2057 (is_full && gen_spec->rem_port == 0)) 2058 return -EADDRNOTAVAIL; 2059 switch (gen_spec->ip_proto) { 2060 case IPPROTO_TCP: 2061 spec->type = (is_full ? EFX_FARCH_FILTER_TCP_FULL : 2062 EFX_FARCH_FILTER_TCP_WILD); 2063 break; 2064 case IPPROTO_UDP: 2065 spec->type = (is_full ? EFX_FARCH_FILTER_UDP_FULL : 2066 EFX_FARCH_FILTER_UDP_WILD); 2067 break; 2068 default: 2069 return -EPROTONOSUPPORT; 2070 } 2071 2072 /* Filter is constructed in terms of source and destination, 2073 * with the odd wrinkle that the ports are swapped in a UDP 2074 * wildcard filter. We need to convert from local and remote 2075 * (= zero for wildcard) addresses. 2076 */ 2077 rhost = is_full ? gen_spec->rem_host[0] : 0; 2078 rport = is_full ? gen_spec->rem_port : 0; 2079 host1 = rhost; 2080 host2 = gen_spec->loc_host[0]; 2081 if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) { 2082 port1 = gen_spec->loc_port; 2083 port2 = rport; 2084 } else { 2085 port1 = rport; 2086 port2 = gen_spec->loc_port; 2087 } 2088 spec->data[0] = ntohl(host1) << 16 | ntohs(port1); 2089 spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16; 2090 spec->data[2] = ntohl(host2); 2091 2092 break; 2093 } 2094 2095 case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID: 2096 is_full = true; 2097 /* fall through */ 2098 case EFX_FILTER_MATCH_LOC_MAC: 2099 spec->type = (is_full ? EFX_FARCH_FILTER_MAC_FULL : 2100 EFX_FARCH_FILTER_MAC_WILD); 2101 spec->data[0] = is_full ? 
ntohs(gen_spec->outer_vid) : 0; 2102 spec->data[1] = (gen_spec->loc_mac[2] << 24 | 2103 gen_spec->loc_mac[3] << 16 | 2104 gen_spec->loc_mac[4] << 8 | 2105 gen_spec->loc_mac[5]); 2106 spec->data[2] = (gen_spec->loc_mac[0] << 8 | 2107 gen_spec->loc_mac[1]); 2108 break; 2109 2110 case EFX_FILTER_MATCH_LOC_MAC_IG: 2111 spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ? 2112 EFX_FARCH_FILTER_MC_DEF : 2113 EFX_FARCH_FILTER_UC_DEF); 2114 memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */ 2115 break; 2116 2117 default: 2118 return -EPROTONOSUPPORT; 2119 } 2120 2121 return 0; 2122} 2123 2124static void 2125efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec, 2126 const struct efx_farch_filter_spec *spec) 2127{ 2128 bool is_full = false; 2129 2130 /* *gen_spec should be completely initialised, to be consistent 2131 * with efx_filter_init_{rx,tx}() and in case we want to copy 2132 * it back to userland. 2133 */ 2134 memset(gen_spec, 0, sizeof(*gen_spec)); 2135 2136 gen_spec->priority = spec->priority; 2137 gen_spec->flags = spec->flags; 2138 gen_spec->dmaq_id = spec->dmaq_id; 2139 2140 switch (spec->type) { 2141 case EFX_FARCH_FILTER_TCP_FULL: 2142 case EFX_FARCH_FILTER_UDP_FULL: 2143 is_full = true; 2144 /* fall through */ 2145 case EFX_FARCH_FILTER_TCP_WILD: 2146 case EFX_FARCH_FILTER_UDP_WILD: { 2147 __be32 host1, host2; 2148 __be16 port1, port2; 2149 2150 gen_spec->match_flags = 2151 EFX_FILTER_MATCH_ETHER_TYPE | 2152 EFX_FILTER_MATCH_IP_PROTO | 2153 EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT; 2154 if (is_full) 2155 gen_spec->match_flags |= (EFX_FILTER_MATCH_REM_HOST | 2156 EFX_FILTER_MATCH_REM_PORT); 2157 gen_spec->ether_type = htons(ETH_P_IP); 2158 gen_spec->ip_proto = 2159 (spec->type == EFX_FARCH_FILTER_TCP_FULL || 2160 spec->type == EFX_FARCH_FILTER_TCP_WILD) ? 
2161 IPPROTO_TCP : IPPROTO_UDP; 2162 2163 host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16); 2164 port1 = htons(spec->data[0]); 2165 host2 = htonl(spec->data[2]); 2166 port2 = htons(spec->data[1] >> 16); 2167 if (spec->flags & EFX_FILTER_FLAG_TX) { 2168 gen_spec->loc_host[0] = host1; 2169 gen_spec->rem_host[0] = host2; 2170 } else { 2171 gen_spec->loc_host[0] = host2; 2172 gen_spec->rem_host[0] = host1; 2173 } 2174 if (!!(gen_spec->flags & EFX_FILTER_FLAG_TX) ^ 2175 (!is_full && gen_spec->ip_proto == IPPROTO_UDP)) { 2176 gen_spec->loc_port = port1; 2177 gen_spec->rem_port = port2; 2178 } else { 2179 gen_spec->loc_port = port2; 2180 gen_spec->rem_port = port1; 2181 } 2182 2183 break; 2184 } 2185 2186 case EFX_FARCH_FILTER_MAC_FULL: 2187 is_full = true; 2188 /* fall through */ 2189 case EFX_FARCH_FILTER_MAC_WILD: 2190 gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC; 2191 if (is_full) 2192 gen_spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID; 2193 gen_spec->loc_mac[0] = spec->data[2] >> 8; 2194 gen_spec->loc_mac[1] = spec->data[2]; 2195 gen_spec->loc_mac[2] = spec->data[1] >> 24; 2196 gen_spec->loc_mac[3] = spec->data[1] >> 16; 2197 gen_spec->loc_mac[4] = spec->data[1] >> 8; 2198 gen_spec->loc_mac[5] = spec->data[1]; 2199 gen_spec->outer_vid = htons(spec->data[0]); 2200 break; 2201 2202 case EFX_FARCH_FILTER_UC_DEF: 2203 case EFX_FARCH_FILTER_MC_DEF: 2204 gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC_IG; 2205 gen_spec->loc_mac[0] = spec->type == EFX_FARCH_FILTER_MC_DEF; 2206 break; 2207 2208 default: 2209 WARN_ON(1); 2210 break; 2211 } 2212} 2213 2214static void 2215efx_farch_filter_init_rx_auto(struct efx_nic *efx, 2216 struct efx_farch_filter_spec *spec) 2217{ 2218 /* If there's only one channel then disable RSS for non VF 2219 * traffic, thereby allowing VFs to use RSS when the PF can't. 2220 */ 2221 spec->priority = EFX_FILTER_PRI_AUTO; 2222 spec->flags = (EFX_FILTER_FLAG_RX | 2223 (efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0) | 2224 (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0)); 2225 spec->dmaq_id = 0; 2226} 2227 2228/* Build a filter entry and return its n-tuple key. 
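 * The key is the XOR of the three data words of the spec with a few bits of
 * type information (data3 below), i.e. data[0] ^ data[1] ^ data[2] ^ data3;
 * it is what efx_farch_filter_hash() and efx_farch_filter_increment() are
 * applied to when choosing a slot.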
*/ 2229static u32 efx_farch_filter_build(efx_oword_t *filter, 2230 struct efx_farch_filter_spec *spec) 2231{ 2232 u32 data3; 2233 2234 switch (efx_farch_filter_spec_table_id(spec)) { 2235 case EFX_FARCH_FILTER_TABLE_RX_IP: { 2236 bool is_udp = (spec->type == EFX_FARCH_FILTER_UDP_FULL || 2237 spec->type == EFX_FARCH_FILTER_UDP_WILD); 2238 EFX_POPULATE_OWORD_7( 2239 *filter, 2240 FRF_BZ_RSS_EN, 2241 !!(spec->flags & EFX_FILTER_FLAG_RX_RSS), 2242 FRF_BZ_SCATTER_EN, 2243 !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER), 2244 FRF_BZ_TCP_UDP, is_udp, 2245 FRF_BZ_RXQ_ID, spec->dmaq_id, 2246 EFX_DWORD_2, spec->data[2], 2247 EFX_DWORD_1, spec->data[1], 2248 EFX_DWORD_0, spec->data[0]); 2249 data3 = is_udp; 2250 break; 2251 } 2252 2253 case EFX_FARCH_FILTER_TABLE_RX_MAC: { 2254 bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD; 2255 EFX_POPULATE_OWORD_7( 2256 *filter, 2257 FRF_CZ_RMFT_RSS_EN, 2258 !!(spec->flags & EFX_FILTER_FLAG_RX_RSS), 2259 FRF_CZ_RMFT_SCATTER_EN, 2260 !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER), 2261 FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id, 2262 FRF_CZ_RMFT_WILDCARD_MATCH, is_wild, 2263 FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2], 2264 FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1], 2265 FRF_CZ_RMFT_VLAN_ID, spec->data[0]); 2266 data3 = is_wild; 2267 break; 2268 } 2269 2270 case EFX_FARCH_FILTER_TABLE_TX_MAC: { 2271 bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD; 2272 EFX_POPULATE_OWORD_5(*filter, 2273 FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id, 2274 FRF_CZ_TMFT_WILDCARD_MATCH, is_wild, 2275 FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2], 2276 FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1], 2277 FRF_CZ_TMFT_VLAN_ID, spec->data[0]); 2278 data3 = is_wild | spec->dmaq_id << 1; 2279 break; 2280 } 2281 2282 default: 2283 BUG(); 2284 } 2285 2286 return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3; 2287} 2288 2289static bool efx_farch_filter_equal(const struct efx_farch_filter_spec *left, 2290 const struct efx_farch_filter_spec *right) 2291{ 2292 if (left->type != right->type || 2293 memcmp(left->data, right->data, sizeof(left->data))) 2294 return false; 2295 2296 if (left->flags & EFX_FILTER_FLAG_TX && 2297 left->dmaq_id != right->dmaq_id) 2298 return false; 2299 2300 return true; 2301} 2302 2303/* 2304 * Construct/deconstruct external filter IDs. At least the RX filter 2305 * IDs must be ordered by matching priority, for RX NFC semantics. 2306 * 2307 * Deconstruction needs to be robust against invalid IDs so that 2308 * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can 2309 * accept user-provided IDs. 
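 *
 * (Encoding sketch, matching the helpers below: an external ID packs a
 * match-priority "range" above a 13-bit table index,
 *
 *	id    = range << EFX_FARCH_FILTER_INDEX_WIDTH | index;
 *	range = id >> EFX_FARCH_FILTER_INDEX_WIDTH;
 *	index = id & EFX_FARCH_FILTER_INDEX_MASK;
 *
 * so IDs sort first by match priority, and an out-of-range "range" simply
 * decodes to EFX_FARCH_FILTER_TABLE_COUNT, i.e. an invalid table.)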
2310 */ 2311 2312#define EFX_FARCH_FILTER_MATCH_PRI_COUNT 5 2313 2314static const u8 efx_farch_filter_type_match_pri[EFX_FARCH_FILTER_TYPE_COUNT] = { 2315 [EFX_FARCH_FILTER_TCP_FULL] = 0, 2316 [EFX_FARCH_FILTER_UDP_FULL] = 0, 2317 [EFX_FARCH_FILTER_TCP_WILD] = 1, 2318 [EFX_FARCH_FILTER_UDP_WILD] = 1, 2319 [EFX_FARCH_FILTER_MAC_FULL] = 2, 2320 [EFX_FARCH_FILTER_MAC_WILD] = 3, 2321 [EFX_FARCH_FILTER_UC_DEF] = 4, 2322 [EFX_FARCH_FILTER_MC_DEF] = 4, 2323}; 2324 2325static const enum efx_farch_filter_table_id efx_farch_filter_range_table[] = { 2326 EFX_FARCH_FILTER_TABLE_RX_IP, /* RX match pri 0 */ 2327 EFX_FARCH_FILTER_TABLE_RX_IP, 2328 EFX_FARCH_FILTER_TABLE_RX_MAC, 2329 EFX_FARCH_FILTER_TABLE_RX_MAC, 2330 EFX_FARCH_FILTER_TABLE_RX_DEF, /* RX match pri 4 */ 2331 EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 0 */ 2332 EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 1 */ 2333}; 2334 2335#define EFX_FARCH_FILTER_INDEX_WIDTH 13 2336#define EFX_FARCH_FILTER_INDEX_MASK ((1 << EFX_FARCH_FILTER_INDEX_WIDTH) - 1) 2337 2338static inline u32 2339efx_farch_filter_make_id(const struct efx_farch_filter_spec *spec, 2340 unsigned int index) 2341{ 2342 unsigned int range; 2343 2344 range = efx_farch_filter_type_match_pri[spec->type]; 2345 if (!(spec->flags & EFX_FILTER_FLAG_RX)) 2346 range += EFX_FARCH_FILTER_MATCH_PRI_COUNT; 2347 2348 return range << EFX_FARCH_FILTER_INDEX_WIDTH | index; 2349} 2350 2351static inline enum efx_farch_filter_table_id 2352efx_farch_filter_id_table_id(u32 id) 2353{ 2354 unsigned int range = id >> EFX_FARCH_FILTER_INDEX_WIDTH; 2355 2356 if (range < ARRAY_SIZE(efx_farch_filter_range_table)) 2357 return efx_farch_filter_range_table[range]; 2358 else 2359 return EFX_FARCH_FILTER_TABLE_COUNT; /* invalid */ 2360} 2361 2362static inline unsigned int efx_farch_filter_id_index(u32 id) 2363{ 2364 return id & EFX_FARCH_FILTER_INDEX_MASK; 2365} 2366 2367u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx) 2368{ 2369 struct efx_farch_filter_state *state = efx->filter_state; 2370 unsigned int range = EFX_FARCH_FILTER_MATCH_PRI_COUNT - 1; 2371 enum efx_farch_filter_table_id table_id; 2372 2373 do { 2374 table_id = efx_farch_filter_range_table[range]; 2375 if (state->table[table_id].size != 0) 2376 return range << EFX_FARCH_FILTER_INDEX_WIDTH | 2377 state->table[table_id].size; 2378 } while (range--); 2379 2380 return 0; 2381} 2382 2383s32 efx_farch_filter_insert(struct efx_nic *efx, 2384 struct efx_filter_spec *gen_spec, 2385 bool replace_equal) 2386{ 2387 struct efx_farch_filter_state *state = efx->filter_state; 2388 struct efx_farch_filter_table *table; 2389 struct efx_farch_filter_spec spec; 2390 efx_oword_t filter; 2391 int rep_index, ins_index; 2392 unsigned int depth = 0; 2393 int rc; 2394 2395 rc = efx_farch_filter_from_gen_spec(&spec, gen_spec); 2396 if (rc) 2397 return rc; 2398 2399 down_write(&state->lock); 2400 2401 table = &state->table[efx_farch_filter_spec_table_id(&spec)]; 2402 if (table->size == 0) { 2403 rc = -EINVAL; 2404 goto out_unlock; 2405 } 2406 2407 netif_vdbg(efx, hw, efx->net_dev, 2408 "%s: type %d search_limit=%d", __func__, spec.type, 2409 table->search_limit[spec.type]); 2410 2411 if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) { 2412 /* One filter spec per type */ 2413 BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_UC_DEF != 0); 2414 BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_MC_DEF != 2415 EFX_FARCH_FILTER_MC_DEF - EFX_FARCH_FILTER_UC_DEF); 2416 rep_index = spec.type - EFX_FARCH_FILTER_UC_DEF; 2417 ins_index = rep_index; 2418 } else { 2419 /* Search concurrently for 2420 * (1) 
a filter to be replaced (rep_index): any filter 2421 * with the same match values, up to the current 2422 * search depth for this type, and 2423 * (2) the insertion point (ins_index): (1) or any 2424 * free slot before it or up to the maximum search 2425 * depth for this priority 2426 * We fail if we cannot find (2). 2427 * 2428 * We can stop once either 2429 * (a) we find (1), in which case we have definitely 2430 * found (2) as well; or 2431 * (b) we have searched exhaustively for (1), and have 2432 * either found (2) or searched exhaustively for it 2433 */ 2434 u32 key = efx_farch_filter_build(&filter, &spec); 2435 unsigned int hash = efx_farch_filter_hash(key); 2436 unsigned int incr = efx_farch_filter_increment(key); 2437 unsigned int max_rep_depth = table->search_limit[spec.type]; 2438 unsigned int max_ins_depth = 2439 spec.priority <= EFX_FILTER_PRI_HINT ? 2440 EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX : 2441 EFX_FARCH_FILTER_CTL_SRCH_MAX; 2442 unsigned int i = hash & (table->size - 1); 2443 2444 ins_index = -1; 2445 depth = 1; 2446 2447 for (;;) { 2448 if (!test_bit(i, table->used_bitmap)) { 2449 if (ins_index < 0) 2450 ins_index = i; 2451 } else if (efx_farch_filter_equal(&spec, 2452 &table->spec[i])) { 2453 /* Case (a) */ 2454 if (ins_index < 0) 2455 ins_index = i; 2456 rep_index = i; 2457 break; 2458 } 2459 2460 if (depth >= max_rep_depth && 2461 (ins_index >= 0 || depth >= max_ins_depth)) { 2462 /* Case (b) */ 2463 if (ins_index < 0) { 2464 rc = -EBUSY; 2465 goto out_unlock; 2466 } 2467 rep_index = -1; 2468 break; 2469 } 2470 2471 i = (i + incr) & (table->size - 1); 2472 ++depth; 2473 } 2474 } 2475 2476 /* If we found a filter to be replaced, check whether we 2477 * should do so 2478 */ 2479 if (rep_index >= 0) { 2480 struct efx_farch_filter_spec *saved_spec = 2481 &table->spec[rep_index]; 2482 2483 if (spec.priority == saved_spec->priority && !replace_equal) { 2484 rc = -EEXIST; 2485 goto out_unlock; 2486 } 2487 if (spec.priority < saved_spec->priority) { 2488 rc = -EPERM; 2489 goto out_unlock; 2490 } 2491 if (saved_spec->priority == EFX_FILTER_PRI_AUTO || 2492 saved_spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) 2493 spec.flags |= EFX_FILTER_FLAG_RX_OVER_AUTO; 2494 } 2495 2496 /* Insert the filter */ 2497 if (ins_index != rep_index) { 2498 __set_bit(ins_index, table->used_bitmap); 2499 ++table->used; 2500 } 2501 table->spec[ins_index] = spec; 2502 2503 if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) { 2504 efx_farch_filter_push_rx_config(efx); 2505 } else { 2506 if (table->search_limit[spec.type] < depth) { 2507 table->search_limit[spec.type] = depth; 2508 if (spec.flags & EFX_FILTER_FLAG_TX) 2509 efx_farch_filter_push_tx_limits(efx); 2510 else 2511 efx_farch_filter_push_rx_config(efx); 2512 } 2513 2514 efx_writeo(efx, &filter, 2515 table->offset + table->step * ins_index); 2516 2517 /* If we were able to replace a filter by inserting 2518 * at a lower depth, clear the replaced filter 2519 */ 2520 if (ins_index != rep_index && rep_index >= 0) 2521 efx_farch_filter_table_clear_entry(efx, table, 2522 rep_index); 2523 } 2524 2525 netif_vdbg(efx, hw, efx->net_dev, 2526 "%s: filter type %d index %d rxq %u set", 2527 __func__, spec.type, ins_index, spec.dmaq_id); 2528 rc = efx_farch_filter_make_id(&spec, ins_index); 2529 2530out_unlock: 2531 up_write(&state->lock); 2532 return rc; 2533} 2534 2535static void 2536efx_farch_filter_table_clear_entry(struct efx_nic *efx, 2537 struct efx_farch_filter_table *table, 2538 unsigned int filter_idx) 2539{ 2540 static efx_oword_t filter; 2541 2542 
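	/* "filter" above is a zero-initialised static, so writing it out
	 * below replaces the hardware table entry with an all-zeroes
	 * (disabled) filter; the used bitmap and saved spec are cleared to
	 * match.
	 */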
EFX_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap)); 2543 BUG_ON(table->offset == 0); /* can't clear MAC default filters */ 2544 2545 __clear_bit(filter_idx, table->used_bitmap); 2546 --table->used; 2547 memset(&table->spec[filter_idx], 0, sizeof(table->spec[0])); 2548 2549 efx_writeo(efx, &filter, table->offset + table->step * filter_idx); 2550 2551 /* If this filter required a greater search depth than 2552 * any other, the search limit for its type can now be 2553 * decreased. However, it is hard to determine that 2554 * unless the table has become completely empty - in 2555 * which case, all its search limits can be set to 0. 2556 */ 2557 if (unlikely(table->used == 0)) { 2558 memset(table->search_limit, 0, sizeof(table->search_limit)); 2559 if (table->id == EFX_FARCH_FILTER_TABLE_TX_MAC) 2560 efx_farch_filter_push_tx_limits(efx); 2561 else 2562 efx_farch_filter_push_rx_config(efx); 2563 } 2564} 2565 2566static int efx_farch_filter_remove(struct efx_nic *efx, 2567 struct efx_farch_filter_table *table, 2568 unsigned int filter_idx, 2569 enum efx_filter_priority priority) 2570{ 2571 struct efx_farch_filter_spec *spec = &table->spec[filter_idx]; 2572 2573 if (!test_bit(filter_idx, table->used_bitmap) || 2574 spec->priority != priority) 2575 return -ENOENT; 2576 2577 if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) { 2578 efx_farch_filter_init_rx_auto(efx, spec); 2579 efx_farch_filter_push_rx_config(efx); 2580 } else { 2581 efx_farch_filter_table_clear_entry(efx, table, filter_idx); 2582 } 2583 2584 return 0; 2585} 2586 2587int efx_farch_filter_remove_safe(struct efx_nic *efx, 2588 enum efx_filter_priority priority, 2589 u32 filter_id) 2590{ 2591 struct efx_farch_filter_state *state = efx->filter_state; 2592 enum efx_farch_filter_table_id table_id; 2593 struct efx_farch_filter_table *table; 2594 unsigned int filter_idx; 2595 struct efx_farch_filter_spec *spec; 2596 int rc; 2597 2598 table_id = efx_farch_filter_id_table_id(filter_id); 2599 if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT) 2600 return -ENOENT; 2601 table = &state->table[table_id]; 2602 2603 filter_idx = efx_farch_filter_id_index(filter_id); 2604 if (filter_idx >= table->size) 2605 return -ENOENT; 2606 down_write(&state->lock); 2607 spec = &table->spec[filter_idx]; 2608 2609 rc = efx_farch_filter_remove(efx, table, filter_idx, priority); 2610 up_write(&state->lock); 2611 2612 return rc; 2613} 2614 2615int efx_farch_filter_get_safe(struct efx_nic *efx, 2616 enum efx_filter_priority priority, 2617 u32 filter_id, struct efx_filter_spec *spec_buf) 2618{ 2619 struct efx_farch_filter_state *state = efx->filter_state; 2620 enum efx_farch_filter_table_id table_id; 2621 struct efx_farch_filter_table *table; 2622 struct efx_farch_filter_spec *spec; 2623 unsigned int filter_idx; 2624 int rc = -ENOENT; 2625 2626 down_read(&state->lock); 2627 2628 table_id = efx_farch_filter_id_table_id(filter_id); 2629 if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT) 2630 goto out_unlock; 2631 table = &state->table[table_id]; 2632 2633 filter_idx = efx_farch_filter_id_index(filter_id); 2634 if (filter_idx >= table->size) 2635 goto out_unlock; 2636 spec = &table->spec[filter_idx]; 2637 2638 if (test_bit(filter_idx, table->used_bitmap) && 2639 spec->priority == priority) { 2640 efx_farch_filter_to_gen_spec(spec_buf, spec); 2641 rc = 0; 2642 } 2643 2644out_unlock: 2645 up_read(&state->lock); 2646 return rc; 2647} 2648 2649static void 2650efx_farch_filter_table_clear(struct efx_nic *efx, 2651 enum efx_farch_filter_table_id 
table_id, 2652 enum efx_filter_priority priority) 2653{ 2654 struct efx_farch_filter_state *state = efx->filter_state; 2655 struct efx_farch_filter_table *table = &state->table[table_id]; 2656 unsigned int filter_idx; 2657 2658 down_write(&state->lock); 2659 for (filter_idx = 0; filter_idx < table->size; ++filter_idx) { 2660 if (table->spec[filter_idx].priority != EFX_FILTER_PRI_AUTO) 2661 efx_farch_filter_remove(efx, table, 2662 filter_idx, priority); 2663 } 2664 up_write(&state->lock); 2665} 2666 2667int efx_farch_filter_clear_rx(struct efx_nic *efx, 2668 enum efx_filter_priority priority) 2669{ 2670 efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_IP, 2671 priority); 2672 efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_MAC, 2673 priority); 2674 efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_DEF, 2675 priority); 2676 return 0; 2677} 2678 2679u32 efx_farch_filter_count_rx_used(struct efx_nic *efx, 2680 enum efx_filter_priority priority) 2681{ 2682 struct efx_farch_filter_state *state = efx->filter_state; 2683 enum efx_farch_filter_table_id table_id; 2684 struct efx_farch_filter_table *table; 2685 unsigned int filter_idx; 2686 u32 count = 0; 2687 2688 down_read(&state->lock); 2689 2690 for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP; 2691 table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF; 2692 table_id++) { 2693 table = &state->table[table_id]; 2694 for (filter_idx = 0; filter_idx < table->size; filter_idx++) { 2695 if (test_bit(filter_idx, table->used_bitmap) && 2696 table->spec[filter_idx].priority == priority) 2697 ++count; 2698 } 2699 } 2700 2701 up_read(&state->lock); 2702 2703 return count; 2704} 2705 2706s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx, 2707 enum efx_filter_priority priority, 2708 u32 *buf, u32 size) 2709{ 2710 struct efx_farch_filter_state *state = efx->filter_state; 2711 enum efx_farch_filter_table_id table_id; 2712 struct efx_farch_filter_table *table; 2713 unsigned int filter_idx; 2714 s32 count = 0; 2715 2716 down_read(&state->lock); 2717 2718 for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP; 2719 table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF; 2720 table_id++) { 2721 table = &state->table[table_id]; 2722 for (filter_idx = 0; filter_idx < table->size; filter_idx++) { 2723 if (test_bit(filter_idx, table->used_bitmap) && 2724 table->spec[filter_idx].priority == priority) { 2725 if (count == size) { 2726 count = -EMSGSIZE; 2727 goto out; 2728 } 2729 buf[count++] = efx_farch_filter_make_id( 2730 &table->spec[filter_idx], filter_idx); 2731 } 2732 } 2733 } 2734out: 2735 up_read(&state->lock); 2736 2737 return count; 2738} 2739 2740/* Restore filter stater after reset */ 2741void efx_farch_filter_table_restore(struct efx_nic *efx) 2742{ 2743 struct efx_farch_filter_state *state = efx->filter_state; 2744 enum efx_farch_filter_table_id table_id; 2745 struct efx_farch_filter_table *table; 2746 efx_oword_t filter; 2747 unsigned int filter_idx; 2748 2749 down_write(&state->lock); 2750 2751 for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) { 2752 table = &state->table[table_id]; 2753 2754 /* Check whether this is a regular register table */ 2755 if (table->step == 0) 2756 continue; 2757 2758 for (filter_idx = 0; filter_idx < table->size; filter_idx++) { 2759 if (!test_bit(filter_idx, table->used_bitmap)) 2760 continue; 2761 efx_farch_filter_build(&filter, &table->spec[filter_idx]); 2762 efx_writeo(efx, &filter, 2763 table->offset + table->step * filter_idx); 2764 } 2765 } 2766 2767 efx_farch_filter_push_rx_config(efx); 2768 
efx_farch_filter_push_tx_limits(efx); 2769 2770 up_write(&state->lock); 2771} 2772 2773void efx_farch_filter_table_remove(struct efx_nic *efx) 2774{ 2775 struct efx_farch_filter_state *state = efx->filter_state; 2776 enum efx_farch_filter_table_id table_id; 2777 2778 for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) { 2779 kfree(state->table[table_id].used_bitmap); 2780 vfree(state->table[table_id].spec); 2781 } 2782 kfree(state); 2783} 2784 2785int efx_farch_filter_table_probe(struct efx_nic *efx) 2786{ 2787 struct efx_farch_filter_state *state; 2788 struct efx_farch_filter_table *table; 2789 unsigned table_id; 2790 2791 state = kzalloc(sizeof(struct efx_farch_filter_state), GFP_KERNEL); 2792 if (!state) 2793 return -ENOMEM; 2794 efx->filter_state = state; 2795 init_rwsem(&state->lock); 2796 2797 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; 2798 table->id = EFX_FARCH_FILTER_TABLE_RX_IP; 2799 table->offset = FR_BZ_RX_FILTER_TBL0; 2800 table->size = FR_BZ_RX_FILTER_TBL0_ROWS; 2801 table->step = FR_BZ_RX_FILTER_TBL0_STEP; 2802 2803 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC]; 2804 table->id = EFX_FARCH_FILTER_TABLE_RX_MAC; 2805 table->offset = FR_CZ_RX_MAC_FILTER_TBL0; 2806 table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS; 2807 table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP; 2808 2809 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF]; 2810 table->id = EFX_FARCH_FILTER_TABLE_RX_DEF; 2811 table->size = EFX_FARCH_FILTER_SIZE_RX_DEF; 2812 2813 table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC]; 2814 table->id = EFX_FARCH_FILTER_TABLE_TX_MAC; 2815 table->offset = FR_CZ_TX_MAC_FILTER_TBL0; 2816 table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS; 2817 table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP; 2818 2819 for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) { 2820 table = &state->table[table_id]; 2821 if (table->size == 0) 2822 continue; 2823 table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size), 2824 sizeof(unsigned long), 2825 GFP_KERNEL); 2826 if (!table->used_bitmap) 2827 goto fail; 2828 table->spec = vzalloc(array_size(sizeof(*table->spec), 2829 table->size)); 2830 if (!table->spec) 2831 goto fail; 2832 } 2833 2834 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF]; 2835 if (table->size) { 2836 /* RX default filters must always exist */ 2837 struct efx_farch_filter_spec *spec; 2838 unsigned i; 2839 2840 for (i = 0; i < EFX_FARCH_FILTER_SIZE_RX_DEF; i++) { 2841 spec = &table->spec[i]; 2842 spec->type = EFX_FARCH_FILTER_UC_DEF + i; 2843 efx_farch_filter_init_rx_auto(efx, spec); 2844 __set_bit(i, table->used_bitmap); 2845 } 2846 } 2847 2848 efx_farch_filter_push_rx_config(efx); 2849 2850 return 0; 2851 2852fail: 2853 efx_farch_filter_table_remove(efx); 2854 return -ENOMEM; 2855} 2856 2857/* Update scatter enable flags for filters pointing to our own RX queues */ 2858void efx_farch_filter_update_rx_scatter(struct efx_nic *efx) 2859{ 2860 struct efx_farch_filter_state *state = efx->filter_state; 2861 enum efx_farch_filter_table_id table_id; 2862 struct efx_farch_filter_table *table; 2863 efx_oword_t filter; 2864 unsigned int filter_idx; 2865 2866 down_write(&state->lock); 2867 2868 for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP; 2869 table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF; 2870 table_id++) { 2871 table = &state->table[table_id]; 2872 2873 for (filter_idx = 0; filter_idx < table->size; filter_idx++) { 2874 if (!test_bit(filter_idx, table->used_bitmap) || 2875 table->spec[filter_idx].dmaq_id >= 2876 efx->n_rx_channels) 2877 continue; 2878 2879 if 
(efx->rx_scatter) 2880 table->spec[filter_idx].flags |= 2881 EFX_FILTER_FLAG_RX_SCATTER; 2882 else 2883 table->spec[filter_idx].flags &= 2884 ~EFX_FILTER_FLAG_RX_SCATTER; 2885 2886 if (table_id == EFX_FARCH_FILTER_TABLE_RX_DEF) 2887 /* Pushed by efx_farch_filter_push_rx_config() */ 2888 continue; 2889 2890 efx_farch_filter_build(&filter, &table->spec[filter_idx]); 2891 efx_writeo(efx, &filter, 2892 table->offset + table->step * filter_idx); 2893 } 2894 } 2895 2896 efx_farch_filter_push_rx_config(efx); 2897 2898 up_write(&state->lock); 2899} 2900 2901#ifdef CONFIG_RFS_ACCEL 2902 2903bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, 2904 unsigned int index) 2905{ 2906 struct efx_farch_filter_state *state = efx->filter_state; 2907 struct efx_farch_filter_table *table; 2908 bool ret = false, force = false; 2909 u16 arfs_id; 2910 2911 down_write(&state->lock); 2912 spin_lock_bh(&efx->rps_hash_lock); 2913 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; 2914 if (test_bit(index, table->used_bitmap) && 2915 table->spec[index].priority == EFX_FILTER_PRI_HINT) { 2916 struct efx_arfs_rule *rule = NULL; 2917 struct efx_filter_spec spec; 2918 2919 efx_farch_filter_to_gen_spec(&spec, &table->spec[index]); 2920 if (!efx->rps_hash_table) { 2921 /* In the absence of the table, we always returned 0 to 2922 * ARFS, so use the same to query it. 2923 */ 2924 arfs_id = 0; 2925 } else { 2926 rule = efx_rps_hash_find(efx, &spec); 2927 if (!rule) { 2928 /* ARFS table doesn't know of this filter, remove it */ 2929 force = true; 2930 } else { 2931 arfs_id = rule->arfs_id; 2932 if (!efx_rps_check_rule(rule, index, &force)) 2933 goto out_unlock; 2934 } 2935 } 2936 if (force || rps_may_expire_flow(efx->net_dev, spec.dmaq_id, 2937 flow_id, arfs_id)) { 2938 if (rule) 2939 rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING; 2940 efx_rps_hash_del(efx, &spec); 2941 efx_farch_filter_table_clear_entry(efx, table, index); 2942 ret = true; 2943 } 2944 } 2945out_unlock: 2946 spin_unlock_bh(&efx->rps_hash_lock); 2947 up_write(&state->lock); 2948 return ret; 2949} 2950 2951#endif /* CONFIG_RFS_ACCEL */ 2952 2953void efx_farch_filter_sync_rx_mode(struct efx_nic *efx) 2954{ 2955 struct net_device *net_dev = efx->net_dev; 2956 struct netdev_hw_addr *ha; 2957 union efx_multicast_hash *mc_hash = &efx->multicast_hash; 2958 u32 crc; 2959 int bit; 2960 2961 if (!efx_dev_registered(efx)) 2962 return; 2963 2964 netif_addr_lock_bh(net_dev); 2965 2966 efx->unicast_filter = !(net_dev->flags & IFF_PROMISC); 2967 2968 /* Build multicast hash table */ 2969 if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) { 2970 memset(mc_hash, 0xff, sizeof(*mc_hash)); 2971 } else { 2972 memset(mc_hash, 0x00, sizeof(*mc_hash)); 2973 netdev_for_each_mc_addr(ha, net_dev) { 2974 crc = ether_crc_le(ETH_ALEN, ha->addr); 2975 bit = crc & (EFX_MCAST_HASH_ENTRIES - 1); 2976 __set_bit_le(bit, mc_hash); 2977 } 2978 2979 /* Broadcast packets go through the multicast hash filter. 2980 * ether_crc_le() of the broadcast address is 0xbe2612ff 2981 * so we always add bit 0xff to the mask. 2982 */ 2983 __set_bit_le(0xff, mc_hash); 2984 } 2985 2986 netif_addr_unlock_bh(net_dev); 2987}
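
/* Illustrative footnote on the multicast hash above: ether_crc_le() computes
 * the Ethernet CRC-32 LSB first, so the bit index can be reproduced in plain
 * C.  A minimal userspace sketch of an equivalent computation, assuming the
 * usual reflected polynomial 0xedb88320 and a 256-entry hash table
 * (EFX_MCAST_HASH_ENTRIES == 256):
 *
 *	static unsigned int mcast_hash_bit(const unsigned char mac[6])
 *	{
 *		unsigned int crc = ~0u;
 *		int i, bit;
 *
 *		for (i = 0; i < 6; i++) {
 *			crc ^= mac[i];
 *			for (bit = 0; bit < 8; bit++)
 *				crc = (crc >> 1) ^ (crc & 1 ? 0xedb88320 : 0);
 *		}
 *		return crc & (256 - 1);
 *	}
 *
 * The low byte of the broadcast CRC quoted above (0xbe2612ff) is 0xff, which
 * is why efx_farch_filter_sync_rx_mode() always forces bit 0xff on.
 */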