Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sfc: Remove struct efx_special_buffer

The attributes 'index' and 'entries' are no longer needed, so use
struct efx_buffer instead.
next_buffer_table was also Siena-specific.
Removed some checkpatch warnings.

Signed-off-by: Martin Habets <habetsm.xilinx@gmail.com>
Acked-by: Edward Cree <ecree.xilinx@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Martin Habets and committed by
David S. Miller
d73e7715 a847431c

+25 -75
+1 -1
drivers/net/ethernet/sfc/ef10.c
··· 2209 2209 /* low two bits of label are what we want for type */ 2210 2210 BUILD_BUG_ON((EFX_TXQ_TYPE_OUTER_CSUM | EFX_TXQ_TYPE_INNER_CSUM) != 3); 2211 2211 tx_queue->type = tx_queue->label & 3; 2212 - return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf, 2212 + return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd, 2213 2213 (tx_queue->ptr_mask + 1) * 2214 2214 sizeof(efx_qword_t), 2215 2215 GFP_KERNEL);
+1 -1
drivers/net/ethernet/sfc/ef100_nic.c
··· 224 224 static int ef100_ev_probe(struct efx_channel *channel) 225 225 { 226 226 /* Allocate an extra descriptor for the QMDA status completion entry */ 227 - return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf, 227 + return efx_nic_alloc_buffer(channel->efx, &channel->eventq, 228 228 (channel->eventq_mask + 2) * 229 229 sizeof(efx_qword_t), 230 230 GFP_KERNEL);
+3 -3
drivers/net/ethernet/sfc/ef100_tx.c
··· 23 23 int ef100_tx_probe(struct efx_tx_queue *tx_queue) 24 24 { 25 25 /* Allocate an extra descriptor for the QMDA status completion entry */ 26 - return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf, 26 + return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd, 27 27 (tx_queue->ptr_mask + 2) * 28 28 sizeof(efx_oword_t), 29 29 GFP_KERNEL); ··· 101 101 102 102 static efx_oword_t *ef100_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index) 103 103 { 104 - if (likely(tx_queue->txd.buf.addr)) 105 - return ((efx_oword_t *)tx_queue->txd.buf.addr) + index; 104 + if (likely(tx_queue->txd.addr)) 105 + return ((efx_oword_t *)tx_queue->txd.addr) + index; 106 106 else 107 107 return NULL; 108 108 }
+1 -29
drivers/net/ethernet/sfc/efx_channels.c
··· 713 713 struct efx_channel *channel; 714 714 int rc; 715 715 716 - /* Restart special buffer allocation */ 717 - efx->next_buffer_table = 0; 718 - 719 716 /* Probe channels in reverse, so that any 'extra' channels 720 717 * use the start of the buffer table. This allows the traffic 721 718 * channels to be resized without moving them or wasting the ··· 846 849 struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel, 847 850 *ptp_channel = efx_ptp_channel(efx); 848 851 struct efx_ptp_data *ptp_data = efx->ptp_data; 849 - unsigned int i, next_buffer_table = 0; 850 852 u32 old_rxq_entries, old_txq_entries; 853 + unsigned int i; 851 854 int rc, rc2; 852 855 853 856 rc = efx_check_disabled(efx); 854 857 if (rc) 855 858 return rc; 856 - 857 - /* Not all channels should be reallocated. We must avoid 858 - * reallocating their buffer table entries. 859 - */ 860 - efx_for_each_channel(channel, efx) { 861 - struct efx_rx_queue *rx_queue; 862 - struct efx_tx_queue *tx_queue; 863 - 864 - if (channel->type->copy) 865 - continue; 866 - next_buffer_table = max(next_buffer_table, 867 - channel->eventq.index + 868 - channel->eventq.entries); 869 - efx_for_each_channel_rx_queue(rx_queue, channel) 870 - next_buffer_table = max(next_buffer_table, 871 - rx_queue->rxd.index + 872 - rx_queue->rxd.entries); 873 - efx_for_each_channel_tx_queue(tx_queue, channel) 874 - next_buffer_table = max(next_buffer_table, 875 - tx_queue->txd.index + 876 - tx_queue->txd.entries); 877 - } 878 859 879 860 efx_device_detach_sync(efx); 880 861 efx_stop_all(efx); ··· 878 903 efx->txq_entries = txq_entries; 879 904 for (i = 0; i < efx->n_channels; i++) 880 905 swap(efx->channel[i], other_channel[i]); 881 - 882 - /* Restart buffer table allocation */ 883 - efx->next_buffer_table = next_buffer_table; 884 906 885 907 for (i = 0; i < efx->n_channels; i++) { 886 908 channel = efx->channel[i];
+12 -12
drivers/net/ethernet/sfc/mcdi_functions.c
··· 62 62 63 63 int efx_mcdi_ev_probe(struct efx_channel *channel) 64 64 { 65 - return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf, 65 + return efx_nic_alloc_buffer(channel->efx, &channel->eventq, 66 66 (channel->eventq_mask + 1) * 67 67 sizeof(efx_qword_t), 68 68 GFP_KERNEL); ··· 74 74 MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_MAX_EVQ_SIZE * 8 / 75 75 EFX_BUF_SIZE)); 76 76 MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_V2_OUT_LEN); 77 - size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE; 77 + size_t entries = channel->eventq.len / EFX_BUF_SIZE; 78 78 struct efx_nic *efx = channel->efx; 79 79 size_t inlen, outlen; 80 80 dma_addr_t dma_addr; 81 81 int rc, i; 82 82 83 83 /* Fill event queue with all ones (i.e. empty events) */ 84 - memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len); 84 + memset(channel->eventq.addr, 0xff, channel->eventq.len); 85 85 86 86 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1); 87 87 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel); ··· 112 112 INIT_EVQ_IN_FLAG_CUT_THRU, v1_cut_thru); 113 113 } 114 114 115 - dma_addr = channel->eventq.buf.dma_addr; 115 + dma_addr = channel->eventq.dma_addr; 116 116 for (i = 0; i < entries; ++i) { 117 117 MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr); 118 118 dma_addr += EFX_BUF_SIZE; ··· 134 134 135 135 void efx_mcdi_ev_remove(struct efx_channel *channel) 136 136 { 137 - efx_nic_free_buffer(channel->efx, &channel->eventq.buf); 137 + efx_nic_free_buffer(channel->efx, &channel->eventq); 138 138 } 139 139 140 140 void efx_mcdi_ev_fini(struct efx_channel *channel) ··· 166 166 EFX_BUF_SIZE)); 167 167 bool csum_offload = tx_queue->type & EFX_TXQ_TYPE_OUTER_CSUM; 168 168 bool inner_csum = tx_queue->type & EFX_TXQ_TYPE_INNER_CSUM; 169 - size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE; 169 + size_t entries = tx_queue->txd.len / EFX_BUF_SIZE; 170 170 struct efx_channel *channel = tx_queue->channel; 171 171 struct efx_nic *efx = tx_queue->efx; 172 172 
dma_addr_t dma_addr; ··· 182 182 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0); 183 183 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, efx->vport_id); 184 184 185 - dma_addr = tx_queue->txd.buf.dma_addr; 185 + dma_addr = tx_queue->txd.dma_addr; 186 186 187 187 netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n", 188 188 tx_queue->queue, entries, (u64)dma_addr); ··· 240 240 241 241 void efx_mcdi_tx_remove(struct efx_tx_queue *tx_queue) 242 242 { 243 - efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf); 243 + efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd); 244 244 } 245 245 246 246 void efx_mcdi_tx_fini(struct efx_tx_queue *tx_queue) ··· 269 269 270 270 int efx_mcdi_rx_probe(struct efx_rx_queue *rx_queue) 271 271 { 272 - return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf, 272 + return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd, 273 273 (rx_queue->ptr_mask + 1) * 274 274 sizeof(efx_qword_t), 275 275 GFP_KERNEL); ··· 278 278 void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue) 279 279 { 280 280 struct efx_channel *channel = efx_rx_queue_channel(rx_queue); 281 - size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE; 281 + size_t entries = rx_queue->rxd.len / EFX_BUF_SIZE; 282 282 MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_RXQ_V4_IN_LEN); 283 283 struct efx_nic *efx = rx_queue->efx; 284 284 unsigned int buffer_size; ··· 306 306 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, efx->vport_id); 307 307 MCDI_SET_DWORD(inbuf, INIT_RXQ_V4_IN_BUFFER_SIZE_BYTES, buffer_size); 308 308 309 - dma_addr = rx_queue->rxd.buf.dma_addr; 309 + dma_addr = rx_queue->rxd.dma_addr; 310 310 311 311 netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. 
%zu entries (%llx)\n", 312 312 efx_rx_queue_index(rx_queue), entries, (u64)dma_addr); ··· 325 325 326 326 void efx_mcdi_rx_remove(struct efx_rx_queue *rx_queue) 327 327 { 328 - efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf); 328 + efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd); 329 329 } 330 330 331 331 void efx_mcdi_rx_fini(struct efx_rx_queue *rx_queue)
+3 -25
drivers/net/ethernet/sfc/net_driver.h
··· 123 123 }; 124 124 125 125 /** 126 - * struct efx_special_buffer - DMA buffer entered into buffer table 127 - * @buf: Standard &struct efx_buffer 128 - * @index: Buffer index within controller's buffer table 129 - * @entries: Number of buffer table entries 130 - * 131 - * The NIC has a buffer table that maps buffers of size %EFX_BUF_SIZE. 132 - * Event and descriptor rings are addressed via one or more buffer 133 - * table entries (and so can be physically non-contiguous, although we 134 - * currently do not take advantage of that). On Falcon and Siena we 135 - * have to take care of allocating and initialising the entries 136 - * ourselves. On later hardware this is managed by the firmware and 137 - * @index and @entries are left as 0. 138 - */ 139 - struct efx_special_buffer { 140 - struct efx_buffer buf; 141 - unsigned int index; 142 - unsigned int entries; 143 - }; 144 - 145 - /** 146 126 * struct efx_tx_buffer - buffer state for a TX descriptor 147 127 * @skb: When @flags & %EFX_TX_BUF_SKB, the associated socket buffer to be 148 128 * freed when descriptor completes ··· 248 268 struct netdev_queue *core_txq; 249 269 struct efx_tx_buffer *buffer; 250 270 struct efx_buffer *cb_page; 251 - struct efx_special_buffer txd; 271 + struct efx_buffer txd; 252 272 unsigned int ptr_mask; 253 273 void __iomem *piobuf; 254 274 unsigned int piobuf_offset; ··· 377 397 struct efx_nic *efx; 378 398 int core_index; 379 399 struct efx_rx_buffer *buffer; 380 - struct efx_special_buffer rxd; 400 + struct efx_buffer rxd; 381 401 unsigned int ptr_mask; 382 402 bool refill_enabled; 383 403 bool flush_pending; ··· 493 513 #ifdef CONFIG_NET_RX_BUSY_POLL 494 514 unsigned long busy_poll_state; 495 515 #endif 496 - struct efx_special_buffer eventq; 516 + struct efx_buffer eventq; 497 517 unsigned int eventq_mask; 498 518 unsigned int eventq_read_ptr; 499 519 int event_test_cpu; ··· 861 881 * @tx_dc_base: Base qword address in SRAM of TX queue descriptor caches 862 882 * @rx_dc_base: 
Base qword address in SRAM of RX queue descriptor caches 863 883 * @sram_lim_qw: Qword address limit of SRAM 864 - * @next_buffer_table: First available buffer table id 865 884 * @n_channels: Number of channels in use 866 885 * @n_rx_channels: Number of channels used for RX (= number of RX queues) 867 886 * @n_tx_channels: Number of channels used for TX ··· 1025 1046 unsigned tx_dc_base; 1026 1047 unsigned rx_dc_base; 1027 1048 unsigned sram_lim_qw; 1028 - unsigned next_buffer_table; 1029 1049 1030 1050 unsigned int max_channels; 1031 1051 unsigned int max_vis;
+3 -3
drivers/net/ethernet/sfc/nic_common.h
··· 32 32 static inline efx_qword_t *efx_event(struct efx_channel *channel, 33 33 unsigned int index) 34 34 { 35 - return ((efx_qword_t *) (channel->eventq.buf.addr)) + 35 + return ((efx_qword_t *)(channel->eventq.addr)) + 36 36 (index & channel->eventq_mask); 37 37 } 38 38 ··· 58 58 static inline efx_qword_t * 59 59 efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index) 60 60 { 61 - return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index; 61 + return ((efx_qword_t *)(tx_queue->txd.addr)) + index; 62 62 } 63 63 64 64 /* Report whether this TX queue would be empty for the given write_count. ··· 98 98 static inline efx_qword_t * 99 99 efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) 100 100 { 101 - return ((efx_qword_t *) (rx_queue->rxd.buf.addr)) + index; 101 + return ((efx_qword_t *)(rx_queue->rxd.addr)) + index; 102 102 } 103 103 104 104 /* Alignment of PCIe DMA boundaries (4KB) */
+1 -1
drivers/net/ethernet/sfc/tx_tso.c
··· 85 85 prefetch(ptr); 86 86 prefetch(ptr + 0x80); 87 87 88 - ptr = (char *) (((efx_qword_t *)tx_queue->txd.buf.addr) + insert_ptr); 88 + ptr = (char *)(((efx_qword_t *)tx_queue->txd.addr) + insert_ptr); 89 89 prefetch(ptr); 90 90 prefetch(ptr + 0x80); 91 91 }