
gve: DQO: avoid unused variable warnings

The use of dma_unmap_addr()/dma_unmap_len() in the driver causes
multiple warnings when these macros are defined as empty, e.g.
in an ARCH=i386 allmodconfig build:

drivers/net/ethernet/google/gve/gve_tx_dqo.c: In function 'gve_tx_add_skb_no_copy_dqo':
drivers/net/ethernet/google/gve/gve_tx_dqo.c:494:40: error: unused variable 'buf' [-Werror=unused-variable]
494 | struct gve_tx_dma_buf *buf =
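
(Illustrative sketch of the warning, using hypothetical names rather than driver code: the local pointer is referenced only through the unmap macros, so it becomes unused when they expand to constants.)

	#include <linux/dma-mapping.h>

	struct example_state {
		DEFINE_DMA_UNMAP_ADDR(dma);
		DEFINE_DMA_UNMAP_LEN(len);
	};

	static void example_unmap(struct device *dev, struct example_state *s)
	{
		/* Unused when !CONFIG_NEED_DMA_MAP_STATE: both macro
		 * expansions below reduce to (0) and never touch 'buf'.
		 */
		struct example_state *buf = s;

		dma_unmap_single(dev, dma_unmap_addr(buf, dma),
				 dma_unmap_len(buf, len), DMA_TO_DEVICE);
	}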

This is not how the NEED_DMA_MAP_STATE macros are meant to be used:
they rely on the dma/len members being accessed only through the
dma_unmap_*() accessors on the enclosing structure, never through
local pointer variables or a temporary wrapper structure like
gve_tx_dma_buf.
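
For context, these macros are defined in include/linux/dma-mapping.h
roughly as follows (abridged; the remaining _addr_set/_len variants
follow the same pattern), so both the field declarations and every
access compile away when CONFIG_NEED_DMA_MAP_STATE is unset:

	#ifdef CONFIG_NEED_DMA_MAP_STATE
	#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
	#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME
	#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
	#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))
	#else
	#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
	#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
	#define dma_unmap_addr(PTR, ADDR_NAME)		(0)
	#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
	#endif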

Remove the gve_tx_dma_buf definition and open-code its contents
in all places to avoid the warning. This causes some rather long
lines but otherwise ends up making the driver slightly smaller.
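
Open-coding works because the second macro argument is substituted
textually into a member expression, so an array subscript such as
dma[i] passes straight through the macro; when the macros are empty,
the whole statement folds away and no local variable is left to warn
about. A sketch of the expansion:

	dma_unmap_addr_set(pkt, dma[i], addr);
	/* CONFIG_NEED_DMA_MAP_STATE=y:	(((pkt)->dma[i]) = (addr))	*/
	/* otherwise:			do { } while (0)		*/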

Fixes: a57e5de476be ("gve: DQO: Add TX path")
Link: https://lore.kernel.org/netdev/20210723231957.1113800-1-bcf@google.com/
Link: https://lore.kernel.org/netdev/20210721151100.2042139-1-arnd@kernel.org/
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Arnd Bergmann, committed by David S. Miller
Commit 1e0083bd, parent af3826db
3 files changed: +54 -66

drivers/net/ethernet/google/gve/gve.h (+6 -7)
···
 	u32 iov_padding; /* padding associated with this segment */
 };
 
-struct gve_tx_dma_buf {
-	DEFINE_DMA_UNMAP_ADDR(dma);
-	DEFINE_DMA_UNMAP_LEN(len);
-};
-
 /* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
  * ring entry but only used for a pkt_desc not a seg_desc
  */
···
 	struct sk_buff *skb; /* skb for this pkt */
 	union {
 		struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
-		struct gve_tx_dma_buf buf;
+		struct {
+			DEFINE_DMA_UNMAP_ADDR(dma);
+			DEFINE_DMA_UNMAP_LEN(len);
+		};
 	};
 };
···
 	 * All others correspond to `skb`'s frags and should be unmapped with
 	 * `dma_unmap_page`.
 	 */
-	struct gve_tx_dma_buf bufs[MAX_SKB_FRAGS + 1];
+	DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]);
+	DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]);
 	u16 num_bufs;
 
 	/* Linked list index to next element in the list, or -1 if none */
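
(A rough preprocessed view of the new gve_tx_pending_packet_dqo fields,
assuming CONFIG_NEED_DMA_MAP_STATE=y; with the option unset both arrays
vanish entirely, which is part of why the driver ends up smaller:)

	dma_addr_t dma[MAX_SKB_FRAGS + 1];
	__u32 len[MAX_SKB_FRAGS + 1];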
drivers/net/ethernet/google/gve/gve_tx.c (+10 -13)
···
 static void gve_tx_unmap_buf(struct device *dev, struct gve_tx_buffer_state *info)
 {
 	if (info->skb) {
-		dma_unmap_single(dev, dma_unmap_addr(&info->buf, dma),
-				 dma_unmap_len(&info->buf, len),
+		dma_unmap_single(dev, dma_unmap_addr(info, dma),
+				 dma_unmap_len(info, len),
 				 DMA_TO_DEVICE);
-		dma_unmap_len_set(&info->buf, len, 0);
+		dma_unmap_len_set(info, len, 0);
 	} else {
-		dma_unmap_page(dev, dma_unmap_addr(&info->buf, dma),
-			       dma_unmap_len(&info->buf, len),
+		dma_unmap_page(dev, dma_unmap_addr(info, dma),
+			       dma_unmap_len(info, len),
 			       DMA_TO_DEVICE);
-		dma_unmap_len_set(&info->buf, len, 0);
+		dma_unmap_len_set(info, len, 0);
 	}
 }
···
 	struct gve_tx_buffer_state *info;
 	bool is_gso = skb_is_gso(skb);
 	u32 idx = tx->req & tx->mask;
-	struct gve_tx_dma_buf *buf;
 	u64 addr;
 	u32 len;
 	int i;
···
 		tx->dma_mapping_error++;
 		goto drop;
 	}
-	buf = &info->buf;
-	dma_unmap_len_set(buf, len, len);
-	dma_unmap_addr_set(buf, dma, addr);
+	dma_unmap_len_set(info, len, len);
+	dma_unmap_addr_set(info, dma, addr);
 
 	payload_nfrags = shinfo->nr_frags;
 	if (hlen < len) {
···
 			tx->dma_mapping_error++;
 			goto unmap_drop;
 		}
-		buf = &tx->info[idx].buf;
 		tx->info[idx].skb = NULL;
-		dma_unmap_len_set(buf, len, len);
-		dma_unmap_addr_set(buf, dma, addr);
+		dma_unmap_len_set(&tx->info[idx], len, len);
+		dma_unmap_addr_set(&tx->info[idx], dma, addr);
 
 		gve_tx_fill_seg_desc(seg_desc, skb, is_gso, len, addr);
 	}
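
(The call sites above can pass info directly because the union member
in gve.h is now an anonymous struct: C11 hoists its members, so they
behave like direct members of gve_tx_buffer_state. A minimal sketch
with a hypothetical struct:)

	struct state {
		union {
			long iov;	/* stand-in for the iovec array */
			struct {	/* anonymous: members hoisted */
				unsigned long dma;
				unsigned int len;
			};
		};
	};

	static void example(struct state *info)
	{
		info->dma = 1;	/* was: info->buf.dma */
		info->len = 2;	/* was: info->buf.len */
	}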
drivers/net/ethernet/google/gve/gve_tx_dqo.c (+38 -46)
···
 	int j;
 
 	for (j = 0; j < cur_state->num_bufs; j++) {
-		struct gve_tx_dma_buf *buf = &cur_state->bufs[j];
-
 		if (j == 0) {
 			dma_unmap_single(tx->dev,
-					 dma_unmap_addr(buf, dma),
-					 dma_unmap_len(buf, len),
-					 DMA_TO_DEVICE);
+					 dma_unmap_addr(cur_state, dma[j]),
+					 dma_unmap_len(cur_state, len[j]),
+					 DMA_TO_DEVICE);
 		} else {
 			dma_unmap_page(tx->dev,
-				       dma_unmap_addr(buf, dma),
-				       dma_unmap_len(buf, len),
-				       DMA_TO_DEVICE);
+				       dma_unmap_addr(cur_state, dma[j]),
+				       dma_unmap_len(cur_state, len[j]),
+				       DMA_TO_DEVICE);
 		}
 	}
 	if (cur_state->skb) {
···
 	const bool is_gso = skb_is_gso(skb);
 	u32 desc_idx = tx->dqo_tx.tail;
 
-	struct gve_tx_pending_packet_dqo *pending_packet;
+	struct gve_tx_pending_packet_dqo *pkt;
 	struct gve_tx_metadata_dqo metadata;
 	s16 completion_tag;
 	int i;
 
-	pending_packet = gve_alloc_pending_packet(tx);
-	pending_packet->skb = skb;
-	pending_packet->num_bufs = 0;
-	completion_tag = pending_packet - tx->dqo.pending_packets;
+	pkt = gve_alloc_pending_packet(tx);
+	pkt->skb = skb;
+	pkt->num_bufs = 0;
+	completion_tag = pkt - tx->dqo.pending_packets;
 
 	gve_extract_tx_metadata_dqo(skb, &metadata);
 	if (is_gso) {
···
 	/* Map the linear portion of skb */
 	{
-		struct gve_tx_dma_buf *buf =
-			&pending_packet->bufs[pending_packet->num_bufs];
 		u32 len = skb_headlen(skb);
 		dma_addr_t addr;
···
 		if (unlikely(dma_mapping_error(tx->dev, addr)))
 			goto err;
 
-		dma_unmap_len_set(buf, len, len);
-		dma_unmap_addr_set(buf, dma, addr);
-		++pending_packet->num_bufs;
+		dma_unmap_len_set(pkt, len[pkt->num_bufs], len);
+		dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr);
+		++pkt->num_bufs;
 
 		gve_tx_fill_pkt_desc_dqo(tx, &desc_idx, skb, len, addr,
 					 completion_tag,
···
 	}
 
 	for (i = 0; i < shinfo->nr_frags; i++) {
-		struct gve_tx_dma_buf *buf =
-			&pending_packet->bufs[pending_packet->num_bufs];
 		const skb_frag_t *frag = &shinfo->frags[i];
 		bool is_eop = i == (shinfo->nr_frags - 1);
 		u32 len = skb_frag_size(frag);
···
 		if (unlikely(dma_mapping_error(tx->dev, addr)))
 			goto err;
 
-		dma_unmap_len_set(buf, len, len);
-		dma_unmap_addr_set(buf, dma, addr);
-		++pending_packet->num_bufs;
+		dma_unmap_len_set(pkt, len[pkt->num_bufs], len);
+		dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr);
+		++pkt->num_bufs;
 
 		gve_tx_fill_pkt_desc_dqo(tx, &desc_idx, skb, len, addr,
 					 completion_tag, is_eop, is_gso);
···
 	return 0;
 
 err:
-	for (i = 0; i < pending_packet->num_bufs; i++) {
-		struct gve_tx_dma_buf *buf = &pending_packet->bufs[i];
-
+	for (i = 0; i < pkt->num_bufs; i++) {
 		if (i == 0) {
-			dma_unmap_single(tx->dev, dma_unmap_addr(buf, dma),
-					 dma_unmap_len(buf, len),
+			dma_unmap_single(tx->dev,
+					 dma_unmap_addr(pkt, dma[i]),
+					 dma_unmap_len(pkt, len[i]),
 					 DMA_TO_DEVICE);
 		} else {
-			dma_unmap_page(tx->dev, dma_unmap_addr(buf, dma),
-				       dma_unmap_len(buf, len), DMA_TO_DEVICE);
+			dma_unmap_page(tx->dev,
+				       dma_unmap_addr(pkt, dma[i]),
+				       dma_unmap_len(pkt, len[i]),
+				       DMA_TO_DEVICE);
 		}
 	}
 
-	pending_packet->skb = NULL;
-	pending_packet->num_bufs = 0;
-	gve_free_pending_packet(tx, pending_packet);
+	pkt->skb = NULL;
+	pkt->num_bufs = 0;
+	gve_free_pending_packet(tx, pkt);
 
 	return -1;
 }
···
 static void remove_from_list(struct gve_tx_ring *tx,
 			     struct gve_index_list *list,
-			     struct gve_tx_pending_packet_dqo *pending_packet)
+			     struct gve_tx_pending_packet_dqo *pkt)
 {
 	s16 prev_index, next_index;
 
-	prev_index = pending_packet->prev;
-	next_index = pending_packet->next;
+	prev_index = pkt->prev;
+	next_index = pkt->next;
 
 	if (prev_index == -1) {
 		/* Node is head */
···
 }
 
 static void gve_unmap_packet(struct device *dev,
-			     struct gve_tx_pending_packet_dqo *pending_packet)
+			     struct gve_tx_pending_packet_dqo *pkt)
 {
-	struct gve_tx_dma_buf *buf;
 	int i;
 
 	/* SKB linear portion is guaranteed to be mapped */
-	buf = &pending_packet->bufs[0];
-	dma_unmap_single(dev, dma_unmap_addr(buf, dma),
-			 dma_unmap_len(buf, len), DMA_TO_DEVICE);
-	for (i = 1; i < pending_packet->num_bufs; i++) {
-		buf = &pending_packet->bufs[i];
-		dma_unmap_page(dev, dma_unmap_addr(buf, dma),
-			       dma_unmap_len(buf, len), DMA_TO_DEVICE);
+	dma_unmap_single(dev, dma_unmap_addr(pkt, dma[0]),
+			 dma_unmap_len(pkt, len[0]), DMA_TO_DEVICE);
+	for (i = 1; i < pkt->num_bufs; i++) {
+		dma_unmap_page(dev, dma_unmap_addr(pkt, dma[i]),
+			       dma_unmap_len(pkt, len[i]), DMA_TO_DEVICE);
 	}
-	pending_packet->num_bufs = 0;
+	pkt->num_bufs = 0;
 }
 
 /* Completion types and expected behavior: