Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

iw_cxgb4/cxgb4/cxgb4vf/cxgb4i/csiostor: Cleanup register defines/macros related to all other cpl messages

This patch cleans up all other macros/register defines related to
CPL messages that are defined in t4_msg.h and the affected files

Signed-off-by: Anish Bhatt <anish@chelsio.com>
Signed-off-by: Hariprasad Shenai <hariprasad@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Hariprasad Shenai and committed by
David S. Miller
bdc590b9 6c53e938

+169 -85
+10 -10
drivers/infiniband/hw/cxgb4/cm.c
··· 3501 3501 req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req)); 3502 3502 memset(req, 0, sizeof(*req)); 3503 3503 req->l2info = cpu_to_be16(V_SYN_INTF(intf) | 3504 - V_SYN_MAC_IDX(G_RX_MACIDX( 3504 + V_SYN_MAC_IDX(RX_MACIDX_G( 3505 3505 (__force int) htonl(l2info))) | 3506 3506 F_SYN_XACT_MATCH); 3507 3507 eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ? 3508 - G_RX_ETHHDR_LEN((__force int) htonl(l2info)) : 3509 - G_RX_T5_ETHHDR_LEN((__force int) htonl(l2info)); 3510 - req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN( 3508 + RX_ETHHDR_LEN_G((__force int)htonl(l2info)) : 3509 + RX_T5_ETHHDR_LEN_G((__force int)htonl(l2info)); 3510 + req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(RX_CHAN_G( 3511 3511 (__force int) htonl(l2info))) | 3512 - V_TCP_HDR_LEN(G_RX_TCPHDR_LEN( 3512 + V_TCP_HDR_LEN(RX_TCPHDR_LEN_G( 3513 3513 (__force int) htons(hdr_len))) | 3514 - V_IP_HDR_LEN(G_RX_IPHDR_LEN( 3514 + V_IP_HDR_LEN(RX_IPHDR_LEN_G( 3515 3515 (__force int) htons(hdr_len))) | 3516 - V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(eth_hdr_len))); 3516 + V_ETH_HDR_LEN(RX_ETHHDR_LEN_G(eth_hdr_len))); 3517 3517 req->vlan = (__force __be16) vlantag; 3518 3518 req->len = (__force __be16) len; 3519 3519 req->tos_stid = cpu_to_be32(PASS_OPEN_TID_V(stid) | ··· 3613 3613 struct neighbour *neigh; 3614 3614 3615 3615 /* Drop all non-SYN packets */ 3616 - if (!(cpl->l2info & cpu_to_be32(F_RXF_SYN))) 3616 + if (!(cpl->l2info & cpu_to_be32(RXF_SYN_F))) 3617 3617 goto reject; 3618 3618 3619 3619 /* ··· 3635 3635 } 3636 3636 3637 3637 eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ? 3638 - G_RX_ETHHDR_LEN(htonl(cpl->l2info)) : 3639 - G_RX_T5_ETHHDR_LEN(htonl(cpl->l2info)); 3638 + RX_ETHHDR_LEN_G(htonl(cpl->l2info)) : 3639 + RX_T5_ETHHDR_LEN_G(htonl(cpl->l2info)); 3640 3640 if (eth_hdr_len == ETH_HLEN) { 3641 3641 eh = (struct ethhdr *)(req + 1); 3642 3642 iph = (struct iphdr *)(eh + 1);
+2 -2
drivers/infiniband/hw/cxgb4/mem.c
··· 86 86 req->wr.wr_lo = wait ? (__force __be64)(unsigned long) &wr_wait : 0L; 87 87 req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16))); 88 88 req->cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE)); 89 - req->cmd |= cpu_to_be32(V_T5_ULP_MEMIO_ORDER(1)); 89 + req->cmd |= cpu_to_be32(T5_ULP_MEMIO_ORDER_V(1)); 90 90 req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(len>>5)); 91 91 req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), 16)); 92 92 req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr)); 93 93 94 94 sgl = (struct ulptx_sgl *)(req + 1); 95 95 sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_DSGL) | 96 - ULPTX_NSGE(1)); 96 + ULPTX_NSGE_V(1)); 97 97 sgl->len0 = cpu_to_be32(len); 98 98 sgl->addr0 = cpu_to_be64(data); 99 99
+4 -4
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
··· 672 672 if (idx >= adap->tids.ftid_base && nidx < 673 673 (adap->tids.nftids + adap->tids.nsftids)) { 674 674 idx = nidx; 675 - ret = GET_TCB_COOKIE(rpl->cookie); 675 + ret = TCB_COOKIE_G(rpl->cookie); 676 676 f = &adap->tids.ftid_tab[idx]; 677 677 678 678 if (ret == FW_FILTER_WR_FLT_DELETED) { ··· 724 724 725 725 if (likely(opcode == CPL_SGE_EGR_UPDATE)) { 726 726 const struct cpl_sge_egr_update *p = (void *)rsp; 727 - unsigned int qid = EGR_QID(ntohl(p->opcode_qid)); 727 + unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid)); 728 728 struct sge_txq *txq; 729 729 730 730 txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start]; ··· 3483 3483 req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req)); 3484 3484 INIT_TP_WR(req, 0); 3485 3485 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid)); 3486 - req->reply_ctrl = htons(NO_REPLY(0) | (ipv6 ? LISTSVR_IPV6(1) : 3487 - LISTSVR_IPV6(0)) | QUEUENO(queue)); 3486 + req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) : 3487 + LISTSVR_IPV6_V(0)) | QUEUENO_V(queue)); 3488 3488 ret = t4_mgmt_tx(adap, skb); 3489 3489 return net_xmit_eval(ret); 3490 3490 }
+1 -1
drivers/net/ethernet/chelsio/cxgb4/l2t.c
··· 152 152 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, 153 153 e->idx | (sync ? F_SYNC_WR : 0) | 154 154 TID_QID_V(adap->sge.fw_evtq.abs_id))); 155 - req->params = htons(L2T_W_PORT(e->lport) | L2T_W_NOREPLY(!sync)); 155 + req->params = htons(L2T_W_PORT_V(e->lport) | L2T_W_NOREPLY_V(!sync)); 156 156 req->l2t_idx = htons(e->idx); 157 157 req->vlan = htons(e->vlan); 158 158 if (e->neigh && !(e->neigh->dev->flags & IFF_LOOPBACK))
+5 -4
drivers/net/ethernet/chelsio/cxgb4/sge.c
··· 821 821 sgl->addr0 = cpu_to_be64(addr[1]); 822 822 } 823 823 824 - sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags)); 824 + sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | 825 + ULPTX_NSGE_V(nfrags)); 825 826 if (likely(--nfrags == 0)) 826 827 return; 827 828 /* ··· 1762 1761 pkt = (const struct cpl_rx_pkt *)rsp; 1763 1762 csum_ok = pkt->csum_calc && !pkt->err_vec && 1764 1763 (q->netdev->features & NETIF_F_RXCSUM); 1765 - if ((pkt->l2info & htonl(RXF_TCP)) && 1764 + if ((pkt->l2info & htonl(RXF_TCP_F)) && 1766 1765 (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) { 1767 1766 do_gro(rxq, si, pkt); 1768 1767 return 0; ··· 1784 1783 1785 1784 rxq->stats.pkts++; 1786 1785 1787 - if (csum_ok && (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) { 1786 + if (csum_ok && (pkt->l2info & htonl(RXF_UDP_F | RXF_TCP_F))) { 1788 1787 if (!pkt->ip_frag) { 1789 1788 skb->ip_summed = CHECKSUM_UNNECESSARY; 1790 1789 rxq->stats.rx_cso++; 1791 - } else if (pkt->l2info & htonl(RXF_IP)) { 1790 + } else if (pkt->l2info & htonl(RXF_IP_F)) { 1792 1791 __sum16 c = (__force __sum16)pkt->csum; 1793 1792 skb->csum = csum_unfold(c); 1794 1793 skb->ip_summed = CHECKSUM_COMPLETE;
+137 -54
drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
··· 518 518 WR_HDR; 519 519 union opcode_tid ot; 520 520 __be16 reply_ctrl; 521 - #define QUEUENO(x) ((x) << 0) 522 - #define REPLY_CHAN(x) ((x) << 14) 523 - #define NO_REPLY(x) ((x) << 15) 524 521 __be16 cookie; 525 522 }; 523 + 524 + /* cpl_get_tcb.reply_ctrl fields */ 525 + #define QUEUENO_S 0 526 + #define QUEUENO_V(x) ((x) << QUEUENO_S) 527 + 528 + #define REPLY_CHAN_S 14 529 + #define REPLY_CHAN_V(x) ((x) << REPLY_CHAN_S) 530 + #define REPLY_CHAN_F REPLY_CHAN_V(1U) 531 + 532 + #define NO_REPLY_S 15 533 + #define NO_REPLY_V(x) ((x) << NO_REPLY_S) 534 + #define NO_REPLY_F NO_REPLY_V(1U) 526 535 527 536 struct cpl_set_tcb_field { 528 537 WR_HDR; 529 538 union opcode_tid ot; 530 539 __be16 reply_ctrl; 531 540 __be16 word_cookie; 532 - #define TCB_WORD(x) ((x) << 0) 533 - #define TCB_COOKIE(x) ((x) << 5) 534 - #define GET_TCB_COOKIE(x) (((x) >> 5) & 7) 535 541 __be64 mask; 536 542 __be64 val; 537 543 }; 544 + 545 + /* cpl_set_tcb_field.word_cookie fields */ 546 + #define TCB_WORD_S 0 547 + #define TCB_WORD(x) ((x) << TCB_WORD_S) 548 + 549 + #define TCB_COOKIE_S 5 550 + #define TCB_COOKIE_M 0x7 551 + #define TCB_COOKIE_V(x) ((x) << TCB_COOKIE_S) 552 + #define TCB_COOKIE_G(x) (((x) >> TCB_COOKIE_S) & TCB_COOKIE_M) 538 553 539 554 struct cpl_set_tcb_rpl { 540 555 union opcode_tid ot; ··· 577 562 WR_HDR; 578 563 union opcode_tid ot; 579 564 __be16 reply_ctrl; 580 - #define LISTSVR_IPV6(x) ((x) << 14) 581 565 __be16 rsvd; 582 566 }; 567 + 568 + /* additional cpl_close_listsvr_req.reply_ctrl field */ 569 + #define LISTSVR_IPV6_S 14 570 + #define LISTSVR_IPV6_V(x) ((x) << LISTSVR_IPV6_S) 571 + #define LISTSVR_IPV6_F LISTSVR_IPV6_V(1U) 583 572 584 573 struct cpl_close_listsvr_rpl { 585 574 union opcode_tid ot; ··· 680 661 /* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */ 681 662 }; 682 663 664 + /* cpl_tx_pkt_lso_core.lso_ctrl fields */ 665 + #define LSO_TCPHDR_LEN_S 0 666 + #define LSO_TCPHDR_LEN_V(x) ((x) << LSO_TCPHDR_LEN_S) 667 + 668 + #define 
LSO_IPHDR_LEN_S 4 669 + #define LSO_IPHDR_LEN_V(x) ((x) << LSO_IPHDR_LEN_S) 670 + 671 + #define LSO_ETHHDR_LEN_S 16 672 + #define LSO_ETHHDR_LEN_V(x) ((x) << LSO_ETHHDR_LEN_S) 673 + 674 + #define LSO_IPV6_S 20 675 + #define LSO_IPV6_V(x) ((x) << LSO_IPV6_S) 676 + #define LSO_IPV6_F LSO_IPV6_V(1U) 677 + 678 + #define LSO_LAST_SLICE_S 22 679 + #define LSO_LAST_SLICE_V(x) ((x) << LSO_LAST_SLICE_S) 680 + #define LSO_LAST_SLICE_F LSO_LAST_SLICE_V(1U) 681 + 682 + #define LSO_FIRST_SLICE_S 23 683 + #define LSO_FIRST_SLICE_V(x) ((x) << LSO_FIRST_SLICE_S) 684 + #define LSO_FIRST_SLICE_F LSO_FIRST_SLICE_V(1U) 685 + 686 + #define LSO_OPCODE_S 24 687 + #define LSO_OPCODE_V(x) ((x) << LSO_OPCODE_S) 688 + 689 + #define LSO_T5_XFER_SIZE_S 0 690 + #define LSO_T5_XFER_SIZE_V(x) ((x) << LSO_T5_XFER_SIZE_S) 691 + 683 692 struct cpl_tx_pkt_lso { 684 693 WR_HDR; 685 694 struct cpl_tx_pkt_lso_core c; ··· 717 670 struct cpl_iscsi_hdr { 718 671 union opcode_tid ot; 719 672 __be16 pdu_len_ddp; 720 - #define ISCSI_PDU_LEN(x) ((x) & 0x7FFF) 721 - #define ISCSI_DDP (1 << 15) 722 673 __be16 len; 723 674 __be32 seq; 724 675 __be16 urg; 725 676 u8 rsvd; 726 677 u8 status; 727 678 }; 679 + 680 + /* cpl_iscsi_hdr.pdu_len_ddp fields */ 681 + #define ISCSI_PDU_LEN_S 0 682 + #define ISCSI_PDU_LEN_M 0x7FFF 683 + #define ISCSI_PDU_LEN_V(x) ((x) << ISCSI_PDU_LEN_S) 684 + #define ISCSI_PDU_LEN_G(x) (((x) >> ISCSI_PDU_LEN_S) & ISCSI_PDU_LEN_M) 685 + 686 + #define ISCSI_DDP_S 15 687 + #define ISCSI_DDP_V(x) ((x) << ISCSI_DDP_S) 688 + #define ISCSI_DDP_F ISCSI_DDP_V(1U) 728 689 729 690 struct cpl_rx_data { 730 691 union opcode_tid ot; ··· 790 735 __be16 vlan; 791 736 __be16 len; 792 737 __be32 l2info; 793 - #define RXF_UDP (1 << 22) 794 - #define RXF_TCP (1 << 23) 795 - #define RXF_IP (1 << 24) 796 - #define RXF_IP6 (1 << 25) 797 738 __be16 hdr_len; 798 739 __be16 err_vec; 799 740 }; 800 741 742 + #define RXF_UDP_S 22 743 + #define RXF_UDP_V(x) ((x) << RXF_UDP_S) 744 + #define RXF_UDP_F RXF_UDP_V(1U) 745 + 
746 + #define RXF_TCP_S 23 747 + #define RXF_TCP_V(x) ((x) << RXF_TCP_S) 748 + #define RXF_TCP_F RXF_TCP_V(1U) 749 + 750 + #define RXF_IP_S 24 751 + #define RXF_IP_V(x) ((x) << RXF_IP_S) 752 + #define RXF_IP_F RXF_IP_V(1U) 753 + 754 + #define RXF_IP6_S 25 755 + #define RXF_IP6_V(x) ((x) << RXF_IP6_S) 756 + #define RXF_IP6_F RXF_IP6_V(1U) 757 + 801 758 /* rx_pkt.l2info fields */ 802 - #define S_RX_ETHHDR_LEN 0 803 - #define M_RX_ETHHDR_LEN 0x1F 804 - #define V_RX_ETHHDR_LEN(x) ((x) << S_RX_ETHHDR_LEN) 805 - #define G_RX_ETHHDR_LEN(x) (((x) >> S_RX_ETHHDR_LEN) & M_RX_ETHHDR_LEN) 759 + #define RX_ETHHDR_LEN_S 0 760 + #define RX_ETHHDR_LEN_M 0x1F 761 + #define RX_ETHHDR_LEN_V(x) ((x) << RX_ETHHDR_LEN_S) 762 + #define RX_ETHHDR_LEN_G(x) (((x) >> RX_ETHHDR_LEN_S) & RX_ETHHDR_LEN_M) 806 763 807 - #define S_RX_T5_ETHHDR_LEN 0 808 - #define M_RX_T5_ETHHDR_LEN 0x3F 809 - #define V_RX_T5_ETHHDR_LEN(x) ((x) << S_RX_T5_ETHHDR_LEN) 810 - #define G_RX_T5_ETHHDR_LEN(x) (((x) >> S_RX_T5_ETHHDR_LEN) & M_RX_T5_ETHHDR_LEN) 764 + #define RX_T5_ETHHDR_LEN_S 0 765 + #define RX_T5_ETHHDR_LEN_M 0x3F 766 + #define RX_T5_ETHHDR_LEN_V(x) ((x) << RX_T5_ETHHDR_LEN_S) 767 + #define RX_T5_ETHHDR_LEN_G(x) (((x) >> RX_T5_ETHHDR_LEN_S) & RX_T5_ETHHDR_LEN_M) 811 768 812 - #define S_RX_MACIDX 8 813 - #define M_RX_MACIDX 0x1FF 814 - #define V_RX_MACIDX(x) ((x) << S_RX_MACIDX) 815 - #define G_RX_MACIDX(x) (((x) >> S_RX_MACIDX) & M_RX_MACIDX) 769 + #define RX_MACIDX_S 8 770 + #define RX_MACIDX_M 0x1FF 771 + #define RX_MACIDX_V(x) ((x) << RX_MACIDX_S) 772 + #define RX_MACIDX_G(x) (((x) >> RX_MACIDX_S) & RX_MACIDX_M) 816 773 817 - #define S_RXF_SYN 21 818 - #define V_RXF_SYN(x) ((x) << S_RXF_SYN) 819 - #define F_RXF_SYN V_RXF_SYN(1U) 774 + #define RXF_SYN_S 21 775 + #define RXF_SYN_V(x) ((x) << RXF_SYN_S) 776 + #define RXF_SYN_F RXF_SYN_V(1U) 820 777 821 - #define S_RX_CHAN 28 822 - #define M_RX_CHAN 0xF 823 - #define V_RX_CHAN(x) ((x) << S_RX_CHAN) 824 - #define G_RX_CHAN(x) (((x) >> S_RX_CHAN) & 
M_RX_CHAN) 778 + #define RX_CHAN_S 28 779 + #define RX_CHAN_M 0xF 780 + #define RX_CHAN_V(x) ((x) << RX_CHAN_S) 781 + #define RX_CHAN_G(x) (((x) >> RX_CHAN_S) & RX_CHAN_M) 825 782 826 783 /* rx_pkt.hdr_len fields */ 827 - #define S_RX_TCPHDR_LEN 0 828 - #define M_RX_TCPHDR_LEN 0x3F 829 - #define V_RX_TCPHDR_LEN(x) ((x) << S_RX_TCPHDR_LEN) 830 - #define G_RX_TCPHDR_LEN(x) (((x) >> S_RX_TCPHDR_LEN) & M_RX_TCPHDR_LEN) 784 + #define RX_TCPHDR_LEN_S 0 785 + #define RX_TCPHDR_LEN_M 0x3F 786 + #define RX_TCPHDR_LEN_V(x) ((x) << RX_TCPHDR_LEN_S) 787 + #define RX_TCPHDR_LEN_G(x) (((x) >> RX_TCPHDR_LEN_S) & RX_TCPHDR_LEN_M) 831 788 832 - #define S_RX_IPHDR_LEN 6 833 - #define M_RX_IPHDR_LEN 0x3FF 834 - #define V_RX_IPHDR_LEN(x) ((x) << S_RX_IPHDR_LEN) 835 - #define G_RX_IPHDR_LEN(x) (((x) >> S_RX_IPHDR_LEN) & M_RX_IPHDR_LEN) 789 + #define RX_IPHDR_LEN_S 6 790 + #define RX_IPHDR_LEN_M 0x3FF 791 + #define RX_IPHDR_LEN_V(x) ((x) << RX_IPHDR_LEN_S) 792 + #define RX_IPHDR_LEN_G(x) (((x) >> RX_IPHDR_LEN_S) & RX_IPHDR_LEN_M) 836 793 837 794 struct cpl_trace_pkt { 838 795 u8 opcode; ··· 893 826 WR_HDR; 894 827 union opcode_tid ot; 895 828 __be16 params; 896 - #define L2T_W_INFO(x) ((x) << 2) 897 - #define L2T_W_PORT(x) ((x) << 8) 898 - #define L2T_W_NOREPLY(x) ((x) << 15) 899 829 __be16 l2t_idx; 900 830 __be16 vlan; 901 831 u8 dst_mac[6]; 902 832 }; 833 + 834 + /* cpl_l2t_write_req.params fields */ 835 + #define L2T_W_INFO_S 2 836 + #define L2T_W_INFO_V(x) ((x) << L2T_W_INFO_S) 837 + 838 + #define L2T_W_PORT_S 8 839 + #define L2T_W_PORT_V(x) ((x) << L2T_W_PORT_S) 840 + 841 + #define L2T_W_NOREPLY_S 15 842 + #define L2T_W_NOREPLY_V(x) ((x) << L2T_W_NOREPLY_S) 843 + #define L2T_W_NOREPLY_F L2T_W_NOREPLY_V(1U) 903 844 904 845 struct cpl_l2t_write_rpl { 905 846 union opcode_tid ot; ··· 923 848 924 849 struct cpl_sge_egr_update { 925 850 __be32 opcode_qid; 926 - #define EGR_QID(x) ((x) & 0x1FFFF) 927 851 __be16 cidx; 928 852 __be16 pidx; 929 853 }; 854 + 855 + /* cpl_sge_egr_update.ot 
fields */ 856 + #define EGR_QID_S 0 857 + #define EGR_QID_M 0x1FFFF 858 + #define EGR_QID_G(x) (((x) >> EGR_QID_S) & EGR_QID_M) 930 859 931 860 /* cpl_fw*.type values */ 932 861 enum { ··· 1024 945 1025 946 struct ulptx_sgl { 1026 947 __be32 cmd_nsge; 1027 - #define ULPTX_NSGE(x) ((x) << 0) 1028 - #define ULPTX_MORE (1U << 23) 1029 948 __be32 len0; 1030 949 __be64 addr0; 1031 950 struct ulptx_sge_pair sge[0]; 1032 951 }; 952 + 953 + #define ULPTX_NSGE_S 0 954 + #define ULPTX_NSGE_V(x) ((x) << ULPTX_NSGE_S) 955 + 956 + #define ULPTX_MORE_S 23 957 + #define ULPTX_MORE_V(x) ((x) << ULPTX_MORE_S) 958 + #define ULPTX_MORE_F ULPTX_MORE_V(1U) 1033 959 1034 960 struct ulp_mem_io { 1035 961 WR_HDR; ··· 1042 958 __be32 len16; /* command length */ 1043 959 __be32 dlen; /* data length in 32-byte units */ 1044 960 __be32 lock_addr; 1045 - #define ULP_MEMIO_LOCK(x) ((x) << 31) 1046 961 }; 962 + 963 + #define ULP_MEMIO_LOCK_S 31 964 + #define ULP_MEMIO_LOCK_V(x) ((x) << ULP_MEMIO_LOCK_S) 965 + #define ULP_MEMIO_LOCK_F ULP_MEMIO_LOCK_V(1U) 1047 966 1048 967 /* additional ulp_mem_io.cmd fields */ 1049 968 #define ULP_MEMIO_ORDER_S 23 ··· 1057 970 #define T5_ULP_MEMIO_IMM_V(x) ((x) << T5_ULP_MEMIO_IMM_S) 1058 971 #define T5_ULP_MEMIO_IMM_F T5_ULP_MEMIO_IMM_V(1U) 1059 972 1060 - #define S_T5_ULP_MEMIO_IMM 23 1061 - #define V_T5_ULP_MEMIO_IMM(x) ((x) << S_T5_ULP_MEMIO_IMM) 1062 - #define F_T5_ULP_MEMIO_IMM V_T5_ULP_MEMIO_IMM(1U) 1063 - 1064 - #define S_T5_ULP_MEMIO_ORDER 22 1065 - #define V_T5_ULP_MEMIO_ORDER(x) ((x) << S_T5_ULP_MEMIO_ORDER) 1066 - #define F_T5_ULP_MEMIO_ORDER V_T5_ULP_MEMIO_ORDER(1U) 973 + #define T5_ULP_MEMIO_ORDER_S 22 974 + #define T5_ULP_MEMIO_ORDER_V(x) ((x) << T5_ULP_MEMIO_ORDER_S) 975 + #define T5_ULP_MEMIO_ORDER_F T5_ULP_MEMIO_ORDER_V(1U) 1067 976 1068 977 /* ulp_mem_io.lock_addr fields */ 1069 978 #define ULP_MEMIO_ADDR_S 0
+1 -1
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
··· 471 471 * free TX Queue Descriptors ... 472 472 */ 473 473 const struct cpl_sge_egr_update *p = cpl; 474 - unsigned int qid = EGR_QID(be32_to_cpu(p->opcode_qid)); 474 + unsigned int qid = EGR_QID_G(be32_to_cpu(p->opcode_qid)); 475 475 struct sge *s = &adapter->sge; 476 476 struct sge_txq *tq; 477 477 struct sge_eth_txq *txq;
+3 -3
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
··· 926 926 } 927 927 928 928 sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | 929 - ULPTX_NSGE(nfrags)); 929 + ULPTX_NSGE_V(nfrags)); 930 930 if (likely(--nfrags == 0)) 931 931 return; 932 932 /* ··· 1604 1604 * If this is a good TCP packet and we have Generic Receive Offload 1605 1605 * enabled, handle the packet in the GRO path. 1606 1606 */ 1607 - if ((pkt->l2info & cpu_to_be32(RXF_TCP)) && 1607 + if ((pkt->l2info & cpu_to_be32(RXF_TCP_F)) && 1608 1608 (rspq->netdev->features & NETIF_F_GRO) && csum_ok && 1609 1609 !pkt->ip_frag) { 1610 1610 do_gro(rxq, gl, pkt); ··· 1626 1626 rxq->stats.pkts++; 1627 1627 1628 1628 if (csum_ok && !pkt->err_vec && 1629 - (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) { 1629 + (be32_to_cpu(pkt->l2info) & (RXF_UDP_F | RXF_TCP_F))) { 1630 1630 if (!pkt->ip_frag) 1631 1631 skb->ip_summed = CHECKSUM_UNNECESSARY; 1632 1632 else {
+1 -1
drivers/scsi/csiostor/csio_lnode.c
··· 1758 1758 else { 1759 1759 /* Program DSGL to dma payload */ 1760 1760 dsgl.cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | 1761 - ULPTX_MORE | ULPTX_NSGE(1)); 1761 + ULPTX_MORE_F | ULPTX_NSGE_V(1)); 1762 1762 dsgl.len0 = cpu_to_be32(pld_len); 1763 1763 dsgl.addr0 = cpu_to_be64(pld->paddr); 1764 1764 csio_wr_copy_to_wrp(&dsgl, &wrp, ALIGN(wr_off, 8),
+2 -2
drivers/scsi/csiostor/csio_scsi.c
··· 298 298 struct csio_dma_buf *dma_buf; 299 299 struct scsi_cmnd *scmnd = csio_scsi_cmnd(req); 300 300 301 - sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_MORE | 302 - ULPTX_NSGE(req->nsge)); 301 + sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_MORE_F | 302 + ULPTX_NSGE_V(req->nsge)); 303 303 /* Now add the data SGLs */ 304 304 if (likely(!req->dcopy)) { 305 305 scsi_for_each_sg(scmnd, sgel, req->nsge, i) {
+3 -3
drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
··· 1112 1112 hlen = ntohs(cpl->len); 1113 1113 dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF; 1114 1114 1115 - plen = ISCSI_PDU_LEN(pdu_len_ddp); 1115 + plen = ISCSI_PDU_LEN_G(pdu_len_ddp); 1116 1116 if (is_t4(lldi->adapter_type)) 1117 1117 plen -= 40; 1118 1118 ··· 1619 1619 req = (struct cpl_set_tcb_field *)skb->head; 1620 1620 INIT_TP_WR(req, csk->tid); 1621 1621 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid)); 1622 - req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid)); 1622 + req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid)); 1623 1623 req->word_cookie = htons(0); 1624 1624 req->mask = cpu_to_be64(0x3 << 8); 1625 1625 req->val = cpu_to_be64(pg_idx << 8); ··· 1651 1651 req = (struct cpl_set_tcb_field *)skb->head; 1652 1652 INIT_TP_WR(req, tid); 1653 1653 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); 1654 - req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid)); 1654 + req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid)); 1655 1655 req->word_cookie = htons(0); 1656 1656 req->mask = cpu_to_be64(0x3 << 4); 1657 1657 req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |