Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

IB: Remove __constant_{endian} uses

The base versions handle constant folding just fine, use them
directly. The replacements are OK in the include/ files as they are
not exported to userspace so we don't need the __ prefixed versions.

This patch does not affect code generation at all.

Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>

Authored by Harvey Harrison and committed by Roland Dreier.
9c3da099 f3b8436a

+124 -126
+7 -8
drivers/infiniband/core/cm.c
··· 927 927 unsigned long flags; 928 928 int ret = 0; 929 929 930 - service_mask = service_mask ? service_mask : 931 - __constant_cpu_to_be64(~0ULL); 930 + service_mask = service_mask ? service_mask : ~cpu_to_be64(0); 932 931 service_id &= service_mask; 933 932 if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID && 934 933 (service_id != IB_CM_ASSIGN_SERVICE_ID)) ··· 953 954 spin_lock_irqsave(&cm.lock, flags); 954 955 if (service_id == IB_CM_ASSIGN_SERVICE_ID) { 955 956 cm_id->service_id = cpu_to_be64(cm.listen_service_id++); 956 - cm_id->service_mask = __constant_cpu_to_be64(~0ULL); 957 + cm_id->service_mask = ~cpu_to_be64(0); 957 958 } else { 958 959 cm_id->service_id = service_id; 959 960 cm_id->service_mask = service_mask; ··· 1133 1134 goto error1; 1134 1135 } 1135 1136 cm_id->service_id = param->service_id; 1136 - cm_id->service_mask = __constant_cpu_to_be64(~0ULL); 1137 + cm_id->service_mask = ~cpu_to_be64(0); 1137 1138 cm_id_priv->timeout_ms = cm_convert_to_ms( 1138 1139 param->primary_path->packet_life_time) * 2 + 1139 1140 cm_convert_to_ms( ··· 1544 1545 cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler; 1545 1546 cm_id_priv->id.context = listen_cm_id_priv->id.context; 1546 1547 cm_id_priv->id.service_id = req_msg->service_id; 1547 - cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL); 1548 + cm_id_priv->id.service_mask = ~cpu_to_be64(0); 1548 1549 1549 1550 cm_process_routed_req(req_msg, work->mad_recv_wc->wc); 1550 1551 cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]); ··· 2897 2898 goto out; 2898 2899 2899 2900 cm_id->service_id = param->service_id; 2900 - cm_id->service_mask = __constant_cpu_to_be64(~0ULL); 2901 + cm_id->service_mask = ~cpu_to_be64(0); 2901 2902 cm_id_priv->timeout_ms = param->timeout_ms; 2902 2903 cm_id_priv->max_cm_retries = param->max_cm_retries; 2903 2904 ret = cm_alloc_msg(cm_id_priv, &msg); ··· 2991 2992 cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler; 2992 2993 
cm_id_priv->id.context = cur_cm_id_priv->id.context; 2993 2994 cm_id_priv->id.service_id = sidr_req_msg->service_id; 2994 - cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL); 2995 + cm_id_priv->id.service_mask = ~cpu_to_be64(0); 2995 2996 2996 2997 cm_format_sidr_req_event(work, &cur_cm_id_priv->id); 2997 2998 cm_process_work(cm_id_priv, work); ··· 3788 3789 rwlock_init(&cm.device_lock); 3789 3790 spin_lock_init(&cm.lock); 3790 3791 cm.listen_service_table = RB_ROOT; 3791 - cm.listen_service_id = __constant_be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID); 3792 + cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID); 3792 3793 cm.remote_id_table = RB_ROOT; 3793 3794 cm.remote_qp_table = RB_ROOT; 3794 3795 cm.remote_sidr_table = RB_ROOT;
+11 -11
drivers/infiniband/core/cm_msgs.h
··· 44 44 45 45 #define IB_CM_CLASS_VERSION 2 /* IB specification 1.2 */ 46 46 47 - #define CM_REQ_ATTR_ID __constant_htons(0x0010) 48 - #define CM_MRA_ATTR_ID __constant_htons(0x0011) 49 - #define CM_REJ_ATTR_ID __constant_htons(0x0012) 50 - #define CM_REP_ATTR_ID __constant_htons(0x0013) 51 - #define CM_RTU_ATTR_ID __constant_htons(0x0014) 52 - #define CM_DREQ_ATTR_ID __constant_htons(0x0015) 53 - #define CM_DREP_ATTR_ID __constant_htons(0x0016) 54 - #define CM_SIDR_REQ_ATTR_ID __constant_htons(0x0017) 55 - #define CM_SIDR_REP_ATTR_ID __constant_htons(0x0018) 56 - #define CM_LAP_ATTR_ID __constant_htons(0x0019) 57 - #define CM_APR_ATTR_ID __constant_htons(0x001A) 47 + #define CM_REQ_ATTR_ID cpu_to_be16(0x0010) 48 + #define CM_MRA_ATTR_ID cpu_to_be16(0x0011) 49 + #define CM_REJ_ATTR_ID cpu_to_be16(0x0012) 50 + #define CM_REP_ATTR_ID cpu_to_be16(0x0013) 51 + #define CM_RTU_ATTR_ID cpu_to_be16(0x0014) 52 + #define CM_DREQ_ATTR_ID cpu_to_be16(0x0015) 53 + #define CM_DREP_ATTR_ID cpu_to_be16(0x0016) 54 + #define CM_SIDR_REQ_ATTR_ID cpu_to_be16(0x0017) 55 + #define CM_SIDR_REP_ATTR_ID cpu_to_be16(0x0018) 56 + #define CM_LAP_ATTR_ID cpu_to_be16(0x0019) 57 + #define CM_APR_ATTR_ID cpu_to_be16(0x001A) 58 58 59 59 enum cm_msg_sequence { 60 60 CM_MSG_SEQUENCE_REQ,
+1 -1
drivers/infiniband/core/mad_rmpp.c
··· 735 735 goto bad; 736 736 } 737 737 738 - if (rmpp_hdr->seg_num == __constant_htonl(1)) { 738 + if (rmpp_hdr->seg_num == cpu_to_be32(1)) { 739 739 if (!(ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST)) { 740 740 rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG; 741 741 goto bad;
+2 -2
drivers/infiniband/hw/cxgb3/iwch_qp.c
··· 99 99 if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) { 100 100 plen = 4; 101 101 wqe->write.sgl[0].stag = wr->ex.imm_data; 102 - wqe->write.sgl[0].len = __constant_cpu_to_be32(0); 103 - wqe->write.num_sgle = __constant_cpu_to_be32(0); 102 + wqe->write.sgl[0].len = cpu_to_be32(0); 103 + wqe->write.num_sgle = cpu_to_be32(0); 104 104 *flit_cnt = 6; 105 105 } else { 106 106 plen = 0;
+4 -4
drivers/infiniband/hw/ehca/ehca_sqp.c
··· 46 46 #include "ehca_iverbs.h" 47 47 #include "hcp_if.h" 48 48 49 - #define IB_MAD_STATUS_REDIRECT __constant_htons(0x0002) 50 - #define IB_MAD_STATUS_UNSUP_VERSION __constant_htons(0x0004) 51 - #define IB_MAD_STATUS_UNSUP_METHOD __constant_htons(0x0008) 49 + #define IB_MAD_STATUS_REDIRECT cpu_to_be16(0x0002) 50 + #define IB_MAD_STATUS_UNSUP_VERSION cpu_to_be16(0x0004) 51 + #define IB_MAD_STATUS_UNSUP_METHOD cpu_to_be16(0x0008) 52 52 53 - #define IB_PMA_CLASS_PORT_INFO __constant_htons(0x0001) 53 + #define IB_PMA_CLASS_PORT_INFO cpu_to_be16(0x0001) 54 54 55 55 /** 56 56 * ehca_define_sqp - Defines special queue pair 1 (GSI QP). When special queue
+2 -2
drivers/infiniband/hw/ipath/ipath_eeprom.c
··· 772 772 "0x%x, not 0x%x\n", csum, ifp->if_csum); 773 773 goto done; 774 774 } 775 - if (*(__be64 *) ifp->if_guid == 0ULL || 776 - *(__be64 *) ifp->if_guid == __constant_cpu_to_be64(-1LL)) { 775 + if (*(__be64 *) ifp->if_guid == cpu_to_be64(0) || 776 + *(__be64 *) ifp->if_guid == ~cpu_to_be64(0)) { 777 777 ipath_dev_err(dd, "Invalid GUID %llx from flash; " 778 778 "ignoring\n", 779 779 *(unsigned long long *) ifp->if_guid);
+47 -48
drivers/infiniband/hw/ipath/ipath_mad.c
··· 37 37 #include "ipath_verbs.h" 38 38 #include "ipath_common.h" 39 39 40 - #define IB_SMP_UNSUP_VERSION __constant_htons(0x0004) 41 - #define IB_SMP_UNSUP_METHOD __constant_htons(0x0008) 42 - #define IB_SMP_UNSUP_METH_ATTR __constant_htons(0x000C) 43 - #define IB_SMP_INVALID_FIELD __constant_htons(0x001C) 40 + #define IB_SMP_UNSUP_VERSION cpu_to_be16(0x0004) 41 + #define IB_SMP_UNSUP_METHOD cpu_to_be16(0x0008) 42 + #define IB_SMP_UNSUP_METH_ATTR cpu_to_be16(0x000C) 43 + #define IB_SMP_INVALID_FIELD cpu_to_be16(0x001C) 44 44 45 45 static int reply(struct ib_smp *smp) 46 46 { ··· 789 789 return recv_subn_get_pkeytable(smp, ibdev); 790 790 } 791 791 792 - #define IB_PMA_CLASS_PORT_INFO __constant_htons(0x0001) 793 - #define IB_PMA_PORT_SAMPLES_CONTROL __constant_htons(0x0010) 794 - #define IB_PMA_PORT_SAMPLES_RESULT __constant_htons(0x0011) 795 - #define IB_PMA_PORT_COUNTERS __constant_htons(0x0012) 796 - #define IB_PMA_PORT_COUNTERS_EXT __constant_htons(0x001D) 797 - #define IB_PMA_PORT_SAMPLES_RESULT_EXT __constant_htons(0x001E) 792 + #define IB_PMA_CLASS_PORT_INFO cpu_to_be16(0x0001) 793 + #define IB_PMA_PORT_SAMPLES_CONTROL cpu_to_be16(0x0010) 794 + #define IB_PMA_PORT_SAMPLES_RESULT cpu_to_be16(0x0011) 795 + #define IB_PMA_PORT_COUNTERS cpu_to_be16(0x0012) 796 + #define IB_PMA_PORT_COUNTERS_EXT cpu_to_be16(0x001D) 797 + #define IB_PMA_PORT_SAMPLES_RESULT_EXT cpu_to_be16(0x001E) 798 798 799 799 struct ib_perf { 800 800 u8 base_version; ··· 884 884 __be32 port_rcv_packets; 885 885 } __attribute__ ((packed)); 886 886 887 - #define IB_PMA_SEL_SYMBOL_ERROR __constant_htons(0x0001) 888 - #define IB_PMA_SEL_LINK_ERROR_RECOVERY __constant_htons(0x0002) 889 - #define IB_PMA_SEL_LINK_DOWNED __constant_htons(0x0004) 890 - #define IB_PMA_SEL_PORT_RCV_ERRORS __constant_htons(0x0008) 891 - #define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS __constant_htons(0x0010) 892 - #define IB_PMA_SEL_PORT_XMIT_DISCARDS __constant_htons(0x0040) 893 - #define 
IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS __constant_htons(0x0200) 894 - #define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS __constant_htons(0x0400) 895 - #define IB_PMA_SEL_PORT_VL15_DROPPED __constant_htons(0x0800) 896 - #define IB_PMA_SEL_PORT_XMIT_DATA __constant_htons(0x1000) 897 - #define IB_PMA_SEL_PORT_RCV_DATA __constant_htons(0x2000) 898 - #define IB_PMA_SEL_PORT_XMIT_PACKETS __constant_htons(0x4000) 899 - #define IB_PMA_SEL_PORT_RCV_PACKETS __constant_htons(0x8000) 887 + #define IB_PMA_SEL_SYMBOL_ERROR cpu_to_be16(0x0001) 888 + #define IB_PMA_SEL_LINK_ERROR_RECOVERY cpu_to_be16(0x0002) 889 + #define IB_PMA_SEL_LINK_DOWNED cpu_to_be16(0x0004) 890 + #define IB_PMA_SEL_PORT_RCV_ERRORS cpu_to_be16(0x0008) 891 + #define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS cpu_to_be16(0x0010) 892 + #define IB_PMA_SEL_PORT_XMIT_DISCARDS cpu_to_be16(0x0040) 893 + #define IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS cpu_to_be16(0x0200) 894 + #define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS cpu_to_be16(0x0400) 895 + #define IB_PMA_SEL_PORT_VL15_DROPPED cpu_to_be16(0x0800) 896 + #define IB_PMA_SEL_PORT_XMIT_DATA cpu_to_be16(0x1000) 897 + #define IB_PMA_SEL_PORT_RCV_DATA cpu_to_be16(0x2000) 898 + #define IB_PMA_SEL_PORT_XMIT_PACKETS cpu_to_be16(0x4000) 899 + #define IB_PMA_SEL_PORT_RCV_PACKETS cpu_to_be16(0x8000) 900 900 901 901 struct ib_pma_portcounters_ext { 902 902 u8 reserved; ··· 913 913 __be64 port_multicast_rcv_packets; 914 914 } __attribute__ ((packed)); 915 915 916 - #define IB_PMA_SELX_PORT_XMIT_DATA __constant_htons(0x0001) 917 - #define IB_PMA_SELX_PORT_RCV_DATA __constant_htons(0x0002) 918 - #define IB_PMA_SELX_PORT_XMIT_PACKETS __constant_htons(0x0004) 919 - #define IB_PMA_SELX_PORT_RCV_PACKETS __constant_htons(0x0008) 920 - #define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS __constant_htons(0x0010) 921 - #define IB_PMA_SELX_PORT_UNI_RCV_PACKETS __constant_htons(0x0020) 922 - #define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS __constant_htons(0x0040) 923 - #define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS 
__constant_htons(0x0080) 916 + #define IB_PMA_SELX_PORT_XMIT_DATA cpu_to_be16(0x0001) 917 + #define IB_PMA_SELX_PORT_RCV_DATA cpu_to_be16(0x0002) 918 + #define IB_PMA_SELX_PORT_XMIT_PACKETS cpu_to_be16(0x0004) 919 + #define IB_PMA_SELX_PORT_RCV_PACKETS cpu_to_be16(0x0008) 920 + #define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS cpu_to_be16(0x0010) 921 + #define IB_PMA_SELX_PORT_UNI_RCV_PACKETS cpu_to_be16(0x0020) 922 + #define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS cpu_to_be16(0x0040) 923 + #define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS cpu_to_be16(0x0080) 924 924 925 925 static int recv_pma_get_classportinfo(struct ib_perf *pmp) 926 926 { ··· 933 933 pmp->status |= IB_SMP_INVALID_FIELD; 934 934 935 935 /* Indicate AllPortSelect is valid (only one port anyway) */ 936 - p->cap_mask = __constant_cpu_to_be16(1 << 8); 936 + p->cap_mask = cpu_to_be16(1 << 8); 937 937 p->base_version = 1; 938 938 p->class_version = 1; 939 939 /* ··· 951 951 * We support 5 counters which only count the mandatory quantities. 952 952 */ 953 953 #define COUNTER_MASK(q, n) (q << ((9 - n) * 3)) 954 - #define COUNTER_MASK0_9 \ 955 - __constant_cpu_to_be32(COUNTER_MASK(1, 0) | \ 956 - COUNTER_MASK(1, 1) | \ 957 - COUNTER_MASK(1, 2) | \ 958 - COUNTER_MASK(1, 3) | \ 959 - COUNTER_MASK(1, 4)) 954 + #define COUNTER_MASK0_9 cpu_to_be32(COUNTER_MASK(1, 0) | \ 955 + COUNTER_MASK(1, 1) | \ 956 + COUNTER_MASK(1, 2) | \ 957 + COUNTER_MASK(1, 3) | \ 958 + COUNTER_MASK(1, 4)) 960 959 961 960 static int recv_pma_get_portsamplescontrol(struct ib_perf *pmp, 962 961 struct ib_device *ibdev, u8 port) ··· 1136 1137 status = dev->pma_sample_status; 1137 1138 p->sample_status = cpu_to_be16(status); 1138 1139 /* 64 bits */ 1139 - p->extended_width = __constant_cpu_to_be32(0x80000000); 1140 + p->extended_width = cpu_to_be32(0x80000000); 1140 1141 for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++) 1141 1142 p->counter[i] = (status != IB_PMA_SAMPLE_STATUS_DONE) ? 
0 : 1142 1143 cpu_to_be64( ··· 1184 1185 pmp->status |= IB_SMP_INVALID_FIELD; 1185 1186 1186 1187 if (cntrs.symbol_error_counter > 0xFFFFUL) 1187 - p->symbol_error_counter = __constant_cpu_to_be16(0xFFFF); 1188 + p->symbol_error_counter = cpu_to_be16(0xFFFF); 1188 1189 else 1189 1190 p->symbol_error_counter = 1190 1191 cpu_to_be16((u16)cntrs.symbol_error_counter); ··· 1198 1199 else 1199 1200 p->link_downed_counter = (u8)cntrs.link_downed_counter; 1200 1201 if (cntrs.port_rcv_errors > 0xFFFFUL) 1201 - p->port_rcv_errors = __constant_cpu_to_be16(0xFFFF); 1202 + p->port_rcv_errors = cpu_to_be16(0xFFFF); 1202 1203 else 1203 1204 p->port_rcv_errors = 1204 1205 cpu_to_be16((u16) cntrs.port_rcv_errors); 1205 1206 if (cntrs.port_rcv_remphys_errors > 0xFFFFUL) 1206 - p->port_rcv_remphys_errors = __constant_cpu_to_be16(0xFFFF); 1207 + p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF); 1207 1208 else 1208 1209 p->port_rcv_remphys_errors = 1209 1210 cpu_to_be16((u16)cntrs.port_rcv_remphys_errors); 1210 1211 if (cntrs.port_xmit_discards > 0xFFFFUL) 1211 - p->port_xmit_discards = __constant_cpu_to_be16(0xFFFF); 1212 + p->port_xmit_discards = cpu_to_be16(0xFFFF); 1212 1213 else 1213 1214 p->port_xmit_discards = 1214 1215 cpu_to_be16((u16)cntrs.port_xmit_discards); ··· 1219 1220 p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) | 1220 1221 cntrs.excessive_buffer_overrun_errors; 1221 1222 if (cntrs.vl15_dropped > 0xFFFFUL) 1222 - p->vl15_dropped = __constant_cpu_to_be16(0xFFFF); 1223 + p->vl15_dropped = cpu_to_be16(0xFFFF); 1223 1224 else 1224 1225 p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped); 1225 1226 if (cntrs.port_xmit_data > 0xFFFFFFFFUL) 1226 - p->port_xmit_data = __constant_cpu_to_be32(0xFFFFFFFF); 1227 + p->port_xmit_data = cpu_to_be32(0xFFFFFFFF); 1227 1228 else 1228 1229 p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data); 1229 1230 if (cntrs.port_rcv_data > 0xFFFFFFFFUL) 1230 - p->port_rcv_data = __constant_cpu_to_be32(0xFFFFFFFF); 1231 + 
p->port_rcv_data = cpu_to_be32(0xFFFFFFFF); 1231 1232 else 1232 1233 p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data); 1233 1234 if (cntrs.port_xmit_packets > 0xFFFFFFFFUL) 1234 - p->port_xmit_packets = __constant_cpu_to_be32(0xFFFFFFFF); 1235 + p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF); 1235 1236 else 1236 1237 p->port_xmit_packets = 1237 1238 cpu_to_be32((u32)cntrs.port_xmit_packets); 1238 1239 if (cntrs.port_rcv_packets > 0xFFFFFFFFUL) 1239 - p->port_rcv_packets = __constant_cpu_to_be32(0xFFFFFFFF); 1240 + p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF); 1240 1241 else 1241 1242 p->port_rcv_packets = 1242 1243 cpu_to_be32((u32) cntrs.port_rcv_packets);
+1 -1
drivers/infiniband/hw/ipath/ipath_rc.c
··· 1744 1744 /* Signal completion event if the solicited bit is set. */ 1745 1745 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1746 1746 (ohdr->bth[0] & 1747 - __constant_cpu_to_be32(1 << 23)) != 0); 1747 + cpu_to_be32(1 << 23)) != 0); 1748 1748 break; 1749 1749 1750 1750 case OP(RDMA_WRITE_FIRST):
+2 -2
drivers/infiniband/hw/ipath/ipath_sdma.c
··· 781 781 descqp = &dd->ipath_sdma_descq[dd->ipath_sdma_descq_cnt].qw[0]; 782 782 descqp -= 2; 783 783 /* SDmaLastDesc */ 784 - descqp[0] |= __constant_cpu_to_le64(1ULL << 11); 784 + descqp[0] |= cpu_to_le64(1ULL << 11); 785 785 if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_INTREQ) { 786 786 /* SDmaIntReq */ 787 - descqp[0] |= __constant_cpu_to_le64(1ULL << 15); 787 + descqp[0] |= cpu_to_le64(1ULL << 15); 788 788 } 789 789 790 790 /* Commit writes to memory and advance the tail on the chip */
+1 -1
drivers/infiniband/hw/ipath/ipath_uc.c
··· 419 419 /* Signal completion event if the solicited bit is set. */ 420 420 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 421 421 (ohdr->bth[0] & 422 - __constant_cpu_to_be32(1 << 23)) != 0); 422 + cpu_to_be32(1 << 23)) != 0); 423 423 break; 424 424 425 425 case OP(RDMA_WRITE_FIRST):
+2 -2
drivers/infiniband/hw/ipath/ipath_ud.c
··· 370 370 */ 371 371 ohdr->bth[1] = ah_attr->dlid >= IPATH_MULTICAST_LID_BASE && 372 372 ah_attr->dlid != IPATH_PERMISSIVE_LID ? 373 - __constant_cpu_to_be32(IPATH_MULTICAST_QPN) : 373 + cpu_to_be32(IPATH_MULTICAST_QPN) : 374 374 cpu_to_be32(wqe->wr.wr.ud.remote_qpn); 375 375 ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & IPATH_PSN_MASK); 376 376 /* ··· 573 573 /* Signal completion event if the solicited bit is set. */ 574 574 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 575 575 (ohdr->bth[0] & 576 - __constant_cpu_to_be32(1 << 23)) != 0); 576 + cpu_to_be32(1 << 23)) != 0); 577 577 578 578 bail:; 579 579 }
+3 -3
drivers/infiniband/hw/ipath/ipath_user_sdma.c
··· 667 667 668 668 static inline __le64 ipath_sdma_make_first_desc0(__le64 descq) 669 669 { 670 - return descq | __constant_cpu_to_le64(1ULL << 12); 670 + return descq | cpu_to_le64(1ULL << 12); 671 671 } 672 672 673 673 static inline __le64 ipath_sdma_make_last_desc0(__le64 descq) 674 674 { 675 675 /* last */ /* dma head */ 676 - return descq | __constant_cpu_to_le64(1ULL << 11 | 1ULL << 13); 676 + return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13); 677 677 } 678 678 679 679 static inline __le64 ipath_sdma_make_desc1(u64 addr) ··· 763 763 if (ofs >= IPATH_SMALLBUF_DWORDS) { 764 764 for (i = 0; i < pkt->naddr; i++) { 765 765 dd->ipath_sdma_descq[dtail].qw[0] |= 766 - __constant_cpu_to_le64(1ULL << 14); 766 + cpu_to_le64(1ULL << 14); 767 767 if (++dtail == dd->ipath_sdma_descq_cnt) 768 768 dtail = 0; 769 769 }
+1 -1
drivers/infiniband/hw/ipath/ipath_verbs.c
··· 1585 1585 u64 ibcstat; 1586 1586 1587 1587 memset(props, 0, sizeof(*props)); 1588 - props->lid = lid ? lid : __constant_be16_to_cpu(IB_LID_PERMISSIVE); 1588 + props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE); 1589 1589 props->lmc = dd->ipath_lmc; 1590 1590 props->sm_lid = dev->sm_lid; 1591 1591 props->sm_sl = dev->sm_sl;
+5 -5
drivers/infiniband/hw/ipath/ipath_verbs.h
··· 86 86 #define IB_PMA_SAMPLE_STATUS_RUNNING 0x02 87 87 88 88 /* Mandatory IB performance counter select values. */ 89 - #define IB_PMA_PORT_XMIT_DATA __constant_htons(0x0001) 90 - #define IB_PMA_PORT_RCV_DATA __constant_htons(0x0002) 91 - #define IB_PMA_PORT_XMIT_PKTS __constant_htons(0x0003) 92 - #define IB_PMA_PORT_RCV_PKTS __constant_htons(0x0004) 93 - #define IB_PMA_PORT_XMIT_WAIT __constant_htons(0x0005) 89 + #define IB_PMA_PORT_XMIT_DATA cpu_to_be16(0x0001) 90 + #define IB_PMA_PORT_RCV_DATA cpu_to_be16(0x0002) 91 + #define IB_PMA_PORT_XMIT_PKTS cpu_to_be16(0x0003) 92 + #define IB_PMA_PORT_RCV_PKTS cpu_to_be16(0x0004) 93 + #define IB_PMA_PORT_XMIT_WAIT cpu_to_be16(0x0005) 94 94 95 95 struct ib_reth { 96 96 __be64 vaddr;
+11 -11
drivers/infiniband/hw/mlx4/qp.c
··· 71 71 }; 72 72 73 73 static const __be32 mlx4_ib_opcode[] = { 74 - [IB_WR_SEND] = __constant_cpu_to_be32(MLX4_OPCODE_SEND), 75 - [IB_WR_LSO] = __constant_cpu_to_be32(MLX4_OPCODE_LSO), 76 - [IB_WR_SEND_WITH_IMM] = __constant_cpu_to_be32(MLX4_OPCODE_SEND_IMM), 77 - [IB_WR_RDMA_WRITE] = __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE), 78 - [IB_WR_RDMA_WRITE_WITH_IMM] = __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM), 79 - [IB_WR_RDMA_READ] = __constant_cpu_to_be32(MLX4_OPCODE_RDMA_READ), 80 - [IB_WR_ATOMIC_CMP_AND_SWP] = __constant_cpu_to_be32(MLX4_OPCODE_ATOMIC_CS), 81 - [IB_WR_ATOMIC_FETCH_AND_ADD] = __constant_cpu_to_be32(MLX4_OPCODE_ATOMIC_FA), 82 - [IB_WR_SEND_WITH_INV] = __constant_cpu_to_be32(MLX4_OPCODE_SEND_INVAL), 83 - [IB_WR_LOCAL_INV] = __constant_cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL), 84 - [IB_WR_FAST_REG_MR] = __constant_cpu_to_be32(MLX4_OPCODE_FMR), 74 + [IB_WR_SEND] = cpu_to_be32(MLX4_OPCODE_SEND), 75 + [IB_WR_LSO] = cpu_to_be32(MLX4_OPCODE_LSO), 76 + [IB_WR_SEND_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_SEND_IMM), 77 + [IB_WR_RDMA_WRITE] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE), 78 + [IB_WR_RDMA_WRITE_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM), 79 + [IB_WR_RDMA_READ] = cpu_to_be32(MLX4_OPCODE_RDMA_READ), 80 + [IB_WR_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_ATOMIC_CS), 81 + [IB_WR_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_ATOMIC_FA), 82 + [IB_WR_SEND_WITH_INV] = cpu_to_be32(MLX4_OPCODE_SEND_INVAL), 83 + [IB_WR_LOCAL_INV] = cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL), 84 + [IB_WR_FAST_REG_MR] = cpu_to_be32(MLX4_OPCODE_FMR), 85 85 }; 86 86 87 87 static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
+6 -6
include/rdma/ib_cm.h
··· 314 314 */ 315 315 void ib_destroy_cm_id(struct ib_cm_id *cm_id); 316 316 317 - #define IB_SERVICE_ID_AGN_MASK __constant_cpu_to_be64(0xFF00000000000000ULL) 318 - #define IB_CM_ASSIGN_SERVICE_ID __constant_cpu_to_be64(0x0200000000000000ULL) 319 - #define IB_CMA_SERVICE_ID __constant_cpu_to_be64(0x0000000001000000ULL) 320 - #define IB_CMA_SERVICE_ID_MASK __constant_cpu_to_be64(0xFFFFFFFFFF000000ULL) 321 - #define IB_SDP_SERVICE_ID __constant_cpu_to_be64(0x0000000000010000ULL) 322 - #define IB_SDP_SERVICE_ID_MASK __constant_cpu_to_be64(0xFFFFFFFFFFFF0000ULL) 317 + #define IB_SERVICE_ID_AGN_MASK cpu_to_be64(0xFF00000000000000ULL) 318 + #define IB_CM_ASSIGN_SERVICE_ID cpu_to_be64(0x0200000000000000ULL) 319 + #define IB_CMA_SERVICE_ID cpu_to_be64(0x0000000001000000ULL) 320 + #define IB_CMA_SERVICE_ID_MASK cpu_to_be64(0xFFFFFFFFFF000000ULL) 321 + #define IB_SDP_SERVICE_ID cpu_to_be64(0x0000000000010000ULL) 322 + #define IB_SDP_SERVICE_ID_MASK cpu_to_be64(0xFFFFFFFFFFFF0000ULL) 323 323 324 324 struct ib_cm_compare_data { 325 325 u8 data[IB_CM_COMPARE_SIZE];
+1 -1
include/rdma/ib_mad.h
··· 107 107 #define IB_MGMT_RMPP_STATUS_ABORT_MAX 127 108 108 109 109 #define IB_QP0 0 110 - #define IB_QP1 __constant_htonl(1) 110 + #define IB_QP1 cpu_to_be32(1) 111 111 #define IB_QP1_QKEY 0x80010000 112 112 #define IB_QP_SET_QKEY 0x80000000 113 113
+17 -17
include/rdma/ib_smi.h
··· 63 63 u8 return_path[IB_SMP_MAX_PATH_HOPS]; 64 64 } __attribute__ ((packed)); 65 65 66 - #define IB_SMP_DIRECTION __constant_htons(0x8000) 66 + #define IB_SMP_DIRECTION cpu_to_be16(0x8000) 67 67 68 68 /* Subnet management attributes */ 69 - #define IB_SMP_ATTR_NOTICE __constant_htons(0x0002) 70 - #define IB_SMP_ATTR_NODE_DESC __constant_htons(0x0010) 71 - #define IB_SMP_ATTR_NODE_INFO __constant_htons(0x0011) 72 - #define IB_SMP_ATTR_SWITCH_INFO __constant_htons(0x0012) 73 - #define IB_SMP_ATTR_GUID_INFO __constant_htons(0x0014) 74 - #define IB_SMP_ATTR_PORT_INFO __constant_htons(0x0015) 75 - #define IB_SMP_ATTR_PKEY_TABLE __constant_htons(0x0016) 76 - #define IB_SMP_ATTR_SL_TO_VL_TABLE __constant_htons(0x0017) 77 - #define IB_SMP_ATTR_VL_ARB_TABLE __constant_htons(0x0018) 78 - #define IB_SMP_ATTR_LINEAR_FORWARD_TABLE __constant_htons(0x0019) 79 - #define IB_SMP_ATTR_RANDOM_FORWARD_TABLE __constant_htons(0x001A) 80 - #define IB_SMP_ATTR_MCAST_FORWARD_TABLE __constant_htons(0x001B) 81 - #define IB_SMP_ATTR_SM_INFO __constant_htons(0x0020) 82 - #define IB_SMP_ATTR_VENDOR_DIAG __constant_htons(0x0030) 83 - #define IB_SMP_ATTR_LED_INFO __constant_htons(0x0031) 84 - #define IB_SMP_ATTR_VENDOR_MASK __constant_htons(0xFF00) 69 + #define IB_SMP_ATTR_NOTICE cpu_to_be16(0x0002) 70 + #define IB_SMP_ATTR_NODE_DESC cpu_to_be16(0x0010) 71 + #define IB_SMP_ATTR_NODE_INFO cpu_to_be16(0x0011) 72 + #define IB_SMP_ATTR_SWITCH_INFO cpu_to_be16(0x0012) 73 + #define IB_SMP_ATTR_GUID_INFO cpu_to_be16(0x0014) 74 + #define IB_SMP_ATTR_PORT_INFO cpu_to_be16(0x0015) 75 + #define IB_SMP_ATTR_PKEY_TABLE cpu_to_be16(0x0016) 76 + #define IB_SMP_ATTR_SL_TO_VL_TABLE cpu_to_be16(0x0017) 77 + #define IB_SMP_ATTR_VL_ARB_TABLE cpu_to_be16(0x0018) 78 + #define IB_SMP_ATTR_LINEAR_FORWARD_TABLE cpu_to_be16(0x0019) 79 + #define IB_SMP_ATTR_RANDOM_FORWARD_TABLE cpu_to_be16(0x001A) 80 + #define IB_SMP_ATTR_MCAST_FORWARD_TABLE cpu_to_be16(0x001B) 81 + #define IB_SMP_ATTR_SM_INFO cpu_to_be16(0x0020) 82 + 
#define IB_SMP_ATTR_VENDOR_DIAG cpu_to_be16(0x0030) 83 + #define IB_SMP_ATTR_LED_INFO cpu_to_be16(0x0031) 84 + #define IB_SMP_ATTR_VENDOR_MASK cpu_to_be16(0xFF00) 85 85 86 86 struct ib_port_info { 87 87 __be64 mkey;