Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

RDMA: Use __packed annotation instead of __attribute__ ((packed))

The "__attribute__" set of shortcut macros was standardized — making code
potentially more portable and consistent — back in v2.6.21 by commit
82ddcb040 ("[PATCH] extend the set of "__attribute__" shortcut macros").
Moreover, nowadays checkpatch.pl warns about using __attribute__((packed))
instead of __packed.

This patch converts all the "__attribute__ ((packed))" annotations to
"__packed" within the RDMA subsystem.

Signed-off-by: Erez Alfasi <ereza@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>

authored by

Erez Alfasi and committed by
Jason Gunthorpe
19b1a294 d0a93556

+39 -39
+11 -11
drivers/infiniband/core/cm_msgs.h
··· 98 98 99 99 u32 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE / sizeof(u32)]; 100 100 101 - } __attribute__ ((packed)); 101 + } __packed; 102 102 103 103 static inline __be32 cm_req_get_local_qpn(struct cm_req_msg *req_msg) 104 104 { ··· 423 423 424 424 u8 private_data[IB_CM_MRA_PRIVATE_DATA_SIZE]; 425 425 426 - } __attribute__ ((packed)); 426 + } __packed; 427 427 428 428 static inline u8 cm_mra_get_msg_mraed(struct cm_mra_msg *mra_msg) 429 429 { ··· 461 461 462 462 u8 private_data[IB_CM_REJ_PRIVATE_DATA_SIZE]; 463 463 464 - } __attribute__ ((packed)); 464 + } __packed; 465 465 466 466 static inline u8 cm_rej_get_msg_rejected(struct cm_rej_msg *rej_msg) 467 467 { ··· 506 506 507 507 u8 private_data[IB_CM_REP_PRIVATE_DATA_SIZE]; 508 508 509 - } __attribute__ ((packed)); 509 + } __packed; 510 510 511 511 static inline __be32 cm_rep_get_local_qpn(struct cm_rep_msg *rep_msg) 512 512 { ··· 614 614 615 615 u8 private_data[IB_CM_RTU_PRIVATE_DATA_SIZE]; 616 616 617 - } __attribute__ ((packed)); 617 + } __packed; 618 618 619 619 struct cm_dreq_msg { 620 620 struct ib_mad_hdr hdr; ··· 626 626 627 627 u8 private_data[IB_CM_DREQ_PRIVATE_DATA_SIZE]; 628 628 629 - } __attribute__ ((packed)); 629 + } __packed; 630 630 631 631 static inline __be32 cm_dreq_get_remote_qpn(struct cm_dreq_msg *dreq_msg) 632 632 { ··· 647 647 648 648 u8 private_data[IB_CM_DREP_PRIVATE_DATA_SIZE]; 649 649 650 - } __attribute__ ((packed)); 650 + } __packed; 651 651 652 652 struct cm_lap_msg { 653 653 struct ib_mad_hdr hdr; ··· 675 675 u8 offset63; 676 676 677 677 u8 private_data[IB_CM_LAP_PRIVATE_DATA_SIZE]; 678 - } __attribute__ ((packed)); 678 + } __packed; 679 679 680 680 static inline __be32 cm_lap_get_remote_qpn(struct cm_lap_msg *lap_msg) 681 681 { ··· 784 784 u8 info[IB_CM_APR_INFO_LENGTH]; 785 785 786 786 u8 private_data[IB_CM_APR_PRIVATE_DATA_SIZE]; 787 - } __attribute__ ((packed)); 787 + } __packed; 788 788 789 789 struct cm_sidr_req_msg { 790 790 struct ib_mad_hdr hdr; ··· 795 795 __be64 
service_id; 796 796 797 797 u32 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE / sizeof(u32)]; 798 - } __attribute__ ((packed)); 798 + } __packed; 799 799 800 800 struct cm_sidr_rep_msg { 801 801 struct ib_mad_hdr hdr; ··· 811 811 u8 info[IB_CM_SIDR_REP_INFO_LENGTH]; 812 812 813 813 u8 private_data[IB_CM_SIDR_REP_PRIVATE_DATA_SIZE]; 814 - } __attribute__ ((packed)); 814 + } __packed; 815 815 816 816 static inline __be32 cm_sidr_rep_get_qpn(struct cm_sidr_rep_msg *sidr_rep_msg) 817 817 {
+2 -2
drivers/infiniband/core/mad_priv.h
··· 73 73 struct ib_mad_recv_wc recv_wc; 74 74 struct ib_wc wc; 75 75 u64 mapping; 76 - } __attribute__ ((packed)); 76 + } __packed; 77 77 78 78 struct ib_mad_private { 79 79 struct ib_mad_private_header header; 80 80 size_t mad_size; 81 81 struct ib_grh grh; 82 82 u8 mad[0]; 83 - } __attribute__ ((packed)); 83 + } __packed; 84 84 85 85 struct ib_rmpp_segment { 86 86 struct list_head list;
+5 -5
drivers/infiniband/hw/cxgb3/cxio_wr.h
··· 64 64 T3_SOLICITED_EVENT_FLAG = 0x04, 65 65 T3_READ_FENCE_FLAG = 0x08, 66 66 T3_LOCAL_FENCE_FLAG = 0x10 67 - } __attribute__ ((packed)); 67 + } __packed; 68 68 69 69 enum t3_wr_opcode { 70 70 T3_WR_BP = FW_WROPCODE_RI_BYPASS, ··· 77 77 T3_WR_INIT = FW_WROPCODE_RI_RDMA_INIT, 78 78 T3_WR_QP_MOD = FW_WROPCODE_RI_MODIFY_QP, 79 79 T3_WR_FASTREG = FW_WROPCODE_RI_FASTREGISTER_MR 80 - } __attribute__ ((packed)); 80 + } __packed; 81 81 82 82 enum t3_rdma_opcode { 83 83 T3_RDMA_WRITE, /* IETF RDMAP v1.0 ... */ ··· 95 95 T3_QP_MOD, 96 96 T3_BYPASS, 97 97 T3_RDMA_READ_REQ_WITH_INV, 98 - } __attribute__ ((packed)); 98 + } __packed; 99 99 100 100 static inline enum t3_rdma_opcode wr2opcode(enum t3_wr_opcode wrop) 101 101 { ··· 306 306 uP_RI_MPA_TX_MARKER_ENABLE = 0x2, 307 307 uP_RI_MPA_CRC_ENABLE = 0x4, 308 308 uP_RI_MPA_IETF_ENABLE = 0x8 309 - } __attribute__ ((packed)); 309 + } __packed; 310 310 311 311 enum t3_qp_caps { 312 312 uP_RI_QP_RDMA_READ_ENABLE = 0x01, ··· 314 314 uP_RI_QP_BIND_ENABLE = 0x04, 315 315 uP_RI_QP_FAST_REGISTER_ENABLE = 0x08, 316 316 uP_RI_QP_STAG0_ENABLE = 0x10 317 - } __attribute__ ((packed)); 317 + } __packed; 318 318 319 319 enum rdma_init_rtr_types { 320 320 RTR_READ = 1,
+1 -1
drivers/infiniband/hw/mthca/mthca_cq.c
··· 77 77 __be32 ci_db; /* Arbel only */ 78 78 __be32 state_db; /* Arbel only */ 79 79 u32 reserved; 80 - } __attribute__((packed)); 80 + } __packed; 81 81 82 82 #define MTHCA_CQ_STATUS_OK ( 0 << 28) 83 83 #define MTHCA_CQ_STATUS_OVERFLOW ( 9 << 28)
+8 -8
drivers/infiniband/hw/mthca/mthca_eq.c
··· 63 63 __be32 consumer_index; 64 64 __be32 producer_index; 65 65 u32 reserved3[4]; 66 - } __attribute__((packed)); 66 + } __packed; 67 67 68 68 #define MTHCA_EQ_STATUS_OK ( 0 << 28) 69 69 #define MTHCA_EQ_STATUS_OVERFLOW ( 9 << 28) ··· 130 130 u32 raw[6]; 131 131 struct { 132 132 __be32 cqn; 133 - } __attribute__((packed)) comp; 133 + } __packed comp; 134 134 struct { 135 135 u16 reserved1; 136 136 __be16 token; ··· 138 138 u8 reserved3[3]; 139 139 u8 status; 140 140 __be64 out_param; 141 - } __attribute__((packed)) cmd; 141 + } __packed cmd; 142 142 struct { 143 143 __be32 qpn; 144 - } __attribute__((packed)) qp; 144 + } __packed qp; 145 145 struct { 146 146 __be32 srqn; 147 - } __attribute__((packed)) srq; 147 + } __packed srq; 148 148 struct { 149 149 __be32 cqn; 150 150 u32 reserved1; 151 151 u8 reserved2[3]; 152 152 u8 syndrome; 153 - } __attribute__((packed)) cq_err; 153 + } __packed cq_err; 154 154 struct { 155 155 u32 reserved1[2]; 156 156 __be32 port; 157 - } __attribute__((packed)) port_change; 157 + } __packed port_change; 158 158 } event; 159 159 u8 reserved3[3]; 160 160 u8 owner; 161 - } __attribute__((packed)); 161 + } __packed; 162 162 163 163 #define MTHCA_EQ_ENTRY_OWNER_SW (0 << 7) 164 164 #define MTHCA_EQ_ENTRY_OWNER_HW (1 << 7)
+1 -1
drivers/infiniband/hw/mthca/mthca_mr.c
··· 60 60 __be64 mtt_seg; 61 61 __be32 mtt_sz; /* Arbel only */ 62 62 u32 reserved[2]; 63 - } __attribute__((packed)); 63 + } __packed; 64 64 65 65 #define MTHCA_MPT_FLAG_SW_OWNS (0xfUL << 28) 66 66 #define MTHCA_MPT_FLAG_MIO (1 << 17)
+3 -3
drivers/infiniband/hw/mthca/mthca_qp.c
··· 115 115 u8 hop_limit; 116 116 __be32 sl_tclass_flowlabel; 117 117 u8 rgid[16]; 118 - } __attribute__((packed)); 118 + } __packed; 119 119 120 120 struct mthca_qp_context { 121 121 __be32 flags; ··· 154 154 __be16 rq_wqe_counter; /* reserved on Tavor */ 155 155 __be16 sq_wqe_counter; /* reserved on Tavor */ 156 156 u32 reserved3[18]; 157 - } __attribute__((packed)); 157 + } __packed; 158 158 159 159 struct mthca_qp_param { 160 160 __be32 opt_param_mask; 161 161 u32 reserved1; 162 162 struct mthca_qp_context context; 163 163 u32 reserved2[62]; 164 - } __attribute__((packed)); 164 + } __packed; 165 165 166 166 enum { 167 167 MTHCA_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0,
+1 -1
drivers/infiniband/sw/rxe/rxe_hdr.h
··· 643 643 __be32 rkey; 644 644 __be64 swap_add; 645 645 __be64 comp; 646 - } __attribute__((__packed__)); 646 + } __packed; 647 647 648 648 static inline u64 __atmeth_va(void *arg) 649 649 {
+1 -1
drivers/infiniband/ulp/iser/iscsi_iser.h
··· 311 311 u64 rsp_dma; 312 312 struct ib_sge sge; 313 313 struct ib_cqe cqe; 314 - } __attribute__((packed)); 314 + } __packed; 315 315 316 316 struct iser_conn; 317 317 struct ib_conn;
+2 -2
include/rdma/ib_mad.h
··· 198 198 __be16 attr_offset; 199 199 __be16 reserved; 200 200 ib_sa_comp_mask comp_mask; 201 - } __attribute__ ((packed)); 201 + } __packed; 202 202 203 203 struct ib_mad { 204 204 struct ib_mad_hdr mad_hdr; ··· 227 227 struct ib_rmpp_hdr rmpp_hdr; 228 228 struct ib_sa_hdr sa_hdr; 229 229 u8 data[IB_MGMT_SA_DATA]; 230 - } __attribute__ ((packed)); 230 + } __packed; 231 231 232 232 struct ib_vendor_mad { 233 233 struct ib_mad_hdr mad_hdr;
+1 -1
include/rdma/ib_smi.h
··· 61 61 u8 data[IB_SMP_DATA_SIZE]; 62 62 u8 initial_path[IB_SMP_MAX_PATH_HOPS]; 63 63 u8 return_path[IB_SMP_MAX_PATH_HOPS]; 64 - } __attribute__ ((packed)); 64 + } __packed; 65 65 66 66 #define IB_SMP_DIRECTION cpu_to_be16(0x8000) 67 67
+1 -1
include/rdma/opa_port_info.h
··· 413 413 u8 local_port_num; 414 414 u8 reserved12; 415 415 u8 reserved13; /* was guid_cap */ 416 - } __attribute__ ((packed)); 416 + } __packed; 417 417 418 418 #endif /* OPA_PORT_INFO_H */
+2 -2
include/rdma/opa_smi.h
··· 98 98 99 99 struct opa_node_description { 100 100 u8 data[64]; 101 - } __attribute__ ((packed)); 101 + } __packed; 102 102 103 103 struct opa_node_info { 104 104 u8 base_version; ··· 114 114 __be32 revision; 115 115 u8 local_port_num; 116 116 u8 vendor_id[3]; /* network byte order */ 117 - } __attribute__ ((packed)); 117 + } __packed; 118 118 119 119 #define OPA_PARTITION_TABLE_BLK_SIZE 32 120 120