Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

IB/ehca: Fix warnings issued by checkpatch.pl

Run the existing ehca code through checkpatch.pl and clean up the
worst of the coding style violations.

Signed-off-by: Joachim Fenkes <fenkes@de.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>

Authored by Hoang-Nam Nguyen; committed by Roland Dreier.
Commit 2b94397a (parent 187c72e3).

+259 -240
+1 -1
drivers/infiniband/hw/ehca/ehca_av.c
··· 79 79 av->av.ipd = (ah_mult > 0) ? 80 80 ((ehca_mult - 1) / ah_mult) : 0; 81 81 } else 82 - av->av.ipd = ehca_static_rate; 82 + av->av.ipd = ehca_static_rate; 83 83 84 84 av->av.lnh = ah_attr->ah_flags; 85 85 av->av.grh.word_0 = EHCA_BMASK_SET(GRH_IPVERSION_MASK, 6);
+2 -2
drivers/infiniband/hw/ehca/ehca_classes.h
··· 208 208 u32 num_hwpages; /* number of hw pages to form MR */ 209 209 int acl; /* ACL (stored here for usage in reregister) */ 210 210 u64 *start; /* virtual start address (stored here for */ 211 - /* usage in reregister) */ 211 + /* usage in reregister) */ 212 212 u64 size; /* size (stored here for usage in reregister) */ 213 213 u32 fmr_page_size; /* page size for FMR */ 214 214 u32 fmr_max_pages; /* max pages for FMR */ ··· 391 391 392 392 int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp); 393 393 int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int qp_num); 394 - struct ehca_qp* ehca_cq_get_qp(struct ehca_cq *cq, int qp_num); 394 + struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int qp_num); 395 395 396 396 #endif
+78 -78
drivers/infiniband/hw/ehca/ehca_classes_pSeries.h
··· 154 154 u32 reserved_70_127[58]; /* 70 */ 155 155 }; 156 156 157 - #define MQPCB_MASK_QKEY EHCA_BMASK_IBM(0,0) 158 - #define MQPCB_MASK_SEND_PSN EHCA_BMASK_IBM(2,2) 159 - #define MQPCB_MASK_RECEIVE_PSN EHCA_BMASK_IBM(3,3) 160 - #define MQPCB_MASK_PRIM_PHYS_PORT EHCA_BMASK_IBM(4,4) 161 - #define MQPCB_PRIM_PHYS_PORT EHCA_BMASK_IBM(24,31) 162 - #define MQPCB_MASK_ALT_PHYS_PORT EHCA_BMASK_IBM(5,5) 163 - #define MQPCB_MASK_PRIM_P_KEY_IDX EHCA_BMASK_IBM(6,6) 164 - #define MQPCB_PRIM_P_KEY_IDX EHCA_BMASK_IBM(24,31) 165 - #define MQPCB_MASK_ALT_P_KEY_IDX EHCA_BMASK_IBM(7,7) 166 - #define MQPCB_MASK_RDMA_ATOMIC_CTRL EHCA_BMASK_IBM(8,8) 167 - #define MQPCB_MASK_QP_STATE EHCA_BMASK_IBM(9,9) 168 - #define MQPCB_QP_STATE EHCA_BMASK_IBM(24,31) 169 - #define MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES EHCA_BMASK_IBM(11,11) 170 - #define MQPCB_MASK_PATH_MIGRATION_STATE EHCA_BMASK_IBM(12,12) 171 - #define MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP EHCA_BMASK_IBM(13,13) 172 - #define MQPCB_MASK_DEST_QP_NR EHCA_BMASK_IBM(14,14) 173 - #define MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD EHCA_BMASK_IBM(15,15) 174 - #define MQPCB_MASK_SERVICE_LEVEL EHCA_BMASK_IBM(16,16) 175 - #define MQPCB_MASK_SEND_GRH_FLAG EHCA_BMASK_IBM(17,17) 176 - #define MQPCB_MASK_RETRY_COUNT EHCA_BMASK_IBM(18,18) 177 - #define MQPCB_MASK_TIMEOUT EHCA_BMASK_IBM(19,19) 178 - #define MQPCB_MASK_PATH_MTU EHCA_BMASK_IBM(20,20) 179 - #define MQPCB_PATH_MTU EHCA_BMASK_IBM(24,31) 180 - #define MQPCB_MASK_MAX_STATIC_RATE EHCA_BMASK_IBM(21,21) 181 - #define MQPCB_MAX_STATIC_RATE EHCA_BMASK_IBM(24,31) 182 - #define MQPCB_MASK_DLID EHCA_BMASK_IBM(22,22) 183 - #define MQPCB_DLID EHCA_BMASK_IBM(16,31) 184 - #define MQPCB_MASK_RNR_RETRY_COUNT EHCA_BMASK_IBM(23,23) 185 - #define MQPCB_RNR_RETRY_COUNT EHCA_BMASK_IBM(29,31) 186 - #define MQPCB_MASK_SOURCE_PATH_BITS EHCA_BMASK_IBM(24,24) 187 - #define MQPCB_SOURCE_PATH_BITS EHCA_BMASK_IBM(25,31) 188 - #define MQPCB_MASK_TRAFFIC_CLASS EHCA_BMASK_IBM(25,25) 189 - #define MQPCB_TRAFFIC_CLASS 
EHCA_BMASK_IBM(24,31) 190 - #define MQPCB_MASK_HOP_LIMIT EHCA_BMASK_IBM(26,26) 191 - #define MQPCB_HOP_LIMIT EHCA_BMASK_IBM(24,31) 192 - #define MQPCB_MASK_SOURCE_GID_IDX EHCA_BMASK_IBM(27,27) 193 - #define MQPCB_SOURCE_GID_IDX EHCA_BMASK_IBM(24,31) 194 - #define MQPCB_MASK_FLOW_LABEL EHCA_BMASK_IBM(28,28) 195 - #define MQPCB_FLOW_LABEL EHCA_BMASK_IBM(12,31) 196 - #define MQPCB_MASK_DEST_GID EHCA_BMASK_IBM(30,30) 197 - #define MQPCB_MASK_SERVICE_LEVEL_AL EHCA_BMASK_IBM(31,31) 198 - #define MQPCB_SERVICE_LEVEL_AL EHCA_BMASK_IBM(28,31) 199 - #define MQPCB_MASK_SEND_GRH_FLAG_AL EHCA_BMASK_IBM(32,32) 200 - #define MQPCB_SEND_GRH_FLAG_AL EHCA_BMASK_IBM(31,31) 201 - #define MQPCB_MASK_RETRY_COUNT_AL EHCA_BMASK_IBM(33,33) 202 - #define MQPCB_RETRY_COUNT_AL EHCA_BMASK_IBM(29,31) 203 - #define MQPCB_MASK_TIMEOUT_AL EHCA_BMASK_IBM(34,34) 204 - #define MQPCB_TIMEOUT_AL EHCA_BMASK_IBM(27,31) 205 - #define MQPCB_MASK_MAX_STATIC_RATE_AL EHCA_BMASK_IBM(35,35) 206 - #define MQPCB_MAX_STATIC_RATE_AL EHCA_BMASK_IBM(24,31) 207 - #define MQPCB_MASK_DLID_AL EHCA_BMASK_IBM(36,36) 208 - #define MQPCB_DLID_AL EHCA_BMASK_IBM(16,31) 209 - #define MQPCB_MASK_RNR_RETRY_COUNT_AL EHCA_BMASK_IBM(37,37) 210 - #define MQPCB_RNR_RETRY_COUNT_AL EHCA_BMASK_IBM(29,31) 211 - #define MQPCB_MASK_SOURCE_PATH_BITS_AL EHCA_BMASK_IBM(38,38) 212 - #define MQPCB_SOURCE_PATH_BITS_AL EHCA_BMASK_IBM(25,31) 213 - #define MQPCB_MASK_TRAFFIC_CLASS_AL EHCA_BMASK_IBM(39,39) 214 - #define MQPCB_TRAFFIC_CLASS_AL EHCA_BMASK_IBM(24,31) 215 - #define MQPCB_MASK_HOP_LIMIT_AL EHCA_BMASK_IBM(40,40) 216 - #define MQPCB_HOP_LIMIT_AL EHCA_BMASK_IBM(24,31) 217 - #define MQPCB_MASK_SOURCE_GID_IDX_AL EHCA_BMASK_IBM(41,41) 218 - #define MQPCB_SOURCE_GID_IDX_AL EHCA_BMASK_IBM(24,31) 219 - #define MQPCB_MASK_FLOW_LABEL_AL EHCA_BMASK_IBM(42,42) 220 - #define MQPCB_FLOW_LABEL_AL EHCA_BMASK_IBM(12,31) 221 - #define MQPCB_MASK_DEST_GID_AL EHCA_BMASK_IBM(44,44) 222 - #define MQPCB_MASK_MAX_NR_OUTST_SEND_WR EHCA_BMASK_IBM(45,45) 223 - 
#define MQPCB_MAX_NR_OUTST_SEND_WR EHCA_BMASK_IBM(16,31) 224 - #define MQPCB_MASK_MAX_NR_OUTST_RECV_WR EHCA_BMASK_IBM(46,46) 225 - #define MQPCB_MAX_NR_OUTST_RECV_WR EHCA_BMASK_IBM(16,31) 226 - #define MQPCB_MASK_DISABLE_ETE_CREDIT_CHECK EHCA_BMASK_IBM(47,47) 227 - #define MQPCB_DISABLE_ETE_CREDIT_CHECK EHCA_BMASK_IBM(31,31) 228 - #define MQPCB_QP_NUMBER EHCA_BMASK_IBM(8,31) 229 - #define MQPCB_MASK_QP_ENABLE EHCA_BMASK_IBM(48,48) 230 - #define MQPCB_QP_ENABLE EHCA_BMASK_IBM(31,31) 231 - #define MQPCB_MASK_CURR_SRQ_LIMIT EHCA_BMASK_IBM(49,49) 232 - #define MQPCB_CURR_SRQ_LIMIT EHCA_BMASK_IBM(16,31) 233 - #define MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG EHCA_BMASK_IBM(50,50) 234 - #define MQPCB_MASK_SHARED_RQ_HNDL EHCA_BMASK_IBM(51,51) 157 + #define MQPCB_MASK_QKEY EHCA_BMASK_IBM( 0, 0) 158 + #define MQPCB_MASK_SEND_PSN EHCA_BMASK_IBM( 2, 2) 159 + #define MQPCB_MASK_RECEIVE_PSN EHCA_BMASK_IBM( 3, 3) 160 + #define MQPCB_MASK_PRIM_PHYS_PORT EHCA_BMASK_IBM( 4, 4) 161 + #define MQPCB_PRIM_PHYS_PORT EHCA_BMASK_IBM(24, 31) 162 + #define MQPCB_MASK_ALT_PHYS_PORT EHCA_BMASK_IBM( 5, 5) 163 + #define MQPCB_MASK_PRIM_P_KEY_IDX EHCA_BMASK_IBM( 6, 6) 164 + #define MQPCB_PRIM_P_KEY_IDX EHCA_BMASK_IBM(24, 31) 165 + #define MQPCB_MASK_ALT_P_KEY_IDX EHCA_BMASK_IBM( 7, 7) 166 + #define MQPCB_MASK_RDMA_ATOMIC_CTRL EHCA_BMASK_IBM( 8, 8) 167 + #define MQPCB_MASK_QP_STATE EHCA_BMASK_IBM( 9, 9) 168 + #define MQPCB_QP_STATE EHCA_BMASK_IBM(24, 31) 169 + #define MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES EHCA_BMASK_IBM(11, 11) 170 + #define MQPCB_MASK_PATH_MIGRATION_STATE EHCA_BMASK_IBM(12, 12) 171 + #define MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP EHCA_BMASK_IBM(13, 13) 172 + #define MQPCB_MASK_DEST_QP_NR EHCA_BMASK_IBM(14, 14) 173 + #define MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD EHCA_BMASK_IBM(15, 15) 174 + #define MQPCB_MASK_SERVICE_LEVEL EHCA_BMASK_IBM(16, 16) 175 + #define MQPCB_MASK_SEND_GRH_FLAG EHCA_BMASK_IBM(17, 17) 176 + #define MQPCB_MASK_RETRY_COUNT EHCA_BMASK_IBM(18, 18) 177 + #define 
MQPCB_MASK_TIMEOUT EHCA_BMASK_IBM(19, 19) 178 + #define MQPCB_MASK_PATH_MTU EHCA_BMASK_IBM(20, 20) 179 + #define MQPCB_PATH_MTU EHCA_BMASK_IBM(24, 31) 180 + #define MQPCB_MASK_MAX_STATIC_RATE EHCA_BMASK_IBM(21, 21) 181 + #define MQPCB_MAX_STATIC_RATE EHCA_BMASK_IBM(24, 31) 182 + #define MQPCB_MASK_DLID EHCA_BMASK_IBM(22, 22) 183 + #define MQPCB_DLID EHCA_BMASK_IBM(16, 31) 184 + #define MQPCB_MASK_RNR_RETRY_COUNT EHCA_BMASK_IBM(23, 23) 185 + #define MQPCB_RNR_RETRY_COUNT EHCA_BMASK_IBM(29, 31) 186 + #define MQPCB_MASK_SOURCE_PATH_BITS EHCA_BMASK_IBM(24, 24) 187 + #define MQPCB_SOURCE_PATH_BITS EHCA_BMASK_IBM(25, 31) 188 + #define MQPCB_MASK_TRAFFIC_CLASS EHCA_BMASK_IBM(25, 25) 189 + #define MQPCB_TRAFFIC_CLASS EHCA_BMASK_IBM(24, 31) 190 + #define MQPCB_MASK_HOP_LIMIT EHCA_BMASK_IBM(26, 26) 191 + #define MQPCB_HOP_LIMIT EHCA_BMASK_IBM(24, 31) 192 + #define MQPCB_MASK_SOURCE_GID_IDX EHCA_BMASK_IBM(27, 27) 193 + #define MQPCB_SOURCE_GID_IDX EHCA_BMASK_IBM(24, 31) 194 + #define MQPCB_MASK_FLOW_LABEL EHCA_BMASK_IBM(28, 28) 195 + #define MQPCB_FLOW_LABEL EHCA_BMASK_IBM(12, 31) 196 + #define MQPCB_MASK_DEST_GID EHCA_BMASK_IBM(30, 30) 197 + #define MQPCB_MASK_SERVICE_LEVEL_AL EHCA_BMASK_IBM(31, 31) 198 + #define MQPCB_SERVICE_LEVEL_AL EHCA_BMASK_IBM(28, 31) 199 + #define MQPCB_MASK_SEND_GRH_FLAG_AL EHCA_BMASK_IBM(32, 32) 200 + #define MQPCB_SEND_GRH_FLAG_AL EHCA_BMASK_IBM(31, 31) 201 + #define MQPCB_MASK_RETRY_COUNT_AL EHCA_BMASK_IBM(33, 33) 202 + #define MQPCB_RETRY_COUNT_AL EHCA_BMASK_IBM(29, 31) 203 + #define MQPCB_MASK_TIMEOUT_AL EHCA_BMASK_IBM(34, 34) 204 + #define MQPCB_TIMEOUT_AL EHCA_BMASK_IBM(27, 31) 205 + #define MQPCB_MASK_MAX_STATIC_RATE_AL EHCA_BMASK_IBM(35, 35) 206 + #define MQPCB_MAX_STATIC_RATE_AL EHCA_BMASK_IBM(24, 31) 207 + #define MQPCB_MASK_DLID_AL EHCA_BMASK_IBM(36, 36) 208 + #define MQPCB_DLID_AL EHCA_BMASK_IBM(16, 31) 209 + #define MQPCB_MASK_RNR_RETRY_COUNT_AL EHCA_BMASK_IBM(37, 37) 210 + #define MQPCB_RNR_RETRY_COUNT_AL EHCA_BMASK_IBM(29, 31) 211 + 
#define MQPCB_MASK_SOURCE_PATH_BITS_AL EHCA_BMASK_IBM(38, 38) 212 + #define MQPCB_SOURCE_PATH_BITS_AL EHCA_BMASK_IBM(25, 31) 213 + #define MQPCB_MASK_TRAFFIC_CLASS_AL EHCA_BMASK_IBM(39, 39) 214 + #define MQPCB_TRAFFIC_CLASS_AL EHCA_BMASK_IBM(24, 31) 215 + #define MQPCB_MASK_HOP_LIMIT_AL EHCA_BMASK_IBM(40, 40) 216 + #define MQPCB_HOP_LIMIT_AL EHCA_BMASK_IBM(24, 31) 217 + #define MQPCB_MASK_SOURCE_GID_IDX_AL EHCA_BMASK_IBM(41, 41) 218 + #define MQPCB_SOURCE_GID_IDX_AL EHCA_BMASK_IBM(24, 31) 219 + #define MQPCB_MASK_FLOW_LABEL_AL EHCA_BMASK_IBM(42, 42) 220 + #define MQPCB_FLOW_LABEL_AL EHCA_BMASK_IBM(12, 31) 221 + #define MQPCB_MASK_DEST_GID_AL EHCA_BMASK_IBM(44, 44) 222 + #define MQPCB_MASK_MAX_NR_OUTST_SEND_WR EHCA_BMASK_IBM(45, 45) 223 + #define MQPCB_MAX_NR_OUTST_SEND_WR EHCA_BMASK_IBM(16, 31) 224 + #define MQPCB_MASK_MAX_NR_OUTST_RECV_WR EHCA_BMASK_IBM(46, 46) 225 + #define MQPCB_MAX_NR_OUTST_RECV_WR EHCA_BMASK_IBM(16, 31) 226 + #define MQPCB_MASK_DISABLE_ETE_CREDIT_CHECK EHCA_BMASK_IBM(47, 47) 227 + #define MQPCB_DISABLE_ETE_CREDIT_CHECK EHCA_BMASK_IBM(31, 31) 228 + #define MQPCB_QP_NUMBER EHCA_BMASK_IBM( 8, 31) 229 + #define MQPCB_MASK_QP_ENABLE EHCA_BMASK_IBM(48, 48) 230 + #define MQPCB_QP_ENABLE EHCA_BMASK_IBM(31, 31) 231 + #define MQPCB_MASK_CURR_SRQ_LIMIT EHCA_BMASK_IBM(49, 49) 232 + #define MQPCB_CURR_SRQ_LIMIT EHCA_BMASK_IBM(16, 31) 233 + #define MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG EHCA_BMASK_IBM(50, 50) 234 + #define MQPCB_MASK_SHARED_RQ_HNDL EHCA_BMASK_IBM(51, 51) 235 235 236 236 #endif /* __EHCA_CLASSES_PSERIES_H__ */
+1 -1
drivers/infiniband/hw/ehca/ehca_cq.c
··· 97 97 return ret; 98 98 } 99 99 100 - struct ehca_qp* ehca_cq_get_qp(struct ehca_cq *cq, int real_qp_num) 100 + struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int real_qp_num) 101 101 { 102 102 struct ehca_qp *ret = NULL; 103 103 unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1);
+2 -1
drivers/infiniband/hw/ehca/ehca_eq.c
··· 96 96 for (i = 0; i < nr_pages; i++) { 97 97 u64 rpage; 98 98 99 - if (!(vpage = ipz_qpageit_get_inc(&eq->ipz_queue))) { 99 + vpage = ipz_qpageit_get_inc(&eq->ipz_queue); 100 + if (!vpage) { 100 101 ret = H_RESOURCE; 101 102 goto create_eq_exit2; 102 103 }
+20 -8
drivers/infiniband/hw/ehca/ehca_hca.c
··· 127 127 u8 port, struct ib_port_attr *props) 128 128 { 129 129 int ret = 0; 130 + u64 h_ret; 130 131 struct ehca_shca *shca = container_of(ibdev, struct ehca_shca, 131 132 ib_device); 132 133 struct hipz_query_port *rblock; ··· 138 137 return -ENOMEM; 139 138 } 140 139 141 - if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) { 140 + h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock); 141 + if (h_ret != H_SUCCESS) { 142 142 ehca_err(&shca->ib_device, "Can't query port properties"); 143 143 ret = -EINVAL; 144 144 goto query_port1; ··· 199 197 u8 port, struct ehca_sma_attr *attr) 200 198 { 201 199 int ret = 0; 200 + u64 h_ret; 202 201 struct hipz_query_port *rblock; 203 202 204 203 rblock = ehca_alloc_fw_ctrlblock(GFP_ATOMIC); ··· 208 205 return -ENOMEM; 209 206 } 210 207 211 - if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) { 208 + h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock); 209 + if (h_ret != H_SUCCESS) { 212 210 ehca_err(&shca->ib_device, "Can't query port properties"); 213 211 ret = -EINVAL; 214 212 goto query_sma_attr1; ··· 234 230 int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey) 235 231 { 236 232 int ret = 0; 237 - struct ehca_shca *shca = container_of(ibdev, struct ehca_shca, ib_device); 233 + u64 h_ret; 234 + struct ehca_shca *shca; 238 235 struct hipz_query_port *rblock; 239 236 237 + shca = container_of(ibdev, struct ehca_shca, ib_device); 240 238 if (index > 16) { 241 239 ehca_err(&shca->ib_device, "Invalid index: %x.", index); 242 240 return -EINVAL; ··· 250 244 return -ENOMEM; 251 245 } 252 246 253 - if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) { 247 + h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock); 248 + if (h_ret != H_SUCCESS) { 254 249 ehca_err(&shca->ib_device, "Can't query port properties"); 255 250 ret = -EINVAL; 256 251 goto query_pkey1; ··· 269 262 int index, union ib_gid *gid) 270 263 { 271 264 int 
ret = 0; 265 + u64 h_ret; 272 266 struct ehca_shca *shca = container_of(ibdev, struct ehca_shca, 273 267 ib_device); 274 268 struct hipz_query_port *rblock; ··· 285 277 return -ENOMEM; 286 278 } 287 279 288 - if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) { 280 + h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock); 281 + if (h_ret != H_SUCCESS) { 289 282 ehca_err(&shca->ib_device, "Can't query port properties"); 290 283 ret = -EINVAL; 291 284 goto query_gid1; ··· 311 302 struct ib_port_modify *props) 312 303 { 313 304 int ret = 0; 314 - struct ehca_shca *shca = container_of(ibdev, struct ehca_shca, ib_device); 305 + struct ehca_shca *shca; 315 306 struct hipz_query_port *rblock; 316 307 u32 cap; 317 308 u64 hret; 318 309 310 + shca = container_of(ibdev, struct ehca_shca, ib_device); 319 311 if ((props->set_port_cap_mask | props->clr_port_cap_mask) 320 312 & ~allowed_port_caps) { 321 313 ehca_err(&shca->ib_device, "Non-changeable bits set in masks " ··· 335 325 goto modify_port1; 336 326 } 337 327 338 - if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) { 328 + hret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock); 329 + if (hret != H_SUCCESS) { 339 330 ehca_err(&shca->ib_device, "Can't query port properties"); 340 331 ret = -EINVAL; 341 332 goto modify_port2; ··· 348 337 hret = hipz_h_modify_port(shca->ipz_hca_handle, port, 349 338 cap, props->init_type, port_modify_mask); 350 339 if (hret != H_SUCCESS) { 351 - ehca_err(&shca->ib_device, "Modify port failed hret=%lx", hret); 340 + ehca_err(&shca->ib_device, "Modify port failed hret=%lx", 341 + hret); 352 342 ret = -EINVAL; 353 343 } 354 344
+26 -26
drivers/infiniband/hw/ehca/ehca_irq.c
··· 49 49 #include "hipz_fns.h" 50 50 #include "ipz_pt_fn.h" 51 51 52 - #define EQE_COMPLETION_EVENT EHCA_BMASK_IBM(1,1) 53 - #define EQE_CQ_QP_NUMBER EHCA_BMASK_IBM(8,31) 54 - #define EQE_EE_IDENTIFIER EHCA_BMASK_IBM(2,7) 55 - #define EQE_CQ_NUMBER EHCA_BMASK_IBM(8,31) 56 - #define EQE_QP_NUMBER EHCA_BMASK_IBM(8,31) 57 - #define EQE_QP_TOKEN EHCA_BMASK_IBM(32,63) 58 - #define EQE_CQ_TOKEN EHCA_BMASK_IBM(32,63) 52 + #define EQE_COMPLETION_EVENT EHCA_BMASK_IBM( 1, 1) 53 + #define EQE_CQ_QP_NUMBER EHCA_BMASK_IBM( 8, 31) 54 + #define EQE_EE_IDENTIFIER EHCA_BMASK_IBM( 2, 7) 55 + #define EQE_CQ_NUMBER EHCA_BMASK_IBM( 8, 31) 56 + #define EQE_QP_NUMBER EHCA_BMASK_IBM( 8, 31) 57 + #define EQE_QP_TOKEN EHCA_BMASK_IBM(32, 63) 58 + #define EQE_CQ_TOKEN EHCA_BMASK_IBM(32, 63) 59 59 60 - #define NEQE_COMPLETION_EVENT EHCA_BMASK_IBM(1,1) 61 - #define NEQE_EVENT_CODE EHCA_BMASK_IBM(2,7) 62 - #define NEQE_PORT_NUMBER EHCA_BMASK_IBM(8,15) 63 - #define NEQE_PORT_AVAILABILITY EHCA_BMASK_IBM(16,16) 64 - #define NEQE_DISRUPTIVE EHCA_BMASK_IBM(16,16) 60 + #define NEQE_COMPLETION_EVENT EHCA_BMASK_IBM( 1, 1) 61 + #define NEQE_EVENT_CODE EHCA_BMASK_IBM( 2, 7) 62 + #define NEQE_PORT_NUMBER EHCA_BMASK_IBM( 8, 15) 63 + #define NEQE_PORT_AVAILABILITY EHCA_BMASK_IBM(16, 16) 64 + #define NEQE_DISRUPTIVE EHCA_BMASK_IBM(16, 16) 65 65 66 - #define ERROR_DATA_LENGTH EHCA_BMASK_IBM(52,63) 67 - #define ERROR_DATA_TYPE EHCA_BMASK_IBM(0,7) 66 + #define ERROR_DATA_LENGTH EHCA_BMASK_IBM(52, 63) 67 + #define ERROR_DATA_TYPE EHCA_BMASK_IBM( 0, 7) 68 68 69 69 static void queue_comp_task(struct ehca_cq *__cq); 70 70 71 - static struct ehca_comp_pool* pool; 71 + static struct ehca_comp_pool *pool; 72 72 #ifdef CONFIG_HOTPLUG_CPU 73 73 static struct notifier_block comp_pool_callback_nb; 74 74 #endif ··· 85 85 return; 86 86 } 87 87 88 - static void print_error_data(struct ehca_shca * shca, void* data, 89 - u64* rblock, int length) 88 + static void print_error_data(struct ehca_shca *shca, void *data, 89 + u64 
*rblock, int length) 90 90 { 91 91 u64 type = EHCA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]); 92 92 u64 resource = rblock[1]; ··· 94 94 switch (type) { 95 95 case 0x1: /* Queue Pair */ 96 96 { 97 - struct ehca_qp *qp = (struct ehca_qp*)data; 97 + struct ehca_qp *qp = (struct ehca_qp *)data; 98 98 99 99 /* only print error data if AER is set */ 100 100 if (rblock[6] == 0) ··· 107 107 } 108 108 case 0x4: /* Completion Queue */ 109 109 { 110 - struct ehca_cq *cq = (struct ehca_cq*)data; 110 + struct ehca_cq *cq = (struct ehca_cq *)data; 111 111 112 112 ehca_err(&shca->ib_device, 113 113 "CQ 0x%x (resource=%lx) has errors.", ··· 572 572 ehca_process_eq((struct ehca_shca*)data, 1); 573 573 } 574 574 575 - static inline int find_next_online_cpu(struct ehca_comp_pool* pool) 575 + static inline int find_next_online_cpu(struct ehca_comp_pool *pool) 576 576 { 577 577 int cpu; 578 578 unsigned long flags; ··· 636 636 __queue_comp_task(__cq, cct); 637 637 } 638 638 639 - static void run_comp_task(struct ehca_cpu_comp_task* cct) 639 + static void run_comp_task(struct ehca_cpu_comp_task *cct) 640 640 { 641 641 struct ehca_cq *cq; 642 642 unsigned long flags; ··· 666 666 667 667 static int comp_task(void *__cct) 668 668 { 669 - struct ehca_cpu_comp_task* cct = __cct; 669 + struct ehca_cpu_comp_task *cct = __cct; 670 670 int cql_empty; 671 671 DECLARE_WAITQUEUE(wait, current); 672 672 673 673 set_current_state(TASK_INTERRUPTIBLE); 674 - while(!kthread_should_stop()) { 674 + while (!kthread_should_stop()) { 675 675 add_wait_queue(&cct->wait_queue, &wait); 676 676 677 677 spin_lock_irq(&cct->task_lock); ··· 745 745 746 746 list_splice_init(&cct->cq_list, &list); 747 747 748 - while(!list_empty(&list)) { 748 + while (!list_empty(&list)) { 749 749 cq = list_entry(cct->cq_list.next, struct ehca_cq, entry); 750 750 751 751 list_del(&cq->entry); ··· 768 768 case CPU_UP_PREPARE: 769 769 case CPU_UP_PREPARE_FROZEN: 770 770 ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu); 771 - 
if(!create_comp_task(pool, cpu)) { 771 + if (!create_comp_task(pool, cpu)) { 772 772 ehca_gen_err("Can't create comp_task for cpu: %x", cpu); 773 773 return NOTIFY_BAD; 774 774 } ··· 838 838 839 839 #ifdef CONFIG_HOTPLUG_CPU 840 840 comp_pool_callback_nb.notifier_call = comp_pool_callback; 841 - comp_pool_callback_nb.priority =0; 841 + comp_pool_callback_nb.priority = 0; 842 842 register_cpu_notifier(&comp_pool_callback_nb); 843 843 #endif 844 844
+4 -3
drivers/infiniband/hw/ehca/ehca_iverbs.h
··· 81 81 int num_phys_buf, 82 82 int mr_access_flags, u64 *iova_start); 83 83 84 - struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt, 85 - int mr_access_flags, struct ib_udata *udata); 84 + struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, 85 + u64 virt, int mr_access_flags, 86 + struct ib_udata *udata); 86 87 87 88 int ehca_rereg_phys_mr(struct ib_mr *mr, 88 89 int mr_rereg_mask, ··· 193 192 void *ehca_alloc_fw_ctrlblock(gfp_t flags); 194 193 void ehca_free_fw_ctrlblock(void *ptr); 195 194 #else 196 - #define ehca_alloc_fw_ctrlblock(flags) ((void *) get_zeroed_page(flags)) 195 + #define ehca_alloc_fw_ctrlblock(flags) ((void *)get_zeroed_page(flags)) 197 196 #define ehca_free_fw_ctrlblock(ptr) free_page((unsigned long)(ptr)) 198 197 #endif 199 198
+12 -9
drivers/infiniband/hw/ehca/ehca_main.c
··· 107 107 static struct timer_list poll_eqs_timer; 108 108 109 109 #ifdef CONFIG_PPC_64K_PAGES 110 - static struct kmem_cache *ctblk_cache = NULL; 110 + static struct kmem_cache *ctblk_cache; 111 111 112 112 void *ehca_alloc_fw_ctrlblock(gfp_t flags) 113 113 { ··· 200 200 #endif 201 201 } 202 202 203 - #define EHCA_HCAAVER EHCA_BMASK_IBM(32,39) 204 - #define EHCA_REVID EHCA_BMASK_IBM(40,63) 203 + #define EHCA_HCAAVER EHCA_BMASK_IBM(32, 39) 204 + #define EHCA_REVID EHCA_BMASK_IBM(40, 63) 205 205 206 206 static struct cap_descr { 207 207 u64 mask; ··· 295 295 if (EHCA_BMASK_GET(hca_cap_descr[i].mask, shca->hca_cap)) 296 296 ehca_gen_dbg(" %s", hca_cap_descr[i].descr); 297 297 298 - port = (struct hipz_query_port *) rblock; 298 + port = (struct hipz_query_port *)rblock; 299 299 h_ret = hipz_h_query_port(shca->ipz_hca_handle, 1, port); 300 300 if (h_ret != H_SUCCESS) { 301 301 ehca_gen_err("Cannot query port properties. h_ret=%lx", ··· 444 444 return -EPERM; 445 445 } 446 446 447 - ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void*)(-1), 10, 0); 447 + ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void *)(-1), 10, 0); 448 448 if (IS_ERR(ibcq)) { 449 449 ehca_err(&shca->ib_device, "Cannot create AQP1 CQ."); 450 450 return PTR_ERR(ibcq); ··· 671 671 } 672 672 673 673 /* create internal protection domain */ 674 - ibpd = ehca_alloc_pd(&shca->ib_device, (void*)(-1), NULL); 674 + ibpd = ehca_alloc_pd(&shca->ib_device, (void *)(-1), NULL); 675 675 if (IS_ERR(ibpd)) { 676 676 ehca_err(&shca->ib_device, "Cannot create internal PD."); 677 677 ret = PTR_ERR(ibpd); ··· 868 868 printk(KERN_INFO "eHCA Infiniband Device Driver " 869 869 "(Rel.: SVNEHCA_0023)\n"); 870 870 871 - if ((ret = ehca_create_comp_pool())) { 871 + ret = ehca_create_comp_pool(); 872 + if (ret) { 872 873 ehca_gen_err("Cannot create comp pool."); 873 874 return ret; 874 875 } 875 876 876 - if ((ret = ehca_create_slab_caches())) { 877 + ret = ehca_create_slab_caches(); 878 + if (ret) { 877 879 
ehca_gen_err("Cannot create SLAB caches"); 878 880 ret = -ENOMEM; 879 881 goto module_init1; 880 882 } 881 883 882 - if ((ret = ibmebus_register_driver(&ehca_driver))) { 884 + ret = ibmebus_register_driver(&ehca_driver); 885 + if (ret) { 883 886 ehca_gen_err("Cannot register eHCA device driver"); 884 887 ret = -EINVAL; 885 888 goto module_init2;
+30 -29
drivers/infiniband/hw/ehca/ehca_mrmw.c
··· 61 61 struct ehca_mr *me; 62 62 63 63 me = kmem_cache_zalloc(mr_cache, GFP_KERNEL); 64 - if (me) { 64 + if (me) 65 65 spin_lock_init(&me->mrlock); 66 - } else 66 + else 67 67 ehca_gen_err("alloc failed"); 68 68 69 69 return me; ··· 79 79 struct ehca_mw *me; 80 80 81 81 me = kmem_cache_zalloc(mw_cache, GFP_KERNEL); 82 - if (me) { 82 + if (me) 83 83 spin_lock_init(&me->mwlock); 84 - } else 84 + else 85 85 ehca_gen_err("alloc failed"); 86 86 87 87 return me; ··· 111 111 goto get_dma_mr_exit0; 112 112 } 113 113 114 - ret = ehca_reg_maxmr(shca, e_maxmr, (u64*)KERNELBASE, 114 + ret = ehca_reg_maxmr(shca, e_maxmr, (u64 *)KERNELBASE, 115 115 mr_access_flags, e_pd, 116 116 &e_maxmr->ib.ib_mr.lkey, 117 117 &e_maxmr->ib.ib_mr.rkey); ··· 246 246 247 247 /*----------------------------------------------------------------------*/ 248 248 249 - struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt, 250 - int mr_access_flags, struct ib_udata *udata) 249 + struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, 250 + u64 virt, int mr_access_flags, 251 + struct ib_udata *udata) 251 252 { 252 253 struct ib_mr *ib_mr; 253 254 struct ehca_mr *e_mr; ··· 296 295 e_mr->umem = ib_umem_get(pd->uobject->context, start, length, 297 296 mr_access_flags); 298 297 if (IS_ERR(e_mr->umem)) { 299 - ib_mr = (void *) e_mr->umem; 298 + ib_mr = (void *)e_mr->umem; 300 299 goto reg_user_mr_exit1; 301 300 } 302 301 ··· 323 322 (&e_mr->umem->chunk_list), 324 323 list); 325 324 326 - ret = ehca_reg_mr(shca, e_mr, (u64*) virt, length, mr_access_flags, e_pd, 327 - &pginfo, &e_mr->ib.ib_mr.lkey, &e_mr->ib.ib_mr.rkey); 325 + ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags, 326 + e_pd, &pginfo, &e_mr->ib.ib_mr.lkey, 327 + &e_mr->ib.ib_mr.rkey); 328 328 if (ret) { 329 329 ib_mr = ERR_PTR(ret); 330 330 goto reg_user_mr_exit2; ··· 422 420 goto rereg_phys_mr_exit0; 423 421 } 424 422 if (!phys_buf_array || num_phys_buf <= 0) { 425 - 
ehca_err(mr->device, "bad input values: mr_rereg_mask=%x" 423 + ehca_err(mr->device, "bad input values mr_rereg_mask=%x" 426 424 " phys_buf_array=%p num_phys_buf=%x", 427 425 mr_rereg_mask, phys_buf_array, num_phys_buf); 428 426 ret = -EINVAL; ··· 446 444 447 445 /* set requested values dependent on rereg request */ 448 446 spin_lock_irqsave(&e_mr->mrlock, sl_flags); 449 - new_start = e_mr->start; /* new == old address */ 450 - new_size = e_mr->size; /* new == old length */ 451 - new_acl = e_mr->acl; /* new == old access control */ 452 - new_pd = container_of(mr->pd,struct ehca_pd,ib_pd); /*new == old PD*/ 447 + new_start = e_mr->start; 448 + new_size = e_mr->size; 449 + new_acl = e_mr->acl; 450 + new_pd = container_of(mr->pd, struct ehca_pd, ib_pd); 453 451 454 452 if (mr_rereg_mask & IB_MR_REREG_TRANS) { 455 453 new_start = iova_start; /* change address */ ··· 519 517 struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd); 520 518 u32 cur_pid = current->tgid; 521 519 unsigned long sl_flags; 522 - struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0}; 520 + struct ehca_mr_hipzout_parms hipzout; 523 521 524 522 if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context && 525 523 (my_pd->ownpid != cur_pid)) { ··· 631 629 struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd); 632 630 struct ehca_shca *shca = 633 631 container_of(pd->device, struct ehca_shca, ib_device); 634 - struct ehca_mw_hipzout_parms hipzout = {{0},0}; 632 + struct ehca_mw_hipzout_parms hipzout; 635 633 636 634 e_mw = ehca_mw_new(); 637 635 if (!e_mw) { ··· 828 826 EHCA_PAGESIZE); 829 827 pginfo.u.fmr.fmr_pgsize = e_fmr->fmr_page_size; 830 828 831 - ret = ehca_rereg_mr(shca, e_fmr, (u64*)iova, 829 + ret = ehca_rereg_mr(shca, e_fmr, (u64 *)iova, 832 830 list_len * e_fmr->fmr_page_size, 833 831 e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey); 834 832 if (ret) ··· 843 841 map_phys_fmr_exit0: 844 842 if (ret) 845 843 ehca_err(fmr->device, "ret=%x fmr=%p page_list=%p 
list_len=%x " 846 - "iova=%lx", 847 - ret, fmr, page_list, list_len, iova); 844 + "iova=%lx", ret, fmr, page_list, list_len, iova); 848 845 return ret; 849 846 } /* end ehca_map_phys_fmr() */ 850 847 ··· 961 960 int ret; 962 961 u64 h_ret; 963 962 u32 hipz_acl; 964 - struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0}; 963 + struct ehca_mr_hipzout_parms hipzout; 965 964 966 965 ehca_mrmw_map_acl(acl, &hipz_acl); 967 966 ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl); 968 967 if (ehca_use_hp_mr == 1) 969 - hipz_acl |= 0x00000001; 968 + hipz_acl |= 0x00000001; 970 969 971 970 h_ret = hipz_h_alloc_resource_mr(shca->ipz_hca_handle, e_mr, 972 971 (u64)iova_start, size, hipz_acl, ··· 1128 1127 u64 *kpage; 1129 1128 u64 rpage; 1130 1129 struct ehca_mr_pginfo pginfo_save; 1131 - struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0}; 1130 + struct ehca_mr_hipzout_parms hipzout; 1132 1131 1133 1132 ehca_mrmw_map_acl(acl, &hipz_acl); 1134 1133 ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl); ··· 1168 1167 "(Rereg1), h_ret=%lx e_mr=%p", h_ret, e_mr); 1169 1168 *pginfo = pginfo_save; 1170 1169 ret = -EAGAIN; 1171 - } else if ((u64*)hipzout.vaddr != iova_start) { 1170 + } else if ((u64 *)hipzout.vaddr != iova_start) { 1172 1171 ehca_err(&shca->ib_device, "PHYP changed iova_start in " 1173 1172 "rereg_pmr, iova_start=%p iova_start_out=%lx e_mr=%p " 1174 1173 "mr_handle=%lx lkey=%x lkey_out=%x", iova_start, ··· 1306 1305 struct ehca_mr save_fmr; 1307 1306 u32 tmp_lkey, tmp_rkey; 1308 1307 struct ehca_mr_pginfo pginfo; 1309 - struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0}; 1308 + struct ehca_mr_hipzout_parms hipzout; 1310 1309 struct ehca_mr save_mr; 1311 1310 1312 1311 if (e_fmr->fmr_max_pages <= MAX_RPAGES) { ··· 1398 1397 int ret = 0; 1399 1398 u64 h_ret; 1400 1399 u32 hipz_acl; 1401 - struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0}; 1400 + struct ehca_mr_hipzout_parms hipzout; 1402 1401 1403 1402 ehca_mrmw_map_acl(acl, &hipz_acl); 1404 1403 
ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl); ··· 1463 1462 1464 1463 /* register internal max-MR on HCA */ 1465 1464 size_maxmr = (u64)high_memory - PAGE_OFFSET; 1466 - iova_start = (u64*)KERNELBASE; 1465 + iova_start = (u64 *)KERNELBASE; 1467 1466 ib_pbuf.addr = 0; 1468 1467 ib_pbuf.size = size_maxmr; 1469 1468 num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr, ··· 1520 1519 u64 h_ret; 1521 1520 struct ehca_mr *e_origmr = shca->maxmr; 1522 1521 u32 hipz_acl; 1523 - struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0}; 1522 + struct ehca_mr_hipzout_parms hipzout; 1524 1523 1525 1524 ehca_mrmw_map_acl(acl, &hipz_acl); 1526 1525 ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl); ··· 1866 1865 { 1867 1866 /* a MR is treated as max-MR only if it fits following: */ 1868 1867 if ((size == ((u64)high_memory - PAGE_OFFSET)) && 1869 - (iova_start == (void*)KERNELBASE)) { 1868 + (iova_start == (void *)KERNELBASE)) { 1870 1869 ehca_gen_dbg("this is a max-MR"); 1871 1870 return 1; 1872 1871 } else
+1 -6
drivers/infiniband/hw/ehca/ehca_mrmw.h
··· 101 101 u64 *page_list, 102 102 int list_len); 103 103 104 - int ehca_set_pagebuf(struct ehca_mr *e_mr, 105 - struct ehca_mr_pginfo *pginfo, 104 + int ehca_set_pagebuf(struct ehca_mr_pginfo *pginfo, 106 105 u32 number, 107 106 u64 *kpage); 108 - 109 - int ehca_set_pagebuf_1(struct ehca_mr *e_mr, 110 - struct ehca_mr_pginfo *pginfo, 111 - u64 *rpage); 112 107 113 108 int ehca_mr_is_maxmr(u64 size, 114 109 u64 *iova_start);
+11 -11
drivers/infiniband/hw/ehca/ehca_qes.h
··· 53 53 u32 length; 54 54 }; 55 55 56 - #define GRH_FLAG_MASK EHCA_BMASK_IBM(7,7) 57 - #define GRH_IPVERSION_MASK EHCA_BMASK_IBM(0,3) 58 - #define GRH_TCLASS_MASK EHCA_BMASK_IBM(4,12) 59 - #define GRH_FLOWLABEL_MASK EHCA_BMASK_IBM(13,31) 60 - #define GRH_PAYLEN_MASK EHCA_BMASK_IBM(32,47) 61 - #define GRH_NEXTHEADER_MASK EHCA_BMASK_IBM(48,55) 62 - #define GRH_HOPLIMIT_MASK EHCA_BMASK_IBM(56,63) 56 + #define GRH_FLAG_MASK EHCA_BMASK_IBM( 7, 7) 57 + #define GRH_IPVERSION_MASK EHCA_BMASK_IBM( 0, 3) 58 + #define GRH_TCLASS_MASK EHCA_BMASK_IBM( 4, 12) 59 + #define GRH_FLOWLABEL_MASK EHCA_BMASK_IBM(13, 31) 60 + #define GRH_PAYLEN_MASK EHCA_BMASK_IBM(32, 47) 61 + #define GRH_NEXTHEADER_MASK EHCA_BMASK_IBM(48, 55) 62 + #define GRH_HOPLIMIT_MASK EHCA_BMASK_IBM(56, 63) 63 63 64 64 /* 65 65 * Unreliable Datagram Address Vector Format ··· 206 206 207 207 }; 208 208 209 - #define WC_SEND_RECEIVE EHCA_BMASK_IBM(0,0) 210 - #define WC_IMM_DATA EHCA_BMASK_IBM(1,1) 211 - #define WC_GRH_PRESENT EHCA_BMASK_IBM(2,2) 212 - #define WC_SE_BIT EHCA_BMASK_IBM(3,3) 209 + #define WC_SEND_RECEIVE EHCA_BMASK_IBM(0, 0) 210 + #define WC_IMM_DATA EHCA_BMASK_IBM(1, 1) 211 + #define WC_GRH_PRESENT EHCA_BMASK_IBM(2, 2) 212 + #define WC_SE_BIT EHCA_BMASK_IBM(3, 3) 213 213 #define WC_STATUS_ERROR_BIT 0x80000000 214 214 #define WC_STATUS_REMOTE_ERROR_FLAGS 0x0000F800 215 215 #define WC_STATUS_PURGE_BIT 0x10
+20 -19
drivers/infiniband/hw/ehca/ehca_qp.c
··· 602 602 /* UD circumvention */ 603 603 parms.act_nr_send_sges -= 2; 604 604 parms.act_nr_recv_sges -= 2; 605 - swqe_size = offsetof(struct ehca_wqe, 606 - u.ud_av.sg_list[parms.act_nr_send_sges]); 607 - rwqe_size = offsetof(struct ehca_wqe, 608 - u.ud_av.sg_list[parms.act_nr_recv_sges]); 605 + swqe_size = offsetof(struct ehca_wqe, u.ud_av.sg_list[ 606 + parms.act_nr_send_sges]); 607 + rwqe_size = offsetof(struct ehca_wqe, u.ud_av.sg_list[ 608 + parms.act_nr_recv_sges]); 609 609 } 610 610 611 611 if (IB_QPT_GSI == qp_type || IB_QPT_SMI == qp_type) { ··· 690 690 if (my_qp->send_cq) { 691 691 ret = ehca_cq_assign_qp(my_qp->send_cq, my_qp); 692 692 if (ret) { 693 - ehca_err(pd->device, "Couldn't assign qp to send_cq ret=%x", 694 - ret); 693 + ehca_err(pd->device, 694 + "Couldn't assign qp to send_cq ret=%x", ret); 695 695 goto create_qp_exit4; 696 696 } 697 697 } ··· 749 749 struct ehca_qp *ret; 750 750 751 751 ret = internal_create_qp(pd, qp_init_attr, NULL, udata, 0); 752 - return IS_ERR(ret) ? (struct ib_qp *) ret : &ret->ib_qp; 752 + return IS_ERR(ret) ? 
(struct ib_qp *)ret : &ret->ib_qp; 753 753 } 754 754 755 755 int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp, ··· 780 780 781 781 my_qp = internal_create_qp(pd, &qp_init_attr, srq_init_attr, udata, 1); 782 782 if (IS_ERR(my_qp)) 783 - return (struct ib_srq *) my_qp; 783 + return (struct ib_srq *)my_qp; 784 784 785 785 /* copy back return values */ 786 786 srq_init_attr->attr.max_wr = qp_init_attr.cap.max_recv_wr; ··· 875 875 my_qp, qp_num, h_ret); 876 876 return ehca2ib_return_code(h_ret); 877 877 } 878 - bad_send_wqe_p = (void*)((u64)bad_send_wqe_p & (~(1L<<63))); 878 + bad_send_wqe_p = (void *)((u64)bad_send_wqe_p & (~(1L << 63))); 879 879 ehca_dbg(&shca->ib_device, "qp_num=%x bad_send_wqe_p=%p", 880 880 qp_num, bad_send_wqe_p); 881 881 /* convert wqe pointer to vadr */ ··· 890 890 } 891 891 892 892 /* loop sets wqe's purge bit */ 893 - wqe = (struct ehca_wqe*)ipz_qeit_calc(squeue, q_ofs); 893 + wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs); 894 894 *bad_wqe_cnt = 0; 895 895 while (wqe->optype != 0xff && wqe->wqef != 0xff) { 896 896 if (ehca_debug_level) ··· 898 898 wqe->nr_of_data_seg = 0; /* suppress data access */ 899 899 wqe->wqef = WQEF_PURGE; /* WQE to be purged */ 900 900 q_ofs = ipz_queue_advance_offset(squeue, q_ofs); 901 - wqe = (struct ehca_wqe*)ipz_qeit_calc(squeue, q_ofs); 901 + wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs); 902 902 *bad_wqe_cnt = (*bad_wqe_cnt)+1; 903 903 } 904 904 /* ··· 1003 1003 goto modify_qp_exit1; 1004 1004 } 1005 1005 1006 - ehca_dbg(ibqp->device,"ehca_qp=%p qp_num=%x current qp_state=%x " 1006 + ehca_dbg(ibqp->device, "ehca_qp=%p qp_num=%x current qp_state=%x " 1007 1007 "new qp_state=%x attribute_mask=%x", 1008 1008 my_qp, ibqp->qp_num, qp_cur_state, attr->qp_state, attr_mask); 1009 1009 ··· 1019 1019 goto modify_qp_exit1; 1020 1020 } 1021 1021 1022 - if ((mqpcb->qp_state = ib2ehca_qp_state(qp_new_state))) 1022 + mqpcb->qp_state = ib2ehca_qp_state(qp_new_state); 1023 + if 
(mqpcb->qp_state) 1023 1024 update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1); 1024 1025 else { 1025 1026 ret = -EINVAL; ··· 1078 1077 spin_lock_irqsave(&my_qp->spinlock_s, flags); 1079 1078 squeue_locked = 1; 1080 1079 /* mark next free wqe */ 1081 - wqe = (struct ehca_wqe*) 1080 + wqe = (struct ehca_wqe *) 1082 1081 ipz_qeit_get(&my_qp->ipz_squeue); 1083 1082 wqe->optype = wqe->wqef = 0xff; 1084 1083 ehca_dbg(ibqp->device, "qp_num=%x next_free_wqe=%p", ··· 1313 1312 if (h_ret != H_SUCCESS) { 1314 1313 ret = ehca2ib_return_code(h_ret); 1315 1314 ehca_err(ibqp->device, "hipz_h_modify_qp() failed rc=%lx " 1316 - "ehca_qp=%p qp_num=%x",h_ret, my_qp, ibqp->qp_num); 1315 + "ehca_qp=%p qp_num=%x", h_ret, my_qp, ibqp->qp_num); 1317 1316 goto modify_qp_exit2; 1318 1317 } 1319 1318 ··· 1412 1411 } 1413 1412 1414 1413 if (qp_attr_mask & QP_ATTR_QUERY_NOT_SUPPORTED) { 1415 - ehca_err(qp->device,"Invalid attribute mask " 1414 + ehca_err(qp->device, "Invalid attribute mask " 1416 1415 "ehca_qp=%p qp_num=%x qp_attr_mask=%x ", 1417 1416 my_qp, qp->qp_num, qp_attr_mask); 1418 1417 return -EINVAL; ··· 1420 1419 1421 1420 qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL); 1422 1421 if (!qpcb) { 1423 - ehca_err(qp->device,"Out of memory for qpcb " 1422 + ehca_err(qp->device, "Out of memory for qpcb " 1424 1423 "ehca_qp=%p qp_num=%x", my_qp, qp->qp_num); 1425 1424 return -ENOMEM; 1426 1425 } ··· 1432 1431 1433 1432 if (h_ret != H_SUCCESS) { 1434 1433 ret = ehca2ib_return_code(h_ret); 1435 - ehca_err(qp->device,"hipz_h_query_qp() failed " 1434 + ehca_err(qp->device, "hipz_h_query_qp() failed " 1436 1435 "ehca_qp=%p qp_num=%x h_ret=%lx", 1437 1436 my_qp, qp->qp_num, h_ret); 1438 1437 goto query_qp_exit1; ··· 1443 1442 1444 1443 if (qp_attr->cur_qp_state == -EINVAL) { 1445 1444 ret = -EINVAL; 1446 - ehca_err(qp->device,"Got invalid ehca_qp_state=%x " 1445 + ehca_err(qp->device, "Got invalid ehca_qp_state=%x " 1447 1446 "ehca_qp=%p qp_num=%x", 1448 1447 qpcb->qp_state, my_qp, qp->qp_num); 
1449 1448 goto query_qp_exit1;
+9 -6
drivers/infiniband/hw/ehca/ehca_reqs.c
··· 79 79 } 80 80 81 81 if (ehca_debug_level) { 82 - ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p", ipz_rqueue); 82 + ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p", 83 + ipz_rqueue); 83 84 ehca_dmp( wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe"); 84 85 } 85 86 ··· 100 99 struct ib_mad_hdr *mad_hdr = send_wr->wr.ud.mad_hdr; 101 100 struct ib_sge *sge = send_wr->sg_list; 102 101 ehca_gen_dbg("send_wr#%x wr_id=%lx num_sge=%x " 103 - "send_flags=%x opcode=%x",idx, send_wr->wr_id, 102 + "send_flags=%x opcode=%x", idx, send_wr->wr_id, 104 103 send_wr->num_sge, send_wr->send_flags, 105 104 send_wr->opcode); 106 105 if (mad_hdr) { ··· 117 116 mad_hdr->attr_mod); 118 117 } 119 118 for (j = 0; j < send_wr->num_sge; j++) { 120 - u8 *data = (u8 *) abs_to_virt(sge->addr); 119 + u8 *data = (u8 *)abs_to_virt(sge->addr); 121 120 ehca_gen_dbg("send_wr#%x sge#%x addr=%p length=%x " 122 121 "lkey=%x", 123 122 idx, j, data, sge->length, sge->lkey); ··· 535 534 536 535 cqe_count++; 537 536 if (unlikely(cqe->status & WC_STATUS_PURGE_BIT)) { 538 - struct ehca_qp *qp=ehca_cq_get_qp(my_cq, cqe->local_qp_number); 537 + struct ehca_qp *qp; 539 538 int purgeflag; 540 539 unsigned long flags; 540 + 541 + qp = ehca_cq_get_qp(my_cq, cqe->local_qp_number); 541 542 if (!qp) { 542 543 ehca_err(cq->device, "cq_num=%x qp_num=%x " 543 544 "could not find qp -> ignore cqe", ··· 554 551 spin_unlock_irqrestore(&qp->spinlock_s, flags); 555 552 556 553 if (purgeflag) { 557 - ehca_dbg(cq->device, "Got CQE with purged bit qp_num=%x " 558 - "src_qp=%x", 554 + ehca_dbg(cq->device, 555 + "Got CQE with purged bit qp_num=%x src_qp=%x", 559 556 cqe->local_qp_number, cqe->remote_qp_number); 560 557 if (ehca_debug_level) 561 558 ehca_dmp(cqe, 64, "qp_num=%x src_qp=%x",
+14 -14
drivers/infiniband/hw/ehca/ehca_tools.h
··· 93 93 #define ehca_gen_dbg(format, arg...) \ 94 94 do { \ 95 95 if (unlikely(ehca_debug_level)) \ 96 - printk(KERN_DEBUG "PU%04x EHCA_DBG:%s " format "\n",\ 96 + printk(KERN_DEBUG "PU%04x EHCA_DBG:%s " format "\n", \ 97 97 get_paca()->paca_index, __FUNCTION__, ## arg); \ 98 98 } while (0) 99 99 100 100 #define ehca_gen_warn(format, arg...) \ 101 101 do { \ 102 102 if (unlikely(ehca_debug_level)) \ 103 - printk(KERN_INFO "PU%04x EHCA_WARN:%s " format "\n",\ 103 + printk(KERN_INFO "PU%04x EHCA_WARN:%s " format "\n", \ 104 104 get_paca()->paca_index, __FUNCTION__, ## arg); \ 105 105 } while (0) 106 106 ··· 114 114 * <format string> adr=X ofs=Y <8 bytes hex> <8 bytes hex> 115 115 */ 116 116 #define ehca_dmp(adr, len, format, args...) \ 117 - do { \ 118 - unsigned int x; \ 117 + do { \ 118 + unsigned int x; \ 119 119 unsigned int l = (unsigned int)(len); \ 120 - unsigned char *deb = (unsigned char*)(adr); \ 120 + unsigned char *deb = (unsigned char *)(adr); \ 121 121 for (x = 0; x < l; x += 16) { \ 122 - printk("EHCA_DMP:%s " format \ 122 + printk(KERN_INFO "EHCA_DMP:%s " format \ 123 123 " adr=%p ofs=%04x %016lx %016lx\n", \ 124 124 __FUNCTION__, ##args, deb, x, \ 125 125 *((u64 *)&deb[0]), *((u64 *)&deb[8])); \ ··· 128 128 } while (0) 129 129 130 130 /* define a bitmask, little endian version */ 131 - #define EHCA_BMASK(pos,length) (((pos)<<16)+(length)) 131 + #define EHCA_BMASK(pos, length) (((pos) << 16) + (length)) 132 132 133 133 /* define a bitmask, the ibm way... 
*/ 134 - #define EHCA_BMASK_IBM(from,to) (((63-to)<<16)+((to)-(from)+1)) 134 + #define EHCA_BMASK_IBM(from, to) (((63 - to) << 16) + ((to) - (from) + 1)) 135 135 136 136 /* internal function, don't use */ 137 - #define EHCA_BMASK_SHIFTPOS(mask) (((mask)>>16)&0xffff) 137 + #define EHCA_BMASK_SHIFTPOS(mask) (((mask) >> 16) & 0xffff) 138 138 139 139 /* internal function, don't use */ 140 - #define EHCA_BMASK_MASK(mask) (0xffffffffffffffffULL >> ((64-(mask))&0xffff)) 140 + #define EHCA_BMASK_MASK(mask) (~0ULL >> ((64 - (mask)) & 0xffff)) 141 141 142 142 /** 143 143 * EHCA_BMASK_SET - return value shifted and masked by mask ··· 145 145 * variable&=~EHCA_BMASK_SET(MY_MASK,-1) clears the bits from the mask 146 146 * in variable 147 147 */ 148 - #define EHCA_BMASK_SET(mask,value) \ 149 - ((EHCA_BMASK_MASK(mask) & ((u64)(value)))<<EHCA_BMASK_SHIFTPOS(mask)) 148 + #define EHCA_BMASK_SET(mask, value) \ 149 + ((EHCA_BMASK_MASK(mask) & ((u64)(value))) << EHCA_BMASK_SHIFTPOS(mask)) 150 150 151 151 /** 152 152 * EHCA_BMASK_GET - extract a parameter from value by mask 153 153 */ 154 - #define EHCA_BMASK_GET(mask,value) \ 155 - (EHCA_BMASK_MASK(mask)& (((u64)(value))>>EHCA_BMASK_SHIFTPOS(mask))) 154 + #define EHCA_BMASK_GET(mask, value) \ 155 + (EHCA_BMASK_MASK(mask) & (((u64)(value)) >> EHCA_BMASK_SHIFTPOS(mask))) 156 156 157 157 158 158 /* Converts ehca to ib return code */
+6 -4
drivers/infiniband/hw/ehca/ehca_uverbs.c
··· 70 70 71 71 static void ehca_mm_open(struct vm_area_struct *vma) 72 72 { 73 - u32 *count = (u32*)vma->vm_private_data; 73 + u32 *count = (u32 *)vma->vm_private_data; 74 74 if (!count) { 75 75 ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx", 76 76 vma->vm_start, vma->vm_end); ··· 86 86 87 87 static void ehca_mm_close(struct vm_area_struct *vma) 88 88 { 89 - u32 *count = (u32*)vma->vm_private_data; 89 + u32 *count = (u32 *)vma->vm_private_data; 90 90 if (!count) { 91 91 ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx", 92 92 vma->vm_start, vma->vm_end); ··· 215 215 case 2: /* qp rqueue_addr */ 216 216 ehca_dbg(qp->ib_qp.device, "qp_num=%x rqueue", 217 217 qp->ib_qp.qp_num); 218 - ret = ehca_mmap_queue(vma, &qp->ipz_rqueue, &qp->mm_count_rqueue); 218 + ret = ehca_mmap_queue(vma, &qp->ipz_rqueue, 219 + &qp->mm_count_rqueue); 219 220 if (unlikely(ret)) { 220 221 ehca_err(qp->ib_qp.device, 221 222 "ehca_mmap_queue(rq) failed rc=%x qp_num=%x", ··· 228 227 case 3: /* qp squeue_addr */ 229 228 ehca_dbg(qp->ib_qp.device, "qp_num=%x squeue", 230 229 qp->ib_qp.qp_num); 231 - ret = ehca_mmap_queue(vma, &qp->ipz_squeue, &qp->mm_count_squeue); 230 + ret = ehca_mmap_queue(vma, &qp->ipz_squeue, 231 + &qp->mm_count_squeue); 232 232 if (unlikely(ret)) { 233 233 ehca_err(qp->ib_qp.device, 234 234 "ehca_mmap_queue(sq) failed rc=%x qp_num=%x",
+4 -4
drivers/infiniband/hw/ehca/hcp_if.c
··· 501 501 return H_PARAMETER; 502 502 } 503 503 504 - return hipz_h_register_rpage(adapter_handle,pagesize,queue_type, 505 - qp_handle.handle,logical_address_of_page, 504 + return hipz_h_register_rpage(adapter_handle, pagesize, queue_type, 505 + qp_handle.handle, logical_address_of_page, 506 506 count); 507 507 } 508 508 ··· 522 522 qp_handle.handle, /* r6 */ 523 523 0, 0, 0, 0, 0, 0); 524 524 if (log_addr_next_sq_wqe2processed) 525 - *log_addr_next_sq_wqe2processed = (void*)outs[0]; 525 + *log_addr_next_sq_wqe2processed = (void *)outs[0]; 526 526 if (log_addr_next_rq_wqe2processed) 527 - *log_addr_next_rq_wqe2processed = (void*)outs[1]; 527 + *log_addr_next_rq_wqe2processed = (void *)outs[1]; 528 528 529 529 return ret; 530 530 }
+1 -1
drivers/infiniband/hw/ehca/hcp_phyp.c
··· 50 50 51 51 int hcall_unmap_page(u64 mapaddr) 52 52 { 53 - iounmap((volatile void __iomem*)mapaddr); 53 + iounmap((volatile void __iomem *) mapaddr); 54 54 return 0; 55 55 } 56 56
+2 -2
drivers/infiniband/hw/ehca/hipz_fns_core.h
··· 53 53 #define hipz_galpa_load_cq(gal, offset) \ 54 54 hipz_galpa_load(gal, CQTEMM_OFFSET(offset)) 55 55 56 - #define hipz_galpa_store_qp(gal,offset, value) \ 56 + #define hipz_galpa_store_qp(gal, offset, value) \ 57 57 hipz_galpa_store(gal, QPTEMM_OFFSET(offset), value) 58 58 #define hipz_galpa_load_qp(gal, offset) \ 59 - hipz_galpa_load(gal,QPTEMM_OFFSET(offset)) 59 + hipz_galpa_load(gal, QPTEMM_OFFSET(offset)) 60 60 61 61 static inline void hipz_update_sqa(struct ehca_qp *qp, u16 nr_wqes) 62 62 {
+12 -12
drivers/infiniband/hw/ehca/hipz_hw.h
··· 161 161 /* 0x1000 */ 162 162 }; 163 163 164 - #define QPX_SQADDER EHCA_BMASK_IBM(48,63) 165 - #define QPX_RQADDER EHCA_BMASK_IBM(48,63) 166 - #define QPX_AAELOG_RESET_SRQ_LIMIT EHCA_BMASK_IBM(3,3) 164 + #define QPX_SQADDER EHCA_BMASK_IBM(48, 63) 165 + #define QPX_RQADDER EHCA_BMASK_IBM(48, 63) 166 + #define QPX_AAELOG_RESET_SRQ_LIMIT EHCA_BMASK_IBM(3, 3) 167 167 168 - #define QPTEMM_OFFSET(x) offsetof(struct hipz_qptemm,x) 168 + #define QPTEMM_OFFSET(x) offsetof(struct hipz_qptemm, x) 169 169 170 170 /* MRMWPT Entry Memory Map */ 171 171 struct hipz_mrmwmm { ··· 187 187 188 188 }; 189 189 190 - #define MRMWMM_OFFSET(x) offsetof(struct hipz_mrmwmm,x) 190 + #define MRMWMM_OFFSET(x) offsetof(struct hipz_mrmwmm, x) 191 191 192 192 struct hipz_qpedmm { 193 193 /* 0x00 */ ··· 238 238 u64 qpedx_rrva3; 239 239 }; 240 240 241 - #define QPEDMM_OFFSET(x) offsetof(struct hipz_qpedmm,x) 241 + #define QPEDMM_OFFSET(x) offsetof(struct hipz_qpedmm, x) 242 242 243 243 /* CQ Table Entry Memory Map */ 244 244 struct hipz_cqtemm { ··· 263 263 /* 0x1000 */ 264 264 }; 265 265 266 - #define CQX_FEC_CQE_CNT EHCA_BMASK_IBM(32,63) 267 - #define CQX_FECADDER EHCA_BMASK_IBM(32,63) 268 - #define CQX_N0_GENERATE_SOLICITED_COMP_EVENT EHCA_BMASK_IBM(0,0) 269 - #define CQX_N1_GENERATE_COMP_EVENT EHCA_BMASK_IBM(0,0) 266 + #define CQX_FEC_CQE_CNT EHCA_BMASK_IBM(32, 63) 267 + #define CQX_FECADDER EHCA_BMASK_IBM(32, 63) 268 + #define CQX_N0_GENERATE_SOLICITED_COMP_EVENT EHCA_BMASK_IBM(0, 0) 269 + #define CQX_N1_GENERATE_COMP_EVENT EHCA_BMASK_IBM(0, 0) 270 270 271 - #define CQTEMM_OFFSET(x) offsetof(struct hipz_cqtemm,x) 271 + #define CQTEMM_OFFSET(x) offsetof(struct hipz_cqtemm, x) 272 272 273 273 /* EQ Table Entry Memory Map */ 274 274 struct hipz_eqtemm { ··· 293 293 294 294 }; 295 295 296 - #define EQTEMM_OFFSET(x) offsetof(struct hipz_eqtemm,x) 296 + #define EQTEMM_OFFSET(x) offsetof(struct hipz_eqtemm, x) 297 297 298 298 /* access control defines for MR/MW */ 299 299 #define 
HIPZ_ACCESSCTRL_L_WRITE 0x00800000
+1 -1
drivers/infiniband/hw/ehca/ipz_pt_fn.c
··· 114 114 */ 115 115 f = 0; 116 116 while (f < nr_of_pages) { 117 - u8 *kpage = (u8*)get_zeroed_page(GFP_KERNEL); 117 + u8 *kpage = (u8 *)get_zeroed_page(GFP_KERNEL); 118 118 int k; 119 119 if (!kpage) 120 120 goto ipz_queue_ctor_exit0; /*NOMEM*/
+2 -2
drivers/infiniband/hw/ehca/ipz_pt_fn.h
··· 240 240 static inline void *ipz_eqit_eq_get_inc_valid(struct ipz_queue *queue) 241 241 { 242 242 void *ret = ipz_qeit_get(queue); 243 - u32 qe = *(u8 *) ret; 243 + u32 qe = *(u8 *)ret; 244 244 if ((qe >> 7) != (queue->toggle_state & 1)) 245 245 return NULL; 246 246 ipz_qeit_eq_get_inc(queue); /* this is a good one */ ··· 250 250 static inline void *ipz_eqit_eq_peek_valid(struct ipz_queue *queue) 251 251 { 252 252 void *ret = ipz_qeit_get(queue); 253 - u32 qe = *(u8 *) ret; 253 + u32 qe = *(u8 *)ret; 254 254 if ((qe >> 7) != (queue->toggle_state & 1)) 255 255 return NULL; 256 256 return ret;