Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

RDMA/core: Annotate timeout as unsigned long

The ucma users supply the timeout in u32 format, which means that any
number with the most significant bit set will be converted to a negative
value by the various rdma_*, cma_* and sa_query functions, which treat
the timeout as an int.

At the lowest level, the timeout is converted back to unsigned long.
Remove this ambiguous conversion by updating all function signatures to
receive unsigned long.

Reported-by: Noa Osherovich <noaos@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>

authored by

Leon Romanovsky and committed by
Doug Ledford
dbace111 9549c2bd

+26 -23
+1 -1
drivers/infiniband/core/addr.c
··· 659 659 } 660 660 661 661 int rdma_resolve_ip(struct sockaddr *src_addr, const struct sockaddr *dst_addr, 662 - struct rdma_dev_addr *addr, int timeout_ms, 662 + struct rdma_dev_addr *addr, unsigned long timeout_ms, 663 663 void (*callback)(int status, struct sockaddr *src_addr, 664 664 struct rdma_dev_addr *addr, void *context), 665 665 bool resolve_by_gid_attr, void *context)
+6 -5
drivers/infiniband/core/cma.c
··· 2510 2510 queue_work(cma_wq, &work->work); 2511 2511 } 2512 2512 2513 - static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms, 2514 - struct cma_work *work) 2513 + static int cma_query_ib_route(struct rdma_id_private *id_priv, 2514 + unsigned long timeout_ms, struct cma_work *work) 2515 2515 { 2516 2516 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 2517 2517 struct sa_path_rec path_rec; ··· 2629 2629 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; 2630 2630 } 2631 2631 2632 - static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms) 2632 + static int cma_resolve_ib_route(struct rdma_id_private *id_priv, 2633 + unsigned long timeout_ms) 2633 2634 { 2634 2635 struct rdma_route *route = &id_priv->id.route; 2635 2636 struct cma_work *work; ··· 2853 2852 return ret; 2854 2853 } 2855 2854 2856 - int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms) 2855 + int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms) 2857 2856 { 2858 2857 struct rdma_id_private *id_priv; 2859 2858 int ret; ··· 3073 3072 } 3074 3073 3075 3074 int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, 3076 - const struct sockaddr *dst_addr, int timeout_ms) 3075 + const struct sockaddr *dst_addr, unsigned long timeout_ms) 3077 3076 { 3078 3077 struct rdma_id_private *id_priv; 3079 3078 int ret;
+1 -1
drivers/infiniband/core/mad.c
··· 2414 2414 } 2415 2415 2416 2416 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr, 2417 - int timeout_ms) 2417 + unsigned long timeout_ms) 2418 2418 { 2419 2419 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms); 2420 2420 wait_for_response(mad_send_wr);
+1 -1
drivers/infiniband/core/mad_priv.h
··· 221 221 void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr); 222 222 223 223 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr, 224 - int timeout_ms); 224 + unsigned long timeout_ms); 225 225 226 226 #endif /* __IB_MAD_PRIV_H__ */
+2 -2
drivers/infiniband/core/sa.h
··· 51 51 int ib_sa_mcmember_rec_query(struct ib_sa_client *client, 52 52 struct ib_device *device, u8 port_num, u8 method, 53 53 struct ib_sa_mcmember_rec *rec, 54 - ib_sa_comp_mask comp_mask, int timeout_ms, 55 - gfp_t gfp_mask, 54 + ib_sa_comp_mask comp_mask, 55 + unsigned long timeout_ms, gfp_t gfp_mask, 56 56 void (*callback)(int status, 57 57 struct ib_sa_mcmember_rec *resp, 58 58 void *context),
+7 -6
drivers/infiniband/core/sa_query.c
··· 1360 1360 spin_unlock_irqrestore(&tid_lock, flags); 1361 1361 } 1362 1362 1363 - static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask) 1363 + static int send_mad(struct ib_sa_query *query, unsigned long timeout_ms, 1364 + gfp_t gfp_mask) 1364 1365 { 1365 1366 bool preload = gfpflags_allow_blocking(gfp_mask); 1366 1367 unsigned long flags; ··· 1551 1550 struct ib_device *device, u8 port_num, 1552 1551 struct sa_path_rec *rec, 1553 1552 ib_sa_comp_mask comp_mask, 1554 - int timeout_ms, gfp_t gfp_mask, 1553 + unsigned long timeout_ms, gfp_t gfp_mask, 1555 1554 void (*callback)(int status, 1556 1555 struct sa_path_rec *resp, 1557 1556 void *context), ··· 1705 1704 struct ib_device *device, u8 port_num, u8 method, 1706 1705 struct ib_sa_service_rec *rec, 1707 1706 ib_sa_comp_mask comp_mask, 1708 - int timeout_ms, gfp_t gfp_mask, 1707 + unsigned long timeout_ms, gfp_t gfp_mask, 1709 1708 void (*callback)(int status, 1710 1709 struct ib_sa_service_rec *resp, 1711 1710 void *context), ··· 1802 1801 u8 method, 1803 1802 struct ib_sa_mcmember_rec *rec, 1804 1803 ib_sa_comp_mask comp_mask, 1805 - int timeout_ms, gfp_t gfp_mask, 1804 + unsigned long timeout_ms, gfp_t gfp_mask, 1806 1805 void (*callback)(int status, 1807 1806 struct ib_sa_mcmember_rec *resp, 1808 1807 void *context), ··· 1893 1892 struct ib_device *device, u8 port_num, 1894 1893 struct ib_sa_guidinfo_rec *rec, 1895 1894 ib_sa_comp_mask comp_mask, u8 method, 1896 - int timeout_ms, gfp_t gfp_mask, 1895 + unsigned long timeout_ms, gfp_t gfp_mask, 1897 1896 void (*callback)(int status, 1898 1897 struct ib_sa_guidinfo_rec *resp, 1899 1898 void *context), ··· 2060 2059 } 2061 2060 2062 2061 static int ib_sa_classport_info_rec_query(struct ib_sa_port *port, 2063 - int timeout_ms, 2062 + unsigned long timeout_ms, 2064 2063 void (*callback)(void *context), 2065 2064 void *context, 2066 2065 struct ib_sa_query **sa_query)
+1 -1
include/rdma/ib_addr.h
··· 99 99 * @context: User-specified context associated with the call. 100 100 */ 101 101 int rdma_resolve_ip(struct sockaddr *src_addr, const struct sockaddr *dst_addr, 102 - struct rdma_dev_addr *addr, int timeout_ms, 102 + struct rdma_dev_addr *addr, unsigned long timeout_ms, 103 103 void (*callback)(int status, struct sockaddr *src_addr, 104 104 struct rdma_dev_addr *addr, void *context), 105 105 bool resolve_by_gid_attr, void *context);
+1 -1
include/rdma/ib_cm.h
··· 583 583 struct sa_path_rec *path; 584 584 const struct ib_gid_attr *sgid_attr; 585 585 __be64 service_id; 586 - int timeout_ms; 586 + unsigned long timeout_ms; 587 587 const void *private_data; 588 588 u8 private_data_len; 589 589 u8 max_cm_retries;
+3 -3
include/rdma/ib_sa.h
··· 451 451 452 452 int ib_sa_path_rec_get(struct ib_sa_client *client, struct ib_device *device, 453 453 u8 port_num, struct sa_path_rec *rec, 454 - ib_sa_comp_mask comp_mask, int timeout_ms, 454 + ib_sa_comp_mask comp_mask, unsigned long timeout_ms, 455 455 gfp_t gfp_mask, 456 456 void (*callback)(int status, struct sa_path_rec *resp, 457 457 void *context), ··· 460 460 int ib_sa_service_rec_query(struct ib_sa_client *client, 461 461 struct ib_device *device, u8 port_num, u8 method, 462 462 struct ib_sa_service_rec *rec, 463 - ib_sa_comp_mask comp_mask, int timeout_ms, 463 + ib_sa_comp_mask comp_mask, unsigned long timeout_ms, 464 464 gfp_t gfp_mask, 465 465 void (*callback)(int status, 466 466 struct ib_sa_service_rec *resp, ··· 568 568 struct ib_device *device, u8 port_num, 569 569 struct ib_sa_guidinfo_rec *rec, 570 570 ib_sa_comp_mask comp_mask, u8 method, 571 - int timeout_ms, gfp_t gfp_mask, 571 + unsigned long timeout_ms, gfp_t gfp_mask, 572 572 void (*callback)(int status, 573 573 struct ib_sa_guidinfo_rec *resp, 574 574 void *context),
+3 -2
include/rdma/rdma_cm.h
··· 196 196 * @timeout_ms: Time to wait for resolution to complete. 197 197 */ 198 198 int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, 199 - const struct sockaddr *dst_addr, int timeout_ms); 199 + const struct sockaddr *dst_addr, 200 + unsigned long timeout_ms); 200 201 201 202 /** 202 203 * rdma_resolve_route - Resolve the RDMA address bound to the RDMA identifier ··· 207 206 * Users must have first called rdma_resolve_addr to resolve a dst_addr 208 207 * into an RDMA address before calling this routine. 209 208 */ 210 - int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms); 209 + int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms); 211 210 212 211 /** 213 212 * rdma_create_qp - Allocate a QP and associate it with the specified RDMA