Merge HEAD from master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband.git

+2732 -1424
-2
drivers/infiniband/core/Makefile
···
- EXTRA_CFLAGS += -Idrivers/infiniband/include
-
  obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o \
				ib_cm.o ib_umad.o ib_ucm.o
  obj-$(CONFIG_INFINIBAND_USER_VERBS) += ib_uverbs.o
+7 -6
drivers/infiniband/core/agent.c
···
  /*
-  * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
-  * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
-  * Copyright (c) 2004 Intel Corporation. All rights reserved.
-  * Copyright (c) 2004 Topspin Corporation. All rights reserved.
-  * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
+  * Copyright (c) 2004, 2005 Mellanox Technologies Ltd. All rights reserved.
+  * Copyright (c) 2004, 2005 Infinicon Corporation. All rights reserved.
+  * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved.
+  * Copyright (c) 2004, 2005 Topspin Corporation. All rights reserved.
+  * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
+  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
   *
   * This software is available to you under a choice of one of two
   * licenses. You may choose to be licensed under the terms of the GNU
···

  #include <asm/bug.h>

- #include <ib_smi.h>
+ #include <rdma/ib_smi.h>

  #include "smi.h"
  #include "agent_priv.h"
+5 -5
drivers/infiniband/core/agent_priv.h
···
  /*
-  * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
-  * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
-  * Copyright (c) 2004 Intel Corporation. All rights reserved.
-  * Copyright (c) 2004 Topspin Corporation. All rights reserved.
-  * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
+  * Copyright (c) 2004, 2005 Mellanox Technologies Ltd. All rights reserved.
+  * Copyright (c) 2004, 2005 Infinicon Corporation. All rights reserved.
+  * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved.
+  * Copyright (c) 2004, 2005 Topspin Corporation. All rights reserved.
+  * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
   *
   * This software is available to you under a choice of one of two
   * licenses. You may choose to be licensed under the terms of the GNU
+4 -2
drivers/infiniband/core/cache.c
···
  /*
   * Copyright (c) 2004 Topspin Communications. All rights reserved.
+  * Copyright (c) 2005 Intel Corporation. All rights reserved.
+  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+  * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
   *
   * This software is available to you under a choice of one of two
   * licenses. You may choose to be licensed under the terms of the GNU
···
   * $Id: cache.c 1349 2004-12-16 21:09:43Z roland $
   */

- #include <linux/version.h>
  #include <linux/module.h>
  #include <linux/errno.h>
  #include <linux/slab.h>

- #include <ib_cache.h>
+ #include <rdma/ib_cache.h>

  #include "core_priv.h"

+64 -61
drivers/infiniband/core/cm.c
··· 43 43 #include <linux/spinlock.h> 44 44 #include <linux/workqueue.h> 45 45 46 - #include <ib_cache.h> 47 - #include <ib_cm.h> 46 + #include <rdma/ib_cache.h> 47 + #include <rdma/ib_cm.h> 48 48 #include "cm_msgs.h" 49 49 50 50 MODULE_AUTHOR("Sean Hefty"); ··· 83 83 struct cm_device { 84 84 struct list_head list; 85 85 struct ib_device *device; 86 - u64 ca_guid; 86 + __be64 ca_guid; 87 87 struct cm_port port[0]; 88 88 }; 89 89 ··· 100 100 struct list_head list; 101 101 struct cm_port *port; 102 102 struct ib_mad_recv_wc *mad_recv_wc; /* Received MADs */ 103 - u32 local_id; /* Established / timewait */ 104 - u32 remote_id; 103 + __be32 local_id; /* Established / timewait */ 104 + __be32 remote_id; 105 105 struct ib_cm_event cm_event; 106 106 struct ib_sa_path_rec path[0]; 107 107 }; ··· 110 110 struct cm_work work; /* Must be first. */ 111 111 struct rb_node remote_qp_node; 112 112 struct rb_node remote_id_node; 113 - u64 remote_ca_guid; 114 - u32 remote_qpn; 113 + __be64 remote_ca_guid; 114 + __be32 remote_qpn; 115 115 u8 inserted_remote_qp; 116 116 u8 inserted_remote_id; 117 117 }; ··· 132 132 struct cm_av alt_av; 133 133 134 134 void *private_data; 135 - u64 tid; 136 - u32 local_qpn; 137 - u32 remote_qpn; 138 - u32 sq_psn; 139 - u32 rq_psn; 135 + __be64 tid; 136 + __be32 local_qpn; 137 + __be32 remote_qpn; 138 + __be32 sq_psn; 139 + __be32 rq_psn; 140 140 int timeout_ms; 141 141 enum ib_mtu path_mtu; 142 142 u8 private_data_len; ··· 253 253 u16 dlid, u8 sl, u16 src_path_bits) 254 254 { 255 255 memset(ah_attr, 0, sizeof ah_attr); 256 - ah_attr->dlid = be16_to_cpu(dlid); 256 + ah_attr->dlid = dlid; 257 257 ah_attr->sl = sl; 258 258 ah_attr->src_path_bits = src_path_bits; 259 259 ah_attr->port_num = port_num; ··· 264 264 { 265 265 av->port = port; 266 266 av->pkey_index = wc->pkey_index; 267 - cm_set_ah_attr(&av->ah_attr, port->port_num, cpu_to_be16(wc->slid), 267 + cm_set_ah_attr(&av->ah_attr, port->port_num, wc->slid, 268 268 wc->sl, wc->dlid_path_bits); 269 269 } 270 270 ··· 295 295 return ret; 296 296 297 297 av->port = port; 298 - cm_set_ah_attr(&av->ah_attr, av->port->port_num, path->dlid, 299 - path->sl, path->slid & 0x7F); 298 + cm_set_ah_attr(&av->ah_attr, av->port->port_num, 299 + be16_to_cpu(path->dlid), path->sl, 300 + be16_to_cpu(path->slid) & 0x7F); 300 301 av->packet_life_time = path->packet_life_time; 301 302 return 0; 302 303 } ··· 310 309 do { 311 310 spin_lock_irqsave(&cm.lock, flags); 312 311 ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, 1, 313 - (int *) &cm_id_priv->id.local_id); 312 + (__force int *) &cm_id_priv->id.local_id); 314 313 spin_unlock_irqrestore(&cm.lock, flags); 315 314 } while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) ); 316 315 return ret; 317 316 } 318 317 319 - static void cm_free_id(u32 local_id) 318 + static void cm_free_id(__be32 local_id) 320 319 { 321 320 unsigned long flags; 322 321 323 322 spin_lock_irqsave(&cm.lock, flags); 324 - idr_remove(&cm.local_id_table, (int) local_id); 323 + idr_remove(&cm.local_id_table, (__force int) local_id); 325 324 spin_unlock_irqrestore(&cm.lock, flags); 326 325 } 327 326 328 - static struct cm_id_private * cm_get_id(u32 local_id, u32 remote_id) 327 + static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id) 329 328 { 330 329 struct cm_id_private *cm_id_priv; 331 330 332 - cm_id_priv = idr_find(&cm.local_id_table, (int) local_id); 331 + cm_id_priv = idr_find(&cm.local_id_table, (__force int) local_id); 333 332 if (cm_id_priv) { 334 333 if 
(cm_id_priv->id.remote_id == remote_id) 335 334 atomic_inc(&cm_id_priv->refcount); ··· 340 339 return cm_id_priv; 341 340 } 342 341 343 - static struct cm_id_private * cm_acquire_id(u32 local_id, u32 remote_id) 342 + static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id) 344 343 { 345 344 struct cm_id_private *cm_id_priv; 346 345 unsigned long flags; ··· 357 356 struct rb_node **link = &cm.listen_service_table.rb_node; 358 357 struct rb_node *parent = NULL; 359 358 struct cm_id_private *cur_cm_id_priv; 360 - u64 service_id = cm_id_priv->id.service_id; 361 - u64 service_mask = cm_id_priv->id.service_mask; 359 + __be64 service_id = cm_id_priv->id.service_id; 360 + __be64 service_mask = cm_id_priv->id.service_mask; 362 361 363 362 while (*link) { 364 363 parent = *link; ··· 377 376 return NULL; 378 377 } 379 378 380 - static struct cm_id_private * cm_find_listen(u64 service_id) 379 + static struct cm_id_private * cm_find_listen(__be64 service_id) 381 380 { 382 381 struct rb_node *node = cm.listen_service_table.rb_node; 383 382 struct cm_id_private *cm_id_priv; ··· 401 400 struct rb_node **link = &cm.remote_id_table.rb_node; 402 401 struct rb_node *parent = NULL; 403 402 struct cm_timewait_info *cur_timewait_info; 404 - u64 remote_ca_guid = timewait_info->remote_ca_guid; 405 - u32 remote_id = timewait_info->work.remote_id; 403 + __be64 remote_ca_guid = timewait_info->remote_ca_guid; 404 + __be32 remote_id = timewait_info->work.remote_id; 406 405 407 406 while (*link) { 408 407 parent = *link; ··· 425 424 return NULL; 426 425 } 427 426 428 - static struct cm_timewait_info * cm_find_remote_id(u64 remote_ca_guid, 429 - u32 remote_id) 427 + static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid, 428 + __be32 remote_id) 430 429 { 431 430 struct rb_node *node = cm.remote_id_table.rb_node; 432 431 struct cm_timewait_info *timewait_info; ··· 454 453 struct rb_node **link = &cm.remote_qp_table.rb_node; 455 454 struct rb_node *parent = NULL; 456 455 struct cm_timewait_info *cur_timewait_info; 457 - u64 remote_ca_guid = timewait_info->remote_ca_guid; 458 - u32 remote_qpn = timewait_info->remote_qpn; 456 + __be64 remote_ca_guid = timewait_info->remote_ca_guid; 457 + __be32 remote_qpn = timewait_info->remote_qpn; 459 458 460 459 while (*link) { 461 460 parent = *link; ··· 485 484 struct rb_node *parent = NULL; 486 485 struct cm_id_private *cur_cm_id_priv; 487 486 union ib_gid *port_gid = &cm_id_priv->av.dgid; 488 - u32 remote_id = cm_id_priv->id.remote_id; 487 + __be32 remote_id = cm_id_priv->id.remote_id; 489 488 490 489 while (*link) { 491 490 parent = *link; ··· 599 598 spin_unlock_irqrestore(&cm.lock, flags); 600 599 } 601 600 602 - static struct cm_timewait_info * cm_create_timewait_info(u32 local_id) 601 + static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id) 603 602 { 604 603 struct cm_timewait_info *timewait_info; 605 604 ··· 716 715 EXPORT_SYMBOL(ib_destroy_cm_id); 717 716 718 717 int ib_cm_listen(struct ib_cm_id *cm_id, 719 - u64 service_id, 720 - u64 service_mask) 718 + __be64 service_id, 719 + __be64 service_mask) 721 720 { 722 721 struct cm_id_private *cm_id_priv, *cur_cm_id_priv; 723 722 unsigned long flags; 724 723 int ret = 0; 725 724 726 - service_mask = service_mask ? service_mask : ~0ULL; 725 + service_mask = service_mask ? 
service_mask : 726 + __constant_cpu_to_be64(~0ULL); 727 727 service_id &= service_mask; 728 728 if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID && 729 729 (service_id != IB_CM_ASSIGN_SERVICE_ID)) ··· 737 735 738 736 spin_lock_irqsave(&cm.lock, flags); 739 737 if (service_id == IB_CM_ASSIGN_SERVICE_ID) { 740 - cm_id->service_id = __cpu_to_be64(cm.listen_service_id++); 741 - cm_id->service_mask = ~0ULL; 738 + cm_id->service_id = cpu_to_be64(cm.listen_service_id++); 739 + cm_id->service_mask = __constant_cpu_to_be64(~0ULL); 742 740 } else { 743 741 cm_id->service_id = service_id; 744 742 cm_id->service_mask = service_mask; ··· 754 752 } 755 753 EXPORT_SYMBOL(ib_cm_listen); 756 754 757 - static u64 cm_form_tid(struct cm_id_private *cm_id_priv, 758 - enum cm_msg_sequence msg_seq) 755 + static __be64 cm_form_tid(struct cm_id_private *cm_id_priv, 756 + enum cm_msg_sequence msg_seq) 759 757 { 760 758 u64 hi_tid, low_tid; 761 759 762 760 hi_tid = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32; 763 - low_tid = (u64) (cm_id_priv->id.local_id | (msg_seq << 30)); 761 + low_tid = (u64) ((__force u32)cm_id_priv->id.local_id | 762 + (msg_seq << 30)); 764 763 return cpu_to_be64(hi_tid | low_tid); 765 764 } 766 765 767 766 static void cm_format_mad_hdr(struct ib_mad_hdr *hdr, 768 - enum cm_msg_attr_id attr_id, u64 tid) 767 + __be16 attr_id, __be64 tid) 769 768 { 770 769 hdr->base_version = IB_MGMT_BASE_VERSION; 771 770 hdr->mgmt_class = IB_MGMT_CLASS_CM; ··· 899 896 goto error1; 900 897 } 901 898 cm_id->service_id = param->service_id; 902 - cm_id->service_mask = ~0ULL; 899 + cm_id->service_mask = __constant_cpu_to_be64(~0ULL); 903 900 cm_id_priv->timeout_ms = cm_convert_to_ms( 904 901 param->primary_path->packet_life_time) * 2 + 905 902 cm_convert_to_ms( ··· 966 963 rej_msg->remote_comm_id = rcv_msg->local_comm_id; 967 964 rej_msg->local_comm_id = rcv_msg->remote_comm_id; 968 965 cm_rej_set_msg_rejected(rej_msg, msg_rejected); 969 - rej_msg->reason = reason; 966 + rej_msg->reason = cpu_to_be16(reason); 970 967 971 968 if (ari && ari_length) { 972 969 cm_rej_set_reject_info_len(rej_msg, ari_length); ··· 980 977 return ret; 981 978 } 982 979 983 - static inline int cm_is_active_peer(u64 local_ca_guid, u64 remote_ca_guid, 984 - u32 local_qpn, u32 remote_qpn) 980 + static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid, 981 + __be32 local_qpn, __be32 remote_qpn) 985 982 { 986 983 return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) || 987 984 ((local_ca_guid == remote_ca_guid) && ··· 1140 1137 break; 1141 1138 } 1142 1139 1143 - rej_msg->reason = reason; 1140 + rej_msg->reason = cpu_to_be16(reason); 1144 1141 if (ari && ari_length) { 1145 1142 cm_rej_set_reject_info_len(rej_msg, ari_length); 1146 1143 memcpy(rej_msg->ari, ari, ari_length); ··· 1279 1276 cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler; 1280 1277 cm_id_priv->id.context = listen_cm_id_priv->id.context; 1281 1278 cm_id_priv->id.service_id = req_msg->service_id; 1282 - cm_id_priv->id.service_mask = ~0ULL; 1279 + cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL); 1283 1280 1284 1281 cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]); 1285 1282 ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av); ··· 1972 1969 param = &work->cm_event.param.rej_rcvd; 1973 1970 param->ari = rej_msg->ari; 1974 1971 param->ari_length = cm_rej_get_reject_info_len(rej_msg); 1975 - param->reason = rej_msg->reason; 1972 + param->reason = __be16_to_cpu(rej_msg->reason); 
1976 1973 work->cm_event.private_data = &rej_msg->private_data; 1977 1974 } 1978 1975 ··· 1981 1978 struct cm_timewait_info *timewait_info; 1982 1979 struct cm_id_private *cm_id_priv; 1983 1980 unsigned long flags; 1984 - u32 remote_id; 1981 + __be32 remote_id; 1985 1982 1986 1983 remote_id = rej_msg->local_comm_id; 1987 1984 1988 - if (rej_msg->reason == IB_CM_REJ_TIMEOUT) { 1985 + if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) { 1989 1986 spin_lock_irqsave(&cm.lock, flags); 1990 - timewait_info = cm_find_remote_id( *((u64 *) rej_msg->ari), 1987 + timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari), 1991 1988 remote_id); 1992 1989 if (!timewait_info) { 1993 1990 spin_unlock_irqrestore(&cm.lock, flags); 1994 1991 return NULL; 1995 1992 } 1996 1993 cm_id_priv = idr_find(&cm.local_id_table, 1997 - (int) timewait_info->work.local_id); 1994 + (__force int) timewait_info->work.local_id); 1998 1995 if (cm_id_priv) { 1999 1996 if (cm_id_priv->id.remote_id == remote_id) 2000 1997 atomic_inc(&cm_id_priv->refcount); ··· 2035 2032 /* fall through */ 2036 2033 case IB_CM_REQ_RCVD: 2037 2034 case IB_CM_MRA_REQ_SENT: 2038 - if (rej_msg->reason == IB_CM_REJ_STALE_CONN) 2035 + if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN) 2039 2036 cm_enter_timewait(cm_id_priv); 2040 2037 else 2041 2038 cm_reset_to_idle(cm_id_priv); ··· 2556 2553 cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID, 2557 2554 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR)); 2558 2555 sidr_req_msg->request_id = cm_id_priv->id.local_id; 2559 - sidr_req_msg->pkey = param->pkey; 2556 + sidr_req_msg->pkey = cpu_to_be16(param->pkey); 2560 2557 sidr_req_msg->service_id = param->service_id; 2561 2558 2562 2559 if (param->private_data && param->private_data_len) ··· 2583 2580 goto out; 2584 2581 2585 2582 cm_id->service_id = param->service_id; 2586 - cm_id->service_mask = ~0ULL; 2583 + cm_id->service_mask = __constant_cpu_to_be64(~0ULL); 2587 2584 cm_id_priv->timeout_ms = param->timeout_ms; 2588 2585 cm_id_priv->max_cm_retries = param->max_cm_retries; 2589 2586 ret = cm_alloc_msg(cm_id_priv, &msg); ··· 2624 2621 sidr_req_msg = (struct cm_sidr_req_msg *) 2625 2622 work->mad_recv_wc->recv_buf.mad; 2626 2623 param = &work->cm_event.param.sidr_req_rcvd; 2627 - param->pkey = sidr_req_msg->pkey; 2624 + param->pkey = __be16_to_cpu(sidr_req_msg->pkey); 2628 2625 param->listen_id = listen_id; 2629 2626 param->device = work->port->mad_agent->device; 2630 2627 param->port = work->port->port_num; ··· 2648 2645 sidr_req_msg = (struct cm_sidr_req_msg *) 2649 2646 work->mad_recv_wc->recv_buf.mad; 2650 2647 wc = work->mad_recv_wc->wc; 2651 - cm_id_priv->av.dgid.global.subnet_prefix = wc->slid; 2648 + cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid); 2652 2649 cm_id_priv->av.dgid.global.interface_id = 0; 2653 2650 cm_init_av_for_response(work->port, work->mad_recv_wc->wc, 2654 2651 &cm_id_priv->av); ··· 2676 2673 cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler; 2677 2674 cm_id_priv->id.context = cur_cm_id_priv->id.context; 2678 2675 cm_id_priv->id.service_id = sidr_req_msg->service_id; 2679 - cm_id_priv->id.service_mask = ~0ULL; 2676 + cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL); 2680 2677 2681 2678 cm_format_sidr_req_event(work, &cur_cm_id_priv->id); 2682 2679 cm_process_work(cm_id_priv, work); ··· 3178 3175 } 3179 3176 EXPORT_SYMBOL(ib_cm_init_qp_attr); 3180 3177 3181 - static u64 cm_get_ca_guid(struct ib_device *device) 3178 + static __be64 cm_get_ca_guid(struct ib_device *device) 
3182 3179 { 3183 3180 struct ib_device_attr *device_attr; 3184 - u64 guid; 3181 + __be64 guid; 3185 3182 int ret; 3186 3183 3187 3184 device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
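The recurring change in the cm.c hunks above (and in cm_msgs.h below) is the sparse endianness-annotation pattern: fields that live in network byte order become __be16/__be32/__be64, and every crossing between host and wire order goes through an explicit cpu_to_be*/be*_to_cpu call, so "make C=1" with __CHECK_ENDIAN__ can flag any unconverted use. The __force casts around the idr calls are the escape hatch for the few spots where a __be32 ID is deliberately reused as an opaque host integer. A minimal stand-alone sketch of the pattern; the struct and helpers are hypothetical illustrations, not code from this diff:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	/* Hypothetical wire-format header: big-endian fields are annotated
	 * so sparse can flag any use that skips a conversion helper. */
	struct example_hdr {
		__be32 local_comm_id;	/* stays in network byte order */
		__be16 reason;
		u8     private_data[2];	/* byte arrays need no annotation */
	};

	/* Host-order values are converted exactly once, at the boundary. */
	static void example_fill_hdr(struct example_hdr *hdr, u32 id, u16 reason)
	{
		hdr->local_comm_id = cpu_to_be32(id);
		hdr->reason        = cpu_to_be16(reason);
	}

	static u16 example_reason(const struct example_hdr *hdr)
	{
		return be16_to_cpu(hdr->reason);	/* back to host order */
	}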
+96 -98
drivers/infiniband/core/cm_msgs.h
··· 34 34 #if !defined(CM_MSGS_H) 35 35 #define CM_MSGS_H 36 36 37 - #include <ib_mad.h> 37 + #include <rdma/ib_mad.h> 38 38 39 39 /* 40 40 * Parameters to routines below should be in network-byte order, and values ··· 43 43 44 44 #define IB_CM_CLASS_VERSION 2 /* IB specification 1.2 */ 45 45 46 - enum cm_msg_attr_id { 47 - CM_REQ_ATTR_ID = __constant_htons(0x0010), 48 - CM_MRA_ATTR_ID = __constant_htons(0x0011), 49 - CM_REJ_ATTR_ID = __constant_htons(0x0012), 50 - CM_REP_ATTR_ID = __constant_htons(0x0013), 51 - CM_RTU_ATTR_ID = __constant_htons(0x0014), 52 - CM_DREQ_ATTR_ID = __constant_htons(0x0015), 53 - CM_DREP_ATTR_ID = __constant_htons(0x0016), 54 - CM_SIDR_REQ_ATTR_ID = __constant_htons(0x0017), 55 - CM_SIDR_REP_ATTR_ID = __constant_htons(0x0018), 56 - CM_LAP_ATTR_ID = __constant_htons(0x0019), 57 - CM_APR_ATTR_ID = __constant_htons(0x001A) 58 - }; 46 + #define CM_REQ_ATTR_ID __constant_htons(0x0010) 47 + #define CM_MRA_ATTR_ID __constant_htons(0x0011) 48 + #define CM_REJ_ATTR_ID __constant_htons(0x0012) 49 + #define CM_REP_ATTR_ID __constant_htons(0x0013) 50 + #define CM_RTU_ATTR_ID __constant_htons(0x0014) 51 + #define CM_DREQ_ATTR_ID __constant_htons(0x0015) 52 + #define CM_DREP_ATTR_ID __constant_htons(0x0016) 53 + #define CM_SIDR_REQ_ATTR_ID __constant_htons(0x0017) 54 + #define CM_SIDR_REP_ATTR_ID __constant_htons(0x0018) 55 + #define CM_LAP_ATTR_ID __constant_htons(0x0019) 56 + #define CM_APR_ATTR_ID __constant_htons(0x001A) 59 57 60 58 enum cm_msg_sequence { 61 59 CM_MSG_SEQUENCE_REQ, ··· 65 67 struct cm_req_msg { 66 68 struct ib_mad_hdr hdr; 67 69 68 - u32 local_comm_id; 69 - u32 rsvd4; 70 - u64 service_id; 71 - u64 local_ca_guid; 72 - u32 rsvd24; 73 - u32 local_qkey; 70 + __be32 local_comm_id; 71 + __be32 rsvd4; 72 + __be64 service_id; 73 + __be64 local_ca_guid; 74 + __be32 rsvd24; 75 + __be32 local_qkey; 74 76 /* local QPN:24, responder resources:8 */ 75 - u32 offset32; 77 + __be32 offset32; 76 78 /* local EECN:24, initiator depth:8 */ 77 - u32 offset36; 79 + __be32 offset36; 78 80 /* 79 81 * remote EECN:24, remote CM response timeout:5, 80 82 * transport service type:2, end-to-end flow control:1 81 83 */ 82 - u32 offset40; 84 + __be32 offset40; 83 85 /* starting PSN:24, local CM response timeout:5, retry count:3 */ 84 - u32 offset44; 85 - u16 pkey; 86 + __be32 offset44; 87 + __be16 pkey; 86 88 /* path MTU:4, RDC exists:1, RNR retry count:3. 
*/ 87 89 u8 offset50; 88 90 /* max CM Retries:4, SRQ:1, rsvd:3 */ 89 91 u8 offset51; 90 92 91 - u16 primary_local_lid; 92 - u16 primary_remote_lid; 93 + __be16 primary_local_lid; 94 + __be16 primary_remote_lid; 93 95 union ib_gid primary_local_gid; 94 96 union ib_gid primary_remote_gid; 95 97 /* flow label:20, rsvd:6, packet rate:6 */ 96 - u32 primary_offset88; 98 + __be32 primary_offset88; 97 99 u8 primary_traffic_class; 98 100 u8 primary_hop_limit; 99 101 /* SL:4, subnet local:1, rsvd:3 */ ··· 101 103 /* local ACK timeout:5, rsvd:3 */ 102 104 u8 primary_offset95; 103 105 104 - u16 alt_local_lid; 105 - u16 alt_remote_lid; 106 + __be16 alt_local_lid; 107 + __be16 alt_remote_lid; 106 108 union ib_gid alt_local_gid; 107 109 union ib_gid alt_remote_gid; 108 110 /* flow label:20, rsvd:6, packet rate:6 */ 109 - u32 alt_offset132; 111 + __be32 alt_offset132; 110 112 u8 alt_traffic_class; 111 113 u8 alt_hop_limit; 112 114 /* SL:4, subnet local:1, rsvd:3 */ ··· 118 120 119 121 } __attribute__ ((packed)); 120 122 121 - static inline u32 cm_req_get_local_qpn(struct cm_req_msg *req_msg) 123 + static inline __be32 cm_req_get_local_qpn(struct cm_req_msg *req_msg) 122 124 { 123 125 return cpu_to_be32(be32_to_cpu(req_msg->offset32) >> 8); 124 126 } 125 127 126 - static inline void cm_req_set_local_qpn(struct cm_req_msg *req_msg, u32 qpn) 128 + static inline void cm_req_set_local_qpn(struct cm_req_msg *req_msg, __be32 qpn) 127 129 { 128 130 req_msg->offset32 = cpu_to_be32((be32_to_cpu(qpn) << 8) | 129 131 (be32_to_cpu(req_msg->offset32) & ··· 206 208 0xFFFFFFFE)); 207 209 } 208 210 209 - static inline u32 cm_req_get_starting_psn(struct cm_req_msg *req_msg) 211 + static inline __be32 cm_req_get_starting_psn(struct cm_req_msg *req_msg) 210 212 { 211 213 return cpu_to_be32(be32_to_cpu(req_msg->offset44) >> 8); 212 214 } 213 215 214 216 static inline void cm_req_set_starting_psn(struct cm_req_msg *req_msg, 215 - u32 starting_psn) 217 + __be32 starting_psn) 216 218 { 217 219 req_msg->offset44 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) | 218 220 (be32_to_cpu(req_msg->offset44) & 0x000000FF)); ··· 286 288 ((srq & 0x1) << 3)); 287 289 } 288 290 289 - static inline u32 cm_req_get_primary_flow_label(struct cm_req_msg *req_msg) 291 + static inline __be32 cm_req_get_primary_flow_label(struct cm_req_msg *req_msg) 290 292 { 291 - return cpu_to_be32((be32_to_cpu(req_msg->primary_offset88) >> 12)); 293 + return cpu_to_be32(be32_to_cpu(req_msg->primary_offset88) >> 12); 292 294 } 293 295 294 296 static inline void cm_req_set_primary_flow_label(struct cm_req_msg *req_msg, 295 - u32 flow_label) 297 + __be32 flow_label) 296 298 { 297 299 req_msg->primary_offset88 = cpu_to_be32( 298 300 (be32_to_cpu(req_msg->primary_offset88) & ··· 348 350 (local_ack_timeout << 3)); 349 351 } 350 352 351 - static inline u32 cm_req_get_alt_flow_label(struct cm_req_msg *req_msg) 353 + static inline __be32 cm_req_get_alt_flow_label(struct cm_req_msg *req_msg) 352 354 { 353 - return cpu_to_be32((be32_to_cpu(req_msg->alt_offset132) >> 12)); 355 + return cpu_to_be32(be32_to_cpu(req_msg->alt_offset132) >> 12); 354 356 } 355 357 356 358 static inline void cm_req_set_alt_flow_label(struct cm_req_msg *req_msg, 357 - u32 flow_label) 359 + __be32 flow_label) 358 360 { 359 361 req_msg->alt_offset132 = cpu_to_be32( 360 362 (be32_to_cpu(req_msg->alt_offset132) & ··· 420 422 struct cm_mra_msg { 421 423 struct ib_mad_hdr hdr; 422 424 423 - u32 local_comm_id; 424 - u32 remote_comm_id; 425 + __be32 local_comm_id; 426 + __be32 remote_comm_id; 425 427 /* 
message MRAed:2, rsvd:6 */ 426 428 u8 offset8; 427 429 /* service timeout:5, rsvd:3 */ ··· 456 458 struct cm_rej_msg { 457 459 struct ib_mad_hdr hdr; 458 460 459 - u32 local_comm_id; 460 - u32 remote_comm_id; 461 + __be32 local_comm_id; 462 + __be32 remote_comm_id; 461 463 /* message REJected:2, rsvd:6 */ 462 464 u8 offset8; 463 465 /* reject info length:7, rsvd:1. */ 464 466 u8 offset9; 465 - u16 reason; 467 + __be16 reason; 466 468 u8 ari[IB_CM_REJ_ARI_LENGTH]; 467 469 468 470 u8 private_data[IB_CM_REJ_PRIVATE_DATA_SIZE]; ··· 493 495 struct cm_rep_msg { 494 496 struct ib_mad_hdr hdr; 495 497 496 - u32 local_comm_id; 497 - u32 remote_comm_id; 498 - u32 local_qkey; 498 + __be32 local_comm_id; 499 + __be32 remote_comm_id; 500 + __be32 local_qkey; 499 501 /* local QPN:24, rsvd:8 */ 500 - u32 offset12; 502 + __be32 offset12; 501 503 /* local EECN:24, rsvd:8 */ 502 - u32 offset16; 504 + __be32 offset16; 503 505 /* starting PSN:24 rsvd:8 */ 504 - u32 offset20; 506 + __be32 offset20; 505 507 u8 resp_resources; 506 508 u8 initiator_depth; 507 509 /* target ACK delay:5, failover accepted:2, end-to-end flow control:1 */ 508 510 u8 offset26; 509 511 /* RNR retry count:3, SRQ:1, rsvd:5 */ 510 512 u8 offset27; 511 - u64 local_ca_guid; 513 + __be64 local_ca_guid; 512 514 513 515 u8 private_data[IB_CM_REP_PRIVATE_DATA_SIZE]; 514 516 515 517 } __attribute__ ((packed)); 516 518 517 - static inline u32 cm_rep_get_local_qpn(struct cm_rep_msg *rep_msg) 519 + static inline __be32 cm_rep_get_local_qpn(struct cm_rep_msg *rep_msg) 518 520 { 519 521 return cpu_to_be32(be32_to_cpu(rep_msg->offset12) >> 8); 520 522 } 521 523 522 - static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, u32 qpn) 524 + static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, __be32 qpn) 523 525 { 524 526 rep_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) | 525 527 (be32_to_cpu(rep_msg->offset12) & 0x000000FF)); 526 528 } 527 529 528 - static inline u32 cm_rep_get_starting_psn(struct cm_rep_msg *rep_msg) 530 + static inline __be32 cm_rep_get_starting_psn(struct cm_rep_msg *rep_msg) 529 531 { 530 532 return cpu_to_be32(be32_to_cpu(rep_msg->offset20) >> 8); 531 533 } 532 534 533 535 static inline void cm_rep_set_starting_psn(struct cm_rep_msg *rep_msg, 534 - u32 starting_psn) 536 + __be32 starting_psn) 535 537 { 536 538 rep_msg->offset20 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) | 537 539 (be32_to_cpu(rep_msg->offset20) & 0x000000FF)); ··· 598 600 struct cm_rtu_msg { 599 601 struct ib_mad_hdr hdr; 600 602 601 - u32 local_comm_id; 602 - u32 remote_comm_id; 603 + __be32 local_comm_id; 604 + __be32 remote_comm_id; 603 605 604 606 u8 private_data[IB_CM_RTU_PRIVATE_DATA_SIZE]; 605 607 ··· 608 610 struct cm_dreq_msg { 609 611 struct ib_mad_hdr hdr; 610 612 611 - u32 local_comm_id; 612 - u32 remote_comm_id; 613 + __be32 local_comm_id; 614 + __be32 remote_comm_id; 613 615 /* remote QPN/EECN:24, rsvd:8 */ 614 - u32 offset8; 616 + __be32 offset8; 615 617 616 618 u8 private_data[IB_CM_DREQ_PRIVATE_DATA_SIZE]; 617 619 618 620 } __attribute__ ((packed)); 619 621 620 - static inline u32 cm_dreq_get_remote_qpn(struct cm_dreq_msg *dreq_msg) 622 + static inline __be32 cm_dreq_get_remote_qpn(struct cm_dreq_msg *dreq_msg) 621 623 { 622 624 return cpu_to_be32(be32_to_cpu(dreq_msg->offset8) >> 8); 623 625 } 624 626 625 - static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, u32 qpn) 627 + static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, __be32 qpn) 626 628 { 627 629 dreq_msg->offset8 
= cpu_to_be32((be32_to_cpu(qpn) << 8) | 628 630 (be32_to_cpu(dreq_msg->offset8) & 0x000000FF)); ··· 631 633 struct cm_drep_msg { 632 634 struct ib_mad_hdr hdr; 633 635 634 - u32 local_comm_id; 635 - u32 remote_comm_id; 636 + __be32 local_comm_id; 637 + __be32 remote_comm_id; 636 638 637 639 u8 private_data[IB_CM_DREP_PRIVATE_DATA_SIZE]; 638 640 ··· 641 643 struct cm_lap_msg { 642 644 struct ib_mad_hdr hdr; 643 645 644 - u32 local_comm_id; 645 - u32 remote_comm_id; 646 + __be32 local_comm_id; 647 + __be32 remote_comm_id; 646 648 647 - u32 rsvd8; 649 + __be32 rsvd8; 648 650 /* remote QPN/EECN:24, remote CM response timeout:5, rsvd:3 */ 649 - u32 offset12; 650 - u32 rsvd16; 651 + __be32 offset12; 652 + __be32 rsvd16; 651 653 652 - u16 alt_local_lid; 653 - u16 alt_remote_lid; 654 + __be16 alt_local_lid; 655 + __be16 alt_remote_lid; 654 656 union ib_gid alt_local_gid; 655 657 union ib_gid alt_remote_gid; 656 658 /* flow label:20, rsvd:4, traffic class:8 */ 657 - u32 offset56; 659 + __be32 offset56; 658 660 u8 alt_hop_limit; 659 661 /* rsvd:2, packet rate:6 */ 660 - uint8_t offset61; 662 + u8 offset61; 661 663 /* SL:4, subnet local:1, rsvd:3 */ 662 - uint8_t offset62; 664 + u8 offset62; 663 665 /* local ACK timeout:5, rsvd:3 */ 664 - uint8_t offset63; 666 + u8 offset63; 665 667 666 668 u8 private_data[IB_CM_LAP_PRIVATE_DATA_SIZE]; 667 669 } __attribute__ ((packed)); 668 670 669 - static inline u32 cm_lap_get_remote_qpn(struct cm_lap_msg *lap_msg) 671 + static inline __be32 cm_lap_get_remote_qpn(struct cm_lap_msg *lap_msg) 670 672 { 671 673 return cpu_to_be32(be32_to_cpu(lap_msg->offset12) >> 8); 672 674 } 673 675 674 - static inline void cm_lap_set_remote_qpn(struct cm_lap_msg *lap_msg, u32 qpn) 676 + static inline void cm_lap_set_remote_qpn(struct cm_lap_msg *lap_msg, __be32 qpn) 675 677 { 676 678 lap_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) | 677 679 (be32_to_cpu(lap_msg->offset12) & ··· 691 693 0xFFFFFF07)); 692 694 } 693 695 694 - static inline u32 cm_lap_get_flow_label(struct cm_lap_msg *lap_msg) 696 + static inline __be32 cm_lap_get_flow_label(struct cm_lap_msg *lap_msg) 695 697 { 696 - return be32_to_cpu(lap_msg->offset56) >> 12; 698 + return cpu_to_be32(be32_to_cpu(lap_msg->offset56) >> 12); 697 699 } 698 700 699 701 static inline void cm_lap_set_flow_label(struct cm_lap_msg *lap_msg, 700 - u32 flow_label) 702 + __be32 flow_label) 701 703 { 702 - lap_msg->offset56 = cpu_to_be32((flow_label << 12) | 703 - (be32_to_cpu(lap_msg->offset56) & 704 - 0x00000FFF)); 704 + lap_msg->offset56 = cpu_to_be32( 705 + (be32_to_cpu(lap_msg->offset56) & 0x00000FFF) | 706 + (be32_to_cpu(flow_label) << 12)); 705 707 } 706 708 707 709 static inline u8 cm_lap_get_traffic_class(struct cm_lap_msg *lap_msg) ··· 764 766 struct cm_apr_msg { 765 767 struct ib_mad_hdr hdr; 766 768 767 - u32 local_comm_id; 768 - u32 remote_comm_id; 769 + __be32 local_comm_id; 770 + __be32 remote_comm_id; 769 771 770 772 u8 info_length; 771 773 u8 ap_status; ··· 777 779 struct cm_sidr_req_msg { 778 780 struct ib_mad_hdr hdr; 779 781 780 - u32 request_id; 781 - u16 pkey; 782 - u16 rsvd; 783 - u64 service_id; 782 + __be32 request_id; 783 + __be16 pkey; 784 + __be16 rsvd; 785 + __be64 service_id; 784 786 785 787 u8 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE]; 786 788 } __attribute__ ((packed)); ··· 788 790 struct cm_sidr_rep_msg { 789 791 struct ib_mad_hdr hdr; 790 792 791 - u32 request_id; 793 + __be32 request_id; 792 794 u8 status; 793 795 u8 info_length; 794 - u16 rsvd; 796 + __be16 rsvd; 795 797 /* QPN:24, rsvd:8 */ 796 
- u32 offset8; 797 - u64 service_id; 798 - u32 qkey; 798 + __be32 offset8; 799 + __be64 service_id; 800 + __be32 qkey; 799 801 u8 info[IB_CM_SIDR_REP_INFO_LENGTH]; 800 802 801 803 u8 private_data[IB_CM_SIDR_REP_PRIVATE_DATA_SIZE]; 802 804 } __attribute__ ((packed)); 803 805 804 - static inline u32 cm_sidr_rep_get_qpn(struct cm_sidr_rep_msg *sidr_rep_msg) 806 + static inline __be32 cm_sidr_rep_get_qpn(struct cm_sidr_rep_msg *sidr_rep_msg) 805 807 { 806 808 return cpu_to_be32(be32_to_cpu(sidr_rep_msg->offset8) >> 8); 807 809 } 808 810 809 811 static inline void cm_sidr_rep_set_qpn(struct cm_sidr_rep_msg *sidr_rep_msg, 810 - u32 qpn) 812 + __be32 qpn) 811 813 { 812 814 sidr_rep_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) | 813 815 (be32_to_cpu(sidr_rep_msg->offset8) &
+1 -1
drivers/infiniband/core/core_priv.h
···
  #include <linux/list.h>
  #include <linux/spinlock.h>

- #include <ib_verbs.h>
+ #include <rdma/ib_verbs.h>

  int ib_device_register_sysfs(struct ib_device *device);
  void ib_device_unregister_sysfs(struct ib_device *device);
+1
drivers/infiniband/core/device.c
···
  /*
   * Copyright (c) 2004 Topspin Communications. All rights reserved.
+  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
   *
   * This software is available to you under a choice of one of two
   * licenses. You may choose to be licensed under the terms of the GNU
+7 -1
drivers/infiniband/core/fmr_pool.c
···
  #include <linux/jhash.h>
  #include <linux/kthread.h>

- #include <ib_fmr_pool.h>
+ #include <rdma/ib_fmr_pool.h>

  #include "core_priv.h"

···
  {
  	struct ib_pool_fmr *fmr;
  	struct ib_pool_fmr *tmp;
+ 	LIST_HEAD(fmr_list);
  	int i;

  	kthread_stop(pool->thread);
···

  	i = 0;
  	list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
+ 		if (fmr->remap_count) {
+ 			INIT_LIST_HEAD(&fmr_list);
+ 			list_add_tail(&fmr->fmr->list, &fmr_list);
+ 			ib_unmap_fmr(&fmr_list);
+ 		}
  		ib_dealloc_fmr(fmr->fmr);
  		list_del(&fmr->list);
  		kfree(fmr);
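The fmr_pool.c hunk above makes pool teardown unmap any FMR that was ever mapped before freeing it: the entry is put on a temporary list and handed to ib_unmap_fmr(), and only then released with ib_dealloc_fmr(). A hedged sketch of that unmap-then-free pattern as a caller might apply it; the helper name and the was_mapped flag are illustrative assumptions, not the driver's exact types:

	#include <linux/list.h>
	#include <rdma/ib_verbs.h>

	/* Hypothetical cleanup helper mirroring the pattern this hunk adds:
	 * invalidate the mapping first, then release the FMR object. */
	static void example_release_fmr(struct ib_fmr *fmr, int was_mapped)
	{
		LIST_HEAD(fmr_list);		/* ib_unmap_fmr() takes a list */

		if (was_mapped) {
			list_add_tail(&fmr->list, &fmr_list);
			ib_unmap_fmr(&fmr_list);	/* flush the mapping */
		}
		ib_dealloc_fmr(fmr);			/* now safe to free */
	}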
+8 -7
drivers/infiniband/core/mad.c
···
  		goto out;
  	}

- 	build_smp_wc(send_wr->wr_id, smp->dr_slid, send_wr->wr.ud.pkey_index,
+ 	build_smp_wc(send_wr->wr_id, be16_to_cpu(smp->dr_slid),
+ 		     send_wr->wr.ud.pkey_index,
  		     send_wr->wr.ud.port_num, &mad_wc);

  	/* No GRH for DR SMP */
···
  }

  struct ib_mad_send_wr_private*
- ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, u64 tid)
+ ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, __be64 tid)
  {
  	struct ib_mad_send_wr_private *mad_send_wr;

···
  	struct ib_mad_send_wr_private *mad_send_wr;
  	struct ib_mad_send_wc mad_send_wc;
  	unsigned long flags;
- 	u64 tid;
+ 	__be64 tid;

  	INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
  	list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
···
  			 * Defined behavior is to complete response
  			 * before request
  			 */
- 			build_smp_wc(local->wr_id, IB_LID_PERMISSIVE,
+ 			build_smp_wc(local->wr_id,
+ 				     be16_to_cpu(IB_LID_PERMISSIVE),
  				     0 /* pkey index */,
  				     recv_mad_agent->agent.port_num, &wc);

···
  	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
  }

- static void ib_mad_thread_completion_handler(struct ib_cq *cq)
+ static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
  {
  	struct ib_mad_port_private *port_priv = cq->cq_context;

···

  	cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2;
  	port_priv->cq = ib_create_cq(port_priv->device,
- 				     (ib_comp_handler)
- 				     ib_mad_thread_completion_handler,
+ 				     ib_mad_thread_completion_handler,
  				     NULL, port_priv, cq_size);
  	if (IS_ERR(port_priv->cq)) {
  		printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
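The last mad.c hunk works because ib_mad_thread_completion_handler() now carries the full ib_comp_handler signature (struct ib_cq *, void *), so the function-pointer cast at the ib_create_cq() call can be dropped. A small sketch of registering a completion handler that way; the handler name and context struct are illustrative assumptions, not part of this diff:

	#include <linux/err.h>
	#include <rdma/ib_verbs.h>

	struct example_port {
		struct ib_cq *cq;
	};

	/* Matches ib_comp_handler, void (*)(struct ib_cq *, void *),
	 * so it can be passed to ib_create_cq() without a cast. */
	static void example_comp_handler(struct ib_cq *cq, void *cq_context)
	{
		struct example_port *port = cq_context;

		/* re-arm the CQ and process completions here */
		ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
		(void) port;
	}

	static int example_setup_cq(struct ib_device *device,
				    struct example_port *port, int cq_size)
	{
		port->cq = ib_create_cq(device, example_comp_handler,
					NULL /* async event handler */,
					port, cq_size);
		return IS_ERR(port->cq) ? PTR_ERR(port->cq) : 0;
	}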
+5 -5
drivers/infiniband/core/mad_priv.h
···
  #include <linux/pci.h>
  #include <linux/kthread.h>
  #include <linux/workqueue.h>
- #include <ib_mad.h>
- #include <ib_smi.h>
+ #include <rdma/ib_mad.h>
+ #include <rdma/ib_smi.h>


  #define PFX "ib_mad: "
···
  	struct ib_send_wr send_wr;
  	struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG];
  	u64 wr_id;			/* client WR ID */
- 	u64 tid;
+ 	__be64 tid;
  	unsigned long timeout;
  	int retries;
  	int retry;
···
  	struct ib_send_wr send_wr;
  	struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG];
  	u64 wr_id;			/* client WR ID */
- 	u64 tid;
+ 	__be64 tid;
  };

  struct ib_mad_mgmt_method_table {
···
  int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr);

  struct ib_mad_send_wr_private *
- ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, u64 tid);
+ ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, __be64 tid);

  void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
  			     struct ib_mad_send_wc *mad_send_wc);
+245 -66
drivers/infiniband/core/mad_rmpp.c
··· 61 61 int seg_num; 62 62 int newwin; 63 63 64 - u64 tid; 64 + __be64 tid; 65 65 u32 src_qp; 66 66 u16 slid; 67 67 u8 mgmt_class; ··· 100 100 } 101 101 } 102 102 103 + static int data_offset(u8 mgmt_class) 104 + { 105 + if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM) 106 + return offsetof(struct ib_sa_mad, data); 107 + else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) && 108 + (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)) 109 + return offsetof(struct ib_vendor_mad, data); 110 + else 111 + return offsetof(struct ib_rmpp_mad, data); 112 + } 113 + 114 + static void format_ack(struct ib_rmpp_mad *ack, 115 + struct ib_rmpp_mad *data, 116 + struct mad_rmpp_recv *rmpp_recv) 117 + { 118 + unsigned long flags; 119 + 120 + memcpy(&ack->mad_hdr, &data->mad_hdr, 121 + data_offset(data->mad_hdr.mgmt_class)); 122 + 123 + ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP; 124 + ack->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ACK; 125 + ib_set_rmpp_flags(&ack->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE); 126 + 127 + spin_lock_irqsave(&rmpp_recv->lock, flags); 128 + rmpp_recv->last_ack = rmpp_recv->seg_num; 129 + ack->rmpp_hdr.seg_num = cpu_to_be32(rmpp_recv->seg_num); 130 + ack->rmpp_hdr.paylen_newwin = cpu_to_be32(rmpp_recv->newwin); 131 + spin_unlock_irqrestore(&rmpp_recv->lock, flags); 132 + } 133 + 134 + static void ack_recv(struct mad_rmpp_recv *rmpp_recv, 135 + struct ib_mad_recv_wc *recv_wc) 136 + { 137 + struct ib_mad_send_buf *msg; 138 + struct ib_send_wr *bad_send_wr; 139 + int hdr_len, ret; 140 + 141 + hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr); 142 + msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp, 143 + recv_wc->wc->pkey_index, rmpp_recv->ah, 1, 144 + hdr_len, sizeof(struct ib_rmpp_mad) - hdr_len, 145 + GFP_KERNEL); 146 + if (!msg) 147 + return; 148 + 149 + format_ack((struct ib_rmpp_mad *) msg->mad, 150 + (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv); 151 + ret = ib_post_send_mad(&rmpp_recv->agent->agent, &msg->send_wr, 152 + &bad_send_wr); 153 + if (ret) 154 + ib_free_send_mad(msg); 155 + } 156 + 157 + static int alloc_response_msg(struct ib_mad_agent *agent, 158 + struct ib_mad_recv_wc *recv_wc, 159 + struct ib_mad_send_buf **msg) 160 + { 161 + struct ib_mad_send_buf *m; 162 + struct ib_ah *ah; 163 + int hdr_len; 164 + 165 + ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc, 166 + recv_wc->recv_buf.grh, agent->port_num); 167 + if (IS_ERR(ah)) 168 + return PTR_ERR(ah); 169 + 170 + hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr); 171 + m = ib_create_send_mad(agent, recv_wc->wc->src_qp, 172 + recv_wc->wc->pkey_index, ah, 1, hdr_len, 173 + sizeof(struct ib_rmpp_mad) - hdr_len, 174 + GFP_KERNEL); 175 + if (IS_ERR(m)) { 176 + ib_destroy_ah(ah); 177 + return PTR_ERR(m); 178 + } 179 + *msg = m; 180 + return 0; 181 + } 182 + 183 + static void free_msg(struct ib_mad_send_buf *msg) 184 + { 185 + ib_destroy_ah(msg->send_wr.wr.ud.ah); 186 + ib_free_send_mad(msg); 187 + } 188 + 189 + static void nack_recv(struct ib_mad_agent_private *agent, 190 + struct ib_mad_recv_wc *recv_wc, u8 rmpp_status) 191 + { 192 + struct ib_mad_send_buf *msg; 193 + struct ib_rmpp_mad *rmpp_mad; 194 + struct ib_send_wr *bad_send_wr; 195 + int ret; 196 + 197 + ret = alloc_response_msg(&agent->agent, recv_wc, &msg); 198 + if (ret) 199 + return; 200 + 201 + rmpp_mad = (struct ib_rmpp_mad *) msg->mad; 202 + memcpy(rmpp_mad, recv_wc->recv_buf.mad, 203 + data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class)); 204 + 205 + rmpp_mad->mad_hdr.method ^= 
IB_MGMT_METHOD_RESP; 206 + rmpp_mad->rmpp_hdr.rmpp_version = IB_MGMT_RMPP_VERSION; 207 + rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ABORT; 208 + ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE); 209 + rmpp_mad->rmpp_hdr.rmpp_status = rmpp_status; 210 + rmpp_mad->rmpp_hdr.seg_num = 0; 211 + rmpp_mad->rmpp_hdr.paylen_newwin = 0; 212 + 213 + ret = ib_post_send_mad(&agent->agent, &msg->send_wr, &bad_send_wr); 214 + if (ret) 215 + free_msg(msg); 216 + } 217 + 103 218 static void recv_timeout_handler(void *data) 104 219 { 105 220 struct mad_rmpp_recv *rmpp_recv = data; ··· 230 115 list_del(&rmpp_recv->list); 231 116 spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags); 232 117 233 - /* TODO: send abort. */ 234 118 rmpp_wc = rmpp_recv->rmpp_wc; 119 + nack_recv(rmpp_recv->agent, rmpp_wc, IB_MGMT_RMPP_STATUS_T2L); 235 120 destroy_rmpp_recv(rmpp_recv); 236 121 ib_free_recv_mad(rmpp_wc); 237 122 } ··· 343 228 list_add_tail(&rmpp_recv->list, &agent->rmpp_list); 344 229 345 230 return cur_rmpp_recv; 346 - } 347 - 348 - static int data_offset(u8 mgmt_class) 349 - { 350 - if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM) 351 - return offsetof(struct ib_sa_mad, data); 352 - else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) && 353 - (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)) 354 - return offsetof(struct ib_vendor_mad, data); 355 - else 356 - return offsetof(struct ib_rmpp_mad, data); 357 - } 358 - 359 - static void format_ack(struct ib_rmpp_mad *ack, 360 - struct ib_rmpp_mad *data, 361 - struct mad_rmpp_recv *rmpp_recv) 362 - { 363 - unsigned long flags; 364 - 365 - memcpy(&ack->mad_hdr, &data->mad_hdr, 366 - data_offset(data->mad_hdr.mgmt_class)); 367 - 368 - ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP; 369 - ack->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ACK; 370 - ib_set_rmpp_flags(&ack->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE); 371 - 372 - spin_lock_irqsave(&rmpp_recv->lock, flags); 373 - rmpp_recv->last_ack = rmpp_recv->seg_num; 374 - ack->rmpp_hdr.seg_num = cpu_to_be32(rmpp_recv->seg_num); 375 - ack->rmpp_hdr.paylen_newwin = cpu_to_be32(rmpp_recv->newwin); 376 - spin_unlock_irqrestore(&rmpp_recv->lock, flags); 377 - } 378 - 379 - static void ack_recv(struct mad_rmpp_recv *rmpp_recv, 380 - struct ib_mad_recv_wc *recv_wc) 381 - { 382 - struct ib_mad_send_buf *msg; 383 - struct ib_send_wr *bad_send_wr; 384 - int hdr_len, ret; 385 - 386 - hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr); 387 - msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp, 388 - recv_wc->wc->pkey_index, rmpp_recv->ah, 1, 389 - hdr_len, sizeof(struct ib_rmpp_mad) - hdr_len, 390 - GFP_KERNEL); 391 - if (!msg) 392 - return; 393 - 394 - format_ack((struct ib_rmpp_mad *) msg->mad, 395 - (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv); 396 - ret = ib_post_send_mad(&rmpp_recv->agent->agent, &msg->send_wr, 397 - &bad_send_wr); 398 - if (ret) 399 - ib_free_send_mad(msg); 400 231 } 401 232 402 233 static inline int get_last_flag(struct ib_mad_recv_buf *seg) ··· 620 559 return ib_send_mad(mad_send_wr); 621 560 } 622 561 562 + static void abort_send(struct ib_mad_agent_private *agent, __be64 tid, 563 + u8 rmpp_status) 564 + { 565 + struct ib_mad_send_wr_private *mad_send_wr; 566 + struct ib_mad_send_wc wc; 567 + unsigned long flags; 568 + 569 + spin_lock_irqsave(&agent->lock, flags); 570 + mad_send_wr = ib_find_send_mad(agent, tid); 571 + if (!mad_send_wr) 572 + goto out; /* Unmatched send */ 573 + 574 + if ((mad_send_wr->last_ack == mad_send_wr->total_seg) || 575 + 
(!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS)) 576 + goto out; /* Send is already done */ 577 + 578 + ib_mark_mad_done(mad_send_wr); 579 + spin_unlock_irqrestore(&agent->lock, flags); 580 + 581 + wc.status = IB_WC_REM_ABORT_ERR; 582 + wc.vendor_err = rmpp_status; 583 + wc.wr_id = mad_send_wr->wr_id; 584 + ib_mad_complete_send_wr(mad_send_wr, &wc); 585 + return; 586 + out: 587 + spin_unlock_irqrestore(&agent->lock, flags); 588 + } 589 + 623 590 static void process_rmpp_ack(struct ib_mad_agent_private *agent, 624 591 struct ib_mad_recv_wc *mad_recv_wc) 625 592 { ··· 657 568 int seg_num, newwin, ret; 658 569 659 570 rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad; 660 - if (rmpp_mad->rmpp_hdr.rmpp_status) 571 + if (rmpp_mad->rmpp_hdr.rmpp_status) { 572 + abort_send(agent, rmpp_mad->mad_hdr.tid, 573 + IB_MGMT_RMPP_STATUS_BAD_STATUS); 574 + nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS); 661 575 return; 576 + } 662 577 663 578 seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num); 664 579 newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin); 580 + if (newwin < seg_num) { 581 + abort_send(agent, rmpp_mad->mad_hdr.tid, 582 + IB_MGMT_RMPP_STATUS_W2S); 583 + nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S); 584 + return; 585 + } 665 586 666 587 spin_lock_irqsave(&agent->lock, flags); 667 588 mad_send_wr = ib_find_send_mad(agent, rmpp_mad->mad_hdr.tid); ··· 682 583 (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS)) 683 584 goto out; /* Send is already done */ 684 585 685 - if (seg_num > mad_send_wr->total_seg) 686 - goto out; /* Bad ACK */ 586 + if (seg_num > mad_send_wr->total_seg || seg_num > mad_send_wr->newwin) { 587 + spin_unlock_irqrestore(&agent->lock, flags); 588 + abort_send(agent, rmpp_mad->mad_hdr.tid, 589 + IB_MGMT_RMPP_STATUS_S2B); 590 + nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B); 591 + return; 592 + } 687 593 688 594 if (newwin < mad_send_wr->newwin || seg_num < mad_send_wr->last_ack) 689 595 goto out; /* Old ACK */ ··· 732 628 spin_unlock_irqrestore(&agent->lock, flags); 733 629 } 734 630 631 + static struct ib_mad_recv_wc * 632 + process_rmpp_data(struct ib_mad_agent_private *agent, 633 + struct ib_mad_recv_wc *mad_recv_wc) 634 + { 635 + struct ib_rmpp_hdr *rmpp_hdr; 636 + u8 rmpp_status; 637 + 638 + rmpp_hdr = &((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr; 639 + 640 + if (rmpp_hdr->rmpp_status) { 641 + rmpp_status = IB_MGMT_RMPP_STATUS_BAD_STATUS; 642 + goto bad; 643 + } 644 + 645 + if (rmpp_hdr->seg_num == __constant_htonl(1)) { 646 + if (!(ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST)) { 647 + rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG; 648 + goto bad; 649 + } 650 + return start_rmpp(agent, mad_recv_wc); 651 + } else { 652 + if (ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST) { 653 + rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG; 654 + goto bad; 655 + } 656 + return continue_rmpp(agent, mad_recv_wc); 657 + } 658 + bad: 659 + nack_recv(agent, mad_recv_wc, rmpp_status); 660 + ib_free_recv_mad(mad_recv_wc); 661 + return NULL; 662 + } 663 + 664 + static void process_rmpp_stop(struct ib_mad_agent_private *agent, 665 + struct ib_mad_recv_wc *mad_recv_wc) 666 + { 667 + struct ib_rmpp_mad *rmpp_mad; 668 + 669 + rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad; 670 + 671 + if (rmpp_mad->rmpp_hdr.rmpp_status != IB_MGMT_RMPP_STATUS_RESX) { 672 + abort_send(agent, rmpp_mad->mad_hdr.tid, 673 + IB_MGMT_RMPP_STATUS_BAD_STATUS); 674 + nack_recv(agent, mad_recv_wc, 
IB_MGMT_RMPP_STATUS_BAD_STATUS); 675 + } else 676 + abort_send(agent, rmpp_mad->mad_hdr.tid, 677 + rmpp_mad->rmpp_hdr.rmpp_status); 678 + } 679 + 680 + static void process_rmpp_abort(struct ib_mad_agent_private *agent, 681 + struct ib_mad_recv_wc *mad_recv_wc) 682 + { 683 + struct ib_rmpp_mad *rmpp_mad; 684 + 685 + rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad; 686 + 687 + if (rmpp_mad->rmpp_hdr.rmpp_status < IB_MGMT_RMPP_STATUS_ABORT_MIN || 688 + rmpp_mad->rmpp_hdr.rmpp_status > IB_MGMT_RMPP_STATUS_ABORT_MAX) { 689 + abort_send(agent, rmpp_mad->mad_hdr.tid, 690 + IB_MGMT_RMPP_STATUS_BAD_STATUS); 691 + nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS); 692 + } else 693 + abort_send(agent, rmpp_mad->mad_hdr.tid, 694 + rmpp_mad->rmpp_hdr.rmpp_status); 695 + } 696 + 735 697 struct ib_mad_recv_wc * 736 698 ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent, 737 699 struct ib_mad_recv_wc *mad_recv_wc) ··· 808 638 if (!(rmpp_mad->rmpp_hdr.rmpp_rtime_flags & IB_MGMT_RMPP_FLAG_ACTIVE)) 809 639 return mad_recv_wc; 810 640 811 - if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION) 641 + if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION) { 642 + abort_send(agent, rmpp_mad->mad_hdr.tid, 643 + IB_MGMT_RMPP_STATUS_UNV); 644 + nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV); 812 645 goto out; 646 + } 813 647 814 648 switch (rmpp_mad->rmpp_hdr.rmpp_type) { 815 649 case IB_MGMT_RMPP_TYPE_DATA: 816 - if (rmpp_mad->rmpp_hdr.seg_num == __constant_htonl(1)) 817 - return start_rmpp(agent, mad_recv_wc); 818 - else 819 - return continue_rmpp(agent, mad_recv_wc); 650 + return process_rmpp_data(agent, mad_recv_wc); 820 651 case IB_MGMT_RMPP_TYPE_ACK: 821 652 process_rmpp_ack(agent, mad_recv_wc); 822 653 break; 823 654 case IB_MGMT_RMPP_TYPE_STOP: 655 + process_rmpp_stop(agent, mad_recv_wc); 656 + break; 824 657 case IB_MGMT_RMPP_TYPE_ABORT: 825 - /* TODO: process_rmpp_nack(agent, mad_recv_wc); */ 658 + process_rmpp_abort(agent, mad_recv_wc); 826 659 break; 827 660 default: 661 + abort_send(agent, rmpp_mad->mad_hdr.tid, 662 + IB_MGMT_RMPP_STATUS_BADT); 663 + nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT); 828 664 break; 829 665 } 830 666 out: ··· 890 714 if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) { 891 715 msg = (struct ib_mad_send_buf *) (unsigned long) 892 716 mad_send_wc->wr_id; 893 - ib_free_send_mad(msg); 717 + if (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_ACK) 718 + ib_free_send_mad(msg); 719 + else 720 + free_msg(msg); 894 721 return IB_RMPP_RESULT_INTERNAL; /* ACK, STOP, or ABORT */ 895 722 } 896 723
+2 -1
drivers/infiniband/core/packer.c
···
  /*
   * Copyright (c) 2004 Topspin Corporation. All rights reserved.
+  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
   *
   * This software is available to you under a choice of one of two
   * licenses. You may choose to be licensed under the terms of the GNU
···
   * $Id: packer.c 1349 2004-12-16 21:09:43Z roland $
   */

- #include <ib_pack.h>
+ #include <rdma/ib_pack.h>

  static u64 value_read(int offset, int size, void *structure)
  {
+3 -3
drivers/infiniband/core/sa_query.c
···
  /*
   * Copyright (c) 2004 Topspin Communications. All rights reserved.
-  * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
+  * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
   *
   * This software is available to you under a choice of one of two
   * licenses. You may choose to be licensed under the terms of the GNU
···
  #include <linux/kref.h>
  #include <linux/idr.h>

- #include <ib_pack.h>
- #include <ib_sa.h>
+ #include <rdma/ib_pack.h>
+ #include <rdma/ib_sa.h>

  MODULE_AUTHOR("Roland Dreier");
  MODULE_DESCRIPTION("InfiniBand subnet administration query support");
+7 -6
drivers/infiniband/core/smi.c
···
  /*
-  * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
-  * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
-  * Copyright (c) 2004 Intel Corporation. All rights reserved.
-  * Copyright (c) 2004 Topspin Corporation. All rights reserved.
-  * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
+  * Copyright (c) 2004, 2005 Mellanox Technologies Ltd. All rights reserved.
+  * Copyright (c) 2004, 2005 Infinicon Corporation. All rights reserved.
+  * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved.
+  * Copyright (c) 2004, 2005 Topspin Corporation. All rights reserved.
+  * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
+  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
   *
   * This software is available to you under a choice of one of two
   * licenses. You may choose to be licensed under the terms of the GNU
···
   * $Id: smi.c 1389 2004-12-27 22:56:47Z roland $
   */

- #include <ib_smi.h>
+ #include <rdma/ib_smi.h>
  #include "smi.h"

  /*
+21 -19
drivers/infiniband/core/sysfs.c
···
  /*
   * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+  * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
+  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
   *
   * This software is available to you under a choice of one of two
   * licenses. You may choose to be licensed under the terms of the GNU
···

  #include "core_priv.h"

- #include <ib_mad.h>
+ #include <rdma/ib_mad.h>

  struct ib_port {
  	struct kobject kobj;
···
  		return ret;

  	return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
- 		       be16_to_cpu(((u16 *) gid.raw)[0]),
- 		       be16_to_cpu(((u16 *) gid.raw)[1]),
- 		       be16_to_cpu(((u16 *) gid.raw)[2]),
- 		       be16_to_cpu(((u16 *) gid.raw)[3]),
- 		       be16_to_cpu(((u16 *) gid.raw)[4]),
- 		       be16_to_cpu(((u16 *) gid.raw)[5]),
- 		       be16_to_cpu(((u16 *) gid.raw)[6]),
- 		       be16_to_cpu(((u16 *) gid.raw)[7]));
+ 		       be16_to_cpu(((__be16 *) gid.raw)[0]),
+ 		       be16_to_cpu(((__be16 *) gid.raw)[1]),
+ 		       be16_to_cpu(((__be16 *) gid.raw)[2]),
+ 		       be16_to_cpu(((__be16 *) gid.raw)[3]),
+ 		       be16_to_cpu(((__be16 *) gid.raw)[4]),
+ 		       be16_to_cpu(((__be16 *) gid.raw)[5]),
+ 		       be16_to_cpu(((__be16 *) gid.raw)[6]),
+ 		       be16_to_cpu(((__be16 *) gid.raw)[7]));
  }

  static ssize_t show_port_pkey(struct ib_port *p, struct port_attribute *attr,
···
  		break;
  	case 16:
  		ret = sprintf(buf, "%u\n",
- 			      be16_to_cpup((u16 *)(out_mad->data + 40 + offset / 8)));
+ 			      be16_to_cpup((__be16 *)(out_mad->data + 40 + offset / 8)));
  		break;
  	case 32:
  		ret = sprintf(buf, "%u\n",
- 			      be32_to_cpup((u32 *)(out_mad->data + 40 + offset / 8)));
+ 			      be32_to_cpup((__be32 *)(out_mad->data + 40 + offset / 8)));
  		break;
  	default:
  		ret = 0;
···
  		return ret;

  	return sprintf(buf, "%04x:%04x:%04x:%04x\n",
- 		       be16_to_cpu(((u16 *) &attr.sys_image_guid)[0]),
- 		       be16_to_cpu(((u16 *) &attr.sys_image_guid)[1]),
- 		       be16_to_cpu(((u16 *) &attr.sys_image_guid)[2]),
- 		       be16_to_cpu(((u16 *) &attr.sys_image_guid)[3]));
+ 		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[0]),
+ 		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[1]),
+ 		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[2]),
+ 		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[3]));
  }

  static ssize_t show_node_guid(struct class_device *cdev, char *buf)
···
  		return ret;

  	return sprintf(buf, "%04x:%04x:%04x:%04x\n",
- 		       be16_to_cpu(((u16 *) &attr.node_guid)[0]),
- 		       be16_to_cpu(((u16 *) &attr.node_guid)[1]),
- 		       be16_to_cpu(((u16 *) &attr.node_guid)[2]),
- 		       be16_to_cpu(((u16 *) &attr.node_guid)[3]));
+ 		       be16_to_cpu(((__be16 *) &attr.node_guid)[0]),
+ 		       be16_to_cpu(((__be16 *) &attr.node_guid)[1]),
+ 		       be16_to_cpu(((__be16 *) &attr.node_guid)[2]),
+ 		       be16_to_cpu(((__be16 *) &attr.node_guid)[3]));
  }

  static CLASS_DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL);
+157 -307
drivers/infiniband/core/ucm.c
··· 1 1 /* 2 2 * Copyright (c) 2005 Topspin Communications. All rights reserved. 3 + * Copyright (c) 2005 Intel Corporation. All rights reserved. 3 4 * 4 5 * This software is available to you under a choice of one of two 5 6 * licenses. You may choose to be licensed under the terms of the GNU ··· 74 73 static struct idr ctx_id_table; 75 74 static int ctx_id_rover = 0; 76 75 77 - static struct ib_ucm_context *ib_ucm_ctx_get(int id) 76 + static struct ib_ucm_context *ib_ucm_ctx_get(struct ib_ucm_file *file, int id) 78 77 { 79 78 struct ib_ucm_context *ctx; 80 79 81 80 down(&ctx_id_mutex); 82 81 ctx = idr_find(&ctx_id_table, id); 83 - if (ctx) 84 - ctx->ref++; 82 + if (!ctx) 83 + ctx = ERR_PTR(-ENOENT); 84 + else if (ctx->file != file) 85 + ctx = ERR_PTR(-EINVAL); 86 + else 87 + atomic_inc(&ctx->ref); 85 88 up(&ctx_id_mutex); 86 89 87 90 return ctx; ··· 93 88 94 89 static void ib_ucm_ctx_put(struct ib_ucm_context *ctx) 95 90 { 91 + if (atomic_dec_and_test(&ctx->ref)) 92 + wake_up(&ctx->wait); 93 + } 94 + 95 + static ssize_t ib_ucm_destroy_ctx(struct ib_ucm_file *file, int id) 96 + { 97 + struct ib_ucm_context *ctx; 96 98 struct ib_ucm_event *uevent; 97 99 98 100 down(&ctx_id_mutex); 99 - 100 - ctx->ref--; 101 - if (!ctx->ref) 101 + ctx = idr_find(&ctx_id_table, id); 102 + if (!ctx) 103 + ctx = ERR_PTR(-ENOENT); 104 + else if (ctx->file != file) 105 + ctx = ERR_PTR(-EINVAL); 106 + else 102 107 idr_remove(&ctx_id_table, ctx->id); 103 - 104 108 up(&ctx_id_mutex); 105 109 106 - if (ctx->ref) 107 - return; 110 + if (IS_ERR(ctx)) 111 + return PTR_ERR(ctx); 108 112 109 - down(&ctx->file->mutex); 113 + atomic_dec(&ctx->ref); 114 + wait_event(ctx->wait, !atomic_read(&ctx->ref)); 110 115 116 + /* No new events will be generated after destroying the cm_id. */ 117 + if (!IS_ERR(ctx->cm_id)) 118 + ib_destroy_cm_id(ctx->cm_id); 119 + 120 + /* Cleanup events not yet reported to the user. 
*/ 121 + down(&file->mutex); 111 122 list_del(&ctx->file_list); 112 123 while (!list_empty(&ctx->events)) { 113 124 ··· 138 117 139 118 kfree(uevent); 140 119 } 120 + up(&file->mutex); 141 121 142 - up(&ctx->file->mutex); 143 - 144 - ucm_dbg("Destroyed CM ID <%d>\n", ctx->id); 145 - 146 - ib_destroy_cm_id(ctx->cm_id); 147 122 kfree(ctx); 123 + return 0; 148 124 } 149 125 150 126 static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file) ··· 153 135 if (!ctx) 154 136 return NULL; 155 137 156 - ctx->ref = 1; /* user reference */ 138 + atomic_set(&ctx->ref, 1); 139 + init_waitqueue_head(&ctx->wait); 157 140 ctx->file = file; 158 141 159 142 INIT_LIST_HEAD(&ctx->events); 160 - init_MUTEX(&ctx->mutex); 161 143 162 144 list_add_tail(&ctx->file_list, &file->ctxs); 163 145 ··· 195 177 if (!kpath || !upath) 196 178 return; 197 179 198 - memcpy(upath->dgid, kpath->dgid.raw, sizeof(union ib_gid)); 199 - memcpy(upath->sgid, kpath->sgid.raw, sizeof(union ib_gid)); 180 + memcpy(upath->dgid, kpath->dgid.raw, sizeof *upath->dgid); 181 + memcpy(upath->sgid, kpath->sgid.raw, sizeof *upath->sgid); 200 182 201 183 upath->dlid = kpath->dlid; 202 184 upath->slid = kpath->slid; ··· 219 201 kpath->packet_life_time_selector; 220 202 } 221 203 222 - static void ib_ucm_event_req_get(struct ib_ucm_req_event_resp *ureq, 204 + static void ib_ucm_event_req_get(struct ib_ucm_context *ctx, 205 + struct ib_ucm_req_event_resp *ureq, 223 206 struct ib_cm_req_event_param *kreq) 224 207 { 225 - ureq->listen_id = (long)kreq->listen_id->context; 208 + ureq->listen_id = ctx->id; 226 209 227 210 ureq->remote_ca_guid = kreq->remote_ca_guid; 228 211 ureq->remote_qkey = kreq->remote_qkey; ··· 259 240 urep->srq = krep->srq; 260 241 } 261 242 262 - static void ib_ucm_event_rej_get(struct ib_ucm_rej_event_resp *urej, 263 - struct ib_cm_rej_event_param *krej) 264 - { 265 - urej->reason = krej->reason; 266 - } 267 - 268 - static void ib_ucm_event_mra_get(struct ib_ucm_mra_event_resp *umra, 269 - struct ib_cm_mra_event_param *kmra) 270 - { 271 - umra->timeout = kmra->service_timeout; 272 - } 273 - 274 - static void ib_ucm_event_lap_get(struct ib_ucm_lap_event_resp *ulap, 275 - struct ib_cm_lap_event_param *klap) 276 - { 277 - ib_ucm_event_path_get(&ulap->path, klap->alternate_path); 278 - } 279 - 280 - static void ib_ucm_event_apr_get(struct ib_ucm_apr_event_resp *uapr, 281 - struct ib_cm_apr_event_param *kapr) 282 - { 283 - uapr->status = kapr->ap_status; 284 - } 285 - 286 - static void ib_ucm_event_sidr_req_get(struct ib_ucm_sidr_req_event_resp *ureq, 243 + static void ib_ucm_event_sidr_req_get(struct ib_ucm_context *ctx, 244 + struct ib_ucm_sidr_req_event_resp *ureq, 287 245 struct ib_cm_sidr_req_event_param *kreq) 288 246 { 289 - ureq->listen_id = (long)kreq->listen_id->context; 247 + ureq->listen_id = ctx->id; 290 248 ureq->pkey = kreq->pkey; 291 249 } 292 250 ··· 275 279 urep->qpn = krep->qpn; 276 280 }; 277 281 278 - static int ib_ucm_event_process(struct ib_cm_event *evt, 282 + static int ib_ucm_event_process(struct ib_ucm_context *ctx, 283 + struct ib_cm_event *evt, 279 284 struct ib_ucm_event *uvt) 280 285 { 281 286 void *info = NULL; 282 - int result; 283 287 284 288 switch (evt->event) { 285 289 case IB_CM_REQ_RECEIVED: 286 - ib_ucm_event_req_get(&uvt->resp.u.req_resp, 290 + ib_ucm_event_req_get(ctx, &uvt->resp.u.req_resp, 287 291 &evt->param.req_rcvd); 288 292 uvt->data_len = IB_CM_REQ_PRIVATE_DATA_SIZE; 289 - uvt->resp.present |= (evt->param.req_rcvd.primary_path ? 
290 - IB_UCM_PRES_PRIMARY : 0); 293 + uvt->resp.present = IB_UCM_PRES_PRIMARY; 291 294 uvt->resp.present |= (evt->param.req_rcvd.alternate_path ? 292 295 IB_UCM_PRES_ALTERNATE : 0); 293 296 break; ··· 294 299 ib_ucm_event_rep_get(&uvt->resp.u.rep_resp, 295 300 &evt->param.rep_rcvd); 296 301 uvt->data_len = IB_CM_REP_PRIVATE_DATA_SIZE; 297 - 298 302 break; 299 303 case IB_CM_RTU_RECEIVED: 300 304 uvt->data_len = IB_CM_RTU_PRIVATE_DATA_SIZE; 301 305 uvt->resp.u.send_status = evt->param.send_status; 302 - 303 306 break; 304 307 case IB_CM_DREQ_RECEIVED: 305 308 uvt->data_len = IB_CM_DREQ_PRIVATE_DATA_SIZE; 306 309 uvt->resp.u.send_status = evt->param.send_status; 307 - 308 310 break; 309 311 case IB_CM_DREP_RECEIVED: 310 312 uvt->data_len = IB_CM_DREP_PRIVATE_DATA_SIZE; 311 313 uvt->resp.u.send_status = evt->param.send_status; 312 - 313 314 break; 314 315 case IB_CM_MRA_RECEIVED: 315 - ib_ucm_event_mra_get(&uvt->resp.u.mra_resp, 316 - &evt->param.mra_rcvd); 316 + uvt->resp.u.mra_resp.timeout = 317 + evt->param.mra_rcvd.service_timeout; 317 318 uvt->data_len = IB_CM_MRA_PRIVATE_DATA_SIZE; 318 - 319 319 break; 320 320 case IB_CM_REJ_RECEIVED: 321 - ib_ucm_event_rej_get(&uvt->resp.u.rej_resp, 322 - &evt->param.rej_rcvd); 321 + uvt->resp.u.rej_resp.reason = evt->param.rej_rcvd.reason; 323 322 uvt->data_len = IB_CM_REJ_PRIVATE_DATA_SIZE; 324 323 uvt->info_len = evt->param.rej_rcvd.ari_length; 325 324 info = evt->param.rej_rcvd.ari; 326 - 327 325 break; 328 326 case IB_CM_LAP_RECEIVED: 329 - ib_ucm_event_lap_get(&uvt->resp.u.lap_resp, 330 - &evt->param.lap_rcvd); 327 + ib_ucm_event_path_get(&uvt->resp.u.lap_resp.path, 328 + evt->param.lap_rcvd.alternate_path); 331 329 uvt->data_len = IB_CM_LAP_PRIVATE_DATA_SIZE; 332 - uvt->resp.present |= (evt->param.lap_rcvd.alternate_path ? 
333 - IB_UCM_PRES_ALTERNATE : 0); 330 + uvt->resp.present = IB_UCM_PRES_ALTERNATE; 334 331 break; 335 332 case IB_CM_APR_RECEIVED: 336 - ib_ucm_event_apr_get(&uvt->resp.u.apr_resp, 337 - &evt->param.apr_rcvd); 333 + uvt->resp.u.apr_resp.status = evt->param.apr_rcvd.ap_status; 338 334 uvt->data_len = IB_CM_APR_PRIVATE_DATA_SIZE; 339 335 uvt->info_len = evt->param.apr_rcvd.info_len; 340 336 info = evt->param.apr_rcvd.apr_info; 341 - 342 337 break; 343 338 case IB_CM_SIDR_REQ_RECEIVED: 344 - ib_ucm_event_sidr_req_get(&uvt->resp.u.sidr_req_resp, 339 + ib_ucm_event_sidr_req_get(ctx, &uvt->resp.u.sidr_req_resp, 345 340 &evt->param.sidr_req_rcvd); 346 341 uvt->data_len = IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE; 347 - 348 342 break; 349 343 case IB_CM_SIDR_REP_RECEIVED: 350 344 ib_ucm_event_sidr_rep_get(&uvt->resp.u.sidr_rep_resp, ··· 341 357 uvt->data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE; 342 358 uvt->info_len = evt->param.sidr_rep_rcvd.info_len; 343 359 info = evt->param.sidr_rep_rcvd.info; 344 - 345 360 break; 346 361 default: 347 362 uvt->resp.u.send_status = evt->param.send_status; 348 - 349 363 break; 350 364 } 351 365 352 - if (uvt->data_len && evt->private_data) { 353 - 366 + if (uvt->data_len) { 354 367 uvt->data = kmalloc(uvt->data_len, GFP_KERNEL); 355 - if (!uvt->data) { 356 - result = -ENOMEM; 357 - goto error; 358 - } 368 + if (!uvt->data) 369 + goto err1; 359 370 360 371 memcpy(uvt->data, evt->private_data, uvt->data_len); 361 372 uvt->resp.present |= IB_UCM_PRES_DATA; 362 373 } 363 374 364 - if (uvt->info_len && info) { 365 - 375 + if (uvt->info_len) { 366 376 uvt->info = kmalloc(uvt->info_len, GFP_KERNEL); 367 - if (!uvt->info) { 368 - result = -ENOMEM; 369 - goto error; 370 - } 377 + if (!uvt->info) 378 + goto err2; 371 379 372 380 memcpy(uvt->info, info, uvt->info_len); 373 381 uvt->resp.present |= IB_UCM_PRES_INFO; 374 382 } 375 - 376 383 return 0; 377 - error: 378 - kfree(uvt->info); 384 + 385 + err2: 379 386 kfree(uvt->data); 380 - return result; 387 + err1: 388 + return -ENOMEM; 381 389 } 382 390 383 391 static int ib_ucm_event_handler(struct ib_cm_id *cm_id, ··· 379 403 struct ib_ucm_context *ctx; 380 404 int result = 0; 381 405 int id; 382 - /* 383 - * lookup correct context based on event type. 384 - */ 385 - switch (event->event) { 386 - case IB_CM_REQ_RECEIVED: 387 - id = (long)event->param.req_rcvd.listen_id->context; 388 - break; 389 - case IB_CM_SIDR_REQ_RECEIVED: 390 - id = (long)event->param.sidr_req_rcvd.listen_id->context; 391 - break; 392 - default: 393 - id = (long)cm_id->context; 394 - break; 395 - } 396 406 397 - ucm_dbg("Event. CM ID <%d> event <%d>\n", id, event->event); 398 - 399 - ctx = ib_ucm_ctx_get(id); 400 - if (!ctx) 401 - return -ENOENT; 407 + ctx = cm_id->context; 402 408 403 409 if (event->event == IB_CM_REQ_RECEIVED || 404 410 event->event == IB_CM_SIDR_REQ_RECEIVED) 405 411 id = IB_UCM_CM_ID_INVALID; 412 + else 413 + id = ctx->id; 406 414 407 415 uevent = kmalloc(sizeof(*uevent), GFP_KERNEL); 408 - if (!uevent) { 409 - result = -ENOMEM; 410 - goto done; 411 - } 416 + if (!uevent) 417 + goto err1; 412 418 413 419 memset(uevent, 0, sizeof(*uevent)); 414 - 415 420 uevent->resp.id = id; 416 421 uevent->resp.event = event->event; 417 422 418 - result = ib_ucm_event_process(event, uevent); 423 + result = ib_ucm_event_process(ctx, event, uevent); 419 424 if (result) 420 - goto done; 425 + goto err2; 421 426 422 427 uevent->ctx = ctx; 423 - uevent->cm_id = ((event->event == IB_CM_REQ_RECEIVED || 424 - event->event == IB_CM_SIDR_REQ_RECEIVED ) ? 
425 - cm_id : NULL); 428 + uevent->cm_id = (id == IB_UCM_CM_ID_INVALID) ? cm_id : NULL; 426 429 427 430 down(&ctx->file->mutex); 428 - 429 431 list_add_tail(&uevent->file_list, &ctx->file->events); 430 432 list_add_tail(&uevent->ctx_list, &ctx->events); 431 - 432 433 wake_up_interruptible(&ctx->file->poll_wait); 433 - 434 434 up(&ctx->file->mutex); 435 - done: 436 - ctx->error = result; 437 - ib_ucm_ctx_put(ctx); /* func reference */ 438 - return result; 435 + return 0; 436 + 437 + err2: 438 + kfree(uevent); 439 + err1: 440 + /* Destroy new cm_id's */ 441 + return (id == IB_UCM_CM_ID_INVALID); 439 442 } 440 443 441 444 static ssize_t ib_ucm_event(struct ib_ucm_file *file, ··· 472 517 goto done; 473 518 } 474 519 475 - ctx->cm_id = uevent->cm_id; 476 - ctx->cm_id->cm_handler = ib_ucm_event_handler; 477 - ctx->cm_id->context = (void *)(unsigned long)ctx->id; 520 + ctx->cm_id = uevent->cm_id; 521 + ctx->cm_id->context = ctx; 478 522 479 523 uevent->resp.id = ctx->id; 480 524 ··· 539 585 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 540 586 return -EFAULT; 541 587 588 + down(&file->mutex); 542 589 ctx = ib_ucm_ctx_alloc(file); 590 + up(&file->mutex); 543 591 if (!ctx) 544 592 return -ENOMEM; 545 593 546 - ctx->cm_id = ib_create_cm_id(ib_ucm_event_handler, 547 - (void *)(unsigned long)ctx->id); 548 - if (!ctx->cm_id) { 549 - result = -ENOMEM; 550 - goto err_cm; 594 + ctx->cm_id = ib_create_cm_id(ib_ucm_event_handler, ctx); 595 + if (IS_ERR(ctx->cm_id)) { 596 + result = PTR_ERR(ctx->cm_id); 597 + goto err; 551 598 } 552 599 553 600 resp.id = ctx->id; 554 601 if (copy_to_user((void __user *)(unsigned long)cmd.response, 555 602 &resp, sizeof(resp))) { 556 603 result = -EFAULT; 557 - goto err_ret; 604 + goto err; 558 605 } 559 606 560 607 return 0; 561 - err_ret: 562 - ib_destroy_cm_id(ctx->cm_id); 563 - err_cm: 564 - ib_ucm_ctx_put(ctx); /* user reference */ 565 608 609 + err: 610 + ib_ucm_destroy_ctx(file, ctx->id); 566 611 return result; 567 612 } 568 613 ··· 570 617 int in_len, int out_len) 571 618 { 572 619 struct ib_ucm_destroy_id cmd; 573 - struct ib_ucm_context *ctx; 574 620 575 621 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 576 622 return -EFAULT; 577 623 578 - ctx = ib_ucm_ctx_get(cmd.id); 579 - if (!ctx) 580 - return -ENOENT; 581 - 582 - ib_ucm_ctx_put(ctx); /* user reference */ 583 - ib_ucm_ctx_put(ctx); /* func reference */ 584 - 585 - return 0; 624 + return ib_ucm_destroy_ctx(file, cmd.id); 586 625 } 587 626 588 627 static ssize_t ib_ucm_attr_id(struct ib_ucm_file *file, ··· 592 647 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 593 648 return -EFAULT; 594 649 595 - ctx = ib_ucm_ctx_get(cmd.id); 596 - if (!ctx) 597 - return -ENOENT; 598 - 599 - down(&ctx->file->mutex); 600 - if (ctx->file != file) { 601 - result = -EINVAL; 602 - goto done; 603 - } 650 + ctx = ib_ucm_ctx_get(file, cmd.id); 651 + if (IS_ERR(ctx)) 652 + return PTR_ERR(ctx); 604 653 605 654 resp.service_id = ctx->cm_id->service_id; 606 655 resp.service_mask = ctx->cm_id->service_mask; ··· 605 666 &resp, sizeof(resp))) 606 667 result = -EFAULT; 607 668 608 - done: 609 - up(&ctx->file->mutex); 610 - ib_ucm_ctx_put(ctx); /* func reference */ 669 + ib_ucm_ctx_put(ctx); 611 670 return result; 612 671 } 613 672 ··· 620 683 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 621 684 return -EFAULT; 622 685 623 - ctx = ib_ucm_ctx_get(cmd.id); 624 - if (!ctx) 625 - return -ENOENT; 686 + ctx = ib_ucm_ctx_get(file, cmd.id); 687 + if (IS_ERR(ctx)) 688 + return PTR_ERR(ctx); 626 689 627 - down(&ctx->file->mutex); 628 - if (ctx->file != 
file) 629 - result = -EINVAL; 630 - else 631 - result = ib_cm_listen(ctx->cm_id, cmd.service_id, 632 - cmd.service_mask); 633 - 634 - up(&ctx->file->mutex); 635 - ib_ucm_ctx_put(ctx); /* func reference */ 690 + result = ib_cm_listen(ctx->cm_id, cmd.service_id, cmd.service_mask); 691 + ib_ucm_ctx_put(ctx); 636 692 return result; 637 693 } 638 694 ··· 640 710 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 641 711 return -EFAULT; 642 712 643 - ctx = ib_ucm_ctx_get(cmd.id); 644 - if (!ctx) 645 - return -ENOENT; 713 + ctx = ib_ucm_ctx_get(file, cmd.id); 714 + if (IS_ERR(ctx)) 715 + return PTR_ERR(ctx); 646 716 647 - down(&ctx->file->mutex); 648 - if (ctx->file != file) 649 - result = -EINVAL; 650 - else 651 - result = ib_cm_establish(ctx->cm_id); 652 - 653 - up(&ctx->file->mutex); 654 - ib_ucm_ctx_put(ctx); /* func reference */ 717 + result = ib_cm_establish(ctx->cm_id); 718 + ib_ucm_ctx_put(ctx); 655 719 return result; 656 720 } 657 721 ··· 692 768 return -EFAULT; 693 769 } 694 770 695 - memcpy(sa_path->dgid.raw, ucm_path.dgid, sizeof(union ib_gid)); 696 - memcpy(sa_path->sgid.raw, ucm_path.sgid, sizeof(union ib_gid)); 771 + memcpy(sa_path->dgid.raw, ucm_path.dgid, sizeof sa_path->dgid); 772 + memcpy(sa_path->sgid.raw, ucm_path.sgid, sizeof sa_path->sgid); 697 773 698 774 sa_path->dlid = ucm_path.dlid; 699 775 sa_path->slid = ucm_path.slid; ··· 763 839 param.max_cm_retries = cmd.max_cm_retries; 764 840 param.srq = cmd.srq; 765 841 766 - ctx = ib_ucm_ctx_get(cmd.id); 767 - if (!ctx) { 768 - result = -ENOENT; 769 - goto done; 770 - } 771 - 772 - down(&ctx->file->mutex); 773 - if (ctx->file != file) 774 - result = -EINVAL; 775 - else 842 + ctx = ib_ucm_ctx_get(file, cmd.id); 843 + if (!IS_ERR(ctx)) { 776 844 result = ib_send_cm_req(ctx->cm_id, &param); 845 + ib_ucm_ctx_put(ctx); 846 + } else 847 + result = PTR_ERR(ctx); 777 848 778 - up(&ctx->file->mutex); 779 - ib_ucm_ctx_put(ctx); /* func reference */ 780 849 done: 781 850 kfree(param.private_data); 782 851 kfree(param.primary_path); 783 852 kfree(param.alternate_path); 784 - 785 853 return result; 786 854 } 787 855 ··· 806 890 param.rnr_retry_count = cmd.rnr_retry_count; 807 891 param.srq = cmd.srq; 808 892 809 - ctx = ib_ucm_ctx_get(cmd.id); 810 - if (!ctx) { 811 - result = -ENOENT; 812 - goto done; 813 - } 814 - 815 - down(&ctx->file->mutex); 816 - if (ctx->file != file) 817 - result = -EINVAL; 818 - else 893 + ctx = ib_ucm_ctx_get(file, cmd.id); 894 + if (!IS_ERR(ctx)) { 819 895 result = ib_send_cm_rep(ctx->cm_id, &param); 896 + ib_ucm_ctx_put(ctx); 897 + } else 898 + result = PTR_ERR(ctx); 820 899 821 - up(&ctx->file->mutex); 822 - ib_ucm_ctx_put(ctx); /* func reference */ 823 - done: 824 900 kfree(param.private_data); 825 - 826 901 return result; 827 902 } 828 903 ··· 835 928 if (result) 836 929 return result; 837 930 838 - ctx = ib_ucm_ctx_get(cmd.id); 839 - if (!ctx) { 840 - result = -ENOENT; 841 - goto done; 842 - } 843 - 844 - down(&ctx->file->mutex); 845 - if (ctx->file != file) 846 - result = -EINVAL; 847 - else 931 + ctx = ib_ucm_ctx_get(file, cmd.id); 932 + if (!IS_ERR(ctx)) { 848 933 result = func(ctx->cm_id, private_data, cmd.len); 934 + ib_ucm_ctx_put(ctx); 935 + } else 936 + result = PTR_ERR(ctx); 849 937 850 - up(&ctx->file->mutex); 851 - ib_ucm_ctx_put(ctx); /* func reference */ 852 - done: 853 938 kfree(private_data); 854 - 855 939 return result; 856 940 } 857 941 ··· 893 995 if (result) 894 996 goto done; 895 997 896 - ctx = ib_ucm_ctx_get(cmd.id); 897 - if (!ctx) { 898 - result = -ENOENT; 899 - goto done; 900 - } 901 - 902 
- down(&ctx->file->mutex); 903 - if (ctx->file != file) 904 - result = -EINVAL; 905 - else 906 - result = func(ctx->cm_id, cmd.status, 907 - info, cmd.info_len, 998 + ctx = ib_ucm_ctx_get(file, cmd.id); 999 + if (!IS_ERR(ctx)) { 1000 + result = func(ctx->cm_id, cmd.status, info, cmd.info_len, 908 1001 data, cmd.data_len); 1002 + ib_ucm_ctx_put(ctx); 1003 + } else 1004 + result = PTR_ERR(ctx); 909 1005 910 - up(&ctx->file->mutex); 911 - ib_ucm_ctx_put(ctx); /* func reference */ 912 1006 done: 913 1007 kfree(data); 914 1008 kfree(info); 915 - 916 1009 return result; 917 1010 } 918 1011 ··· 937 1048 if (result) 938 1049 return result; 939 1050 940 - ctx = ib_ucm_ctx_get(cmd.id); 941 - if (!ctx) { 942 - result = -ENOENT; 943 - goto done; 944 - } 1051 + ctx = ib_ucm_ctx_get(file, cmd.id); 1052 + if (!IS_ERR(ctx)) { 1053 + result = ib_send_cm_mra(ctx->cm_id, cmd.timeout, data, cmd.len); 1054 + ib_ucm_ctx_put(ctx); 1055 + } else 1056 + result = PTR_ERR(ctx); 945 1057 946 - down(&ctx->file->mutex); 947 - if (ctx->file != file) 948 - result = -EINVAL; 949 - else 950 - result = ib_send_cm_mra(ctx->cm_id, cmd.timeout, 951 - data, cmd.len); 952 - 953 - up(&ctx->file->mutex); 954 - ib_ucm_ctx_put(ctx); /* func reference */ 955 - done: 956 1058 kfree(data); 957 - 958 1059 return result; 959 1060 } 960 1061 ··· 969 1090 if (result) 970 1091 goto done; 971 1092 972 - ctx = ib_ucm_ctx_get(cmd.id); 973 - if (!ctx) { 974 - result = -ENOENT; 975 - goto done; 976 - } 977 - 978 - down(&ctx->file->mutex); 979 - if (ctx->file != file) 980 - result = -EINVAL; 981 - else 1093 + ctx = ib_ucm_ctx_get(file, cmd.id); 1094 + if (!IS_ERR(ctx)) { 982 1095 result = ib_send_cm_lap(ctx->cm_id, path, data, cmd.len); 1096 + ib_ucm_ctx_put(ctx); 1097 + } else 1098 + result = PTR_ERR(ctx); 983 1099 984 - up(&ctx->file->mutex); 985 - ib_ucm_ctx_put(ctx); /* func reference */ 986 1100 done: 987 1101 kfree(data); 988 1102 kfree(path); 989 - 990 1103 return result; 991 1104 } 992 1105 ··· 1011 1140 param.max_cm_retries = cmd.max_cm_retries; 1012 1141 param.pkey = cmd.pkey; 1013 1142 1014 - ctx = ib_ucm_ctx_get(cmd.id); 1015 - if (!ctx) { 1016 - result = -ENOENT; 1017 - goto done; 1018 - } 1019 - 1020 - down(&ctx->file->mutex); 1021 - if (ctx->file != file) 1022 - result = -EINVAL; 1023 - else 1143 + ctx = ib_ucm_ctx_get(file, cmd.id); 1144 + if (!IS_ERR(ctx)) { 1024 1145 result = ib_send_cm_sidr_req(ctx->cm_id, &param); 1146 + ib_ucm_ctx_put(ctx); 1147 + } else 1148 + result = PTR_ERR(ctx); 1025 1149 1026 - up(&ctx->file->mutex); 1027 - ib_ucm_ctx_put(ctx); /* func reference */ 1028 1150 done: 1029 1151 kfree(param.private_data); 1030 1152 kfree(param.path); 1031 - 1032 1153 return result; 1033 1154 } 1034 1155 ··· 1047 1184 if (result) 1048 1185 goto done; 1049 1186 1050 - param.qp_num = cmd.qpn; 1051 - param.qkey = cmd.qkey; 1052 - param.status = cmd.status; 1053 - param.info_length = cmd.info_len; 1054 - param.private_data_len = cmd.data_len; 1187 + param.qp_num = cmd.qpn; 1188 + param.qkey = cmd.qkey; 1189 + param.status = cmd.status; 1190 + param.info_length = cmd.info_len; 1191 + param.private_data_len = cmd.data_len; 1055 1192 1056 - ctx = ib_ucm_ctx_get(cmd.id); 1057 - if (!ctx) { 1058 - result = -ENOENT; 1059 - goto done; 1060 - } 1061 - 1062 - down(&ctx->file->mutex); 1063 - if (ctx->file != file) 1064 - result = -EINVAL; 1065 - else 1193 + ctx = ib_ucm_ctx_get(file, cmd.id); 1194 + if (!IS_ERR(ctx)) { 1066 1195 result = ib_send_cm_sidr_rep(ctx->cm_id, &param); 1196 + ib_ucm_ctx_put(ctx); 1197 + } else 1198 + result = 
PTR_ERR(ctx); 1067 1199 1068 - up(&ctx->file->mutex); 1069 - ib_ucm_ctx_put(ctx); /* func reference */ 1070 1200 done: 1071 1201 kfree(param.private_data); 1072 1202 kfree(param.info); 1073 - 1074 1203 return result; 1075 1204 } 1076 1205 ··· 1160 1305 struct ib_ucm_context *ctx; 1161 1306 1162 1307 down(&file->mutex); 1163 - 1164 1308 while (!list_empty(&file->ctxs)) { 1165 1309 1166 1310 ctx = list_entry(file->ctxs.next, 1167 1311 struct ib_ucm_context, file_list); 1168 1312 1169 - up(&ctx->file->mutex); 1170 - ib_ucm_ctx_put(ctx); /* user reference */ 1313 + up(&file->mutex); 1314 + ib_ucm_destroy_ctx(file, ctx->id); 1171 1315 down(&file->mutex); 1172 1316 } 1173 - 1174 1317 up(&file->mutex); 1175 - 1176 1318 kfree(file); 1177 - 1178 - ucm_dbg("Deleted struct\n"); 1179 1319 return 0; 1180 1320 } 1181 1321
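The ucm.c rework above changes how userspace CM contexts are kept alive: the integer reference count that lived under ctx_id_mutex becomes an atomic_t paired with a wait queue. Lookups pin the context for the duration of one command, and ib_ucm_destroy_ctx() first unlinks the id from the idr (so no new lookups can find it), drops the creation reference, and only then sleeps until every outstanding reference is gone before destroying the cm_id and the queued events. A minimal sketch of that lifetime pattern, reusing the field names from the patch and omitting error handling:

	/* command side: pin the context while we use its cm_id */
	down(&ctx_id_mutex);
	ctx = idr_find(&ctx_id_table, id);
	if (ctx && ctx->file == file)
		atomic_inc(&ctx->ref);
	up(&ctx_id_mutex);

	/* ... issue the CM call on ctx->cm_id ... */

	if (atomic_dec_and_test(&ctx->ref))		/* ib_ucm_ctx_put() */
		wake_up(&ctx->wait);

	/* destroy side: runs after idr_remove(), so no new references appear */
	atomic_dec(&ctx->ref);				/* drop the creation reference */
	wait_event(ctx->wait, !atomic_read(&ctx->ref));
	ib_destroy_cm_id(ctx->cm_id);			/* no events arrive past this point */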
+5 -8
drivers/infiniband/core/ucm.h
··· 40 40 #include <linux/cdev.h> 41 41 #include <linux/idr.h> 42 42 43 - #include <ib_cm.h> 44 - #include <ib_user_cm.h> 43 + #include <rdma/ib_cm.h> 44 + #include <rdma/ib_user_cm.h> 45 45 46 46 #define IB_UCM_CM_ID_INVALID 0xffffffff 47 47 48 48 struct ib_ucm_file { 49 49 struct semaphore mutex; 50 50 struct file *filp; 51 - /* 52 - * list of pending events 53 - */ 51 + 54 52 struct list_head ctxs; /* list of active connections */ 55 53 struct list_head events; /* list of pending events */ 56 54 wait_queue_head_t poll_wait; ··· 56 58 57 59 struct ib_ucm_context { 58 60 int id; 59 - int ref; 60 - int error; 61 + wait_queue_head_t wait; 62 + atomic_t ref; 61 63 62 64 struct ib_ucm_file *file; 63 65 struct ib_cm_id *cm_id; 64 - struct semaphore mutex; 65 66 66 67 struct list_head events; /* list of pending events. */ 67 68 struct list_head file_list; /* member in file ctx list */
+6 -5
drivers/infiniband/core/ud_header.c
··· 1 1 /* 2 2 * Copyright (c) 2004 Topspin Corporation. All rights reserved. 3 + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 3 4 * 4 5 * This software is available to you under a choice of one of two 5 6 * licenses. You may choose to be licensed under the terms of the GNU ··· 35 34 36 35 #include <linux/errno.h> 37 36 38 - #include <ib_pack.h> 37 + #include <rdma/ib_pack.h> 39 38 40 39 #define STRUCT_FIELD(header, field) \ 41 40 .struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \ ··· 195 194 struct ib_ud_header *header) 196 195 { 197 196 int header_len; 197 + u16 packet_length; 198 198 199 199 memset(header, 0, sizeof *header); 200 200 ··· 210 208 header->lrh.link_version = 0; 211 209 header->lrh.link_next_header = 212 210 grh_present ? IB_LNH_IBA_GLOBAL : IB_LNH_IBA_LOCAL; 213 - header->lrh.packet_length = (IB_LRH_BYTES + 211 + packet_length = (IB_LRH_BYTES + 214 212 IB_BTH_BYTES + 215 213 IB_DETH_BYTES + 216 214 payload_bytes + ··· 219 217 220 218 header->grh_present = grh_present; 221 219 if (grh_present) { 222 - header->lrh.packet_length += IB_GRH_BYTES / 4; 223 - 220 + packet_length += IB_GRH_BYTES / 4; 224 221 header->grh.ip_version = 6; 225 222 header->grh.payload_length = 226 223 cpu_to_be16((IB_BTH_BYTES + ··· 230 229 header->grh.next_header = 0x1b; 231 230 } 232 231 233 - cpu_to_be16s(&header->lrh.packet_length); 232 + header->lrh.packet_length = cpu_to_be16(packet_length); 234 233 235 234 if (header->immediate_present) 236 235 header->bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
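The ud_header.c hunk is an endianness cleanup rather than a functional change: lrh.packet_length is a big-endian field, so instead of accumulating a host-order value in it and patching it up afterwards with cpu_to_be16s(), the length (in 4-byte words) is now built in a local u16 and converted exactly once. A hedged sketch of the resulting pattern, where base_words stands in for the LRH/BTH/DETH/payload portion the function computes earlier:

	u16 packet_length = base_words;			/* host order while we build it */

	if (grh_present)
		packet_length += IB_GRH_BYTES / 4;	/* a GRH adds 40 bytes = 10 words */

	header->lrh.packet_length = cpu_to_be16(packet_length);	/* single conversion */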
+5 -5
drivers/infiniband/core/user_mad.c
··· 1 1 /* 2 2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 3 - * Copyright (c) 2005 Voltaire, Inc. All rights reserved. 3 + * Copyright (c) 2005 Voltaire, Inc. All rights reserved. 4 4 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 5 5 * 6 6 * This software is available to you under a choice of one of two ··· 49 49 #include <asm/uaccess.h> 50 50 #include <asm/semaphore.h> 51 51 52 - #include <ib_mad.h> 53 - #include <ib_user_mad.h> 52 + #include <rdma/ib_mad.h> 53 + #include <rdma/ib_user_mad.h> 54 54 55 55 MODULE_AUTHOR("Roland Dreier"); 56 56 MODULE_DESCRIPTION("InfiniBand userspace MAD packet access"); ··· 271 271 struct ib_send_wr *bad_wr; 272 272 struct ib_rmpp_mad *rmpp_mad; 273 273 u8 method; 274 - u64 *tid; 274 + __be64 *tid; 275 275 int ret, length, hdr_len, data_len, rmpp_hdr_size; 276 276 int rmpp_active = 0; 277 277 ··· 316 316 if (packet->mad.hdr.grh_present) { 317 317 ah_attr.ah_flags = IB_AH_GRH; 318 318 memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16); 319 - ah_attr.grh.flow_label = packet->mad.hdr.flow_label; 319 + ah_attr.grh.flow_label = be32_to_cpu(packet->mad.hdr.flow_label); 320 320 ah_attr.grh.hop_limit = packet->mad.hdr.hop_limit; 321 321 ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class; 322 322 }
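In user_mad.c the send path now byte-swaps the GRH flow label before handing it to the address handle attributes; the userspace MAD header keeps the field in network byte order (so it can be copied straight out of a received GRH), while ib_ah_attr.grh.flow_label is host order. A short sketch of building the GRH part of the AH attributes from such a packet header, under that assumption:

	memset(&ah_attr, 0, sizeof ah_attr);
	if (packet->mad.hdr.grh_present) {
		ah_attr.ah_flags = IB_AH_GRH;
		memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16);
		/* wire (big-endian) value -> host order for the verbs layer */
		ah_attr.grh.flow_label    = be32_to_cpu(packet->mad.hdr.flow_label);
		ah_attr.grh.hop_limit     = packet->mad.hdr.hop_limit;      /* single bytes, */
		ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class;  /* no swap needed */
	}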
+9 -2
drivers/infiniband/core/uverbs.h
··· 1 1 /* 2 2 * Copyright (c) 2005 Topspin Communications. All rights reserved. 3 3 * Copyright (c) 2005 Cisco Systems. All rights reserved. 4 + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. 5 + * Copyright (c) 2005 Voltaire, Inc. All rights reserved. 4 6 * 5 7 * This software is available to you under a choice of one of two 6 8 * licenses. You may choose to be licensed under the terms of the GNU ··· 45 43 #include <linux/kref.h> 46 44 #include <linux/idr.h> 47 45 48 - #include <ib_verbs.h> 49 - #include <ib_user_verbs.h> 46 + #include <rdma/ib_verbs.h> 47 + #include <rdma/ib_user_verbs.h> 50 48 51 49 struct ib_uverbs_device { 52 50 int devnum; ··· 99 97 extern struct idr ib_uverbs_ah_idr; 100 98 extern struct idr ib_uverbs_cq_idr; 101 99 extern struct idr ib_uverbs_qp_idr; 100 + extern struct idr ib_uverbs_srq_idr; 102 101 103 102 void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context); 104 103 void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr); 105 104 void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr); 105 + void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr); 106 106 107 107 int ib_umem_get(struct ib_device *dev, struct ib_umem *mem, 108 108 void *addr, size_t size, int write); ··· 133 129 IB_UVERBS_DECLARE_CMD(destroy_qp); 134 130 IB_UVERBS_DECLARE_CMD(attach_mcast); 135 131 IB_UVERBS_DECLARE_CMD(detach_mcast); 132 + IB_UVERBS_DECLARE_CMD(create_srq); 133 + IB_UVERBS_DECLARE_CMD(modify_srq); 134 + IB_UVERBS_DECLARE_CMD(destroy_srq); 136 135 137 136 #endif /* UVERBS_H */
+180 -2
drivers/infiniband/core/uverbs_cmd.c
··· 724 724 struct ib_uobject *uobj; 725 725 struct ib_pd *pd; 726 726 struct ib_cq *scq, *rcq; 727 + struct ib_srq *srq; 727 728 struct ib_qp *qp; 728 729 struct ib_qp_init_attr attr; 729 730 int ret; ··· 748 747 pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle); 749 748 scq = idr_find(&ib_uverbs_cq_idr, cmd.send_cq_handle); 750 749 rcq = idr_find(&ib_uverbs_cq_idr, cmd.recv_cq_handle); 750 + srq = cmd.is_srq ? idr_find(&ib_uverbs_srq_idr, cmd.srq_handle) : NULL; 751 751 752 752 if (!pd || pd->uobject->context != file->ucontext || 753 753 !scq || scq->uobject->context != file->ucontext || 754 - !rcq || rcq->uobject->context != file->ucontext) { 754 + !rcq || rcq->uobject->context != file->ucontext || 755 + (cmd.is_srq && (!srq || srq->uobject->context != file->ucontext))) { 755 756 ret = -EINVAL; 756 757 goto err_up; 757 758 } ··· 762 759 attr.qp_context = file; 763 760 attr.send_cq = scq; 764 761 attr.recv_cq = rcq; 765 - attr.srq = NULL; 762 + attr.srq = srq; 766 763 attr.sq_sig_type = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR; 767 764 attr.qp_type = cmd.qp_type; 768 765 ··· 1003 1000 if (qp && qp->uobject->context == file->ucontext) 1004 1001 ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid); 1005 1002 1003 + up(&ib_uverbs_idr_mutex); 1004 + 1005 + return ret ? ret : in_len; 1006 + } 1007 + 1008 + ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file, 1009 + const char __user *buf, int in_len, 1010 + int out_len) 1011 + { 1012 + struct ib_uverbs_create_srq cmd; 1013 + struct ib_uverbs_create_srq_resp resp; 1014 + struct ib_udata udata; 1015 + struct ib_uobject *uobj; 1016 + struct ib_pd *pd; 1017 + struct ib_srq *srq; 1018 + struct ib_srq_init_attr attr; 1019 + int ret; 1020 + 1021 + if (out_len < sizeof resp) 1022 + return -ENOSPC; 1023 + 1024 + if (copy_from_user(&cmd, buf, sizeof cmd)) 1025 + return -EFAULT; 1026 + 1027 + INIT_UDATA(&udata, buf + sizeof cmd, 1028 + (unsigned long) cmd.response + sizeof resp, 1029 + in_len - sizeof cmd, out_len - sizeof resp); 1030 + 1031 + uobj = kmalloc(sizeof *uobj, GFP_KERNEL); 1032 + if (!uobj) 1033 + return -ENOMEM; 1034 + 1035 + down(&ib_uverbs_idr_mutex); 1036 + 1037 + pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle); 1038 + 1039 + if (!pd || pd->uobject->context != file->ucontext) { 1040 + ret = -EINVAL; 1041 + goto err_up; 1042 + } 1043 + 1044 + attr.event_handler = ib_uverbs_srq_event_handler; 1045 + attr.srq_context = file; 1046 + attr.attr.max_wr = cmd.max_wr; 1047 + attr.attr.max_sge = cmd.max_sge; 1048 + attr.attr.srq_limit = cmd.srq_limit; 1049 + 1050 + uobj->user_handle = cmd.user_handle; 1051 + uobj->context = file->ucontext; 1052 + 1053 + srq = pd->device->create_srq(pd, &attr, &udata); 1054 + if (IS_ERR(srq)) { 1055 + ret = PTR_ERR(srq); 1056 + goto err_up; 1057 + } 1058 + 1059 + srq->device = pd->device; 1060 + srq->pd = pd; 1061 + srq->uobject = uobj; 1062 + srq->event_handler = attr.event_handler; 1063 + srq->srq_context = attr.srq_context; 1064 + atomic_inc(&pd->usecnt); 1065 + atomic_set(&srq->usecnt, 0); 1066 + 1067 + memset(&resp, 0, sizeof resp); 1068 + 1069 + retry: 1070 + if (!idr_pre_get(&ib_uverbs_srq_idr, GFP_KERNEL)) { 1071 + ret = -ENOMEM; 1072 + goto err_destroy; 1073 + } 1074 + 1075 + ret = idr_get_new(&ib_uverbs_srq_idr, srq, &uobj->id); 1076 + 1077 + if (ret == -EAGAIN) 1078 + goto retry; 1079 + if (ret) 1080 + goto err_destroy; 1081 + 1082 + resp.srq_handle = uobj->id; 1083 + 1084 + spin_lock_irq(&file->ucontext->lock); 1085 + list_add_tail(&uobj->list, &file->ucontext->srq_list); 1086 + 
spin_unlock_irq(&file->ucontext->lock); 1087 + 1088 + if (copy_to_user((void __user *) (unsigned long) cmd.response, 1089 + &resp, sizeof resp)) { 1090 + ret = -EFAULT; 1091 + goto err_list; 1092 + } 1093 + 1094 + up(&ib_uverbs_idr_mutex); 1095 + 1096 + return in_len; 1097 + 1098 + err_list: 1099 + spin_lock_irq(&file->ucontext->lock); 1100 + list_del(&uobj->list); 1101 + spin_unlock_irq(&file->ucontext->lock); 1102 + 1103 + err_destroy: 1104 + ib_destroy_srq(srq); 1105 + 1106 + err_up: 1107 + up(&ib_uverbs_idr_mutex); 1108 + 1109 + kfree(uobj); 1110 + return ret; 1111 + } 1112 + 1113 + ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file, 1114 + const char __user *buf, int in_len, 1115 + int out_len) 1116 + { 1117 + struct ib_uverbs_modify_srq cmd; 1118 + struct ib_srq *srq; 1119 + struct ib_srq_attr attr; 1120 + int ret; 1121 + 1122 + if (copy_from_user(&cmd, buf, sizeof cmd)) 1123 + return -EFAULT; 1124 + 1125 + down(&ib_uverbs_idr_mutex); 1126 + 1127 + srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle); 1128 + if (!srq || srq->uobject->context != file->ucontext) { 1129 + ret = -EINVAL; 1130 + goto out; 1131 + } 1132 + 1133 + attr.max_wr = cmd.max_wr; 1134 + attr.max_sge = cmd.max_sge; 1135 + attr.srq_limit = cmd.srq_limit; 1136 + 1137 + ret = ib_modify_srq(srq, &attr, cmd.attr_mask); 1138 + 1139 + out: 1140 + up(&ib_uverbs_idr_mutex); 1141 + 1142 + return ret ? ret : in_len; 1143 + } 1144 + 1145 + ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file, 1146 + const char __user *buf, int in_len, 1147 + int out_len) 1148 + { 1149 + struct ib_uverbs_destroy_srq cmd; 1150 + struct ib_srq *srq; 1151 + struct ib_uobject *uobj; 1152 + int ret = -EINVAL; 1153 + 1154 + if (copy_from_user(&cmd, buf, sizeof cmd)) 1155 + return -EFAULT; 1156 + 1157 + down(&ib_uverbs_idr_mutex); 1158 + 1159 + srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle); 1160 + if (!srq || srq->uobject->context != file->ucontext) 1161 + goto out; 1162 + 1163 + uobj = srq->uobject; 1164 + 1165 + ret = ib_destroy_srq(srq); 1166 + if (ret) 1167 + goto out; 1168 + 1169 + idr_remove(&ib_uverbs_srq_idr, cmd.srq_handle); 1170 + 1171 + spin_lock_irq(&file->ucontext->lock); 1172 + list_del(&uobj->list); 1173 + spin_unlock_irq(&file->ucontext->lock); 1174 + 1175 + kfree(uobj); 1176 + 1177 + out: 1006 1178 up(&ib_uverbs_idr_mutex); 1007 1179 1008 1180 return ret ? ret : in_len;
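Each of the new SRQ commands in uverbs_cmd.c repeats the same validation: the handle is looked up in the global idr under ib_uverbs_idr_mutex and accepted only if the object belongs to the calling process's ucontext. The patch keeps that test inline at every call site; a hypothetical helper showing the check it open-codes:

	/* caller must hold ib_uverbs_idr_mutex */
	static struct ib_srq *srq_from_handle(struct ib_uverbs_file *file, u32 handle)
	{
		struct ib_srq *srq = idr_find(&ib_uverbs_srq_idr, handle);

		if (!srq || srq->uobject->context != file->ucontext)
			return NULL;	/* unknown handle, or owned by another context */
		return srq;
	}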
+21 -1
drivers/infiniband/core/uverbs_main.c
··· 1 1 /* 2 2 * Copyright (c) 2005 Topspin Communications. All rights reserved. 3 3 * Copyright (c) 2005 Cisco Systems. All rights reserved. 4 + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. 5 + * Copyright (c) 2005 Voltaire, Inc. All rights reserved. 4 6 * 5 7 * This software is available to you under a choice of one of two 6 8 * licenses. You may choose to be licensed under the terms of the GNU ··· 69 67 DEFINE_IDR(ib_uverbs_ah_idr); 70 68 DEFINE_IDR(ib_uverbs_cq_idr); 71 69 DEFINE_IDR(ib_uverbs_qp_idr); 70 + DEFINE_IDR(ib_uverbs_srq_idr); 72 71 73 72 static spinlock_t map_lock; 74 73 static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES); ··· 94 91 [IB_USER_VERBS_CMD_DESTROY_QP] = ib_uverbs_destroy_qp, 95 92 [IB_USER_VERBS_CMD_ATTACH_MCAST] = ib_uverbs_attach_mcast, 96 93 [IB_USER_VERBS_CMD_DETACH_MCAST] = ib_uverbs_detach_mcast, 94 + [IB_USER_VERBS_CMD_CREATE_SRQ] = ib_uverbs_create_srq, 95 + [IB_USER_VERBS_CMD_MODIFY_SRQ] = ib_uverbs_modify_srq, 96 + [IB_USER_VERBS_CMD_DESTROY_SRQ] = ib_uverbs_destroy_srq, 97 97 }; 98 98 99 99 static struct vfsmount *uverbs_event_mnt; ··· 131 125 kfree(uobj); 132 126 } 133 127 134 - /* XXX Free SRQs */ 128 + list_for_each_entry_safe(uobj, tmp, &context->srq_list, list) { 129 + struct ib_srq *srq = idr_find(&ib_uverbs_srq_idr, uobj->id); 130 + idr_remove(&ib_uverbs_srq_idr, uobj->id); 131 + ib_destroy_srq(srq); 132 + list_del(&uobj->list); 133 + kfree(uobj); 134 + } 135 + 135 136 /* XXX Free MWs */ 136 137 137 138 list_for_each_entry_safe(uobj, tmp, &context->mr_list, list) { ··· 354 341 { 355 342 ib_uverbs_async_handler(context_ptr, 356 343 event->element.qp->uobject->user_handle, 344 + event->event); 345 + } 346 + 347 + void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr) 348 + { 349 + ib_uverbs_async_handler(context_ptr, 350 + event->element.srq->uobject->user_handle, 357 351 event->event); 358 352 } 359 353
+1
drivers/infiniband/core/uverbs_mem.c
··· 1 1 /* 2 2 * Copyright (c) 2005 Topspin Communications. All rights reserved. 3 3 * Copyright (c) 2005 Cisco Systems. All rights reserved. 4 + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. 4 5 * 5 6 * This software is available to you under a choice of one of two 6 7 * licenses. You may choose to be licensed under the terms of the GNU
+63 -2
drivers/infiniband/core/verbs.c
··· 4 4 * Copyright (c) 2004 Intel Corporation. All rights reserved. 5 5 * Copyright (c) 2004 Topspin Corporation. All rights reserved. 6 6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved. 7 + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 7 8 * Copyright (c) 2005 Cisco Systems. All rights reserved. 8 9 * 9 10 * This software is available to you under a choice of one of two ··· 41 40 #include <linux/errno.h> 42 41 #include <linux/err.h> 43 42 44 - #include <ib_verbs.h> 45 - #include <ib_cache.h> 43 + #include <rdma/ib_verbs.h> 44 + #include <rdma/ib_cache.h> 46 45 47 46 /* Protection domains */ 48 47 ··· 153 152 return ret; 154 153 } 155 154 EXPORT_SYMBOL(ib_destroy_ah); 155 + 156 + /* Shared receive queues */ 157 + 158 + struct ib_srq *ib_create_srq(struct ib_pd *pd, 159 + struct ib_srq_init_attr *srq_init_attr) 160 + { 161 + struct ib_srq *srq; 162 + 163 + if (!pd->device->create_srq) 164 + return ERR_PTR(-ENOSYS); 165 + 166 + srq = pd->device->create_srq(pd, srq_init_attr, NULL); 167 + 168 + if (!IS_ERR(srq)) { 169 + srq->device = pd->device; 170 + srq->pd = pd; 171 + srq->uobject = NULL; 172 + srq->event_handler = srq_init_attr->event_handler; 173 + srq->srq_context = srq_init_attr->srq_context; 174 + atomic_inc(&pd->usecnt); 175 + atomic_set(&srq->usecnt, 0); 176 + } 177 + 178 + return srq; 179 + } 180 + EXPORT_SYMBOL(ib_create_srq); 181 + 182 + int ib_modify_srq(struct ib_srq *srq, 183 + struct ib_srq_attr *srq_attr, 184 + enum ib_srq_attr_mask srq_attr_mask) 185 + { 186 + return srq->device->modify_srq(srq, srq_attr, srq_attr_mask); 187 + } 188 + EXPORT_SYMBOL(ib_modify_srq); 189 + 190 + int ib_query_srq(struct ib_srq *srq, 191 + struct ib_srq_attr *srq_attr) 192 + { 193 + return srq->device->query_srq ? 194 + srq->device->query_srq(srq, srq_attr) : -ENOSYS; 195 + } 196 + EXPORT_SYMBOL(ib_query_srq); 197 + 198 + int ib_destroy_srq(struct ib_srq *srq) 199 + { 200 + struct ib_pd *pd; 201 + int ret; 202 + 203 + if (atomic_read(&srq->usecnt)) 204 + return -EBUSY; 205 + 206 + pd = srq->pd; 207 + 208 + ret = srq->device->destroy_srq(srq); 209 + if (!ret) 210 + atomic_dec(&pd->usecnt); 211 + 212 + return ret; 213 + } 214 + EXPORT_SYMBOL(ib_destroy_srq); 156 215 157 216 /* Queue pairs */ 158 217
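verbs.c now exports the in-kernel SRQ API (create/modify/query/destroy, with query and the whole group optional per device, hence the -ENOSYS checks). A hedged usage sketch for a kernel consumer; the handler name and the sizes are invented for illustration, and re-arming the limit after the event fires would go through ib_modify_srq() with the corresponding attribute-mask bit:

	static void my_srq_event_handler(struct ib_event *event, void *srq_context)
	{
		/* called e.g. when the SRQ drains below the armed limit */
	}

	static struct ib_srq *my_create_srq(struct ib_pd *pd)
	{
		struct ib_srq_init_attr init_attr = {
			.event_handler	= my_srq_event_handler,
			.srq_context	= NULL,
			.attr = {
				.max_wr		= 256,	/* receive WRs the SRQ can hold */
				.max_sge	= 1,	/* scatter entries per WR */
				.srq_limit	= 16,	/* low-watermark for the limit event */
			},
		};

		return ib_create_srq(pd, &init_attr);	/* ERR_PTR() on failure */
	}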
+1 -3
drivers/infiniband/hw/mthca/Makefile
··· 1 - EXTRA_CFLAGS += -Idrivers/infiniband/include 2 - 3 1 ifdef CONFIG_INFINIBAND_MTHCA_DEBUG 4 2 EXTRA_CFLAGS += -DDEBUG 5 3 endif ··· 7 9 ib_mthca-y := mthca_main.o mthca_cmd.o mthca_profile.o mthca_reset.o \ 8 10 mthca_allocator.o mthca_eq.o mthca_pd.o mthca_cq.o \ 9 11 mthca_mr.o mthca_qp.o mthca_av.o mthca_mcg.o mthca_mad.o \ 10 - mthca_provider.o mthca_memfree.o mthca_uar.o 12 + mthca_provider.o mthca_memfree.o mthca_uar.o mthca_srq.o
+116
drivers/infiniband/hw/mthca/mthca_allocator.c
··· 177 177 178 178 kfree(array->page_list); 179 179 } 180 + 181 + /* 182 + * Handling for queue buffers -- we allocate a bunch of memory and 183 + * register it in a memory region at HCA virtual address 0. If the 184 + * requested size is > max_direct, we split the allocation into 185 + * multiple pages, so we don't require too much contiguous memory. 186 + */ 187 + 188 + int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct, 189 + union mthca_buf *buf, int *is_direct, struct mthca_pd *pd, 190 + int hca_write, struct mthca_mr *mr) 191 + { 192 + int err = -ENOMEM; 193 + int npages, shift; 194 + u64 *dma_list = NULL; 195 + dma_addr_t t; 196 + int i; 197 + 198 + if (size <= max_direct) { 199 + *is_direct = 1; 200 + npages = 1; 201 + shift = get_order(size) + PAGE_SHIFT; 202 + 203 + buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev, 204 + size, &t, GFP_KERNEL); 205 + if (!buf->direct.buf) 206 + return -ENOMEM; 207 + 208 + pci_unmap_addr_set(&buf->direct, mapping, t); 209 + 210 + memset(buf->direct.buf, 0, size); 211 + 212 + while (t & ((1 << shift) - 1)) { 213 + --shift; 214 + npages *= 2; 215 + } 216 + 217 + dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); 218 + if (!dma_list) 219 + goto err_free; 220 + 221 + for (i = 0; i < npages; ++i) 222 + dma_list[i] = t + i * (1 << shift); 223 + } else { 224 + *is_direct = 0; 225 + npages = (size + PAGE_SIZE - 1) / PAGE_SIZE; 226 + shift = PAGE_SHIFT; 227 + 228 + dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); 229 + if (!dma_list) 230 + return -ENOMEM; 231 + 232 + buf->page_list = kmalloc(npages * sizeof *buf->page_list, 233 + GFP_KERNEL); 234 + if (!buf->page_list) 235 + goto err_out; 236 + 237 + for (i = 0; i < npages; ++i) 238 + buf->page_list[i].buf = NULL; 239 + 240 + for (i = 0; i < npages; ++i) { 241 + buf->page_list[i].buf = 242 + dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE, 243 + &t, GFP_KERNEL); 244 + if (!buf->page_list[i].buf) 245 + goto err_free; 246 + 247 + dma_list[i] = t; 248 + pci_unmap_addr_set(&buf->page_list[i], mapping, t); 249 + 250 + memset(buf->page_list[i].buf, 0, PAGE_SIZE); 251 + } 252 + } 253 + 254 + err = mthca_mr_alloc_phys(dev, pd->pd_num, 255 + dma_list, shift, npages, 256 + 0, size, 257 + MTHCA_MPT_FLAG_LOCAL_READ | 258 + (hca_write ? MTHCA_MPT_FLAG_LOCAL_WRITE : 0), 259 + mr); 260 + if (err) 261 + goto err_free; 262 + 263 + kfree(dma_list); 264 + 265 + return 0; 266 + 267 + err_free: 268 + mthca_buf_free(dev, size, buf, *is_direct, NULL); 269 + 270 + err_out: 271 + kfree(dma_list); 272 + 273 + return err; 274 + } 275 + 276 + void mthca_buf_free(struct mthca_dev *dev, int size, union mthca_buf *buf, 277 + int is_direct, struct mthca_mr *mr) 278 + { 279 + int i; 280 + 281 + if (mr) 282 + mthca_free_mr(dev, mr); 283 + 284 + if (is_direct) 285 + dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf, 286 + pci_unmap_addr(&buf->direct, mapping)); 287 + else { 288 + for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i) 289 + dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, 290 + buf->page_list[i].buf, 291 + pci_unmap_addr(&buf->page_list[i], 292 + mapping)); 293 + kfree(buf->page_list); 294 + } 295 + }
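The new mthca_buf_alloc()/mthca_buf_free() pair factors out the queue-buffer handling that each queue type used to duplicate (the CQ code later in this diff is converted to it): requests up to max_direct come from a single coherent allocation, larger ones become a page list, and in the direct case the chunk shift is halved (doubling npages) until the DMA address is aligned to the chunk size, so the dma_list handed to mthca_mr_alloc_phys() describes equal, naturally aligned pieces. A caller looks roughly like the converted CQ path:

	union mthca_buf buf;
	struct mthca_mr mr;
	int is_direct;
	int err;

	/* one MR covering the whole queue, writable by the HCA */
	err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_CQ_SIZE,
			      &buf, &is_direct, &dev->driver_pd, 1, &mr);
	if (err)
		return err;

	/* ... hand the buffer to the HCA and use it ... */

	mthca_buf_free(dev, size, &buf, is_direct, &mr);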
+14 -14
drivers/infiniband/hw/mthca/mthca_av.c
··· 35 35 36 36 #include <linux/init.h> 37 37 38 - #include <ib_verbs.h> 39 - #include <ib_cache.h> 38 + #include <rdma/ib_verbs.h> 39 + #include <rdma/ib_cache.h> 40 40 41 41 #include "mthca_dev.h" 42 42 43 43 struct mthca_av { 44 - u32 port_pd; 45 - u8 reserved1; 46 - u8 g_slid; 47 - u16 dlid; 48 - u8 reserved2; 49 - u8 gid_index; 50 - u8 msg_sr; 51 - u8 hop_limit; 52 - u32 sl_tclass_flowlabel; 53 - u32 dgid[4]; 44 + __be32 port_pd; 45 + u8 reserved1; 46 + u8 g_slid; 47 + __be16 dlid; 48 + u8 reserved2; 49 + u8 gid_index; 50 + u8 msg_sr; 51 + u8 hop_limit; 52 + __be32 sl_tclass_flowlabel; 53 + __be32 dgid[4]; 54 54 }; 55 55 56 56 int mthca_create_ah(struct mthca_dev *dev, ··· 128 128 av, (unsigned long) ah->avdma); 129 129 for (j = 0; j < 8; ++j) 130 130 printk(KERN_DEBUG " [%2x] %08x\n", 131 - j * 4, be32_to_cpu(((u32 *) av)[j])); 131 + j * 4, be32_to_cpu(((__be32 *) av)[j])); 132 132 } 133 133 134 134 if (ah->type == MTHCA_AH_ON_HCA) { ··· 169 169 170 170 header->lrh.service_level = be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 28; 171 171 header->lrh.destination_lid = ah->av->dlid; 172 - header->lrh.source_lid = ah->av->g_slid & 0x7f; 172 + header->lrh.source_lid = cpu_to_be16(ah->av->g_slid & 0x7f); 173 173 if (ah->av->g_slid & 0x80) { 174 174 header->grh_present = 1; 175 175 header->grh.traffic_class =
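Besides the include and copyright updates, mthca_av.c annotates the hardware AV layout with __be types and fixes the one spot that stored a raw value in a big-endian header field (source_lid). The packing the header-building code relies on, as used in the hunks above: g_slid carries the GRH flag in bit 7 and the source path bits in the low 7 bits, dlid stays big-endian end to end, and the service level sits in the top nibble of sl_tclass_flowlabel:

	u8  g_slid   = ah->av->g_slid;
	u32 sl_tc_fl = be32_to_cpu(ah->av->sl_tclass_flowlabel);

	header->grh_present	    = !!(g_slid & 0x80);	  /* bit 7: GRH follows the LRH */
	header->lrh.source_lid	    = cpu_to_be16(g_slid & 0x7f); /* low 7 bits: path bits */
	header->lrh.destination_lid = ah->av->dlid;		  /* already big-endian */
	header->lrh.service_level   = sl_tc_fl >> 28;		  /* SL in the top 4 bits */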
+81 -25
drivers/infiniband/hw/mthca/mthca_cmd.c
··· 1 1 /* 2 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 3 + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. 3 4 * 4 5 * This software is available to you under a choice of one of two 5 6 * licenses. You may choose to be licensed under the terms of the GNU ··· 37 36 #include <linux/pci.h> 38 37 #include <linux/errno.h> 39 38 #include <asm/io.h> 40 - #include <ib_mad.h> 39 + #include <rdma/ib_mad.h> 41 40 42 41 #include "mthca_dev.h" 43 42 #include "mthca_config_reg.h" ··· 109 108 CMD_SW2HW_SRQ = 0x35, 110 109 CMD_HW2SW_SRQ = 0x36, 111 110 CMD_QUERY_SRQ = 0x37, 111 + CMD_ARM_SRQ = 0x40, 112 112 113 113 /* QP/EE commands */ 114 114 CMD_RST2INIT_QPEE = 0x19, ··· 221 219 * (and some architectures such as ia64 implement memcpy_toio 222 220 * in terms of writeb). 223 221 */ 224 - __raw_writel(cpu_to_be32(in_param >> 32), dev->hcr + 0 * 4); 225 - __raw_writel(cpu_to_be32(in_param & 0xfffffffful), dev->hcr + 1 * 4); 226 - __raw_writel(cpu_to_be32(in_modifier), dev->hcr + 2 * 4); 227 - __raw_writel(cpu_to_be32(out_param >> 32), dev->hcr + 3 * 4); 228 - __raw_writel(cpu_to_be32(out_param & 0xfffffffful), dev->hcr + 4 * 4); 229 - __raw_writel(cpu_to_be32(token << 16), dev->hcr + 5 * 4); 222 + __raw_writel((__force u32) cpu_to_be32(in_param >> 32), dev->hcr + 0 * 4); 223 + __raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful), dev->hcr + 1 * 4); 224 + __raw_writel((__force u32) cpu_to_be32(in_modifier), dev->hcr + 2 * 4); 225 + __raw_writel((__force u32) cpu_to_be32(out_param >> 32), dev->hcr + 3 * 4); 226 + __raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), dev->hcr + 4 * 4); 227 + __raw_writel((__force u32) cpu_to_be32(token << 16), dev->hcr + 5 * 4); 230 228 231 229 /* __raw_writel may not order writes. */ 232 230 wmb(); 233 231 234 - __raw_writel(cpu_to_be32((1 << HCR_GO_BIT) | 235 - (event ? (1 << HCA_E_BIT) : 0) | 236 - (op_modifier << HCR_OPMOD_SHIFT) | 237 - op), dev->hcr + 6 * 4); 232 + __raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT) | 233 + (event ? 
(1 << HCA_E_BIT) : 0) | 234 + (op_modifier << HCR_OPMOD_SHIFT) | 235 + op), dev->hcr + 6 * 4); 238 236 239 237 out: 240 238 up(&dev->cmd.hcr_sem); ··· 275 273 goto out; 276 274 } 277 275 278 - if (out_is_imm) { 279 - memcpy_fromio(out_param, dev->hcr + HCR_OUT_PARAM_OFFSET, sizeof (u64)); 280 - be64_to_cpus(out_param); 281 - } 276 + if (out_is_imm) 277 + *out_param = 278 + (u64) be32_to_cpu((__force __be32) 279 + __raw_readl(dev->hcr + HCR_OUT_PARAM_OFFSET)) << 32 | 280 + (u64) be32_to_cpu((__force __be32) 281 + __raw_readl(dev->hcr + HCR_OUT_PARAM_OFFSET + 4)); 282 282 283 - *status = be32_to_cpu(__raw_readl(dev->hcr + HCR_STATUS_OFFSET)) >> 24; 283 + *status = be32_to_cpu((__force __be32) __raw_readl(dev->hcr + HCR_STATUS_OFFSET)) >> 24; 284 284 285 285 out: 286 286 up(&dev->cmd.poll_sem); ··· 1033 1029 1034 1030 mthca_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n", 1035 1031 dev_lim->max_qps, dev_lim->reserved_qps, dev_lim->qpc_entry_sz); 1032 + mthca_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n", 1033 + dev_lim->max_srqs, dev_lim->reserved_srqs, dev_lim->srq_entry_sz); 1036 1034 mthca_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n", 1037 1035 dev_lim->max_cqs, dev_lim->reserved_cqs, dev_lim->cqc_entry_sz); 1038 1036 mthca_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n", ··· 1088 1082 return err; 1089 1083 } 1090 1084 1085 + static void get_board_id(void *vsd, char *board_id) 1086 + { 1087 + int i; 1088 + 1089 + #define VSD_OFFSET_SIG1 0x00 1090 + #define VSD_OFFSET_SIG2 0xde 1091 + #define VSD_OFFSET_MLX_BOARD_ID 0xd0 1092 + #define VSD_OFFSET_TS_BOARD_ID 0x20 1093 + 1094 + #define VSD_SIGNATURE_TOPSPIN 0x5ad 1095 + 1096 + memset(board_id, 0, MTHCA_BOARD_ID_LEN); 1097 + 1098 + if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN && 1099 + be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) { 1100 + strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MTHCA_BOARD_ID_LEN); 1101 + } else { 1102 + /* 1103 + * The board ID is a string but the firmware byte 1104 + * swaps each 4-byte word before passing it back to 1105 + * us. Therefore we need to swab it before printing. 
1106 + */ 1107 + for (i = 0; i < 4; ++i) 1108 + ((u32 *) board_id)[i] = 1109 + swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4)); 1110 + } 1111 + } 1112 + 1091 1113 int mthca_QUERY_ADAPTER(struct mthca_dev *dev, 1092 1114 struct mthca_adapter *adapter, u8 *status) 1093 1115 { ··· 1128 1094 #define QUERY_ADAPTER_DEVICE_ID_OFFSET 0x04 1129 1095 #define QUERY_ADAPTER_REVISION_ID_OFFSET 0x08 1130 1096 #define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10 1097 + #define QUERY_ADAPTER_VSD_OFFSET 0x20 1131 1098 1132 1099 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 1133 1100 if (IS_ERR(mailbox)) ··· 1146 1111 MTHCA_GET(adapter->revision_id, outbox, QUERY_ADAPTER_REVISION_ID_OFFSET); 1147 1112 MTHCA_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET); 1148 1113 1114 + get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4, 1115 + adapter->board_id); 1116 + 1149 1117 out: 1150 1118 mthca_free_mailbox(dev, mailbox); 1151 1119 return err; ··· 1159 1121 u8 *status) 1160 1122 { 1161 1123 struct mthca_mailbox *mailbox; 1162 - u32 *inbox; 1124 + __be32 *inbox; 1163 1125 int err; 1164 1126 1165 1127 #define INIT_HCA_IN_SIZE 0x200 ··· 1285 1247 #define INIT_IB_FLAG_SIG (1 << 18) 1286 1248 #define INIT_IB_FLAG_NG (1 << 17) 1287 1249 #define INIT_IB_FLAG_G0 (1 << 16) 1288 - #define INIT_IB_FLAG_1X (1 << 8) 1289 - #define INIT_IB_FLAG_4X (1 << 9) 1290 - #define INIT_IB_FLAG_12X (1 << 11) 1291 1250 #define INIT_IB_VL_SHIFT 4 1251 + #define INIT_IB_PORT_WIDTH_SHIFT 8 1292 1252 #define INIT_IB_MTU_SHIFT 12 1293 1253 #define INIT_IB_MAX_GID_OFFSET 0x06 1294 1254 #define INIT_IB_MAX_PKEY_OFFSET 0x0a ··· 1302 1266 memset(inbox, 0, INIT_IB_IN_SIZE); 1303 1267 1304 1268 flags = 0; 1305 - flags |= param->enable_1x ? INIT_IB_FLAG_1X : 0; 1306 - flags |= param->enable_4x ? INIT_IB_FLAG_4X : 0; 1307 1269 flags |= param->set_guid0 ? INIT_IB_FLAG_G0 : 0; 1308 1270 flags |= param->set_node_guid ? INIT_IB_FLAG_NG : 0; 1309 1271 flags |= param->set_si_guid ? 
INIT_IB_FLAG_SIG : 0; 1310 1272 flags |= param->vl_cap << INIT_IB_VL_SHIFT; 1273 + flags |= param->port_width << INIT_IB_PORT_WIDTH_SHIFT; 1311 1274 flags |= param->mtu_cap << INIT_IB_MTU_SHIFT; 1312 1275 MTHCA_PUT(inbox, flags, INIT_IB_FLAGS_OFFSET); 1313 1276 ··· 1377 1342 int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status) 1378 1343 { 1379 1344 struct mthca_mailbox *mailbox; 1380 - u64 *inbox; 1345 + __be64 *inbox; 1381 1346 int err; 1382 1347 1383 1348 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); ··· 1503 1468 CMD_TIME_CLASS_A, status); 1504 1469 } 1505 1470 1471 + int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 1472 + int srq_num, u8 *status) 1473 + { 1474 + return mthca_cmd(dev, mailbox->dma, srq_num, 0, CMD_SW2HW_SRQ, 1475 + CMD_TIME_CLASS_A, status); 1476 + } 1477 + 1478 + int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 1479 + int srq_num, u8 *status) 1480 + { 1481 + return mthca_cmd_box(dev, 0, mailbox->dma, srq_num, 0, 1482 + CMD_HW2SW_SRQ, 1483 + CMD_TIME_CLASS_A, status); 1484 + } 1485 + 1486 + int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status) 1487 + { 1488 + return mthca_cmd(dev, limit, srq_num, 0, CMD_ARM_SRQ, 1489 + CMD_TIME_CLASS_B, status); 1490 + } 1491 + 1506 1492 int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num, 1507 1493 int is_ee, struct mthca_mailbox *mailbox, u32 optmask, 1508 1494 u8 *status) ··· 1569 1513 if (i % 8 == 0) 1570 1514 printk(" [%02x] ", i * 4); 1571 1515 printk(" %08x", 1572 - be32_to_cpu(((u32 *) mailbox->buf)[i + 2])); 1516 + be32_to_cpu(((__be32 *) mailbox->buf)[i + 2])); 1573 1517 if ((i + 1) % 8 == 0) 1574 1518 printk("\n"); 1575 1519 } ··· 1589 1533 if (i % 8 == 0) 1590 1534 printk("[%02x] ", i * 4); 1591 1535 printk(" %08x", 1592 - be32_to_cpu(((u32 *) mailbox->buf)[i + 2])); 1536 + be32_to_cpu(((__be32 *) mailbox->buf)[i + 2])); 1593 1537 if ((i + 1) % 8 == 0) 1594 1538 printk("\n"); 1595 1539 }
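mthca_INIT_IB no longer takes separate enable_1x/enable_4x booleans; a single port_width value is shifted in at INIT_IB_PORT_WIDTH_SHIFT, which lands on the old per-width flag bits (1x at bit 8, 4x at bit 9, 12x at bit 11), so a link-width capability mask can be passed through unchanged. A small worked example of how the flags word is built, assuming the usual encoding of bit 0 = 1x, bit 1 = 4x:

	u32 flags = 0;

	param.port_width = 3;					/* 1x and 4x supported */

	flags |= param.vl_cap	  << INIT_IB_VL_SHIFT;		/* bits 4-7 */
	flags |= param.port_width << INIT_IB_PORT_WIDTH_SHIFT;	/* 3 << 8 = 0x300, i.e. the
								   old FLAG_1X | FLAG_4X */
	flags |= param.mtu_cap	  << INIT_IB_MTU_SHIFT;		/* bits 12 and up */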
+13 -7
drivers/infiniband/hw/mthca/mthca_cmd.h
··· 1 1 /* 2 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 3 + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. 3 4 * 4 5 * This software is available to you under a choice of one of two 5 6 * licenses. You may choose to be licensed under the terms of the GNU ··· 36 35 #ifndef MTHCA_CMD_H 37 36 #define MTHCA_CMD_H 38 37 39 - #include <ib_verbs.h> 38 + #include <rdma/ib_verbs.h> 40 39 41 40 #define MTHCA_MAILBOX_SIZE 4096 42 41 ··· 184 183 }; 185 184 186 185 struct mthca_adapter { 187 - u32 vendor_id; 188 - u32 device_id; 189 - u32 revision_id; 190 - u8 inta_pin; 186 + u32 vendor_id; 187 + u32 device_id; 188 + u32 revision_id; 189 + char board_id[MTHCA_BOARD_ID_LEN]; 190 + u8 inta_pin; 191 191 }; 192 192 193 193 struct mthca_init_hca_param { ··· 220 218 }; 221 219 222 220 struct mthca_init_ib_param { 223 - int enable_1x; 224 - int enable_4x; 221 + int port_width; 225 222 int vl_cap; 226 223 int mtu_cap; 227 224 u16 gid_cap; ··· 298 297 int cq_num, u8 *status); 299 298 int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 300 299 int cq_num, u8 *status); 300 + int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 301 + int srq_num, u8 *status); 302 + int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 303 + int srq_num, u8 *status); 304 + int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status); 301 305 int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num, 302 306 int is_ee, struct mthca_mailbox *mailbox, u32 optmask, 303 307 u8 *status);
+1
drivers/infiniband/hw/mthca/mthca_config_reg.h
··· 1 1 /* 2 2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 3 + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. 3 4 * 4 5 * This software is available to you under a choice of one of two 5 6 * licenses. You may choose to be licensed under the terms of the GNU
+81 -175
drivers/infiniband/hw/mthca/mthca_cq.c
··· 2 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 3 3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 4 4 * Copyright (c) 2005 Cisco Systems, Inc. All rights reserved. 5 + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. 6 + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. 5 7 * 6 8 * This software is available to you under a choice of one of two 7 9 * licenses. You may choose to be licensed under the terms of the GNU ··· 39 37 #include <linux/init.h> 40 38 #include <linux/hardirq.h> 41 39 42 - #include <ib_pack.h> 40 + #include <rdma/ib_pack.h> 43 41 44 42 #include "mthca_dev.h" 45 43 #include "mthca_cmd.h" ··· 57 55 * Must be packed because start is 64 bits but only aligned to 32 bits. 58 56 */ 59 57 struct mthca_cq_context { 60 - u32 flags; 61 - u64 start; 62 - u32 logsize_usrpage; 63 - u32 error_eqn; /* Tavor only */ 64 - u32 comp_eqn; 65 - u32 pd; 66 - u32 lkey; 67 - u32 last_notified_index; 68 - u32 solicit_producer_index; 69 - u32 consumer_index; 70 - u32 producer_index; 71 - u32 cqn; 72 - u32 ci_db; /* Arbel only */ 73 - u32 state_db; /* Arbel only */ 74 - u32 reserved; 58 + __be32 flags; 59 + __be64 start; 60 + __be32 logsize_usrpage; 61 + __be32 error_eqn; /* Tavor only */ 62 + __be32 comp_eqn; 63 + __be32 pd; 64 + __be32 lkey; 65 + __be32 last_notified_index; 66 + __be32 solicit_producer_index; 67 + __be32 consumer_index; 68 + __be32 producer_index; 69 + __be32 cqn; 70 + __be32 ci_db; /* Arbel only */ 71 + __be32 state_db; /* Arbel only */ 72 + u32 reserved; 75 73 } __attribute__((packed)); 76 74 77 75 #define MTHCA_CQ_STATUS_OK ( 0 << 28) ··· 110 108 }; 111 109 112 110 struct mthca_cqe { 113 - u32 my_qpn; 114 - u32 my_ee; 115 - u32 rqpn; 116 - u16 sl_g_mlpath; 117 - u16 rlid; 118 - u32 imm_etype_pkey_eec; 119 - u32 byte_cnt; 120 - u32 wqe; 121 - u8 opcode; 122 - u8 is_send; 123 - u8 reserved; 124 - u8 owner; 111 + __be32 my_qpn; 112 + __be32 my_ee; 113 + __be32 rqpn; 114 + __be16 sl_g_mlpath; 115 + __be16 rlid; 116 + __be32 imm_etype_pkey_eec; 117 + __be32 byte_cnt; 118 + __be32 wqe; 119 + u8 opcode; 120 + u8 is_send; 121 + u8 reserved; 122 + u8 owner; 125 123 }; 126 124 127 125 struct mthca_err_cqe { 128 - u32 my_qpn; 129 - u32 reserved1[3]; 130 - u8 syndrome; 131 - u8 reserved2; 132 - u16 db_cnt; 133 - u32 reserved3; 134 - u32 wqe; 135 - u8 opcode; 136 - u8 reserved4[2]; 137 - u8 owner; 126 + __be32 my_qpn; 127 + u32 reserved1[3]; 128 + u8 syndrome; 129 + u8 reserved2; 130 + __be16 db_cnt; 131 + u32 reserved3; 132 + __be32 wqe; 133 + u8 opcode; 134 + u8 reserved4[2]; 135 + u8 owner; 138 136 }; 139 137 140 138 #define MTHCA_CQ_ENTRY_OWNER_SW (0 << 7) ··· 193 191 static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq, 194 192 int incr) 195 193 { 196 - u32 doorbell[2]; 194 + __be32 doorbell[2]; 197 195 198 196 if (mthca_is_memfree(dev)) { 199 197 *cq->set_ci_db = cpu_to_be32(cq->cons_index); ··· 224 222 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); 225 223 } 226 224 227 - void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn) 225 + void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn, 226 + struct mthca_srq *srq) 228 227 { 229 228 struct mthca_cq *cq; 230 229 struct mthca_cqe *cqe; ··· 266 263 */ 267 264 while (prod_index > cq->cons_index) { 268 265 cqe = get_cqe(cq, (prod_index - 1) & cq->ibcq.cqe); 269 - if (cqe->my_qpn == cpu_to_be32(qpn)) 266 + if (cqe->my_qpn == cpu_to_be32(qpn)) { 267 + if (srq) 268 + mthca_free_srq_wqe(srq, be32_to_cpu(cqe->wqe)); 270 269 
++nfreed; 270 + } 271 271 else if (nfreed) 272 272 memcpy(get_cqe(cq, (prod_index - 1 + nfreed) & 273 273 cq->ibcq.cqe), ··· 297 291 { 298 292 int err; 299 293 int dbd; 300 - u32 new_wqe; 294 + __be32 new_wqe; 301 295 302 296 if (cqe->syndrome == SYNDROME_LOCAL_QP_OP_ERR) { 303 297 mthca_dbg(dev, "local QP operation err " ··· 371 365 break; 372 366 } 373 367 368 + /* 369 + * Mem-free HCAs always generate one CQE per WQE, even in the 370 + * error case, so we don't have to check the doorbell count, etc. 371 + */ 372 + if (mthca_is_memfree(dev)) 373 + return 0; 374 + 374 375 err = mthca_free_err_wqe(dev, qp, is_send, wqe_index, &dbd, &new_wqe); 375 376 if (err) 376 377 return err; ··· 386 373 * If we're at the end of the WQE chain, or we've used up our 387 374 * doorbell count, free the CQE. Otherwise just update it for 388 375 * the next poll operation. 389 - * 390 - * This does not apply to mem-free HCAs: they don't use the 391 - * doorbell count field, and so we should always free the CQE. 392 376 */ 393 - if (mthca_is_memfree(dev) || 394 - !(new_wqe & cpu_to_be32(0x3f)) || (!cqe->db_cnt && dbd)) 377 + if (!(new_wqe & cpu_to_be32(0x3f)) || (!cqe->db_cnt && dbd)) 395 378 return 0; 396 379 397 380 cqe->db_cnt = cpu_to_be16(be16_to_cpu(cqe->db_cnt) - dbd); ··· 459 450 >> wq->wqe_shift); 460 451 entry->wr_id = (*cur_qp)->wrid[wqe_index + 461 452 (*cur_qp)->rq.max]; 453 + } else if ((*cur_qp)->ibqp.srq) { 454 + struct mthca_srq *srq = to_msrq((*cur_qp)->ibqp.srq); 455 + u32 wqe = be32_to_cpu(cqe->wqe); 456 + wq = NULL; 457 + wqe_index = wqe >> srq->wqe_shift; 458 + entry->wr_id = srq->wrid[wqe_index]; 459 + mthca_free_srq_wqe(srq, wqe); 462 460 } else { 463 461 wq = &(*cur_qp)->rq; 464 462 wqe_index = be32_to_cpu(cqe->wqe) >> wq->wqe_shift; 465 463 entry->wr_id = (*cur_qp)->wrid[wqe_index]; 466 464 } 467 465 468 - if (wq->last_comp < wqe_index) 469 - wq->tail += wqe_index - wq->last_comp; 470 - else 471 - wq->tail += wqe_index + wq->max - wq->last_comp; 466 + if (wq) { 467 + if (wq->last_comp < wqe_index) 468 + wq->tail += wqe_index - wq->last_comp; 469 + else 470 + wq->tail += wqe_index + wq->max - wq->last_comp; 472 471 473 - wq->last_comp = wqe_index; 474 - 475 - if (0) 476 - mthca_dbg(dev, "%s completion for QP %06x, index %d (nr %d)\n", 477 - is_send ? "Send" : "Receive", 478 - (*cur_qp)->qpn, wqe_index, wq->max); 472 + wq->last_comp = wqe_index; 473 + } 479 474 480 475 if (is_error) { 481 476 err = handle_error_cqe(dev, cq, *cur_qp, wqe_index, is_send, ··· 597 584 598 585 int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify) 599 586 { 600 - u32 doorbell[2]; 587 + __be32 doorbell[2]; 601 588 602 589 doorbell[0] = cpu_to_be32((notify == IB_CQ_SOLICITED ? 
603 590 MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL : 604 591 MTHCA_TAVOR_CQ_DB_REQ_NOT) | 605 592 to_mcq(cq)->cqn); 606 - doorbell[1] = 0xffffffff; 593 + doorbell[1] = (__force __be32) 0xffffffff; 607 594 608 595 mthca_write64(doorbell, 609 596 to_mdev(cq->device)->kar + MTHCA_CQ_DOORBELL, ··· 615 602 int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify) 616 603 { 617 604 struct mthca_cq *cq = to_mcq(ibcq); 618 - u32 doorbell[2]; 605 + __be32 doorbell[2]; 619 606 u32 sn; 620 - u32 ci; 607 + __be32 ci; 621 608 622 609 sn = cq->arm_sn & 3; 623 610 ci = cpu_to_be32(cq->cons_index); ··· 650 637 651 638 static void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq *cq) 652 639 { 653 - int i; 654 - int size; 655 - 656 - if (cq->is_direct) 657 - dma_free_coherent(&dev->pdev->dev, 658 - (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE, 659 - cq->queue.direct.buf, 660 - pci_unmap_addr(&cq->queue.direct, 661 - mapping)); 662 - else { 663 - size = (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE; 664 - for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i) 665 - if (cq->queue.page_list[i].buf) 666 - dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, 667 - cq->queue.page_list[i].buf, 668 - pci_unmap_addr(&cq->queue.page_list[i], 669 - mapping)); 670 - 671 - kfree(cq->queue.page_list); 672 - } 673 - } 674 - 675 - static int mthca_alloc_cq_buf(struct mthca_dev *dev, int size, 676 - struct mthca_cq *cq) 677 - { 678 - int err = -ENOMEM; 679 - int npages, shift; 680 - u64 *dma_list = NULL; 681 - dma_addr_t t; 682 - int i; 683 - 684 - if (size <= MTHCA_MAX_DIRECT_CQ_SIZE) { 685 - cq->is_direct = 1; 686 - npages = 1; 687 - shift = get_order(size) + PAGE_SHIFT; 688 - 689 - cq->queue.direct.buf = dma_alloc_coherent(&dev->pdev->dev, 690 - size, &t, GFP_KERNEL); 691 - if (!cq->queue.direct.buf) 692 - return -ENOMEM; 693 - 694 - pci_unmap_addr_set(&cq->queue.direct, mapping, t); 695 - 696 - memset(cq->queue.direct.buf, 0, size); 697 - 698 - while (t & ((1 << shift) - 1)) { 699 - --shift; 700 - npages *= 2; 701 - } 702 - 703 - dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); 704 - if (!dma_list) 705 - goto err_free; 706 - 707 - for (i = 0; i < npages; ++i) 708 - dma_list[i] = t + i * (1 << shift); 709 - } else { 710 - cq->is_direct = 0; 711 - npages = (size + PAGE_SIZE - 1) / PAGE_SIZE; 712 - shift = PAGE_SHIFT; 713 - 714 - dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); 715 - if (!dma_list) 716 - return -ENOMEM; 717 - 718 - cq->queue.page_list = kmalloc(npages * sizeof *cq->queue.page_list, 719 - GFP_KERNEL); 720 - if (!cq->queue.page_list) 721 - goto err_out; 722 - 723 - for (i = 0; i < npages; ++i) 724 - cq->queue.page_list[i].buf = NULL; 725 - 726 - for (i = 0; i < npages; ++i) { 727 - cq->queue.page_list[i].buf = 728 - dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE, 729 - &t, GFP_KERNEL); 730 - if (!cq->queue.page_list[i].buf) 731 - goto err_free; 732 - 733 - dma_list[i] = t; 734 - pci_unmap_addr_set(&cq->queue.page_list[i], mapping, t); 735 - 736 - memset(cq->queue.page_list[i].buf, 0, PAGE_SIZE); 737 - } 738 - } 739 - 740 - err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num, 741 - dma_list, shift, npages, 742 - 0, size, 743 - MTHCA_MPT_FLAG_LOCAL_WRITE | 744 - MTHCA_MPT_FLAG_LOCAL_READ, 745 - &cq->mr); 746 - if (err) 747 - goto err_free; 748 - 749 - kfree(dma_list); 750 - 751 - return 0; 752 - 753 - err_free: 754 - mthca_free_cq_buf(dev, cq); 755 - 756 - err_out: 757 - kfree(dma_list); 758 - 759 - return err; 640 + mthca_buf_free(dev, (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE, 641 + &cq->queue, 
cq->is_direct, &cq->mr); 760 642 } 761 643 762 644 int mthca_init_cq(struct mthca_dev *dev, int nent, ··· 703 795 cq_context = mailbox->buf; 704 796 705 797 if (cq->is_kernel) { 706 - err = mthca_alloc_cq_buf(dev, size, cq); 798 + err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_CQ_SIZE, 799 + &cq->queue, &cq->is_direct, 800 + &dev->driver_pd, 1, &cq->mr); 707 801 if (err) 708 802 goto err_out_mailbox; 709 803 ··· 721 811 cq_context->flags = cpu_to_be32(MTHCA_CQ_STATUS_OK | 722 812 MTHCA_CQ_STATE_DISARMED | 723 813 MTHCA_CQ_FLAG_TR); 724 - cq_context->start = cpu_to_be64(0); 725 814 cq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24); 726 815 if (ctx) 727 816 cq_context->logsize_usrpage |= cpu_to_be32(ctx->uar.index); ··· 766 857 return 0; 767 858 768 859 err_out_free_mr: 769 - if (cq->is_kernel) { 770 - mthca_free_mr(dev, &cq->mr); 860 + if (cq->is_kernel) 771 861 mthca_free_cq_buf(dev, cq); 772 - } 773 862 774 863 err_out_mailbox: 775 864 mthca_free_mailbox(dev, mailbox); ··· 811 904 mthca_warn(dev, "HW2SW_CQ returned status 0x%02x\n", status); 812 905 813 906 if (0) { 814 - u32 *ctx = mailbox->buf; 907 + __be32 *ctx = mailbox->buf; 815 908 int j; 816 909 817 910 printk(KERN_ERR "context for CQN %x (cons index %x, next sw %d)\n", ··· 835 928 wait_event(cq->wait, !atomic_read(&cq->refcount)); 836 929 837 930 if (cq->is_kernel) { 838 - mthca_free_mr(dev, &cq->mr); 839 931 mthca_free_cq_buf(dev, cq); 840 932 if (mthca_is_memfree(dev)) { 841 933 mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index);
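The mthca_cq.c hunks above replace the open-coded CQ buffer allocation with the new mthca_buf_alloc()/mthca_buf_free() helpers and teach the poll path about SRQ completions. The sketch below (plain userspace C, not driver code) just replays the direct-versus-page-list decision that the removed mthca_alloc_cq_buf() used to make inline; the 4 KB page size and the 4-page direct threshold are assumptions for illustration, and __builtin_clz stands in for the kernel's get_order().

/*
 * Userspace sketch of the direct vs. page-list choice that the removed
 * mthca_alloc_cq_buf() made inline and that mthca_buf_alloc() now
 * centralizes.  Page size and MAX_DIRECT threshold are example values.
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1 << PAGE_SHIFT)
#define MAX_DIRECT (4 * PAGE_SIZE)      /* stand-in for MTHCA_MAX_DIRECT_CQ_SIZE */

int main(void)
{
        int sizes[] = { 2048, 16384, 65536 };

        for (int i = 0; i < 3; ++i) {
                int size = sizes[i];

                if (size <= MAX_DIRECT)
                        /* one physically contiguous buffer, get_order()-style order */
                        printf("size %6d -> one direct buffer of order %d\n",
                               size,
                               size <= PAGE_SIZE ? 0 :
                               32 - __builtin_clz((size - 1) >> PAGE_SHIFT));
                else
                        /* otherwise fall back to a list of single pages */
                        printf("size %6d -> %d separate pages\n",
                               size, (size + PAGE_SIZE - 1) / PAGE_SIZE);
        }
        return 0;
}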
+43 -9
drivers/infiniband/hw/mthca/mthca_dev.h
··· 2 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 3 3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 4 4 * Copyright (c) 2005 Cisco Systems. All rights reserved. 5 + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. 6 + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. 5 7 * 6 8 * This software is available to you under a choice of one of two 7 9 * licenses. You may choose to be licensed under the terms of the GNU ··· 66 64 67 65 enum { 68 66 MTHCA_MAX_PORTS = 2 67 + }; 68 + 69 + enum { 70 + MTHCA_BOARD_ID_LEN = 64 69 71 }; 70 72 71 73 enum { ··· 148 142 int reserved_mcgs; 149 143 int num_pds; 150 144 int reserved_pds; 145 + u8 port_width_cap; 151 146 }; 152 147 153 148 struct mthca_alloc { ··· 218 211 struct mthca_icm_table *table; 219 212 }; 220 213 214 + struct mthca_srq_table { 215 + struct mthca_alloc alloc; 216 + spinlock_t lock; 217 + struct mthca_array srq; 218 + struct mthca_icm_table *table; 219 + }; 220 + 221 221 struct mthca_qp_table { 222 222 struct mthca_alloc alloc; 223 223 u32 rdb_base; ··· 260 246 unsigned long device_cap_flags; 261 247 262 248 u32 rev_id; 249 + char board_id[MTHCA_BOARD_ID_LEN]; 263 250 264 251 /* firmware info */ 265 252 u64 fw_ver; ··· 306 291 struct mthca_mr_table mr_table; 307 292 struct mthca_eq_table eq_table; 308 293 struct mthca_cq_table cq_table; 294 + struct mthca_srq_table srq_table; 309 295 struct mthca_qp_table qp_table; 310 296 struct mthca_av_table av_table; 311 297 struct mthca_mcg_table mcg_table; ··· 347 331 348 332 #define MTHCA_PUT(dest, source, offset) \ 349 333 do { \ 350 - __typeof__(source) *__p = \ 351 - (__typeof__(source) *) ((char *) (dest) + (offset)); \ 334 + void *__d = ((char *) (dest) + (offset)); \ 352 335 switch (sizeof(source)) { \ 353 - case 1: *__p = (source); break; \ 354 - case 2: *__p = cpu_to_be16(source); break; \ 355 - case 4: *__p = cpu_to_be32(source); break; \ 356 - case 8: *__p = cpu_to_be64(source); break; \ 357 - default: __buggy_use_of_MTHCA_PUT(); \ 336 + case 1: *(u8 *) __d = (source); break; \ 337 + case 2: *(__be16 *) __d = cpu_to_be16(source); break; \ 338 + case 4: *(__be32 *) __d = cpu_to_be32(source); break; \ 339 + case 8: *(__be64 *) __d = cpu_to_be64(source); break; \ 340 + default: __buggy_use_of_MTHCA_PUT(); \ 358 341 } \ 359 342 } while (0) 360 343 ··· 369 354 void mthca_array_clear(struct mthca_array *array, int index); 370 355 int mthca_array_init(struct mthca_array *array, int nent); 371 356 void mthca_array_cleanup(struct mthca_array *array, int nent); 357 + int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct, 358 + union mthca_buf *buf, int *is_direct, struct mthca_pd *pd, 359 + int hca_write, struct mthca_mr *mr); 360 + void mthca_buf_free(struct mthca_dev *dev, int size, union mthca_buf *buf, 361 + int is_direct, struct mthca_mr *mr); 372 362 373 363 int mthca_init_uar_table(struct mthca_dev *dev); 374 364 int mthca_init_pd_table(struct mthca_dev *dev); 375 365 int mthca_init_mr_table(struct mthca_dev *dev); 376 366 int mthca_init_eq_table(struct mthca_dev *dev); 377 367 int mthca_init_cq_table(struct mthca_dev *dev); 368 + int mthca_init_srq_table(struct mthca_dev *dev); 378 369 int mthca_init_qp_table(struct mthca_dev *dev); 379 370 int mthca_init_av_table(struct mthca_dev *dev); 380 371 int mthca_init_mcg_table(struct mthca_dev *dev); ··· 390 369 void mthca_cleanup_mr_table(struct mthca_dev *dev); 391 370 void mthca_cleanup_eq_table(struct mthca_dev *dev); 392 371 void 
mthca_cleanup_cq_table(struct mthca_dev *dev); 372 + void mthca_cleanup_srq_table(struct mthca_dev *dev); 393 373 void mthca_cleanup_qp_table(struct mthca_dev *dev); 394 374 void mthca_cleanup_av_table(struct mthca_dev *dev); 395 375 void mthca_cleanup_mcg_table(struct mthca_dev *dev); ··· 441 419 void mthca_free_cq(struct mthca_dev *dev, 442 420 struct mthca_cq *cq); 443 421 void mthca_cq_event(struct mthca_dev *dev, u32 cqn); 444 - void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn); 422 + void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn, 423 + struct mthca_srq *srq); 424 + 425 + int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd, 426 + struct ib_srq_attr *attr, struct mthca_srq *srq); 427 + void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq); 428 + void mthca_srq_event(struct mthca_dev *dev, u32 srqn, 429 + enum ib_event_type event_type); 430 + void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr); 431 + int mthca_tavor_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *wr, 432 + struct ib_recv_wr **bad_wr); 433 + int mthca_arbel_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *wr, 434 + struct ib_recv_wr **bad_wr); 445 435 446 436 void mthca_qp_event(struct mthca_dev *dev, u32 qpn, 447 437 enum ib_event_type event_type); ··· 467 433 int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, 468 434 struct ib_recv_wr **bad_wr); 469 435 int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send, 470 - int index, int *dbd, u32 *new_wqe); 436 + int index, int *dbd, __be32 *new_wqe); 471 437 int mthca_alloc_qp(struct mthca_dev *dev, 472 438 struct mthca_pd *pd, 473 439 struct mthca_cq *send_cq,
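The MTHCA_PUT() rework in mthca_dev.h stores through a pointer whose width is chosen from sizeof(source) and is explicitly typed __be16/__be32/__be64, instead of reusing the source's own type. A minimal userspace sketch of the same idea, with htobe16/32/64 from glibc's <endian.h> standing in for cpu_to_be*() (an assumption, not the kernel helpers):

/*
 * Userspace version of the reworked MTHCA_PUT: pick the field width from
 * sizeof(source) and store an explicitly big-endian value at dest+offset.
 */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PUT(dest, source, offset)                                        \
        do {                                                             \
                void *__d = ((char *) (dest) + (offset));                \
                switch (sizeof(source)) {                                \
                case 1: *(uint8_t *)  __d = (source);        break;      \
                case 2: *(uint16_t *) __d = htobe16(source); break;      \
                case 4: *(uint32_t *) __d = htobe32(source); break;      \
                case 8: *(uint64_t *) __d = htobe64(source); break;      \
                }                                                        \
        } while (0)

int main(void)
{
        unsigned char mailbox[16];
        uint32_t qpn  = 0x123456;
        uint16_t pkey = 0xffff;

        memset(mailbox, 0, sizeof mailbox);
        PUT(mailbox, qpn, 0);           /* 32-bit field at offset 0 */
        PUT(mailbox, pkey, 4);          /* 16-bit field at offset 4 */

        for (int i = 0; i < 8; ++i)
                printf("%02x ", mailbox[i]);
        printf("\n");                   /* 00 12 34 56 ff ff 00 00 */
        return 0;
}

The fields land big endian at their offsets regardless of host byte order, which is exactly what the __be-typed stores make explicit to sparse.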
+7 -6
drivers/infiniband/hw/mthca/mthca_doorbell.h
··· 1 1 /* 2 2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 3 3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 4 + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. 4 5 * 5 6 * This software is available to you under a choice of one of two 6 7 * licenses. You may choose to be licensed under the terms of the GNU ··· 58 57 __raw_writeq((__force u64) val, dest); 59 58 } 60 59 61 - static inline void mthca_write64(u32 val[2], void __iomem *dest, 60 + static inline void mthca_write64(__be32 val[2], void __iomem *dest, 62 61 spinlock_t *doorbell_lock) 63 62 { 64 63 __raw_writeq(*(u64 *) val, dest); 65 64 } 66 65 67 - static inline void mthca_write_db_rec(u32 val[2], u32 *db) 66 + static inline void mthca_write_db_rec(__be32 val[2], __be32 *db) 68 67 { 69 68 *(u64 *) db = *(u64 *) val; 70 69 } ··· 87 86 __raw_writel(((__force u32 *) &val)[1], dest + 4); 88 87 } 89 88 90 - static inline void mthca_write64(u32 val[2], void __iomem *dest, 89 + static inline void mthca_write64(__be32 val[2], void __iomem *dest, 91 90 spinlock_t *doorbell_lock) 92 91 { 93 92 unsigned long flags; 94 93 95 94 spin_lock_irqsave(doorbell_lock, flags); 96 - __raw_writel(val[0], dest); 97 - __raw_writel(val[1], dest + 4); 95 + __raw_writel((__force u32) val[0], dest); 96 + __raw_writel((__force u32) val[1], dest + 4); 98 97 spin_unlock_irqrestore(doorbell_lock, flags); 99 98 } 100 99 101 - static inline void mthca_write_db_rec(u32 val[2], u32 *db) 100 + static inline void mthca_write_db_rec(__be32 val[2], __be32 *db) 102 101 { 103 102 db[0] = val[0]; 104 103 wmb();
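mthca_doorbell.h now types doorbells as two __be32 words. The reason the 64-bit path can write them with a single __raw_writeq while the 32-bit path issues two __raw_writel calls is that, once both halves are byte swapped, their in-memory image equals the image of one big-endian 64-bit value. A small userspace check (the doorbell values here are made up):

/*
 * Demo: {htobe32(hi), htobe32(lo)} laid out in memory is byte-for-byte the
 * same as the single big-endian 64-bit value (hi << 32 | lo), so either one
 * 64-bit store or two 32-bit stores produces the same bytes on the bus.
 */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        uint32_t hi = 0x00800001;       /* e.g. opcode | cqn     */
        uint32_t lo = 0x0000002a;       /* e.g. consumer index   */

        uint32_t db32[2] = { htobe32(hi), htobe32(lo) };
        uint64_t db64    = htobe64(((uint64_t) hi << 32) | lo);

        printf("same image: %s\n",
               memcmp(db32, &db64, 8) == 0 ? "yes" : "no");
        return 0;
}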
+32 -31
drivers/infiniband/hw/mthca/mthca_eq.c
··· 1 1 /* 2 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 3 + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. 3 4 * 4 5 * This software is available to you under a choice of one of two 5 6 * licenses. You may choose to be licensed under the terms of the GNU ··· 52 51 * Must be packed because start is 64 bits but only aligned to 32 bits. 53 52 */ 54 53 struct mthca_eq_context { 55 - u32 flags; 56 - u64 start; 57 - u32 logsize_usrpage; 58 - u32 tavor_pd; /* reserved for Arbel */ 59 - u8 reserved1[3]; 60 - u8 intr; 61 - u32 arbel_pd; /* lost_count for Tavor */ 62 - u32 lkey; 63 - u32 reserved2[2]; 64 - u32 consumer_index; 65 - u32 producer_index; 66 - u32 reserved3[4]; 54 + __be32 flags; 55 + __be64 start; 56 + __be32 logsize_usrpage; 57 + __be32 tavor_pd; /* reserved for Arbel */ 58 + u8 reserved1[3]; 59 + u8 intr; 60 + __be32 arbel_pd; /* lost_count for Tavor */ 61 + __be32 lkey; 62 + u32 reserved2[2]; 63 + __be32 consumer_index; 64 + __be32 producer_index; 65 + u32 reserved3[4]; 67 66 } __attribute__((packed)); 68 67 69 68 #define MTHCA_EQ_STATUS_OK ( 0 << 28) ··· 128 127 union { 129 128 u32 raw[6]; 130 129 struct { 131 - u32 cqn; 130 + __be32 cqn; 132 131 } __attribute__((packed)) comp; 133 132 struct { 134 - u16 reserved1; 135 - u16 token; 136 - u32 reserved2; 137 - u8 reserved3[3]; 138 - u8 status; 139 - u64 out_param; 133 + u16 reserved1; 134 + __be16 token; 135 + u32 reserved2; 136 + u8 reserved3[3]; 137 + u8 status; 138 + __be64 out_param; 140 139 } __attribute__((packed)) cmd; 141 140 struct { 142 - u32 qpn; 141 + __be32 qpn; 143 142 } __attribute__((packed)) qp; 144 143 struct { 145 - u32 cqn; 146 - u32 reserved1; 147 - u8 reserved2[3]; 148 - u8 syndrome; 144 + __be32 cqn; 145 + u32 reserved1; 146 + u8 reserved2[3]; 147 + u8 syndrome; 149 148 } __attribute__((packed)) cq_err; 150 149 struct { 151 - u32 reserved1[2]; 152 - u32 port; 150 + u32 reserved1[2]; 151 + __be32 port; 153 152 } __attribute__((packed)) port_change; 154 153 } event; 155 154 u8 reserved3[3]; ··· 168 167 169 168 static inline void tavor_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci) 170 169 { 171 - u32 doorbell[2]; 170 + __be32 doorbell[2]; 172 171 173 172 doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_SET_CI | eq->eqn); 174 173 doorbell[1] = cpu_to_be32(ci & (eq->nent - 1)); ··· 191 190 { 192 191 /* See comment in tavor_set_eq_ci() above. */ 193 192 wmb(); 194 - __raw_writel(cpu_to_be32(ci), dev->eq_regs.arbel.eq_set_ci_base + 195 - eq->eqn * 8); 193 + __raw_writel((__force u32) cpu_to_be32(ci), 194 + dev->eq_regs.arbel.eq_set_ci_base + eq->eqn * 8); 196 195 /* We still want ordering, just not swabbing, so add a barrier */ 197 196 mb(); 198 197 } ··· 207 206 208 207 static inline void tavor_eq_req_not(struct mthca_dev *dev, int eqn) 209 208 { 210 - u32 doorbell[2]; 209 + __be32 doorbell[2]; 211 210 212 211 doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_REQ_NOT | eqn); 213 212 doorbell[1] = 0; ··· 225 224 static inline void disarm_cq(struct mthca_dev *dev, int eqn, int cqn) 226 225 { 227 226 if (!mthca_is_memfree(dev)) { 228 - u32 doorbell[2]; 227 + __be32 doorbell[2]; 229 228 230 229 doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_DISARM_CQ | eqn); 231 230 doorbell[1] = cpu_to_be32(cqn);
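One detail worth calling out in tavor_set_eq_ci(): the consumer index written into doorbell[1] is masked with (eq->nent - 1), which only wraps correctly because the EQ size is a power of two. A tiny standalone illustration (the EQ size here is an arbitrary example):

/*
 * Power-of-two masking of the EQ consumer index: ci & (nent - 1) is the
 * wrapped slot number without a division or modulo.
 */
#include <stdio.h>

int main(void)
{
        unsigned nent = 256;            /* power-of-two EQ size */

        for (unsigned ci = 254; ci < 259; ++ci)
                printf("ci %3u -> slot %3u\n", ci, ci & (nent - 1));
        return 0;
}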
+6 -4
drivers/infiniband/hw/mthca/mthca_mad.c
··· 1 1 /* 2 2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 3 + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. 4 + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. 3 5 * 4 6 * This software is available to you under a choice of one of two 5 7 * licenses. You may choose to be licensed under the terms of the GNU ··· 34 32 * $Id: mthca_mad.c 1349 2004-12-16 21:09:43Z roland $ 35 33 */ 36 34 37 - #include <ib_verbs.h> 38 - #include <ib_mad.h> 39 - #include <ib_smi.h> 35 + #include <rdma/ib_verbs.h> 36 + #include <rdma/ib_mad.h> 37 + #include <rdma/ib_smi.h> 40 38 41 39 #include "mthca_dev.h" 42 40 #include "mthca_cmd.h" ··· 194 192 { 195 193 int err; 196 194 u8 status; 197 - u16 slid = in_wc ? in_wc->slid : IB_LID_PERMISSIVE; 195 + u16 slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE); 198 196 199 197 /* Forward locally generated traps to the SM */ 200 198 if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP &&
+106 -73
drivers/infiniband/hw/mthca/mthca_main.c
··· 1 1 /* 2 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 3 3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 4 + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. 4 5 * 5 6 * This software is available to you under a choice of one of two 6 7 * licenses. You may choose to be licensed under the terms of the GNU ··· 35 34 */ 36 35 37 36 #include <linux/config.h> 38 - #include <linux/version.h> 39 37 #include <linux/module.h> 40 38 #include <linux/init.h> 41 39 #include <linux/errno.h> ··· 171 171 mdev->limits.reserved_mrws = dev_lim->reserved_mrws; 172 172 mdev->limits.reserved_uars = dev_lim->reserved_uars; 173 173 mdev->limits.reserved_pds = dev_lim->reserved_pds; 174 + mdev->limits.port_width_cap = dev_lim->max_port_width; 174 175 175 176 /* IB_DEVICE_RESIZE_MAX_WR not supported by driver. 176 177 May be doable since hardware supports it for SRQ. ··· 213 212 struct mthca_dev_lim dev_lim; 214 213 struct mthca_profile profile; 215 214 struct mthca_init_hca_param init_hca; 216 - struct mthca_adapter adapter; 217 215 218 216 err = mthca_SYS_EN(mdev, &status); 219 217 if (err) { ··· 253 253 profile = default_profile; 254 254 profile.num_uar = dev_lim.uar_size / PAGE_SIZE; 255 255 profile.uarc_size = 0; 256 + if (mdev->mthca_flags & MTHCA_FLAG_SRQ) 257 + profile.num_srq = dev_lim.max_srqs; 256 258 257 259 err = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca); 258 260 if (err < 0) ··· 272 270 goto err_disable; 273 271 } 274 272 275 - err = mthca_QUERY_ADAPTER(mdev, &adapter, &status); 276 - if (err) { 277 - mthca_err(mdev, "QUERY_ADAPTER command failed, aborting.\n"); 278 - goto err_close; 279 - } 280 - if (status) { 281 - mthca_err(mdev, "QUERY_ADAPTER returned status 0x%02x, " 282 - "aborting.\n", status); 283 - err = -EINVAL; 284 - goto err_close; 285 - } 286 - 287 - mdev->eq_table.inta_pin = adapter.inta_pin; 288 - mdev->rev_id = adapter.revision_id; 289 - 290 273 return 0; 291 - 292 - err_close: 293 - mthca_CLOSE_HCA(mdev, 0, &status); 294 274 295 275 err_disable: 296 276 mthca_SYS_DIS(mdev, &status); ··· 426 442 } 427 443 428 444 mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base, 429 - dev_lim->cqc_entry_sz, 430 - mdev->limits.num_cqs, 431 - mdev->limits.reserved_cqs, 0); 445 + dev_lim->cqc_entry_sz, 446 + mdev->limits.num_cqs, 447 + mdev->limits.reserved_cqs, 0); 432 448 if (!mdev->cq_table.table) { 433 449 mthca_err(mdev, "Failed to map CQ context memory, aborting.\n"); 434 450 err = -ENOMEM; 435 451 goto err_unmap_rdb; 452 + } 453 + 454 + if (mdev->mthca_flags & MTHCA_FLAG_SRQ) { 455 + mdev->srq_table.table = 456 + mthca_alloc_icm_table(mdev, init_hca->srqc_base, 457 + dev_lim->srq_entry_sz, 458 + mdev->limits.num_srqs, 459 + mdev->limits.reserved_srqs, 0); 460 + if (!mdev->srq_table.table) { 461 + mthca_err(mdev, "Failed to map SRQ context memory, " 462 + "aborting.\n"); 463 + err = -ENOMEM; 464 + goto err_unmap_cq; 465 + } 436 466 } 437 467 438 468 /* ··· 464 466 if (!mdev->mcg_table.table) { 465 467 mthca_err(mdev, "Failed to map MCG context memory, aborting.\n"); 466 468 err = -ENOMEM; 467 - goto err_unmap_cq; 469 + goto err_unmap_srq; 468 470 } 469 471 470 472 return 0; 473 + 474 + err_unmap_srq: 475 + if (mdev->mthca_flags & MTHCA_FLAG_SRQ) 476 + mthca_free_icm_table(mdev, mdev->srq_table.table); 471 477 472 478 err_unmap_cq: 473 479 mthca_free_icm_table(mdev, mdev->cq_table.table); ··· 508 506 struct mthca_dev_lim dev_lim; 509 507 struct mthca_profile profile; 510 508 struct 
mthca_init_hca_param init_hca; 511 - struct mthca_adapter adapter; 512 509 u64 icm_size; 513 510 u8 status; 514 511 int err; ··· 552 551 profile = default_profile; 553 552 profile.num_uar = dev_lim.uar_size / PAGE_SIZE; 554 553 profile.num_udav = 0; 554 + if (mdev->mthca_flags & MTHCA_FLAG_SRQ) 555 + profile.num_srq = dev_lim.max_srqs; 555 556 556 557 icm_size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca); 557 558 if ((int) icm_size < 0) { ··· 577 574 goto err_free_icm; 578 575 } 579 576 580 - err = mthca_QUERY_ADAPTER(mdev, &adapter, &status); 581 - if (err) { 582 - mthca_err(mdev, "QUERY_ADAPTER command failed, aborting.\n"); 583 - goto err_free_icm; 584 - } 585 - if (status) { 586 - mthca_err(mdev, "QUERY_ADAPTER returned status 0x%02x, " 587 - "aborting.\n", status); 588 - err = -EINVAL; 589 - goto err_free_icm; 590 - } 591 - 592 - mdev->eq_table.inta_pin = adapter.inta_pin; 593 - mdev->rev_id = adapter.revision_id; 594 - 595 577 return 0; 596 578 597 579 err_free_icm: 580 + if (mdev->mthca_flags & MTHCA_FLAG_SRQ) 581 + mthca_free_icm_table(mdev, mdev->srq_table.table); 598 582 mthca_free_icm_table(mdev, mdev->cq_table.table); 599 583 mthca_free_icm_table(mdev, mdev->qp_table.rdb_table); 600 584 mthca_free_icm_table(mdev, mdev->qp_table.eqp_table); ··· 604 614 return err; 605 615 } 606 616 617 + static void mthca_close_hca(struct mthca_dev *mdev) 618 + { 619 + u8 status; 620 + 621 + mthca_CLOSE_HCA(mdev, 0, &status); 622 + 623 + if (mthca_is_memfree(mdev)) { 624 + if (mdev->mthca_flags & MTHCA_FLAG_SRQ) 625 + mthca_free_icm_table(mdev, mdev->srq_table.table); 626 + mthca_free_icm_table(mdev, mdev->cq_table.table); 627 + mthca_free_icm_table(mdev, mdev->qp_table.rdb_table); 628 + mthca_free_icm_table(mdev, mdev->qp_table.eqp_table); 629 + mthca_free_icm_table(mdev, mdev->qp_table.qp_table); 630 + mthca_free_icm_table(mdev, mdev->mr_table.mpt_table); 631 + mthca_free_icm_table(mdev, mdev->mr_table.mtt_table); 632 + mthca_unmap_eq_icm(mdev); 633 + 634 + mthca_UNMAP_ICM_AUX(mdev, &status); 635 + mthca_free_icm(mdev, mdev->fw.arbel.aux_icm); 636 + 637 + mthca_UNMAP_FA(mdev, &status); 638 + mthca_free_icm(mdev, mdev->fw.arbel.fw_icm); 639 + 640 + if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM)) 641 + mthca_DISABLE_LAM(mdev, &status); 642 + } else 643 + mthca_SYS_DIS(mdev, &status); 644 + } 645 + 607 646 static int __devinit mthca_init_hca(struct mthca_dev *mdev) 608 647 { 648 + u8 status; 649 + int err; 650 + struct mthca_adapter adapter; 651 + 609 652 if (mthca_is_memfree(mdev)) 610 - return mthca_init_arbel(mdev); 653 + err = mthca_init_arbel(mdev); 611 654 else 612 - return mthca_init_tavor(mdev); 655 + err = mthca_init_tavor(mdev); 656 + 657 + if (err) 658 + return err; 659 + 660 + err = mthca_QUERY_ADAPTER(mdev, &adapter, &status); 661 + if (err) { 662 + mthca_err(mdev, "QUERY_ADAPTER command failed, aborting.\n"); 663 + goto err_close; 664 + } 665 + if (status) { 666 + mthca_err(mdev, "QUERY_ADAPTER returned status 0x%02x, " 667 + "aborting.\n", status); 668 + err = -EINVAL; 669 + goto err_close; 670 + } 671 + 672 + mdev->eq_table.inta_pin = adapter.inta_pin; 673 + mdev->rev_id = adapter.revision_id; 674 + memcpy(mdev->board_id, adapter.board_id, sizeof mdev->board_id); 675 + 676 + return 0; 677 + 678 + err_close: 679 + mthca_close_hca(mdev); 680 + return err; 613 681 } 614 682 615 683 static int __devinit mthca_setup_hca(struct mthca_dev *dev) ··· 757 709 goto err_cmd_poll; 758 710 } 759 711 712 + err = mthca_init_srq_table(dev); 713 + if (err) { 714 + mthca_err(dev, "Failed 
to initialize " 715 + "shared receive queue table, aborting.\n"); 716 + goto err_cq_table_free; 717 + } 718 + 760 719 err = mthca_init_qp_table(dev); 761 720 if (err) { 762 721 mthca_err(dev, "Failed to initialize " 763 722 "queue pair table, aborting.\n"); 764 - goto err_cq_table_free; 723 + goto err_srq_table_free; 765 724 } 766 725 767 726 err = mthca_init_av_table(dev); ··· 792 737 793 738 err_qp_table_free: 794 739 mthca_cleanup_qp_table(dev); 740 + 741 + err_srq_table_free: 742 + mthca_cleanup_srq_table(dev); 795 743 796 744 err_cq_table_free: 797 745 mthca_cleanup_cq_table(dev); ··· 902 844 return 0; 903 845 } 904 846 905 - static void mthca_close_hca(struct mthca_dev *mdev) 906 - { 907 - u8 status; 908 - 909 - mthca_CLOSE_HCA(mdev, 0, &status); 910 - 911 - if (mthca_is_memfree(mdev)) { 912 - mthca_free_icm_table(mdev, mdev->cq_table.table); 913 - mthca_free_icm_table(mdev, mdev->qp_table.rdb_table); 914 - mthca_free_icm_table(mdev, mdev->qp_table.eqp_table); 915 - mthca_free_icm_table(mdev, mdev->qp_table.qp_table); 916 - mthca_free_icm_table(mdev, mdev->mr_table.mpt_table); 917 - mthca_free_icm_table(mdev, mdev->mr_table.mtt_table); 918 - mthca_unmap_eq_icm(mdev); 919 - 920 - mthca_UNMAP_ICM_AUX(mdev, &status); 921 - mthca_free_icm(mdev, mdev->fw.arbel.aux_icm); 922 - 923 - mthca_UNMAP_FA(mdev, &status); 924 - mthca_free_icm(mdev, mdev->fw.arbel.fw_icm); 925 - 926 - if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM)) 927 - mthca_DISABLE_LAM(mdev, &status); 928 - } else 929 - mthca_SYS_DIS(mdev, &status); 930 - } 931 - 932 847 /* Types of supported HCA */ 933 848 enum { 934 849 TAVOR, /* MT23108 */ ··· 918 887 int is_memfree; 919 888 int is_pcie; 920 889 } mthca_hca_table[] = { 921 - [TAVOR] = { .latest_fw = MTHCA_FW_VER(3, 3, 2), .is_memfree = 0, .is_pcie = 0 }, 922 - [ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 6, 2), .is_memfree = 0, .is_pcie = 1 }, 923 - [ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 0, 1), .is_memfree = 1, .is_pcie = 1 }, 890 + [TAVOR] = { .latest_fw = MTHCA_FW_VER(3, 3, 3), .is_memfree = 0, .is_pcie = 0 }, 891 + [ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 7, 0), .is_memfree = 0, .is_pcie = 1 }, 892 + [ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 1, 0), .is_memfree = 1, .is_pcie = 1 }, 924 893 [SINAI] = { .latest_fw = MTHCA_FW_VER(1, 0, 1), .is_memfree = 1, .is_pcie = 1 } 925 894 }; 926 895 ··· 1082 1051 mthca_cleanup_mcg_table(mdev); 1083 1052 mthca_cleanup_av_table(mdev); 1084 1053 mthca_cleanup_qp_table(mdev); 1054 + mthca_cleanup_srq_table(mdev); 1085 1055 mthca_cleanup_cq_table(mdev); 1086 1056 mthca_cmd_use_polling(mdev); 1087 1057 mthca_cleanup_eq_table(mdev); ··· 1132 1100 mthca_cleanup_mcg_table(mdev); 1133 1101 mthca_cleanup_av_table(mdev); 1134 1102 mthca_cleanup_qp_table(mdev); 1103 + mthca_cleanup_srq_table(mdev); 1135 1104 mthca_cleanup_cq_table(mdev); 1136 1105 mthca_cmd_use_polling(mdev); 1137 1106 mthca_cleanup_eq_table(mdev);
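mthca_main.c moves the QUERY_ADAPTER call into a common mthca_init_hca() and slots the new SRQ table init between the CQ and QP tables, so the goto-based unwind chain in mthca_setup_hca() gains a matching err_srq_table_free label. A standalone sketch of that LIFO unwind pattern, with stub functions rather than the real init/cleanup routines:

/*
 * LIFO error unwind: each init step gets a cleanup label, and a failure
 * jumps to the label that tears down only what was already initialized,
 * in reverse order.  All functions below are stubs for illustration.
 */
#include <stdio.h>

static int init_cq(void)  { puts("init cq");  return 0; }
static int init_srq(void) { puts("init srq"); return 0; }
static int init_qp(void)  { puts("init qp");  return -1; } /* simulate failure */

static void cleanup_srq(void) { puts("cleanup srq"); }
static void cleanup_cq(void)  { puts("cleanup cq");  }

static int setup(void)
{
        int err;

        if ((err = init_cq()))
                return err;
        if ((err = init_srq()))
                goto err_cq;
        if ((err = init_qp()))
                goto err_srq;
        return 0;

err_srq:
        cleanup_srq();
err_cq:
        cleanup_cq();
        return err;
}

int main(void)
{
        printf("setup() = %d\n", setup());
        return 0;
}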
+20 -16
drivers/infiniband/hw/mthca/mthca_mcg.c
··· 42 42 }; 43 43 44 44 struct mthca_mgm { 45 - u32 next_gid_index; 46 - u32 reserved[3]; 47 - u8 gid[16]; 48 - u32 qp[MTHCA_QP_PER_MGM]; 45 + __be32 next_gid_index; 46 + u32 reserved[3]; 47 + u8 gid[16]; 48 + __be32 qp[MTHCA_QP_PER_MGM]; 49 49 }; 50 50 51 51 static const u8 zero_gid[16]; /* automatically initialized to 0 */ ··· 94 94 if (0) 95 95 mthca_dbg(dev, "Hash for %04x:%04x:%04x:%04x:" 96 96 "%04x:%04x:%04x:%04x is %04x\n", 97 - be16_to_cpu(((u16 *) gid)[0]), be16_to_cpu(((u16 *) gid)[1]), 98 - be16_to_cpu(((u16 *) gid)[2]), be16_to_cpu(((u16 *) gid)[3]), 99 - be16_to_cpu(((u16 *) gid)[4]), be16_to_cpu(((u16 *) gid)[5]), 100 - be16_to_cpu(((u16 *) gid)[6]), be16_to_cpu(((u16 *) gid)[7]), 97 + be16_to_cpu(((__be16 *) gid)[0]), 98 + be16_to_cpu(((__be16 *) gid)[1]), 99 + be16_to_cpu(((__be16 *) gid)[2]), 100 + be16_to_cpu(((__be16 *) gid)[3]), 101 + be16_to_cpu(((__be16 *) gid)[4]), 102 + be16_to_cpu(((__be16 *) gid)[5]), 103 + be16_to_cpu(((__be16 *) gid)[6]), 104 + be16_to_cpu(((__be16 *) gid)[7]), 101 105 *hash); 102 106 103 107 *index = *hash; ··· 262 258 if (index == -1) { 263 259 mthca_err(dev, "MGID %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x " 264 260 "not found\n", 265 - be16_to_cpu(((u16 *) gid->raw)[0]), 266 - be16_to_cpu(((u16 *) gid->raw)[1]), 267 - be16_to_cpu(((u16 *) gid->raw)[2]), 268 - be16_to_cpu(((u16 *) gid->raw)[3]), 269 - be16_to_cpu(((u16 *) gid->raw)[4]), 270 - be16_to_cpu(((u16 *) gid->raw)[5]), 271 - be16_to_cpu(((u16 *) gid->raw)[6]), 272 - be16_to_cpu(((u16 *) gid->raw)[7])); 261 + be16_to_cpu(((__be16 *) gid->raw)[0]), 262 + be16_to_cpu(((__be16 *) gid->raw)[1]), 263 + be16_to_cpu(((__be16 *) gid->raw)[2]), 264 + be16_to_cpu(((__be16 *) gid->raw)[3]), 265 + be16_to_cpu(((__be16 *) gid->raw)[4]), 266 + be16_to_cpu(((__be16 *) gid->raw)[5]), 267 + be16_to_cpu(((__be16 *) gid->raw)[6]), 268 + be16_to_cpu(((__be16 *) gid->raw)[7])); 273 269 err = -EINVAL; 274 270 goto out; 275 271 }
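The mthca_mcg.c changes are mostly sparse annotations: the GID debug output is printed as eight 16-bit groups taken big endian, so the casts become (__be16 *). For reference, a userspace version of that formatting over a made-up GID:

/*
 * Print a 16-byte GID as eight big-endian 16-bit groups, the same layout
 * the multicast-group debug messages use.  The sample GID is arbitrary.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint8_t gid[16] = { 0xfe, 0x80, 0, 0, 0, 0, 0, 0,
                            0x00, 0x02, 0xc9, 0xff, 0xfe, 0x12, 0x34, 0x56 };

        for (int i = 0; i < 8; ++i)
                printf("%04x%s", (gid[2 * i] << 8) | gid[2 * i + 1],
                       i < 7 ? ":" : "\n");
        return 0;
}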
+9 -3
drivers/infiniband/hw/mthca/mthca_memfree.c
··· 1 1 /* 2 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 3 3 * Copyright (c) 2005 Cisco Systems. All rights reserved. 4 + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. 4 5 * 5 6 * This software is available to you under a choice of one of two 6 7 * licenses. You may choose to be licensed under the terms of the GNU ··· 286 285 { 287 286 struct mthca_icm_table *table; 288 287 int num_icm; 288 + unsigned chunk_size; 289 289 int i; 290 290 u8 status; 291 291 ··· 307 305 table->icm[i] = NULL; 308 306 309 307 for (i = 0; i * MTHCA_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) { 310 - table->icm[i] = mthca_alloc_icm(dev, MTHCA_TABLE_CHUNK_SIZE >> PAGE_SHIFT, 308 + chunk_size = MTHCA_TABLE_CHUNK_SIZE; 309 + if ((i + 1) * MTHCA_TABLE_CHUNK_SIZE > nobj * obj_size) 310 + chunk_size = nobj * obj_size - i * MTHCA_TABLE_CHUNK_SIZE; 311 + 312 + table->icm[i] = mthca_alloc_icm(dev, chunk_size >> PAGE_SHIFT, 311 313 (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) | 312 314 __GFP_NOWARN); 313 315 if (!table->icm[i]) ··· 487 481 } 488 482 } 489 483 490 - int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, u32 **db) 484 + int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, __be32 **db) 491 485 { 492 486 int group; 493 487 int start, end, dir; ··· 570 564 571 565 page->db_rec[j] = cpu_to_be64((qn << 8) | (type << 5)); 572 566 573 - *db = (u32 *) &page->db_rec[j]; 567 + *db = (__be32 *) &page->db_rec[j]; 574 568 575 569 out: 576 570 up(&dev->db_tab->mutex);
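The mthca_memfree.c hunk clamps the size of the chunk being allocated so the ICM table never maps memory past nobj * obj_size, instead of always grabbing a full MTHCA_TABLE_CHUNK_SIZE. A quick userspace check of that arithmetic (chunk size and object counts below are made-up example values):

/*
 * Last-chunk clamping: the final chunk is cut down so the chunks never
 * extend beyond the total table size.
 */
#include <stdio.h>

int main(void)
{
        const int CHUNK = 256 * 1024;   /* stand-in for MTHCA_TABLE_CHUNK_SIZE */
        int obj_size = 64, nobj = 5000; /* 320000 bytes of context memory */
        int total = nobj * obj_size;

        for (int i = 0; i * CHUNK < total; ++i) {
                int chunk_size = CHUNK;

                if ((i + 1) * CHUNK > total)
                        chunk_size = total - i * CHUNK;
                printf("chunk %d: %d bytes\n", i, chunk_size);
        }
        return 0;
}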
+3 -2
drivers/infiniband/hw/mthca/mthca_memfree.h
··· 1 1 /* 2 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 3 3 * Copyright (c) 2005 Cisco Systems. All rights reserved. 4 + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. 4 5 * 5 6 * This software is available to you under a choice of one of two 6 7 * licenses. You may choose to be licensed under the terms of the GNU ··· 138 137 139 138 struct mthca_db_page { 140 139 DECLARE_BITMAP(used, MTHCA_DB_REC_PER_PAGE); 141 - u64 *db_rec; 140 + __be64 *db_rec; 142 141 dma_addr_t mapping; 143 142 }; 144 143 ··· 173 172 174 173 int mthca_init_db_tab(struct mthca_dev *dev); 175 174 void mthca_cleanup_db_tab(struct mthca_dev *dev); 176 - int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, u32 **db); 175 + int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, __be32 **db); 177 176 void mthca_free_db(struct mthca_dev *dev, int type, int db_index); 178 177 179 178 #endif /* MTHCA_MEMFREE_H */
+18 -17
drivers/infiniband/hw/mthca/mthca_mr.c
··· 1 1 /* 2 2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 3 + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. 3 4 * 4 5 * This software is available to you under a choice of one of two 5 6 * licenses. You may choose to be licensed under the terms of the GNU ··· 51 50 * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits. 52 51 */ 53 52 struct mthca_mpt_entry { 54 - u32 flags; 55 - u32 page_size; 56 - u32 key; 57 - u32 pd; 58 - u64 start; 59 - u64 length; 60 - u32 lkey; 61 - u32 window_count; 62 - u32 window_count_limit; 63 - u64 mtt_seg; 64 - u32 mtt_sz; /* Arbel only */ 65 - u32 reserved[2]; 53 + __be32 flags; 54 + __be32 page_size; 55 + __be32 key; 56 + __be32 pd; 57 + __be64 start; 58 + __be64 length; 59 + __be32 lkey; 60 + __be32 window_count; 61 + __be32 window_count_limit; 62 + __be64 mtt_seg; 63 + __be32 mtt_sz; /* Arbel only */ 64 + u32 reserved[2]; 66 65 } __attribute__((packed)); 67 66 68 67 #define MTHCA_MPT_FLAG_SW_OWNS (0xfUL << 28) ··· 248 247 int start_index, u64 *buffer_list, int list_len) 249 248 { 250 249 struct mthca_mailbox *mailbox; 251 - u64 *mtt_entry; 250 + __be64 *mtt_entry; 252 251 int err = 0; 253 252 u8 status; 254 253 int i; ··· 390 389 for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) { 391 390 if (i % 4 == 0) 392 391 printk("[%02x] ", i * 4); 393 - printk(" %08x", be32_to_cpu(((u32 *) mpt_entry)[i])); 392 + printk(" %08x", be32_to_cpu(((__be32 *) mpt_entry)[i])); 394 393 if ((i + 1) % 4 == 0) 395 394 printk("\n"); 396 395 } ··· 459 458 static void mthca_free_region(struct mthca_dev *dev, u32 lkey) 460 459 { 461 460 mthca_table_put(dev, dev->mr_table.mpt_table, 462 - arbel_key_to_hw_index(lkey)); 461 + key_to_hw_index(dev, lkey)); 463 462 464 463 mthca_free(&dev->mr_table.mpt_alloc, key_to_hw_index(dev, lkey)); 465 464 } ··· 563 562 for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) { 564 563 if (i % 4 == 0) 565 564 printk("[%02x] ", i * 4); 566 - printk(" %08x", be32_to_cpu(((u32 *) mpt_entry)[i])); 565 + printk(" %08x", be32_to_cpu(((__be32 *) mpt_entry)[i])); 567 566 if ((i + 1) % 4 == 0) 568 567 printk("\n"); 569 568 } ··· 670 669 mpt_entry.length = cpu_to_be64(list_len * (1ull << fmr->attr.page_size)); 671 670 mpt_entry.start = cpu_to_be64(iova); 672 671 673 - writel(mpt_entry.lkey, &fmr->mem.tavor.mpt->key); 672 + __raw_writel((__force u32) mpt_entry.lkey, &fmr->mem.tavor.mpt->key); 674 673 memcpy_toio(&fmr->mem.tavor.mpt->start, &mpt_entry.start, 675 674 offsetof(struct mthca_mpt_entry, window_count) - 676 675 offsetof(struct mthca_mpt_entry, start));
+1
drivers/infiniband/hw/mthca/mthca_pd.c
··· 1 1 /* 2 2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 3 3 * Copyright (c) 2005 Cisco Systems. All rights reserved. 4 + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. 4 5 * 5 6 * This software is available to you under a choice of one of two 6 7 * licenses. You may choose to be licensed under the terms of the GNU
+2
drivers/infiniband/hw/mthca/mthca_profile.c
··· 1 1 /* 2 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 3 + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. 3 4 * 4 5 * This software is available to you under a choice of one of two 5 6 * licenses. You may choose to be licensed under the terms of the GNU ··· 102 101 profile[MTHCA_RES_UARC].size = request->uarc_size; 103 102 104 103 profile[MTHCA_RES_QP].num = request->num_qp; 104 + profile[MTHCA_RES_SRQ].num = request->num_srq; 105 105 profile[MTHCA_RES_EQP].num = request->num_qp; 106 106 profile[MTHCA_RES_RDB].num = request->num_qp * request->rdb_per_qp; 107 107 profile[MTHCA_RES_CQ].num = request->num_cq;
+2
drivers/infiniband/hw/mthca/mthca_profile.h
··· 1 1 /* 2 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 3 + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. 3 4 * 4 5 * This software is available to you under a choice of one of two 5 6 * licenses. You may choose to be licensed under the terms of the GNU ··· 42 41 struct mthca_profile { 43 42 int num_qp; 44 43 int rdb_per_qp; 44 + int num_srq; 45 45 int num_cq; 46 46 int num_mcg; 47 47 int num_mpt;
+105 -10
drivers/infiniband/hw/mthca/mthca_provider.c
··· 2 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 3 3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 4 4 * Copyright (c) 2005 Cisco Systems. All rights reserved. 5 + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. 6 + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. 5 7 * 6 8 * This software is available to you under a choice of one of two 7 9 * licenses. You may choose to be licensed under the terms of the GNU ··· 36 34 * $Id: mthca_provider.c 1397 2004-12-28 05:09:00Z roland $ 37 35 */ 38 36 39 - #include <ib_smi.h> 37 + #include <rdma/ib_smi.h> 40 38 #include <linux/mm.h> 41 39 42 40 #include "mthca_dev.h" ··· 81 79 } 82 80 83 81 props->device_cap_flags = mdev->device_cap_flags; 84 - props->vendor_id = be32_to_cpup((u32 *) (out_mad->data + 36)) & 82 + props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) & 85 83 0xffffff; 86 - props->vendor_part_id = be16_to_cpup((u16 *) (out_mad->data + 30)); 87 - props->hw_ver = be16_to_cpup((u16 *) (out_mad->data + 32)); 84 + props->vendor_part_id = be16_to_cpup((__be16 *) (out_mad->data + 30)); 85 + props->hw_ver = be16_to_cpup((__be16 *) (out_mad->data + 32)); 88 86 memcpy(&props->sys_image_guid, out_mad->data + 4, 8); 89 87 memcpy(&props->node_guid, out_mad->data + 12, 8); 90 88 ··· 120 118 if (!in_mad || !out_mad) 121 119 goto out; 122 120 121 + memset(props, 0, sizeof *props); 122 + 123 123 memset(in_mad, 0, sizeof *in_mad); 124 124 in_mad->base_version = 1; 125 125 in_mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; ··· 140 136 goto out; 141 137 } 142 138 143 - props->lid = be16_to_cpup((u16 *) (out_mad->data + 16)); 139 + props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16)); 144 140 props->lmc = out_mad->data[34] & 0x7; 145 - props->sm_lid = be16_to_cpup((u16 *) (out_mad->data + 18)); 141 + props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18)); 146 142 props->sm_sl = out_mad->data[36] & 0xf; 147 143 props->state = out_mad->data[32] & 0xf; 148 144 props->phys_state = out_mad->data[33] >> 4; 149 - props->port_cap_flags = be32_to_cpup((u32 *) (out_mad->data + 20)); 145 + props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20)); 150 146 props->gid_tbl_len = to_mdev(ibdev)->limits.gid_table_len; 147 + props->max_msg_sz = 0x80000000; 151 148 props->pkey_tbl_len = to_mdev(ibdev)->limits.pkey_table_len; 152 - props->qkey_viol_cntr = be16_to_cpup((u16 *) (out_mad->data + 48)); 149 + props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48)); 153 150 props->active_width = out_mad->data[31] & 0xf; 154 151 props->active_speed = out_mad->data[35] >> 4; 155 152 ··· 226 221 goto out; 227 222 } 228 223 229 - *pkey = be16_to_cpu(((u16 *) out_mad->data)[index % 32]); 224 + *pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]); 230 225 231 226 out: 232 227 kfree(in_mad); ··· 421 416 { 422 417 mthca_destroy_ah(to_mdev(ah->device), to_mah(ah)); 423 418 kfree(ah); 419 + 420 + return 0; 421 + } 422 + 423 + static struct ib_srq *mthca_create_srq(struct ib_pd *pd, 424 + struct ib_srq_init_attr *init_attr, 425 + struct ib_udata *udata) 426 + { 427 + struct mthca_create_srq ucmd; 428 + struct mthca_ucontext *context = NULL; 429 + struct mthca_srq *srq; 430 + int err; 431 + 432 + srq = kmalloc(sizeof *srq, GFP_KERNEL); 433 + if (!srq) 434 + return ERR_PTR(-ENOMEM); 435 + 436 + if (pd->uobject) { 437 + context = to_mucontext(pd->uobject->context); 438 + 439 + if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) 440 + return ERR_PTR(-EFAULT); 441 + 
442 + err = mthca_map_user_db(to_mdev(pd->device), &context->uar, 443 + context->db_tab, ucmd.db_index, 444 + ucmd.db_page); 445 + 446 + if (err) 447 + goto err_free; 448 + 449 + srq->mr.ibmr.lkey = ucmd.lkey; 450 + srq->db_index = ucmd.db_index; 451 + } 452 + 453 + err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd), 454 + &init_attr->attr, srq); 455 + 456 + if (err && pd->uobject) 457 + mthca_unmap_user_db(to_mdev(pd->device), &context->uar, 458 + context->db_tab, ucmd.db_index); 459 + 460 + if (err) 461 + goto err_free; 462 + 463 + if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof (__u32))) { 464 + mthca_free_srq(to_mdev(pd->device), srq); 465 + err = -EFAULT; 466 + goto err_free; 467 + } 468 + 469 + return &srq->ibsrq; 470 + 471 + err_free: 472 + kfree(srq); 473 + 474 + return ERR_PTR(err); 475 + } 476 + 477 + static int mthca_destroy_srq(struct ib_srq *srq) 478 + { 479 + struct mthca_ucontext *context; 480 + 481 + if (srq->uobject) { 482 + context = to_mucontext(srq->uobject->context); 483 + 484 + mthca_unmap_user_db(to_mdev(srq->device), &context->uar, 485 + context->db_tab, to_msrq(srq)->db_index); 486 + } 487 + 488 + mthca_free_srq(to_mdev(srq->device), to_msrq(srq)); 489 + kfree(srq); 424 490 425 491 return 0; 426 492 } ··· 1032 956 } 1033 957 } 1034 958 959 + static ssize_t show_board(struct class_device *cdev, char *buf) 960 + { 961 + struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev); 962 + return sprintf(buf, "%.*s\n", MTHCA_BOARD_ID_LEN, dev->board_id); 963 + } 964 + 1035 965 static CLASS_DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); 1036 966 static CLASS_DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL); 1037 967 static CLASS_DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL); 968 + static CLASS_DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL); 1038 969 1039 970 static struct class_device_attribute *mthca_class_attributes[] = { 1040 971 &class_device_attr_hw_rev, 1041 972 &class_device_attr_fw_ver, 1042 - &class_device_attr_hca_type 973 + &class_device_attr_hca_type, 974 + &class_device_attr_board_id 1043 975 }; 1044 976 1045 977 int mthca_register_device(struct mthca_dev *dev) ··· 1074 990 dev->ib_dev.dealloc_pd = mthca_dealloc_pd; 1075 991 dev->ib_dev.create_ah = mthca_ah_create; 1076 992 dev->ib_dev.destroy_ah = mthca_ah_destroy; 993 + 994 + if (dev->mthca_flags & MTHCA_FLAG_SRQ) { 995 + dev->ib_dev.create_srq = mthca_create_srq; 996 + dev->ib_dev.destroy_srq = mthca_destroy_srq; 997 + 998 + if (mthca_is_memfree(dev)) 999 + dev->ib_dev.post_srq_recv = mthca_arbel_post_srq_recv; 1000 + else 1001 + dev->ib_dev.post_srq_recv = mthca_tavor_post_srq_recv; 1002 + } 1003 + 1077 1004 dev->ib_dev.create_qp = mthca_create_qp; 1078 1005 dev->ib_dev.modify_qp = mthca_modify_qp; 1079 1006 dev->ib_dev.destroy_qp = mthca_destroy_qp;
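mthca_provider.c wires create_srq, destroy_srq and the post_srq_recv methods into the ib_device when the HCA advertises SRQ support. The sketch below shows how a kernel consumer might exercise those entry points; it assumes the 2.6.14-era core verbs (ib_create_srq, ib_post_srq_recv, ib_destroy_srq) and an already allocated PD plus a registered, DMA-mapped buffer. It is illustrative only, not code from this merge.

/*
 * Hedged sketch of a kernel ULP using the SRQ verbs the driver now
 * implements.  Error handling is abbreviated.
 */
#include <linux/err.h>
#include <rdma/ib_verbs.h>

static int demo_use_srq(struct ib_pd *pd, u64 dma_addr, u32 lkey)
{
        struct ib_srq_init_attr srq_attr = {
                .attr = {
                        .max_wr  = 128,         /* example queue depth     */
                        .max_sge = 1,           /* one scatter entry/WQE   */
                },
        };
        struct ib_sge sge = {
                .addr   = dma_addr,
                .length = 2048,
                .lkey   = lkey,
        };
        struct ib_recv_wr wr = { .sg_list = &sge, .num_sge = 1 };
        struct ib_recv_wr *bad_wr;
        struct ib_srq *srq;
        int err;

        srq = ib_create_srq(pd, &srq_attr);
        if (IS_ERR(srq))
                return PTR_ERR(srq);

        /* Receives are posted to the SRQ instead of a per-QP receive queue. */
        err = ib_post_srq_recv(srq, &wr, &bad_wr);

        ib_destroy_srq(srq);
        return err;
}

A real consumer would of course keep the SRQ around and attach its QPs to it through the QP init attributes rather than destroying it right away.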
+41 -13
drivers/infiniband/hw/mthca/mthca_provider.h
··· 1 1 /* 2 2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 3 3 * Copyright (c) 2005 Cisco Systems. All rights reserved. 4 + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. 4 5 * 5 6 * This software is available to you under a choice of one of two 6 7 * licenses. You may choose to be licensed under the terms of the GNU ··· 37 36 #ifndef MTHCA_PROVIDER_H 38 37 #define MTHCA_PROVIDER_H 39 38 40 - #include <ib_verbs.h> 41 - #include <ib_pack.h> 39 + #include <rdma/ib_verbs.h> 40 + #include <rdma/ib_pack.h> 42 41 43 42 #define MTHCA_MPT_FLAG_ATOMIC (1 << 14) 44 43 #define MTHCA_MPT_FLAG_REMOTE_WRITE (1 << 13) ··· 49 48 struct mthca_buf_list { 50 49 void *buf; 51 50 DECLARE_PCI_UNMAP_ADDR(mapping) 51 + }; 52 + 53 + union mthca_buf { 54 + struct mthca_buf_list direct; 55 + struct mthca_buf_list *page_list; 52 56 }; 53 57 54 58 struct mthca_uar { ··· 187 181 188 182 /* Next fields are Arbel only */ 189 183 int set_ci_db_index; 190 - u32 *set_ci_db; 184 + __be32 *set_ci_db; 191 185 int arm_db_index; 192 - u32 *arm_db; 186 + __be32 *arm_db; 193 187 int arm_sn; 194 188 195 - union { 196 - struct mthca_buf_list direct; 197 - struct mthca_buf_list *page_list; 198 - } queue; 189 + union mthca_buf queue; 199 190 struct mthca_mr mr; 200 191 wait_queue_head_t wait; 192 + }; 193 + 194 + struct mthca_srq { 195 + struct ib_srq ibsrq; 196 + spinlock_t lock; 197 + atomic_t refcount; 198 + int srqn; 199 + int max; 200 + int max_gs; 201 + int wqe_shift; 202 + int first_free; 203 + int last_free; 204 + u16 counter; /* Arbel only */ 205 + int db_index; /* Arbel only */ 206 + __be32 *db; /* Arbel only */ 207 + void *last; 208 + 209 + int is_direct; 210 + u64 *wrid; 211 + union mthca_buf queue; 212 + struct mthca_mr mr; 213 + 214 + wait_queue_head_t wait; 201 215 }; 202 216 203 217 struct mthca_wq { ··· 232 206 int wqe_shift; 233 207 234 208 int db_index; /* Arbel only */ 235 - u32 *db; 209 + __be32 *db; 236 210 }; 237 211 238 212 struct mthca_qp { ··· 253 227 int send_wqe_offset; 254 228 255 229 u64 *wrid; 256 - union { 257 - struct mthca_buf_list direct; 258 - struct mthca_buf_list *page_list; 259 - } queue; 230 + union mthca_buf queue; 260 231 261 232 wait_queue_head_t wait; 262 233 }; ··· 298 275 static inline struct mthca_cq *to_mcq(struct ib_cq *ibcq) 299 276 { 300 277 return container_of(ibcq, struct mthca_cq, ibcq); 278 + } 279 + 280 + static inline struct mthca_srq *to_msrq(struct ib_srq *ibsrq) 281 + { 282 + return container_of(ibsrq, struct mthca_srq, ibsrq); 301 283 } 302 284 303 285 static inline struct mthca_qp *to_mqp(struct ib_qp *ibqp)
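mthca_provider.h adds struct mthca_srq with the embedded ib_srq plus the to_msrq() accessor, following the same container_of pattern the other mthca objects use. A toy userspace version of that pattern (the struct layouts here are stand-ins, not the driver's):

/*
 * container_of: recover the outer provider structure from a pointer to the
 * embedded generic object.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *) ((char *) (ptr) - offsetof(type, member)))

struct ib_srq  { int dummy; };
struct toy_srq { int srqn; struct ib_srq ibsrq; };

static struct toy_srq *to_msrq(struct ib_srq *ibsrq)
{
        return container_of(ibsrq, struct toy_srq, ibsrq);
}

int main(void)
{
        struct toy_srq srq = { .srqn = 42 };

        printf("srqn via to_msrq(): %d\n", to_msrq(&srq.ibsrq)->srqn);
        return 0;
}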
+105 -257
drivers/infiniband/hw/mthca/mthca_qp.c
··· 1 1 /* 2 2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 3 3 * Copyright (c) 2005 Cisco Systems. All rights reserved. 4 + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. 5 + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. 4 6 * 5 7 * This software is available to you under a choice of one of two 6 8 * licenses. You may choose to be licensed under the terms of the GNU ··· 37 35 38 36 #include <linux/init.h> 39 37 40 - #include <ib_verbs.h> 41 - #include <ib_cache.h> 42 - #include <ib_pack.h> 38 + #include <rdma/ib_verbs.h> 39 + #include <rdma/ib_cache.h> 40 + #include <rdma/ib_pack.h> 43 41 44 42 #include "mthca_dev.h" 45 43 #include "mthca_cmd.h" 46 44 #include "mthca_memfree.h" 45 + #include "mthca_wqe.h" 47 46 48 47 enum { 49 48 MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE, ··· 98 95 }; 99 96 100 97 struct mthca_qp_path { 101 - u32 port_pkey; 102 - u8 rnr_retry; 103 - u8 g_mylmc; 104 - u16 rlid; 105 - u8 ackto; 106 - u8 mgid_index; 107 - u8 static_rate; 108 - u8 hop_limit; 109 - u32 sl_tclass_flowlabel; 110 - u8 rgid[16]; 98 + __be32 port_pkey; 99 + u8 rnr_retry; 100 + u8 g_mylmc; 101 + __be16 rlid; 102 + u8 ackto; 103 + u8 mgid_index; 104 + u8 static_rate; 105 + u8 hop_limit; 106 + __be32 sl_tclass_flowlabel; 107 + u8 rgid[16]; 111 108 } __attribute__((packed)); 112 109 113 110 struct mthca_qp_context { 114 - u32 flags; 115 - u32 tavor_sched_queue; /* Reserved on Arbel */ 116 - u8 mtu_msgmax; 117 - u8 rq_size_stride; /* Reserved on Tavor */ 118 - u8 sq_size_stride; /* Reserved on Tavor */ 119 - u8 rlkey_arbel_sched_queue; /* Reserved on Tavor */ 120 - u32 usr_page; 121 - u32 local_qpn; 122 - u32 remote_qpn; 123 - u32 reserved1[2]; 111 + __be32 flags; 112 + __be32 tavor_sched_queue; /* Reserved on Arbel */ 113 + u8 mtu_msgmax; 114 + u8 rq_size_stride; /* Reserved on Tavor */ 115 + u8 sq_size_stride; /* Reserved on Tavor */ 116 + u8 rlkey_arbel_sched_queue; /* Reserved on Tavor */ 117 + __be32 usr_page; 118 + __be32 local_qpn; 119 + __be32 remote_qpn; 120 + u32 reserved1[2]; 124 121 struct mthca_qp_path pri_path; 125 122 struct mthca_qp_path alt_path; 126 - u32 rdd; 127 - u32 pd; 128 - u32 wqe_base; 129 - u32 wqe_lkey; 130 - u32 params1; 131 - u32 reserved2; 132 - u32 next_send_psn; 133 - u32 cqn_snd; 134 - u32 snd_wqe_base_l; /* Next send WQE on Tavor */ 135 - u32 snd_db_index; /* (debugging only entries) */ 136 - u32 last_acked_psn; 137 - u32 ssn; 138 - u32 params2; 139 - u32 rnr_nextrecvpsn; 140 - u32 ra_buff_indx; 141 - u32 cqn_rcv; 142 - u32 rcv_wqe_base_l; /* Next recv WQE on Tavor */ 143 - u32 rcv_db_index; /* (debugging only entries) */ 144 - u32 qkey; 145 - u32 srqn; 146 - u32 rmsn; 147 - u16 rq_wqe_counter; /* reserved on Tavor */ 148 - u16 sq_wqe_counter; /* reserved on Tavor */ 149 - u32 reserved3[18]; 123 + __be32 rdd; 124 + __be32 pd; 125 + __be32 wqe_base; 126 + __be32 wqe_lkey; 127 + __be32 params1; 128 + __be32 reserved2; 129 + __be32 next_send_psn; 130 + __be32 cqn_snd; 131 + __be32 snd_wqe_base_l; /* Next send WQE on Tavor */ 132 + __be32 snd_db_index; /* (debugging only entries) */ 133 + __be32 last_acked_psn; 134 + __be32 ssn; 135 + __be32 params2; 136 + __be32 rnr_nextrecvpsn; 137 + __be32 ra_buff_indx; 138 + __be32 cqn_rcv; 139 + __be32 rcv_wqe_base_l; /* Next recv WQE on Tavor */ 140 + __be32 rcv_db_index; /* (debugging only entries) */ 141 + __be32 qkey; 142 + __be32 srqn; 143 + __be32 rmsn; 144 + __be16 rq_wqe_counter; /* reserved on Tavor */ 145 + __be16 sq_wqe_counter; /* reserved on Tavor */ 146 + u32 
reserved3[18]; 150 147 } __attribute__((packed)); 151 148 152 149 struct mthca_qp_param { 153 - u32 opt_param_mask; 154 - u32 reserved1; 150 + __be32 opt_param_mask; 151 + u32 reserved1; 155 152 struct mthca_qp_context context; 156 - u32 reserved2[62]; 153 + u32 reserved2[62]; 157 154 } __attribute__((packed)); 158 155 159 156 enum { ··· 174 171 MTHCA_QP_OPTPAR_ACK_TIMEOUT = 1 << 14, 175 172 MTHCA_QP_OPTPAR_RNR_RETRY = 1 << 15, 176 173 MTHCA_QP_OPTPAR_SCHED_QUEUE = 1 << 16 177 - }; 178 - 179 - enum { 180 - MTHCA_NEXT_DBD = 1 << 7, 181 - MTHCA_NEXT_FENCE = 1 << 6, 182 - MTHCA_NEXT_CQ_UPDATE = 1 << 3, 183 - MTHCA_NEXT_EVENT_GEN = 1 << 2, 184 - MTHCA_NEXT_SOLICIT = 1 << 1, 185 - 186 - MTHCA_MLX_VL15 = 1 << 17, 187 - MTHCA_MLX_SLR = 1 << 16 188 - }; 189 - 190 - enum { 191 - MTHCA_INVAL_LKEY = 0x100 192 - }; 193 - 194 - struct mthca_next_seg { 195 - u32 nda_op; /* [31:6] next WQE [4:0] next opcode */ 196 - u32 ee_nds; /* [31:8] next EE [7] DBD [6] F [5:0] next WQE size */ 197 - u32 flags; /* [3] CQ [2] Event [1] Solicit */ 198 - u32 imm; /* immediate data */ 199 - }; 200 - 201 - struct mthca_tavor_ud_seg { 202 - u32 reserved1; 203 - u32 lkey; 204 - u64 av_addr; 205 - u32 reserved2[4]; 206 - u32 dqpn; 207 - u32 qkey; 208 - u32 reserved3[2]; 209 - }; 210 - 211 - struct mthca_arbel_ud_seg { 212 - u32 av[8]; 213 - u32 dqpn; 214 - u32 qkey; 215 - u32 reserved[2]; 216 - }; 217 - 218 - struct mthca_bind_seg { 219 - u32 flags; /* [31] Atomic [30] rem write [29] rem read */ 220 - u32 reserved; 221 - u32 new_rkey; 222 - u32 lkey; 223 - u64 addr; 224 - u64 length; 225 - }; 226 - 227 - struct mthca_raddr_seg { 228 - u64 raddr; 229 - u32 rkey; 230 - u32 reserved; 231 - }; 232 - 233 - struct mthca_atomic_seg { 234 - u64 swap_add; 235 - u64 compare; 236 - }; 237 - 238 - struct mthca_data_seg { 239 - u32 byte_count; 240 - u32 lkey; 241 - u64 addr; 242 - }; 243 - 244 - struct mthca_mlx_seg { 245 - u32 nda_op; 246 - u32 nds; 247 - u32 flags; /* [17] VL15 [16] SLR [14:12] static rate 248 - [11:8] SL [3] C [2] E */ 249 - u16 rlid; 250 - u16 vcrc; 251 174 }; 252 175 253 176 static const u8 mthca_opcode[] = { ··· 502 573 503 574 memset(&param, 0, sizeof param); 504 575 505 - param.enable_1x = 1; 506 - param.enable_4x = 1; 507 - param.vl_cap = dev->limits.vl_cap; 508 - param.mtu_cap = dev->limits.mtu_cap; 509 - param.gid_cap = dev->limits.gid_table_len; 510 - param.pkey_cap = dev->limits.pkey_table_len; 576 + param.port_width = dev->limits.port_width_cap; 577 + param.vl_cap = dev->limits.vl_cap; 578 + param.mtu_cap = dev->limits.mtu_cap; 579 + param.gid_cap = dev->limits.gid_table_len; 580 + param.pkey_cap = dev->limits.pkey_table_len; 511 581 512 582 err = mthca_INIT_IB(dev, &param, port, &status); 513 583 if (err) ··· 612 684 qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31; 613 685 614 686 if (mthca_is_memfree(dev)) { 615 - qp_context->rq_size_stride = 616 - ((ffs(qp->rq.max) - 1) << 3) | (qp->rq.wqe_shift - 4); 617 - qp_context->sq_size_stride = 618 - ((ffs(qp->sq.max) - 1) << 3) | (qp->sq.wqe_shift - 4); 687 + if (qp->rq.max) 688 + qp_context->rq_size_stride = long_log2(qp->rq.max) << 3; 689 + qp_context->rq_size_stride |= qp->rq.wqe_shift - 4; 690 + 691 + if (qp->sq.max) 692 + qp_context->sq_size_stride = long_log2(qp->sq.max) << 3; 693 + qp_context->sq_size_stride |= qp->sq.wqe_shift - 4; 619 694 } 620 695 621 696 /* leave arbel_sched_queue as 0 */ ··· 787 856 788 857 qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC); 789 858 859 + if (ibqp->srq) 860 + qp_context->params2 |= 
cpu_to_be32(MTHCA_QP_BIT_RIC); 861 + 790 862 if (attr_mask & IB_QP_MIN_RNR_TIMER) { 791 863 qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24); 792 864 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT); ··· 811 877 qp_context->qkey = cpu_to_be32(attr->qkey); 812 878 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY); 813 879 } 880 + 881 + if (ibqp->srq) 882 + qp_context->srqn = cpu_to_be32(1 << 24 | 883 + to_msrq(ibqp->srq)->srqn); 814 884 815 885 err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans, 816 886 qp->qpn, 0, mailbox, 0, &status); ··· 863 925 struct mthca_qp *qp) 864 926 { 865 927 int size; 866 - int i; 867 - int npages, shift; 868 - dma_addr_t t; 869 - u64 *dma_list = NULL; 870 928 int err = -ENOMEM; 871 929 872 930 size = sizeof (struct mthca_next_seg) + ··· 912 978 if (!qp->wrid) 913 979 goto err_out; 914 980 915 - if (size <= MTHCA_MAX_DIRECT_QP_SIZE) { 916 - qp->is_direct = 1; 917 - npages = 1; 918 - shift = get_order(size) + PAGE_SHIFT; 919 - 920 - if (0) 921 - mthca_dbg(dev, "Creating direct QP of size %d (shift %d)\n", 922 - size, shift); 923 - 924 - qp->queue.direct.buf = dma_alloc_coherent(&dev->pdev->dev, size, 925 - &t, GFP_KERNEL); 926 - if (!qp->queue.direct.buf) 927 - goto err_out; 928 - 929 - pci_unmap_addr_set(&qp->queue.direct, mapping, t); 930 - 931 - memset(qp->queue.direct.buf, 0, size); 932 - 933 - while (t & ((1 << shift) - 1)) { 934 - --shift; 935 - npages *= 2; 936 - } 937 - 938 - dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); 939 - if (!dma_list) 940 - goto err_out_free; 941 - 942 - for (i = 0; i < npages; ++i) 943 - dma_list[i] = t + i * (1 << shift); 944 - } else { 945 - qp->is_direct = 0; 946 - npages = size / PAGE_SIZE; 947 - shift = PAGE_SHIFT; 948 - 949 - if (0) 950 - mthca_dbg(dev, "Creating indirect QP with %d pages\n", npages); 951 - 952 - dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); 953 - if (!dma_list) 954 - goto err_out; 955 - 956 - qp->queue.page_list = kmalloc(npages * 957 - sizeof *qp->queue.page_list, 958 - GFP_KERNEL); 959 - if (!qp->queue.page_list) 960 - goto err_out; 961 - 962 - for (i = 0; i < npages; ++i) { 963 - qp->queue.page_list[i].buf = 964 - dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE, 965 - &t, GFP_KERNEL); 966 - if (!qp->queue.page_list[i].buf) 967 - goto err_out_free; 968 - 969 - memset(qp->queue.page_list[i].buf, 0, PAGE_SIZE); 970 - 971 - pci_unmap_addr_set(&qp->queue.page_list[i], mapping, t); 972 - dma_list[i] = t; 973 - } 974 - } 975 - 976 - err = mthca_mr_alloc_phys(dev, pd->pd_num, dma_list, shift, 977 - npages, 0, size, 978 - MTHCA_MPT_FLAG_LOCAL_READ, 979 - &qp->mr); 981 + err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE, 982 + &qp->queue, &qp->is_direct, pd, 0, &qp->mr); 980 983 if (err) 981 - goto err_out_free; 984 + goto err_out; 982 985 983 - kfree(dma_list); 984 986 return 0; 985 987 986 - err_out_free: 987 - if (qp->is_direct) { 988 - dma_free_coherent(&dev->pdev->dev, size, qp->queue.direct.buf, 989 - pci_unmap_addr(&qp->queue.direct, mapping)); 990 - } else 991 - for (i = 0; i < npages; ++i) { 992 - if (qp->queue.page_list[i].buf) 993 - dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, 994 - qp->queue.page_list[i].buf, 995 - pci_unmap_addr(&qp->queue.page_list[i], 996 - mapping)); 997 - 998 - } 999 - 1000 - err_out: 988 + err_out: 1001 989 kfree(qp->wrid); 1002 - kfree(dma_list); 1003 990 return err; 1004 991 } 1005 992 1006 993 static void mthca_free_wqe_buf(struct mthca_dev *dev, 1007 994 struct mthca_qp *qp) 
1008 995 { 1009 - int i; 1010 - int size = PAGE_ALIGN(qp->send_wqe_offset + 1011 - (qp->sq.max << qp->sq.wqe_shift)); 1012 - 1013 - if (qp->is_direct) { 1014 - dma_free_coherent(&dev->pdev->dev, size, qp->queue.direct.buf, 1015 - pci_unmap_addr(&qp->queue.direct, mapping)); 1016 - } else { 1017 - for (i = 0; i < size / PAGE_SIZE; ++i) { 1018 - dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, 1019 - qp->queue.page_list[i].buf, 1020 - pci_unmap_addr(&qp->queue.page_list[i], 1021 - mapping)); 1022 - } 1023 - } 1024 - 996 + mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset + 997 + (qp->sq.max << qp->sq.wqe_shift)), 998 + &qp->queue, qp->is_direct, &qp->mr); 1025 999 kfree(qp->wrid); 1026 1000 } 1027 1001 ··· 1270 1428 * unref the mem-free tables and free the QPN in our table. 1271 1429 */ 1272 1430 if (!qp->ibqp.uobject) { 1273 - mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn); 1431 + mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn, 1432 + qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); 1274 1433 if (qp->ibqp.send_cq != qp->ibqp.recv_cq) 1275 - mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn); 1434 + mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn, 1435 + qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); 1276 1436 1277 - mthca_free_mr(dev, &qp->mr); 1278 1437 mthca_free_memfree(dev, qp); 1279 1438 mthca_free_wqe_buf(dev, qp); 1280 1439 } ··· 1300 1457 { 1301 1458 int header_size; 1302 1459 int err; 1460 + u16 pkey; 1303 1461 1304 1462 ib_ud_header_init(256, /* assume a MAD */ 1305 1463 sqp->ud_header.grh_present, ··· 1311 1467 return err; 1312 1468 mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1); 1313 1469 mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) | 1314 - (sqp->ud_header.lrh.destination_lid == 0xffff ? 1315 - MTHCA_MLX_SLR : 0) | 1470 + (sqp->ud_header.lrh.destination_lid == 1471 + IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) | 1316 1472 (sqp->ud_header.lrh.service_level << 8)); 1317 1473 mlx->rlid = sqp->ud_header.lrh.destination_lid; 1318 1474 mlx->vcrc = 0; ··· 1332 1488 } 1333 1489 1334 1490 sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0; 1335 - if (sqp->ud_header.lrh.destination_lid == 0xffff) 1336 - sqp->ud_header.lrh.source_lid = 0xffff; 1491 + if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE) 1492 + sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE; 1337 1493 sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED); 1338 1494 if (!sqp->qp.ibqp.qp_num) 1339 1495 ib_get_cached_pkey(&dev->ib_dev, sqp->port, 1340 - sqp->pkey_index, 1341 - &sqp->ud_header.bth.pkey); 1496 + sqp->pkey_index, &pkey); 1342 1497 else 1343 1498 ib_get_cached_pkey(&dev->ib_dev, sqp->port, 1344 - wr->wr.ud.pkey_index, 1345 - &sqp->ud_header.bth.pkey); 1346 - cpu_to_be16s(&sqp->ud_header.bth.pkey); 1499 + wr->wr.ud.pkey_index, &pkey); 1500 + sqp->ud_header.bth.pkey = cpu_to_be16(pkey); 1347 1501 sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn); 1348 1502 sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); 1349 1503 sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ? 
··· 1584 1742 1585 1743 out: 1586 1744 if (likely(nreq)) { 1587 - u32 doorbell[2]; 1745 + __be32 doorbell[2]; 1588 1746 1589 1747 doorbell[0] = cpu_to_be32(((qp->sq.next_ind << qp->sq.wqe_shift) + 1590 1748 qp->send_wqe_offset) | f0 | op0); ··· 1685 1843 1686 1844 out: 1687 1845 if (likely(nreq)) { 1688 - u32 doorbell[2]; 1846 + __be32 doorbell[2]; 1689 1847 1690 1848 doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0); 1691 1849 doorbell[1] = cpu_to_be32((qp->qpn << 8) | nreq); ··· 1906 2064 1907 2065 out: 1908 2066 if (likely(nreq)) { 1909 - u32 doorbell[2]; 2067 + __be32 doorbell[2]; 1910 2068 1911 2069 doorbell[0] = cpu_to_be32((nreq << 24) | 1912 2070 ((qp->sq.head & 0xffff) << 8) | ··· 2016 2174 } 2017 2175 2018 2176 int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send, 2019 - int index, int *dbd, u32 *new_wqe) 2177 + int index, int *dbd, __be32 *new_wqe) 2020 2178 { 2021 2179 struct mthca_next_seg *next; 2180 + 2181 + /* 2182 + * For SRQs, all WQEs generate a CQE, so we're always at the 2183 + * end of the doorbell chain. 2184 + */ 2185 + if (qp->ibqp.srq) { 2186 + *new_wqe = 0; 2187 + return 0; 2188 + } 2022 2189 2023 2190 if (is_send) 2024 2191 next = get_send_wqe(qp, index); 2025 2192 else 2026 2193 next = get_recv_wqe(qp, index); 2027 2194 2028 - if (mthca_is_memfree(dev)) 2029 - *dbd = 1; 2030 - else 2031 - *dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD)); 2195 + *dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD)); 2032 2196 if (next->ee_nds & cpu_to_be32(0x3f)) 2033 2197 *new_wqe = (next->nda_op & cpu_to_be32(~0x3f)) | 2034 2198 (next->ee_nds & cpu_to_be32(0x3f));
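Among the mthca_qp.c changes, the mem-free (Arbel) path now builds rq_size_stride and sq_size_stride with long_log2() and only fills in the size bits when the queue is non-empty. A worked example of that encoding, with a local long_log2() standing in for the kernel helper and example queue parameters:

/*
 * size_stride encoding: log2(queue depth) shifted left by 3 in the upper
 * bits, and (wqe_shift - 4) in the low bits, i.e. log2 of the WQE stride
 * relative to the 16-byte minimum.  Values below are examples.
 */
#include <stdio.h>

static unsigned long_log2(unsigned x)   /* stand-in for the kernel helper */
{
        unsigned r = 0;

        while (x >>= 1)
                ++r;
        return r;
}

int main(void)
{
        unsigned max = 256;             /* queue depth (power of two) */
        unsigned wqe_shift = 6;         /* 64-byte WQEs               */
        unsigned char size_stride = 0;

        if (max)
                size_stride = long_log2(max) << 3;
        size_stride |= wqe_shift - 4;

        printf("size_stride = 0x%02x\n", size_stride);  /* 0x42 */
        return 0;
}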
+591
drivers/infiniband/hw/mthca/mthca_srq.c
··· 1 + /* 2 + * Copyright (c) 2005 Cisco Systems. All rights reserved. 3 + * 4 + * This software is available to you under a choice of one of two 5 + * licenses. You may choose to be licensed under the terms of the GNU 6 + * General Public License (GPL) Version 2, available from the file 7 + * COPYING in the main directory of this source tree, or the 8 + * OpenIB.org BSD license below: 9 + * 10 + * Redistribution and use in source and binary forms, with or 11 + * without modification, are permitted provided that the following 12 + * conditions are met: 13 + * 14 + * - Redistributions of source code must retain the above 15 + * copyright notice, this list of conditions and the following 16 + * disclaimer. 17 + * 18 + * - Redistributions in binary form must reproduce the above 19 + * copyright notice, this list of conditions and the following 20 + * disclaimer in the documentation and/or other materials 21 + * provided with the distribution. 22 + * 23 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 + * SOFTWARE. 31 + * 32 + * $Id: mthca_srq.c 3047 2005-08-10 03:59:35Z roland $ 33 + */ 34 + 35 + #include "mthca_dev.h" 36 + #include "mthca_cmd.h" 37 + #include "mthca_memfree.h" 38 + #include "mthca_wqe.h" 39 + 40 + enum { 41 + MTHCA_MAX_DIRECT_SRQ_SIZE = 4 * PAGE_SIZE 42 + }; 43 + 44 + struct mthca_tavor_srq_context { 45 + __be64 wqe_base_ds; /* low 6 bits is descriptor size */ 46 + __be32 state_pd; 47 + __be32 lkey; 48 + __be32 uar; 49 + __be32 wqe_cnt; 50 + u32 reserved[2]; 51 + }; 52 + 53 + struct mthca_arbel_srq_context { 54 + __be32 state_logsize_srqn; 55 + __be32 lkey; 56 + __be32 db_index; 57 + __be32 logstride_usrpage; 58 + __be64 wqe_base; 59 + __be32 eq_pd; 60 + __be16 limit_watermark; 61 + __be16 wqe_cnt; 62 + u16 reserved1; 63 + __be16 wqe_counter; 64 + u32 reserved2[3]; 65 + }; 66 + 67 + static void *get_wqe(struct mthca_srq *srq, int n) 68 + { 69 + if (srq->is_direct) 70 + return srq->queue.direct.buf + (n << srq->wqe_shift); 71 + else 72 + return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf + 73 + ((n << srq->wqe_shift) & (PAGE_SIZE - 1)); 74 + } 75 + 76 + /* 77 + * Return a pointer to the location within a WQE that we're using as a 78 + * link when the WQE is in the free list. We use an offset of 4 79 + * because in the Tavor case, posting a WQE may overwrite the first 80 + * four bytes of the previous WQE. The offset avoids corrupting our 81 + * free list if the WQE has already completed and been put on the free 82 + * list when we post the next WQE. 
83 + */ 84 + static inline int *wqe_to_link(void *wqe) 85 + { 86 + return (int *) (wqe + 4); 87 + } 88 + 89 + static void mthca_tavor_init_srq_context(struct mthca_dev *dev, 90 + struct mthca_pd *pd, 91 + struct mthca_srq *srq, 92 + struct mthca_tavor_srq_context *context) 93 + { 94 + memset(context, 0, sizeof *context); 95 + 96 + context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4)); 97 + context->state_pd = cpu_to_be32(pd->pd_num); 98 + context->lkey = cpu_to_be32(srq->mr.ibmr.lkey); 99 + 100 + if (pd->ibpd.uobject) 101 + context->uar = 102 + cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index); 103 + else 104 + context->uar = cpu_to_be32(dev->driver_uar.index); 105 + } 106 + 107 + static void mthca_arbel_init_srq_context(struct mthca_dev *dev, 108 + struct mthca_pd *pd, 109 + struct mthca_srq *srq, 110 + struct mthca_arbel_srq_context *context) 111 + { 112 + int logsize; 113 + 114 + memset(context, 0, sizeof *context); 115 + 116 + logsize = long_log2(srq->max) + srq->wqe_shift; 117 + context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn); 118 + context->lkey = cpu_to_be32(srq->mr.ibmr.lkey); 119 + context->db_index = cpu_to_be32(srq->db_index); 120 + context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29); 121 + if (pd->ibpd.uobject) 122 + context->logstride_usrpage |= 123 + cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index); 124 + else 125 + context->logstride_usrpage |= cpu_to_be32(dev->driver_uar.index); 126 + context->eq_pd = cpu_to_be32(MTHCA_EQ_ASYNC << 24 | pd->pd_num); 127 + } 128 + 129 + static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq) 130 + { 131 + mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue, 132 + srq->is_direct, &srq->mr); 133 + kfree(srq->wrid); 134 + } 135 + 136 + static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd, 137 + struct mthca_srq *srq) 138 + { 139 + struct mthca_data_seg *scatter; 140 + void *wqe; 141 + int err; 142 + int i; 143 + 144 + if (pd->ibpd.uobject) 145 + return 0; 146 + 147 + srq->wrid = kmalloc(srq->max * sizeof (u64), GFP_KERNEL); 148 + if (!srq->wrid) 149 + return -ENOMEM; 150 + 151 + err = mthca_buf_alloc(dev, srq->max << srq->wqe_shift, 152 + MTHCA_MAX_DIRECT_SRQ_SIZE, 153 + &srq->queue, &srq->is_direct, pd, 1, &srq->mr); 154 + if (err) { 155 + kfree(srq->wrid); 156 + return err; 157 + } 158 + 159 + /* 160 + * Now initialize the SRQ buffer so that all of the WQEs are 161 + * linked into the list of free WQEs. In addition, set the 162 + * scatter list L_Keys to the sentry value of 0x100. 163 + */ 164 + for (i = 0; i < srq->max; ++i) { 165 + wqe = get_wqe(srq, i); 166 + 167 + *wqe_to_link(wqe) = i < srq->max - 1 ? 
i + 1 : -1; 168 + 169 + for (scatter = wqe + sizeof (struct mthca_next_seg); 170 + (void *) scatter < wqe + (1 << srq->wqe_shift); 171 + ++scatter) 172 + scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY); 173 + } 174 + 175 + return 0; 176 + } 177 + 178 + int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd, 179 + struct ib_srq_attr *attr, struct mthca_srq *srq) 180 + { 181 + struct mthca_mailbox *mailbox; 182 + u8 status; 183 + int ds; 184 + int err; 185 + 186 + /* Sanity check SRQ size before proceeding */ 187 + if (attr->max_wr > 16 << 20 || attr->max_sge > 64) 188 + return -EINVAL; 189 + 190 + srq->max = attr->max_wr; 191 + srq->max_gs = attr->max_sge; 192 + srq->last = NULL; 193 + srq->counter = 0; 194 + 195 + if (mthca_is_memfree(dev)) 196 + srq->max = roundup_pow_of_two(srq->max + 1); 197 + 198 + ds = min(64UL, 199 + roundup_pow_of_two(sizeof (struct mthca_next_seg) + 200 + srq->max_gs * sizeof (struct mthca_data_seg))); 201 + srq->wqe_shift = long_log2(ds); 202 + 203 + srq->srqn = mthca_alloc(&dev->srq_table.alloc); 204 + if (srq->srqn == -1) 205 + return -ENOMEM; 206 + 207 + if (mthca_is_memfree(dev)) { 208 + err = mthca_table_get(dev, dev->srq_table.table, srq->srqn); 209 + if (err) 210 + goto err_out; 211 + 212 + if (!pd->ibpd.uobject) { 213 + srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ, 214 + srq->srqn, &srq->db); 215 + if (srq->db_index < 0) { 216 + err = -ENOMEM; 217 + goto err_out_icm; 218 + } 219 + } 220 + } 221 + 222 + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 223 + if (IS_ERR(mailbox)) { 224 + err = PTR_ERR(mailbox); 225 + goto err_out_db; 226 + } 227 + 228 + err = mthca_alloc_srq_buf(dev, pd, srq); 229 + if (err) 230 + goto err_out_mailbox; 231 + 232 + spin_lock_init(&srq->lock); 233 + atomic_set(&srq->refcount, 1); 234 + init_waitqueue_head(&srq->wait); 235 + 236 + if (mthca_is_memfree(dev)) 237 + mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf); 238 + else 239 + mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf); 240 + 241 + err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn, &status); 242 + 243 + if (err) { 244 + mthca_warn(dev, "SW2HW_SRQ failed (%d)\n", err); 245 + goto err_out_free_buf; 246 + } 247 + if (status) { 248 + mthca_warn(dev, "SW2HW_SRQ returned status 0x%02x\n", 249 + status); 250 + err = -EINVAL; 251 + goto err_out_free_buf; 252 + } 253 + 254 + spin_lock_irq(&dev->srq_table.lock); 255 + if (mthca_array_set(&dev->srq_table.srq, 256 + srq->srqn & (dev->limits.num_srqs - 1), 257 + srq)) { 258 + spin_unlock_irq(&dev->srq_table.lock); 259 + goto err_out_free_srq; 260 + } 261 + spin_unlock_irq(&dev->srq_table.lock); 262 + 263 + mthca_free_mailbox(dev, mailbox); 264 + 265 + srq->first_free = 0; 266 + srq->last_free = srq->max - 1; 267 + 268 + return 0; 269 + 270 + err_out_free_srq: 271 + err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status); 272 + if (err) 273 + mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err); 274 + else if (status) 275 + mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status); 276 + 277 + err_out_free_buf: 278 + if (!pd->ibpd.uobject) 279 + mthca_free_srq_buf(dev, srq); 280 + 281 + err_out_mailbox: 282 + mthca_free_mailbox(dev, mailbox); 283 + 284 + err_out_db: 285 + if (!pd->ibpd.uobject && mthca_is_memfree(dev)) 286 + mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index); 287 + 288 + err_out_icm: 289 + mthca_table_put(dev, dev->srq_table.table, srq->srqn); 290 + 291 + err_out: 292 + mthca_free(&dev->srq_table.alloc, srq->srqn); 293 + 294 + return err; 295 + } 296 + 297 + void mthca_free_srq(struct 
mthca_dev *dev, struct mthca_srq *srq) 298 + { 299 + struct mthca_mailbox *mailbox; 300 + int err; 301 + u8 status; 302 + 303 + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 304 + if (IS_ERR(mailbox)) { 305 + mthca_warn(dev, "No memory for mailbox to free SRQ.\n"); 306 + return; 307 + } 308 + 309 + err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status); 310 + if (err) 311 + mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err); 312 + else if (status) 313 + mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status); 314 + 315 + spin_lock_irq(&dev->srq_table.lock); 316 + mthca_array_clear(&dev->srq_table.srq, 317 + srq->srqn & (dev->limits.num_srqs - 1)); 318 + spin_unlock_irq(&dev->srq_table.lock); 319 + 320 + atomic_dec(&srq->refcount); 321 + wait_event(srq->wait, !atomic_read(&srq->refcount)); 322 + 323 + if (!srq->ibsrq.uobject) { 324 + mthca_free_srq_buf(dev, srq); 325 + if (mthca_is_memfree(dev)) 326 + mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index); 327 + } 328 + 329 + mthca_table_put(dev, dev->srq_table.table, srq->srqn); 330 + mthca_free(&dev->srq_table.alloc, srq->srqn); 331 + mthca_free_mailbox(dev, mailbox); 332 + } 333 + 334 + void mthca_srq_event(struct mthca_dev *dev, u32 srqn, 335 + enum ib_event_type event_type) 336 + { 337 + struct mthca_srq *srq; 338 + struct ib_event event; 339 + 340 + spin_lock(&dev->srq_table.lock); 341 + srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1)); 342 + if (srq) 343 + atomic_inc(&srq->refcount); 344 + spin_unlock(&dev->srq_table.lock); 345 + 346 + if (!srq) { 347 + mthca_warn(dev, "Async event for bogus SRQ %08x\n", srqn); 348 + return; 349 + } 350 + 351 + if (!srq->ibsrq.event_handler) 352 + goto out; 353 + 354 + event.device = &dev->ib_dev; 355 + event.event = event_type; 356 + event.element.srq = &srq->ibsrq; 357 + srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context); 358 + 359 + out: 360 + if (atomic_dec_and_test(&srq->refcount)) 361 + wake_up(&srq->wait); 362 + } 363 + 364 + /* 365 + * This function must be called with IRQs disabled. 
366 + */ 367 + void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr) 368 + { 369 + int ind; 370 + 371 + ind = wqe_addr >> srq->wqe_shift; 372 + 373 + spin_lock(&srq->lock); 374 + 375 + if (likely(srq->first_free >= 0)) 376 + *wqe_to_link(get_wqe(srq, srq->last_free)) = ind; 377 + else 378 + srq->first_free = ind; 379 + 380 + *wqe_to_link(get_wqe(srq, ind)) = -1; 381 + srq->last_free = ind; 382 + 383 + spin_unlock(&srq->lock); 384 + } 385 + 386 + int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, 387 + struct ib_recv_wr **bad_wr) 388 + { 389 + struct mthca_dev *dev = to_mdev(ibsrq->device); 390 + struct mthca_srq *srq = to_msrq(ibsrq); 391 + unsigned long flags; 392 + int err = 0; 393 + int first_ind; 394 + int ind; 395 + int next_ind; 396 + int nreq; 397 + int i; 398 + void *wqe; 399 + void *prev_wqe; 400 + 401 + spin_lock_irqsave(&srq->lock, flags); 402 + 403 + first_ind = srq->first_free; 404 + 405 + for (nreq = 0; wr; ++nreq, wr = wr->next) { 406 + ind = srq->first_free; 407 + 408 + if (ind < 0) { 409 + mthca_err(dev, "SRQ %06x full\n", srq->srqn); 410 + err = -ENOMEM; 411 + *bad_wr = wr; 412 + break; 413 + } 414 + 415 + wqe = get_wqe(srq, ind); 416 + next_ind = *wqe_to_link(wqe); 417 + prev_wqe = srq->last; 418 + srq->last = wqe; 419 + 420 + ((struct mthca_next_seg *) wqe)->nda_op = 0; 421 + ((struct mthca_next_seg *) wqe)->ee_nds = 0; 422 + /* flags field will always remain 0 */ 423 + 424 + wqe += sizeof (struct mthca_next_seg); 425 + 426 + if (unlikely(wr->num_sge > srq->max_gs)) { 427 + err = -EINVAL; 428 + *bad_wr = wr; 429 + srq->last = prev_wqe; 430 + break; 431 + } 432 + 433 + for (i = 0; i < wr->num_sge; ++i) { 434 + ((struct mthca_data_seg *) wqe)->byte_count = 435 + cpu_to_be32(wr->sg_list[i].length); 436 + ((struct mthca_data_seg *) wqe)->lkey = 437 + cpu_to_be32(wr->sg_list[i].lkey); 438 + ((struct mthca_data_seg *) wqe)->addr = 439 + cpu_to_be64(wr->sg_list[i].addr); 440 + wqe += sizeof (struct mthca_data_seg); 441 + } 442 + 443 + if (i < srq->max_gs) { 444 + ((struct mthca_data_seg *) wqe)->byte_count = 0; 445 + ((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY); 446 + ((struct mthca_data_seg *) wqe)->addr = 0; 447 + } 448 + 449 + if (likely(prev_wqe)) { 450 + ((struct mthca_next_seg *) prev_wqe)->nda_op = 451 + cpu_to_be32((ind << srq->wqe_shift) | 1); 452 + wmb(); 453 + ((struct mthca_next_seg *) prev_wqe)->ee_nds = 454 + cpu_to_be32(MTHCA_NEXT_DBD); 455 + } 456 + 457 + srq->wrid[ind] = wr->wr_id; 458 + srq->first_free = next_ind; 459 + } 460 + 461 + 462 + 463 + if (likely(nreq)) { 464 + __be32 doorbell[2]; 465 + 466 + doorbell[0] = cpu_to_be32(first_ind << srq->wqe_shift); 467 + doorbell[1] = cpu_to_be32((srq->srqn << 8) | nreq); 468 + 469 + /* 470 + * Make sure that descriptors are written before 471 + * doorbell is rung.
472 + */ 473 + wmb(); 474 + 475 + mthca_write64(doorbell, 476 + dev->kar + MTHCA_RECEIVE_DOORBELL, 477 + MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); 478 + } 479 + 480 + spin_unlock_irqrestore(&srq->lock, flags); 481 + return err; 482 + } 483 + 484 + int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, 485 + struct ib_recv_wr **bad_wr) 486 + { 487 + struct mthca_dev *dev = to_mdev(ibsrq->device); 488 + struct mthca_srq *srq = to_msrq(ibsrq); 489 + unsigned long flags; 490 + int err = 0; 491 + int ind; 492 + int next_ind; 493 + int nreq; 494 + int i; 495 + void *wqe; 496 + 497 + spin_lock_irqsave(&srq->lock, flags); 498 + 499 + for (nreq = 0; wr; ++nreq, wr = wr->next) { 500 + ind = srq->first_free; 501 + 502 + if (ind < 0) { 503 + mthca_err(dev, "SRQ %06x full\n", srq->srqn); 504 + err = -ENOMEM; 505 + *bad_wr = wr; 506 + break; 507 + } 508 + 509 + wqe = get_wqe(srq, ind); 510 + next_ind = *wqe_to_link(wqe); 511 + 512 + ((struct mthca_next_seg *) wqe)->nda_op = 513 + cpu_to_be32((next_ind << srq->wqe_shift) | 1); 514 + ((struct mthca_next_seg *) wqe)->ee_nds = 0; 515 + /* flags field will always remain 0 */ 516 + 517 + wqe += sizeof (struct mthca_next_seg); 518 + 519 + if (unlikely(wr->num_sge > srq->max_gs)) { 520 + err = -EINVAL; 521 + *bad_wr = wr; 522 + break; 523 + } 524 + 525 + for (i = 0; i < wr->num_sge; ++i) { 526 + ((struct mthca_data_seg *) wqe)->byte_count = 527 + cpu_to_be32(wr->sg_list[i].length); 528 + ((struct mthca_data_seg *) wqe)->lkey = 529 + cpu_to_be32(wr->sg_list[i].lkey); 530 + ((struct mthca_data_seg *) wqe)->addr = 531 + cpu_to_be64(wr->sg_list[i].addr); 532 + wqe += sizeof (struct mthca_data_seg); 533 + } 534 + 535 + if (i < srq->max_gs) { 536 + ((struct mthca_data_seg *) wqe)->byte_count = 0; 537 + ((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY); 538 + ((struct mthca_data_seg *) wqe)->addr = 0; 539 + } 540 + 541 + srq->wrid[ind] = wr->wr_id; 542 + srq->first_free = next_ind; 543 + } 544 + 545 + if (likely(nreq)) { 546 + srq->counter += nreq; 547 + 548 + /* 549 + * Make sure that descriptors are written before 550 + * we write doorbell record. 551 + */ 552 + wmb(); 553 + *srq->db = cpu_to_be32(srq->counter); 554 + } 555 + 556 + spin_unlock_irqrestore(&srq->lock, flags); 557 + return err; 558 + } 559 + 560 + int __devinit mthca_init_srq_table(struct mthca_dev *dev) 561 + { 562 + int err; 563 + 564 + if (!(dev->mthca_flags & MTHCA_FLAG_SRQ)) 565 + return 0; 566 + 567 + spin_lock_init(&dev->srq_table.lock); 568 + 569 + err = mthca_alloc_init(&dev->srq_table.alloc, 570 + dev->limits.num_srqs, 571 + dev->limits.num_srqs - 1, 572 + dev->limits.reserved_srqs); 573 + if (err) 574 + return err; 575 + 576 + err = mthca_array_init(&dev->srq_table.srq, 577 + dev->limits.num_srqs); 578 + if (err) 579 + mthca_alloc_cleanup(&dev->srq_table.alloc); 580 + 581 + return err; 582 + } 583 + 584 + void __devexit mthca_cleanup_srq_table(struct mthca_dev *dev) 585 + { 586 + if (!(dev->mthca_flags & MTHCA_FLAG_SRQ)) 587 + return; 588 + 589 + mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs); 590 + mthca_alloc_cleanup(&dev->srq_table.alloc); 591 + }
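The new mthca_srq.c above keeps unposted WQEs on a singly linked free list threaded through the WQEs themselves (wqe_to_link() at byte offset 4), with first_free/last_free marking the two ends: posting takes from the head, completions return the WQE to the tail. The following is a minimal user-space model of that bookkeeping with illustrative sizes; the function names are mine and this is not the driver code.

#include <stdio.h>

#define WQE_SHIFT 6                 /* 64-byte WQEs, illustrative */
#define MAX_WQES  8

static unsigned char queue[MAX_WQES << WQE_SHIFT];
static int first_free, last_free;

static void *get_wqe(int n)
{
	return queue + (n << WQE_SHIFT);
}

static int *wqe_to_link(void *wqe)
{
	return (int *) ((char *) wqe + 4);   /* offset 4, as in the driver */
}

static void srq_init(void)
{
	int i;

	for (i = 0; i < MAX_WQES; ++i)
		*wqe_to_link(get_wqe(i)) = i < MAX_WQES - 1 ? i + 1 : -1;
	first_free = 0;
	last_free  = MAX_WQES - 1;
}

/* Take a WQE off the head of the free list; -1 means the SRQ is full. */
static int srq_post(void)
{
	int ind = first_free;

	if (ind >= 0)
		first_free = *wqe_to_link(get_wqe(ind));
	return ind;
}

/* Return a completed WQE to the tail, as mthca_free_srq_wqe() does. */
static void srq_free(int ind)
{
	if (first_free >= 0)
		*wqe_to_link(get_wqe(last_free)) = ind;
	else
		first_free = ind;
	*wqe_to_link(get_wqe(ind)) = -1;
	last_free = ind;
}

int main(void)
{
	int a, b;

	srq_init();
	a = srq_post();
	b = srq_post();
	printf("posted WQEs %d and %d\n", a, b);
	srq_free(a);
	printf("next free WQE is %d\n", srq_post());
	return 0;
}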
+11
drivers/infiniband/hw/mthca/mthca_user.h
··· 69 69 __u32 reserved; 70 70 }; 71 71 72 + struct mthca_create_srq { 73 + __u32 lkey; 74 + __u32 db_index; 75 + __u64 db_page; 76 + }; 77 + 78 + struct mthca_create_srq_resp { 79 + __u32 srqn; 80 + __u32 reserved; 81 + }; 82 + 72 83 struct mthca_create_qp { 73 84 __u32 lkey; 74 85 __u32 reserved;
+114
drivers/infiniband/hw/mthca/mthca_wqe.h
··· 1 + /* 2 + * Copyright (c) 2005 Cisco Systems. All rights reserved. 3 + * 4 + * This software is available to you under a choice of one of two 5 + * licenses. You may choose to be licensed under the terms of the GNU 6 + * General Public License (GPL) Version 2, available from the file 7 + * COPYING in the main directory of this source tree, or the 8 + * OpenIB.org BSD license below: 9 + * 10 + * Redistribution and use in source and binary forms, with or 11 + * without modification, are permitted provided that the following 12 + * conditions are met: 13 + * 14 + * - Redistributions of source code must retain the above 15 + * copyright notice, this list of conditions and the following 16 + * disclaimer. 17 + * 18 + * - Redistributions in binary form must reproduce the above 19 + * copyright notice, this list of conditions and the following 20 + * disclaimer in the documentation and/or other materials 21 + * provided with the distribution. 22 + * 23 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 + * SOFTWARE. 31 + * 32 + * $Id: mthca_wqe.h 3047 2005-08-10 03:59:35Z roland $ 33 + */ 34 + 35 + #ifndef MTHCA_WQE_H 36 + #define MTHCA_WQE_H 37 + 38 + #include <linux/types.h> 39 + 40 + enum { 41 + MTHCA_NEXT_DBD = 1 << 7, 42 + MTHCA_NEXT_FENCE = 1 << 6, 43 + MTHCA_NEXT_CQ_UPDATE = 1 << 3, 44 + MTHCA_NEXT_EVENT_GEN = 1 << 2, 45 + MTHCA_NEXT_SOLICIT = 1 << 1, 46 + 47 + MTHCA_MLX_VL15 = 1 << 17, 48 + MTHCA_MLX_SLR = 1 << 16 49 + }; 50 + 51 + enum { 52 + MTHCA_INVAL_LKEY = 0x100 53 + }; 54 + 55 + struct mthca_next_seg { 56 + __be32 nda_op; /* [31:6] next WQE [4:0] next opcode */ 57 + __be32 ee_nds; /* [31:8] next EE [7] DBD [6] F [5:0] next WQE size */ 58 + __be32 flags; /* [3] CQ [2] Event [1] Solicit */ 59 + __be32 imm; /* immediate data */ 60 + }; 61 + 62 + struct mthca_tavor_ud_seg { 63 + u32 reserved1; 64 + __be32 lkey; 65 + __be64 av_addr; 66 + u32 reserved2[4]; 67 + __be32 dqpn; 68 + __be32 qkey; 69 + u32 reserved3[2]; 70 + }; 71 + 72 + struct mthca_arbel_ud_seg { 73 + __be32 av[8]; 74 + __be32 dqpn; 75 + __be32 qkey; 76 + u32 reserved[2]; 77 + }; 78 + 79 + struct mthca_bind_seg { 80 + __be32 flags; /* [31] Atomic [30] rem write [29] rem read */ 81 + u32 reserved; 82 + __be32 new_rkey; 83 + __be32 lkey; 84 + __be64 addr; 85 + __be64 length; 86 + }; 87 + 88 + struct mthca_raddr_seg { 89 + __be64 raddr; 90 + __be32 rkey; 91 + u32 reserved; 92 + }; 93 + 94 + struct mthca_atomic_seg { 95 + __be64 swap_add; 96 + __be64 compare; 97 + }; 98 + 99 + struct mthca_data_seg { 100 + __be32 byte_count; 101 + __be32 lkey; 102 + __be64 addr; 103 + }; 104 + 105 + struct mthca_mlx_seg { 106 + __be32 nda_op; 107 + __be32 nds; 108 + __be32 flags; /* [17] VL15 [16] SLR [14:12] static rate 109 + [11:8] SL [3] C [2] E */ 110 + __be16 rlid; 111 + __be16 vcrc; 112 + }; 113 + 114 + #endif /* MTHCA_WQE_H */
+3 -1
drivers/infiniband/include/ib_cache.h include/rdma/ib_cache.h
··· 1 1 /* 2 2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 3 + * Copyright (c) 2005 Intel Corporation. All rights reserved. 4 + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 3 5 * 4 6 * This software is available to you under a choice of one of two 5 7 * licenses. You may choose to be licensed under the terms of the GNU ··· 37 35 #ifndef _IB_CACHE_H 38 36 #define _IB_CACHE_H 39 37 40 - #include <ib_verbs.h> 38 + #include <rdma/ib_verbs.h> 41 39 42 40 /** 43 41 * ib_get_cached_gid - Returns a cached GID table entry
+46 -47
drivers/infiniband/include/ib_cm.h include/rdma/ib_cm.h
··· 37 37 #if !defined(IB_CM_H) 38 38 #define IB_CM_H 39 39 40 - #include <ib_mad.h> 41 - #include <ib_sa.h> 40 + #include <rdma/ib_mad.h> 41 + #include <rdma/ib_sa.h> 42 42 43 43 enum ib_cm_state { 44 44 IB_CM_IDLE, ··· 115 115 struct ib_sa_path_rec *primary_path; 116 116 struct ib_sa_path_rec *alternate_path; 117 117 118 - u64 remote_ca_guid; 118 + __be64 remote_ca_guid; 119 119 u32 remote_qkey; 120 120 u32 remote_qpn; 121 121 enum ib_qp_type qp_type; ··· 132 132 }; 133 133 134 134 struct ib_cm_rep_event_param { 135 - u64 remote_ca_guid; 135 + __be64 remote_ca_guid; 136 136 u32 remote_qkey; 137 137 u32 remote_qpn; 138 138 u32 starting_psn; ··· 146 146 }; 147 147 148 148 enum ib_cm_rej_reason { 149 - IB_CM_REJ_NO_QP = __constant_htons(1), 150 - IB_CM_REJ_NO_EEC = __constant_htons(2), 151 - IB_CM_REJ_NO_RESOURCES = __constant_htons(3), 152 - IB_CM_REJ_TIMEOUT = __constant_htons(4), 153 - IB_CM_REJ_UNSUPPORTED = __constant_htons(5), 154 - IB_CM_REJ_INVALID_COMM_ID = __constant_htons(6), 155 - IB_CM_REJ_INVALID_COMM_INSTANCE = __constant_htons(7), 156 - IB_CM_REJ_INVALID_SERVICE_ID = __constant_htons(8), 157 - IB_CM_REJ_INVALID_TRANSPORT_TYPE = __constant_htons(9), 158 - IB_CM_REJ_STALE_CONN = __constant_htons(10), 159 - IB_CM_REJ_RDC_NOT_EXIST = __constant_htons(11), 160 - IB_CM_REJ_INVALID_GID = __constant_htons(12), 161 - IB_CM_REJ_INVALID_LID = __constant_htons(13), 162 - IB_CM_REJ_INVALID_SL = __constant_htons(14), 163 - IB_CM_REJ_INVALID_TRAFFIC_CLASS = __constant_htons(15), 164 - IB_CM_REJ_INVALID_HOP_LIMIT = __constant_htons(16), 165 - IB_CM_REJ_INVALID_PACKET_RATE = __constant_htons(17), 166 - IB_CM_REJ_INVALID_ALT_GID = __constant_htons(18), 167 - IB_CM_REJ_INVALID_ALT_LID = __constant_htons(19), 168 - IB_CM_REJ_INVALID_ALT_SL = __constant_htons(20), 169 - IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS = __constant_htons(21), 170 - IB_CM_REJ_INVALID_ALT_HOP_LIMIT = __constant_htons(22), 171 - IB_CM_REJ_INVALID_ALT_PACKET_RATE = __constant_htons(23), 172 - IB_CM_REJ_PORT_CM_REDIRECT = __constant_htons(24), 173 - IB_CM_REJ_PORT_REDIRECT = __constant_htons(25), 174 - IB_CM_REJ_INVALID_MTU = __constant_htons(26), 175 - IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES = __constant_htons(27), 176 - IB_CM_REJ_CONSUMER_DEFINED = __constant_htons(28), 177 - IB_CM_REJ_INVALID_RNR_RETRY = __constant_htons(29), 178 - IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID = __constant_htons(30), 179 - IB_CM_REJ_INVALID_CLASS_VERSION = __constant_htons(31), 180 - IB_CM_REJ_INVALID_FLOW_LABEL = __constant_htons(32), 181 - IB_CM_REJ_INVALID_ALT_FLOW_LABEL = __constant_htons(33) 149 + IB_CM_REJ_NO_QP = 1, 150 + IB_CM_REJ_NO_EEC = 2, 151 + IB_CM_REJ_NO_RESOURCES = 3, 152 + IB_CM_REJ_TIMEOUT = 4, 153 + IB_CM_REJ_UNSUPPORTED = 5, 154 + IB_CM_REJ_INVALID_COMM_ID = 6, 155 + IB_CM_REJ_INVALID_COMM_INSTANCE = 7, 156 + IB_CM_REJ_INVALID_SERVICE_ID = 8, 157 + IB_CM_REJ_INVALID_TRANSPORT_TYPE = 9, 158 + IB_CM_REJ_STALE_CONN = 10, 159 + IB_CM_REJ_RDC_NOT_EXIST = 11, 160 + IB_CM_REJ_INVALID_GID = 12, 161 + IB_CM_REJ_INVALID_LID = 13, 162 + IB_CM_REJ_INVALID_SL = 14, 163 + IB_CM_REJ_INVALID_TRAFFIC_CLASS = 15, 164 + IB_CM_REJ_INVALID_HOP_LIMIT = 16, 165 + IB_CM_REJ_INVALID_PACKET_RATE = 17, 166 + IB_CM_REJ_INVALID_ALT_GID = 18, 167 + IB_CM_REJ_INVALID_ALT_LID = 19, 168 + IB_CM_REJ_INVALID_ALT_SL = 20, 169 + IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS = 21, 170 + IB_CM_REJ_INVALID_ALT_HOP_LIMIT = 22, 171 + IB_CM_REJ_INVALID_ALT_PACKET_RATE = 23, 172 + IB_CM_REJ_PORT_CM_REDIRECT = 24, 173 + IB_CM_REJ_PORT_REDIRECT = 25, 174 + IB_CM_REJ_INVALID_MTU = 26, 175 + 
IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES = 27, 176 + IB_CM_REJ_CONSUMER_DEFINED = 28, 177 + IB_CM_REJ_INVALID_RNR_RETRY = 29, 178 + IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID = 30, 179 + IB_CM_REJ_INVALID_CLASS_VERSION = 31, 180 + IB_CM_REJ_INVALID_FLOW_LABEL = 32, 181 + IB_CM_REJ_INVALID_ALT_FLOW_LABEL = 33 182 182 }; 183 183 184 184 struct ib_cm_rej_event_param { ··· 222 222 struct ib_cm_id *listen_id; 223 223 struct ib_device *device; 224 224 u8 port; 225 - 226 - u16 pkey; 225 + u16 pkey; 227 226 }; 228 227 229 228 enum ib_cm_sidr_status { ··· 284 285 struct ib_cm_id { 285 286 ib_cm_handler cm_handler; 286 287 void *context; 287 - u64 service_id; 288 - u64 service_mask; 288 + __be64 service_id; 289 + __be64 service_mask; 289 290 enum ib_cm_state state; /* internal CM/debug use */ 290 291 enum ib_cm_lap_state lap_state; /* internal CM/debug use */ 291 - u32 local_id; 292 - u32 remote_id; 292 + __be32 local_id; 293 + __be32 remote_id; 293 294 }; 294 295 295 296 /** ··· 329 330 * IB_CM_ASSIGN_SERVICE_ID. 330 331 */ 331 332 int ib_cm_listen(struct ib_cm_id *cm_id, 332 - u64 service_id, 333 - u64 service_mask); 333 + __be64 service_id, 334 + __be64 service_mask); 334 335 335 336 struct ib_cm_req_param { 336 337 struct ib_sa_path_rec *primary_path; 337 338 struct ib_sa_path_rec *alternate_path; 338 - u64 service_id; 339 + __be64 service_id; 339 340 u32 qp_num; 340 341 enum ib_qp_type qp_type; 341 342 u32 starting_psn; ··· 527 528 528 529 struct ib_cm_sidr_req_param { 529 530 struct ib_sa_path_rec *path; 530 - u64 service_id; 531 + __be64 service_id; 531 532 int timeout_ms; 532 533 const void *private_data; 533 534 u8 private_data_len;
+1 -1
drivers/infiniband/include/ib_fmr_pool.h include/rdma/ib_fmr_pool.h
··· 36 36 #if !defined(IB_FMR_POOL_H) 37 37 #define IB_FMR_POOL_H 38 38 39 - #include <ib_verbs.h> 39 + #include <rdma/ib_verbs.h> 40 40 41 41 struct ib_fmr_pool; 42 42
+14 -12
drivers/infiniband/include/ib_mad.h include/rdma/ib_mad.h
··· 41 41 42 42 #include <linux/pci.h> 43 43 44 - #include <ib_verbs.h> 44 + #include <rdma/ib_verbs.h> 45 45 46 46 /* Management base version */ 47 47 #define IB_MGMT_BASE_VERSION 1 ··· 90 90 91 91 #define IB_MGMT_RMPP_STATUS_SUCCESS 0 92 92 #define IB_MGMT_RMPP_STATUS_RESX 1 93 + #define IB_MGMT_RMPP_STATUS_ABORT_MIN 118 93 94 #define IB_MGMT_RMPP_STATUS_T2L 118 94 95 #define IB_MGMT_RMPP_STATUS_BAD_LEN 119 95 96 #define IB_MGMT_RMPP_STATUS_BAD_SEG 120 ··· 101 100 #define IB_MGMT_RMPP_STATUS_UNV 125 102 101 #define IB_MGMT_RMPP_STATUS_TMR 126 103 102 #define IB_MGMT_RMPP_STATUS_UNSPEC 127 103 + #define IB_MGMT_RMPP_STATUS_ABORT_MAX 127 104 104 105 105 #define IB_QP0 0 106 106 #define IB_QP1 __constant_htonl(1) ··· 113 111 u8 mgmt_class; 114 112 u8 class_version; 115 113 u8 method; 116 - u16 status; 117 - u16 class_specific; 118 - u64 tid; 119 - u16 attr_id; 120 - u16 resv; 121 - u32 attr_mod; 114 + __be16 status; 115 + __be16 class_specific; 116 + __be64 tid; 117 + __be16 attr_id; 118 + __be16 resv; 119 + __be32 attr_mod; 122 120 }; 123 121 124 122 struct ib_rmpp_hdr { ··· 126 124 u8 rmpp_type; 127 125 u8 rmpp_rtime_flags; 128 126 u8 rmpp_status; 129 - u32 seg_num; 130 - u32 paylen_newwin; 127 + __be32 seg_num; 128 + __be32 paylen_newwin; 131 129 }; 132 130 133 131 typedef u64 __bitwise ib_sa_comp_mask; ··· 141 139 * the wire so we can't change the layout) 142 140 */ 143 141 struct ib_sa_hdr { 144 - u64 sm_key; 145 - u16 attr_offset; 146 - u16 reserved; 142 + __be64 sm_key; 143 + __be16 attr_offset; 144 + __be16 reserved; 147 145 ib_sa_comp_mask comp_mask; 148 146 } __attribute__ ((packed)); 149 147
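Most of the header changes in this merge annotate on-the-wire fields as __be16/__be32/__be64 so sparse can catch byte-order mistakes. The user-space sketch below shows the convention the annotations enforce, using the MAD transaction ID as the example: the field is kept in big-endian (wire) form and converted only at the point of use. The struct is a simplified stand-in, not ib_mad_hdr, and htobe64()/be64toh() replace the kernel's cpu_to_be64()/be64_to_cpu().

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct mad_hdr_sketch {
	uint8_t  base_version;
	uint8_t  mgmt_class;
	uint8_t  class_version;
	uint8_t  method;
	uint16_t status;        /* big-endian on the wire (__be16 in ib_mad.h) */
	uint16_t class_specific;
	uint64_t tid;           /* big-endian on the wire (__be64 in ib_mad.h) */
};

int main(void)
{
	struct mad_hdr_sketch hdr = { 0 };
	uint64_t my_tid = 0x1234;

	/* Store in wire (big-endian) order... */
	hdr.tid = htobe64(my_tid);

	/* ...and convert back only where the CPU needs the host value. */
	printf("tid on the wire: 0x%016llx, host value: 0x%llx\n",
	       (unsigned long long) hdr.tid,
	       (unsigned long long) be64toh(hdr.tid));
	return 0;
}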
+1 -1
drivers/infiniband/include/ib_pack.h include/rdma/ib_pack.h
··· 35 35 #ifndef IB_PACK_H 36 36 #define IB_PACK_H 37 37 38 - #include <ib_verbs.h> 38 + #include <rdma/ib_verbs.h> 39 39 40 40 enum { 41 41 IB_LRH_BYTES = 8,
+11 -11
drivers/infiniband/include/ib_sa.h include/rdma/ib_sa.h
··· 38 38 39 39 #include <linux/compiler.h> 40 40 41 - #include <ib_verbs.h> 42 - #include <ib_mad.h> 41 + #include <rdma/ib_verbs.h> 42 + #include <rdma/ib_mad.h> 43 43 44 44 enum { 45 45 IB_SA_CLASS_VERSION = 2, /* IB spec version 1.1/1.2 */ ··· 133 133 /* reserved */ 134 134 union ib_gid dgid; 135 135 union ib_gid sgid; 136 - u16 dlid; 137 - u16 slid; 136 + __be16 dlid; 137 + __be16 slid; 138 138 int raw_traffic; 139 139 /* reserved */ 140 - u32 flow_label; 140 + __be32 flow_label; 141 141 u8 hop_limit; 142 142 u8 traffic_class; 143 143 int reversible; 144 144 u8 numb_path; 145 - u16 pkey; 145 + __be16 pkey; 146 146 /* reserved */ 147 147 u8 sl; 148 148 u8 mtu_selector; ··· 176 176 struct ib_sa_mcmember_rec { 177 177 union ib_gid mgid; 178 178 union ib_gid port_gid; 179 - u32 qkey; 180 - u16 mlid; 179 + __be32 qkey; 180 + __be16 mlid; 181 181 u8 mtu_selector; 182 182 u8 mtu; 183 183 u8 traffic_class; 184 - u16 pkey; 184 + __be16 pkey; 185 185 u8 rate_selector; 186 186 u8 rate; 187 187 u8 packet_life_time_selector; 188 188 u8 packet_life_time; 189 189 u8 sl; 190 - u32 flow_label; 190 + __be32 flow_label; 191 191 u8 hop_limit; 192 192 u8 scope; 193 193 u8 join_state; ··· 238 238 struct ib_sa_service_rec { 239 239 u64 id; 240 240 union ib_gid gid; 241 - u16 pkey; 241 + __be16 pkey; 242 242 /* reserved */ 243 243 u32 lease; 244 244 u8 key[16];
+9 -11
drivers/infiniband/include/ib_smi.h include/rdma/ib_smi.h
··· 39 39 #if !defined( IB_SMI_H ) 40 40 #define IB_SMI_H 41 41 42 - #include <ib_mad.h> 43 - 44 - #define IB_LID_PERMISSIVE 0xFFFF 42 + #include <rdma/ib_mad.h> 45 43 46 44 #define IB_SMP_DATA_SIZE 64 47 45 #define IB_SMP_MAX_PATH_HOPS 64 ··· 49 51 u8 mgmt_class; 50 52 u8 class_version; 51 53 u8 method; 52 - u16 status; 54 + __be16 status; 53 55 u8 hop_ptr; 54 56 u8 hop_cnt; 55 - u64 tid; 56 - u16 attr_id; 57 - u16 resv; 58 - u32 attr_mod; 59 - u64 mkey; 60 - u16 dr_slid; 61 - u16 dr_dlid; 57 + __be64 tid; 58 + __be16 attr_id; 59 + __be16 resv; 60 + __be32 attr_mod; 61 + __be64 mkey; 62 + __be16 dr_slid; 63 + __be16 dr_dlid; 62 64 u8 reserved[28]; 63 65 u8 data[IB_SMP_DATA_SIZE]; 64 66 u8 initial_path[IB_SMP_MAX_PATH_HOPS];
+14 -14
drivers/infiniband/include/ib_user_cm.h include/rdma/ib_user_cm.h
··· 88 88 }; 89 89 90 90 struct ib_ucm_attr_id_resp { 91 - __u64 service_id; 92 - __u64 service_mask; 93 - __u32 local_id; 94 - __u32 remote_id; 91 + __be64 service_id; 92 + __be64 service_mask; 93 + __be32 local_id; 94 + __be32 remote_id; 95 95 }; 96 96 97 97 struct ib_ucm_listen { 98 - __u64 service_id; 99 - __u64 service_mask; 98 + __be64 service_id; 99 + __be64 service_mask; 100 100 __u32 id; 101 101 }; 102 102 ··· 114 114 struct ib_ucm_path_rec { 115 115 __u8 dgid[16]; 116 116 __u8 sgid[16]; 117 - __u16 dlid; 118 - __u16 slid; 117 + __be16 dlid; 118 + __be16 slid; 119 119 __u32 raw_traffic; 120 - __u32 flow_label; 120 + __be32 flow_label; 121 121 __u32 reversible; 122 122 __u32 mtu; 123 - __u16 pkey; 123 + __be16 pkey; 124 124 __u8 hop_limit; 125 125 __u8 traffic_class; 126 126 __u8 numb_path; ··· 138 138 __u32 qpn; 139 139 __u32 qp_type; 140 140 __u32 psn; 141 - __u64 sid; 141 + __be64 sid; 142 142 __u64 data; 143 143 __u64 primary_path; 144 144 __u64 alternate_path; ··· 200 200 struct ib_ucm_sidr_req { 201 201 __u32 id; 202 202 __u32 timeout; 203 - __u64 sid; 203 + __be64 sid; 204 204 __u64 data; 205 205 __u64 path; 206 206 __u16 pkey; ··· 237 237 /* port */ 238 238 struct ib_ucm_path_rec primary_path; 239 239 struct ib_ucm_path_rec alternate_path; 240 - __u64 remote_ca_guid; 240 + __be64 remote_ca_guid; 241 241 __u32 remote_qkey; 242 242 __u32 remote_qpn; 243 243 __u32 qp_type; ··· 253 253 }; 254 254 255 255 struct ib_ucm_rep_event_resp { 256 - __u64 remote_ca_guid; 256 + __be64 remote_ca_guid; 257 257 __u32 remote_qkey; 258 258 __u32 remote_qpn; 259 259 __u32 starting_psn;
+4 -6
drivers/infiniband/include/ib_user_mad.h include/rdma/ib_user_mad.h
··· 70 70 * @traffic_class - Traffic class in GRH 71 71 * @gid - Remote GID in GRH 72 72 * @flow_label - Flow label in GRH 73 - * 74 - * All multi-byte quantities are stored in network (big endian) byte order. 75 73 */ 76 74 struct ib_user_mad_hdr { 77 75 __u32 id; ··· 77 79 __u32 timeout_ms; 78 80 __u32 retries; 79 81 __u32 length; 80 - __u32 qpn; 81 - __u32 qkey; 82 - __u16 lid; 82 + __be32 qpn; 83 + __be32 qkey; 84 + __be16 lid; 83 85 __u8 sl; 84 86 __u8 path_bits; 85 87 __u8 grh_present; ··· 87 89 __u8 hop_limit; 88 90 __u8 traffic_class; 89 91 __u8 gid[16]; 90 - __u32 flow_label; 92 + __be32 flow_label; 91 93 }; 92 94 93 95 /**
+36 -3
drivers/infiniband/include/ib_user_verbs.h include/rdma/ib_user_verbs.h
··· 78 78 IB_USER_VERBS_CMD_POST_SEND, 79 79 IB_USER_VERBS_CMD_POST_RECV, 80 80 IB_USER_VERBS_CMD_ATTACH_MCAST, 81 - IB_USER_VERBS_CMD_DETACH_MCAST 81 + IB_USER_VERBS_CMD_DETACH_MCAST, 82 + IB_USER_VERBS_CMD_CREATE_SRQ, 83 + IB_USER_VERBS_CMD_MODIFY_SRQ, 84 + IB_USER_VERBS_CMD_QUERY_SRQ, 85 + IB_USER_VERBS_CMD_DESTROY_SRQ, 86 + IB_USER_VERBS_CMD_POST_SRQ_RECV 82 87 }; 83 88 84 89 /* ··· 148 143 149 144 struct ib_uverbs_query_device_resp { 150 145 __u64 fw_ver; 151 - __u64 node_guid; 152 - __u64 sys_image_guid; 146 + __be64 node_guid; 147 + __be64 sys_image_guid; 153 148 __u64 max_mr_size; 154 149 __u64 page_size_cap; 155 150 __u32 vendor_id; ··· 389 384 __u16 mlid; 390 385 __u16 reserved; 391 386 __u64 driver_data[0]; 387 + }; 388 + 389 + struct ib_uverbs_create_srq { 390 + __u64 response; 391 + __u64 user_handle; 392 + __u32 pd_handle; 393 + __u32 max_wr; 394 + __u32 max_sge; 395 + __u32 srq_limit; 396 + __u64 driver_data[0]; 397 + }; 398 + 399 + struct ib_uverbs_create_srq_resp { 400 + __u32 srq_handle; 401 + }; 402 + 403 + struct ib_uverbs_modify_srq { 404 + __u32 srq_handle; 405 + __u32 attr_mask; 406 + __u32 max_wr; 407 + __u32 max_sge; 408 + __u32 srq_limit; 409 + __u32 reserved; 410 + __u64 driver_data[0]; 411 + }; 412 + 413 + struct ib_uverbs_destroy_srq { 414 + __u32 srq_handle; 392 415 }; 393 416 394 417 #endif /* IB_USER_VERBS_H */
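The hunk above adds the userspace-visible create-SRQ command and response layouts. Below is a hedged sketch of how a userspace verbs library might fill them. The structures are re-declared locally (flexible driver_data array dropped) to keep the example self-contained, the handle and size values are illustrative, and the uverbs command header plus the write() to the uverbs device file that would actually carry the command are omitted because they are outside this hunk.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct create_srq_cmd {             /* mirrors struct ib_uverbs_create_srq */
	uint64_t response;          /* user address where the response is expected */
	uint64_t user_handle;       /* opaque cookie chosen by the caller */
	uint32_t pd_handle;
	uint32_t max_wr;
	uint32_t max_sge;
	uint32_t srq_limit;
};

struct create_srq_resp {            /* mirrors struct ib_uverbs_create_srq_resp */
	uint32_t srq_handle;
};

int main(void)
{
	struct create_srq_resp resp;
	struct create_srq_cmd cmd;

	memset(&cmd, 0, sizeof cmd);
	cmd.response    = (uintptr_t) &resp;
	cmd.user_handle = 1;        /* illustrative */
	cmd.pd_handle   = 0;        /* handle of an already-created PD (illustrative) */
	cmd.max_wr      = 64;
	cmd.max_sge     = 1;
	cmd.srq_limit   = 0;

	printf("create-SRQ command prepared (%zu bytes)\n", sizeof cmd);
	return 0;
}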
+107 -11
drivers/infiniband/include/ib_verbs.h include/rdma/ib_verbs.h
··· 4 4 * Copyright (c) 2004 Intel Corporation. All rights reserved. 5 5 * Copyright (c) 2004 Topspin Corporation. All rights reserved. 6 6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved. 7 + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 7 8 * Copyright (c) 2005 Cisco Systems. All rights reserved. 8 9 * 9 10 * This software is available to you under a choice of one of two ··· 51 50 union ib_gid { 52 51 u8 raw[16]; 53 52 struct { 54 - u64 subnet_prefix; 55 - u64 interface_id; 53 + __be64 subnet_prefix; 54 + __be64 interface_id; 56 55 } global; 57 56 }; 58 57 ··· 88 87 89 88 struct ib_device_attr { 90 89 u64 fw_ver; 91 - u64 node_guid; 92 - u64 sys_image_guid; 90 + __be64 node_guid; 91 + __be64 sys_image_guid; 93 92 u64 max_mr_size; 94 93 u64 page_size_cap; 95 94 u32 vendor_id; ··· 256 255 IB_EVENT_PORT_ERR, 257 256 IB_EVENT_LID_CHANGE, 258 257 IB_EVENT_PKEY_CHANGE, 259 - IB_EVENT_SM_CHANGE 258 + IB_EVENT_SM_CHANGE, 259 + IB_EVENT_SRQ_ERR, 260 + IB_EVENT_SRQ_LIMIT_REACHED, 261 + IB_EVENT_QP_LAST_WQE_REACHED 260 262 }; 261 263 262 264 struct ib_event { ··· 267 263 union { 268 264 struct ib_cq *cq; 269 265 struct ib_qp *qp; 266 + struct ib_srq *srq; 270 267 u8 port_num; 271 268 } element; 272 269 enum ib_event_type event; ··· 295 290 }; 296 291 297 292 struct ib_grh { 298 - u32 version_tclass_flow; 299 - u16 paylen; 293 + __be32 version_tclass_flow; 294 + __be16 paylen; 300 295 u8 next_hdr; 301 296 u8 hop_limit; 302 297 union ib_gid sgid; ··· 306 301 enum { 307 302 IB_MULTICAST_QPN = 0xffffff 308 303 }; 304 + 305 + #define IB_LID_PERMISSIVE __constant_htons(0xFFFF) 309 306 310 307 enum ib_ah_flags { 311 308 IB_AH_GRH = 1 ··· 388 381 enum ib_cq_notify { 389 382 IB_CQ_SOLICITED, 390 383 IB_CQ_NEXT_COMP 384 + }; 385 + 386 + enum ib_srq_attr_mask { 387 + IB_SRQ_MAX_WR = 1 << 0, 388 + IB_SRQ_LIMIT = 1 << 1, 389 + }; 390 + 391 + struct ib_srq_attr { 392 + u32 max_wr; 393 + u32 max_sge; 394 + u32 srq_limit; 395 + }; 396 + 397 + struct ib_srq_init_attr { 398 + void (*event_handler)(struct ib_event *, void *); 399 + void *srq_context; 400 + struct ib_srq_attr attr; 391 401 }; 392 402 393 403 struct ib_qp_cap { ··· 734 710 }; 735 711 736 712 struct ib_srq { 737 - struct ib_device *device; 738 - struct ib_uobject *uobject; 739 - struct ib_pd *pd; 740 - void *srq_context; 713 + struct ib_device *device; 714 + struct ib_pd *pd; 715 + struct ib_uobject *uobject; 716 + void (*event_handler)(struct ib_event *, void *); 717 + void *srq_context; 741 718 atomic_t usecnt; 742 719 }; 743 720 ··· 852 827 int (*query_ah)(struct ib_ah *ah, 853 828 struct ib_ah_attr *ah_attr); 854 829 int (*destroy_ah)(struct ib_ah *ah); 830 + struct ib_srq * (*create_srq)(struct ib_pd *pd, 831 + struct ib_srq_init_attr *srq_init_attr, 832 + struct ib_udata *udata); 833 + int (*modify_srq)(struct ib_srq *srq, 834 + struct ib_srq_attr *srq_attr, 835 + enum ib_srq_attr_mask srq_attr_mask); 836 + int (*query_srq)(struct ib_srq *srq, 837 + struct ib_srq_attr *srq_attr); 838 + int (*destroy_srq)(struct ib_srq *srq); 839 + int (*post_srq_recv)(struct ib_srq *srq, 840 + struct ib_recv_wr *recv_wr, 841 + struct ib_recv_wr **bad_recv_wr); 855 842 struct ib_qp * (*create_qp)(struct ib_pd *pd, 856 843 struct ib_qp_init_attr *qp_init_attr, 857 844 struct ib_udata *udata); ··· 1074 1037 * @ah: The address handle to destroy. 1075 1038 */ 1076 1039 int ib_destroy_ah(struct ib_ah *ah); 1040 + 1041 + /** 1042 + * ib_create_srq - Creates a SRQ associated with the specified protection 1043 + * domain. 
1044 + * @pd: The protection domain associated with the SRQ. 1045 + * @srq_init_attr: A list of initial attributes required to create the SRQ. 1046 + * 1047 + * srq_attr->max_wr and srq_attr->max_sge are read to determine the 1048 + * requested size of the SRQ, and set to the actual values allocated 1049 + * on return. If ib_create_srq() succeeds, then max_wr and max_sge 1050 + * will always be at least as large as the requested values. 1051 + */ 1052 + struct ib_srq *ib_create_srq(struct ib_pd *pd, 1053 + struct ib_srq_init_attr *srq_init_attr); 1054 + 1055 + /** 1056 + * ib_modify_srq - Modifies the attributes for the specified SRQ. 1057 + * @srq: The SRQ to modify. 1058 + * @srq_attr: On input, specifies the SRQ attributes to modify. On output, 1059 + * the current values of selected SRQ attributes are returned. 1060 + * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ 1061 + * are being modified. 1062 + * 1063 + * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or 1064 + * IB_SRQ_LIMIT to set the SRQ's limit and request notification when 1065 + * the number of receives queued drops below the limit. 1066 + */ 1067 + int ib_modify_srq(struct ib_srq *srq, 1068 + struct ib_srq_attr *srq_attr, 1069 + enum ib_srq_attr_mask srq_attr_mask); 1070 + 1071 + /** 1072 + * ib_query_srq - Returns the attribute list and current values for the 1073 + * specified SRQ. 1074 + * @srq: The SRQ to query. 1075 + * @srq_attr: The attributes of the specified SRQ. 1076 + */ 1077 + int ib_query_srq(struct ib_srq *srq, 1078 + struct ib_srq_attr *srq_attr); 1079 + 1080 + /** 1081 + * ib_destroy_srq - Destroys the specified SRQ. 1082 + * @srq: The SRQ to destroy. 1083 + */ 1084 + int ib_destroy_srq(struct ib_srq *srq); 1085 + 1086 + /** 1087 + * ib_post_srq_recv - Posts a list of work requests to the specified SRQ. 1088 + * @srq: The SRQ to post the work request on. 1089 + * @recv_wr: A list of work requests to post on the receive queue. 1090 + * @bad_recv_wr: On an immediate failure, this parameter will reference 1091 + * the work request that failed to be posted on the SRQ. 1092 + */ 1093 + static inline int ib_post_srq_recv(struct ib_srq *srq, 1094 + struct ib_recv_wr *recv_wr, 1095 + struct ib_recv_wr **bad_recv_wr) 1096 + { 1097 + return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr); 1098 + } 1077 1099 1078 1100 /** 1079 1101 * ib_create_qp - Creates a QP associated with the specified protection
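The kernel-doc above documents the new SRQ verbs. The sketch below shows how an in-kernel consumer might exercise them end to end: create an SRQ, post a receive, arm the limit, then tear the SRQ down. The function names (example_use_srq, example_srq_event) are mine, it assumes a valid PD and an already-registered receive buffer (dma_addr/lkey), and error handling is abbreviated, so treat it as an illustration of the API rather than production code.

#include <linux/err.h>
#include <rdma/ib_verbs.h>

/* Async SRQ events (e.g. IB_EVENT_SRQ_LIMIT_REACHED) are delivered here. */
static void example_srq_event(struct ib_event *event, void *context)
{
}

static int example_use_srq(struct ib_pd *pd, u64 dma_addr, u32 lkey)
{
	struct ib_srq_init_attr init_attr = {
		.event_handler = example_srq_event,
		.srq_context   = NULL,
		.attr = {
			.max_wr  = 64,
			.max_sge = 1,
		},
	};
	struct ib_srq_attr attr = { 0 };
	struct ib_sge sge;
	struct ib_recv_wr wr, *bad_wr;
	struct ib_srq *srq;
	int ret;

	srq = ib_create_srq(pd, &init_attr);
	if (IS_ERR(srq))
		return PTR_ERR(srq);

	/* Post one receive buffer to the shared queue. */
	sge.addr   = dma_addr;
	sge.length = 2048;
	sge.lkey   = lkey;

	wr.next    = NULL;
	wr.wr_id   = 1;
	wr.sg_list = &sge;
	wr.num_sge = 1;

	ret = ib_post_srq_recv(srq, &wr, &bad_wr);
	if (ret)
		goto out;

	/* Arm the limit so the event handler fires when the SRQ runs low. */
	attr.srq_limit = 16;
	ret = ib_modify_srq(srq, &attr, IB_SRQ_LIMIT);

out:
	ib_destroy_srq(srq);
	return ret;
}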
-2
drivers/infiniband/ulp/ipoib/Makefile
··· 1 - EXTRA_CFLAGS += -Idrivers/infiniband/include 2 - 3 1 obj-$(CONFIG_INFINIBAND_IPOIB) += ib_ipoib.o 4 2 5 3 ib_ipoib-y := ipoib_main.o \
+7 -5
drivers/infiniband/ulp/ipoib/ipoib.h
··· 1 1 /* 2 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 3 + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 4 + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. 3 5 * 4 6 * This software is available to you under a choice of one of two 5 7 * licenses. You may choose to be licensed under the terms of the GNU ··· 51 49 #include <asm/atomic.h> 52 50 #include <asm/semaphore.h> 53 51 54 - #include <ib_verbs.h> 55 - #include <ib_pack.h> 56 - #include <ib_sa.h> 52 + #include <rdma/ib_verbs.h> 53 + #include <rdma/ib_pack.h> 54 + #include <rdma/ib_sa.h> 57 55 58 56 /* constants */ 59 57 ··· 90 88 /* structs */ 91 89 92 90 struct ipoib_header { 93 - u16 proto; 94 - u16 reserved; 91 + __be16 proto; 92 + u16 reserved; 95 93 }; 96 94 97 95 struct ipoib_pseudoheader {
+1 -1
drivers/infiniband/ulp/ipoib/ipoib_fs.c
··· 97 97 98 98 for (n = 0, i = 0; i < sizeof mgid / 2; ++i) { 99 99 n += sprintf(gid_buf + n, "%x", 100 - be16_to_cpu(((u16 *)mgid.raw)[i])); 100 + be16_to_cpu(((__be16 *) mgid.raw)[i])); 101 101 if (i < sizeof mgid / 2 - 1) 102 102 gid_buf[n++] = ':'; 103 103 }
+4 -1
drivers/infiniband/ulp/ipoib/ipoib_ib.c
··· 1 1 /* 2 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 3 + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 4 + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. 5 + * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved. 3 6 * 4 7 * This software is available to you under a choice of one of two 5 8 * licenses. You may choose to be licensed under the terms of the GNU ··· 38 35 #include <linux/delay.h> 39 36 #include <linux/dma-mapping.h> 40 37 41 - #include <ib_cache.h> 38 + #include <rdma/ib_cache.h> 42 39 43 40 #include "ipoib.h" 44 41
+21 -12
drivers/infiniband/ulp/ipoib/ipoib_main.c
··· 1 1 /* 2 2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 3 + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 4 + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. 3 5 * 4 6 * This software is available to you under a choice of one of two 5 7 * licenses. You may choose to be licensed under the terms of the GNU ··· 36 34 37 35 #include "ipoib.h" 38 36 39 - #include <linux/version.h> 40 37 #include <linux/module.h> 41 38 42 39 #include <linux/init.h> ··· 608 607 ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x " 609 608 IPOIB_GID_FMT "\n", 610 609 skb->dst ? "neigh" : "dst", 611 - be16_to_cpup((u16 *) skb->data), 612 - be32_to_cpup((u32 *) phdr->hwaddr), 610 + be16_to_cpup((__be16 *) skb->data), 611 + be32_to_cpup((__be32 *) phdr->hwaddr), 613 612 IPOIB_GID_ARG(*(union ib_gid *) (phdr->hwaddr + 4))); 614 613 dev_kfree_skb_any(skb); 615 614 ++priv->stats.tx_dropped; ··· 672 671 { 673 672 struct ipoib_dev_priv *priv = netdev_priv(dev); 674 673 675 - schedule_work(&priv->restart_task); 674 + queue_work(ipoib_workqueue, &priv->restart_task); 676 675 } 677 676 678 677 static void ipoib_neigh_destructor(struct neighbour *n) ··· 781 780 782 781 ipoib_ib_dev_cleanup(dev); 783 782 784 - if (priv->rx_ring) { 785 - kfree(priv->rx_ring); 786 - priv->rx_ring = NULL; 787 - } 783 + kfree(priv->rx_ring); 784 + kfree(priv->tx_ring); 788 785 789 - if (priv->tx_ring) { 790 - kfree(priv->tx_ring); 791 - priv->tx_ring = NULL; 792 - } 786 + priv->rx_ring = NULL; 787 + priv->tx_ring = NULL; 793 788 } 794 789 795 790 static void ipoib_setup(struct net_device *dev) ··· 883 886 if (pkey < 0 || pkey > 0xffff) 884 887 return -EINVAL; 885 888 889 + /* 890 + * Set the full membership bit, so that we join the right 891 + * broadcast group, etc. 892 + */ 893 + pkey |= 0x8000; 894 + 886 895 ret = ipoib_vlan_add(container_of(cdev, struct net_device, class_dev), 887 896 pkey); 888 897 ··· 940 937 hca->name, port, result); 941 938 goto alloc_mem_failed; 942 939 } 940 + 941 + /* 942 + * Set the full membership bit, so that we join the right 943 + * broadcast group, etc. 944 + */ 945 + priv->pkey |= 0x8000; 943 946 944 947 priv->dev->broadcast[8] = priv->pkey >> 8; 945 948 priv->dev->broadcast[9] = priv->pkey & 0xff;
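Both hunks above OR the full-membership bit (0x8000) into the P_Key before it is used, and the surrounding code then places the P_Key into bytes 8 and 9 of the IPoIB broadcast address. The snippet below just demonstrates that arithmetic in user space with an illustrative P_Key value; it is not driver code.

#include <stdio.h>

int main(void)
{
	unsigned int pkey = 0x0001;         /* illustrative limited-membership P_Key */
	unsigned char broadcast[10] = { 0 };

	pkey |= 0x8000;                     /* full membership bit */

	broadcast[8] = pkey >> 8;
	broadcast[9] = pkey & 0xff;

	printf("P_Key 0x%04x -> broadcast[8..9] = %02x:%02x\n",
	       pkey, broadcast[8], broadcast[9]);
	return 0;
}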
+5 -3
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
··· 1 1 /* 2 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 3 + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 4 + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. 3 5 * 4 6 * This software is available to you under a choice of one of two 5 7 * licenses. You may choose to be licensed under the terms of the GNU ··· 359 357 360 358 rec.mgid = mcast->mcmember.mgid; 361 359 rec.port_gid = priv->local_gid; 362 - rec.pkey = be16_to_cpu(priv->pkey); 360 + rec.pkey = cpu_to_be16(priv->pkey); 363 361 364 362 ret = ib_sa_mcmember_rec_set(priv->ca, priv->port, &rec, 365 363 IB_SA_MCMEMBER_REC_MGID | ··· 459 457 460 458 rec.mgid = mcast->mcmember.mgid; 461 459 rec.port_gid = priv->local_gid; 462 - rec.pkey = be16_to_cpu(priv->pkey); 460 + rec.pkey = cpu_to_be16(priv->pkey); 463 461 464 462 comp_mask = 465 463 IB_SA_MCMEMBER_REC_MGID | ··· 648 646 649 647 rec.mgid = mcast->mcmember.mgid; 650 648 rec.port_gid = priv->local_gid; 651 - rec.pkey = be16_to_cpu(priv->pkey); 649 + rec.pkey = cpu_to_be16(priv->pkey); 652 650 653 651 /* Remove ourselves from the multicast group */ 654 652 ret = ipoib_mcast_detach(dev, be16_to_cpu(mcast->mcmember.mlid),
+2 -1
drivers/infiniband/ulp/ipoib/ipoib_verbs.c
··· 1 1 /* 2 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 3 + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. 3 4 * 4 5 * This software is available to you under a choice of one of two 5 6 * licenses. You may choose to be licensed under the terms of the GNU ··· 33 32 * $Id: ipoib_verbs.c 1349 2004-12-16 21:09:43Z roland $ 34 33 */ 35 34 36 - #include <ib_cache.h> 35 + #include <rdma/ib_cache.h> 37 36 38 37 #include "ipoib.h" 39 38
-1
drivers/infiniband/ulp/ipoib/ipoib_vlan.c
··· 32 32 * $Id: ipoib_vlan.c 1349 2004-12-16 21:09:43Z roland $ 33 33 */ 34 34 35 - #include <linux/version.h> 36 35 #include <linux/module.h> 37 36 38 37 #include <linux/init.h>