Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at 309a29b5965a0b2f36b3e245213eb43300a89ac2 295 lines 8.1 kB view raw
/*
 * Copyright (c) 2004, 2005, Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2009 HNR Consulting. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *   - Redistributions of source code must retain the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer.
 *
 *   - Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials
 *     provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __IB_MAD_PRIV_H__
#define __IB_MAD_PRIV_H__

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/workqueue.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/opa_smi.h>

/*
 * Private definitions for the core MAD (Management Datagram) layer:
 * per-port QP bookkeeping, the send-side MAD state machine, RMPP
 * segment tracking, and the class/version/vendor registration tables.
 */

#define IB_MAD_QPS_CORE		2 /* Always QP0 and QP1 as a minimum */

/* QP and CQ parameters */
#define IB_MAD_QP_SEND_SIZE	128
#define IB_MAD_QP_RECV_SIZE	512
#define IB_MAD_QP_MIN_SIZE	64
#define IB_MAD_QP_MAX_SIZE	8192
#define IB_MAD_SEND_REQ_MAX_SG	2
#define IB_MAD_RECV_REQ_MAX_SG	1

#define IB_MAD_SEND_Q_PSN	0

/* Registration table sizes */
#define MAX_MGMT_CLASS		80
#define MAX_MGMT_VERSION	0x83
#define MAX_MGMT_OUI		8
#define MAX_MGMT_VENDOR_RANGE2	(IB_MGMT_CLASS_VENDOR_RANGE2_END - \
				IB_MGMT_CLASS_VENDOR_RANGE2_START + 1)

/*
 * List node linking a posted work request back to the MAD queue it
 * belongs to; the embedded ib_cqe carries the completion callback.
 */
struct ib_mad_list_head {
	struct list_head list;
	struct ib_cqe cqe;
	struct ib_mad_queue *mad_queue;
};

/* Header prepended to every receive-side MAD buffer (see ib_mad_private). */
struct ib_mad_private_header {
	struct ib_mad_list_head mad_list;
	struct ib_mad_recv_wc recv_wc;
	struct ib_wc wc;
	/* NOTE(review): presumably the DMA address of the buffer — confirm */
	u64 mapping;
} __packed;

/*
 * Receive buffer layout: bookkeeping header, then GRH, then the MAD
 * payload itself as a flexible array of mad_size bytes.
 */
struct ib_mad_private {
	struct ib_mad_private_header header;
	size_t mad_size;
	struct ib_grh grh;
	u8 mad[];
} __packed;

/* One numbered segment of an RMPP (multi-packet) transfer. */
struct ib_rmpp_segment {
	struct list_head list;
	u32 num;
	u8 data[];
};

/*
 * Per-registered-agent private state. The send/wait/backlog lists hold
 * ib_mad_send_wr_private entries in the states described by
 * enum ib_mad_state below; "sol_fc_*" counters track solicited-MAD
 * flow control. Teardown uses refcount, then either comp or rcu
 * (union: the two are never needed at the same time).
 */
struct ib_mad_agent_private {
	struct ib_mad_agent agent;
	struct ib_mad_reg_req *reg_req;
	struct ib_mad_qp_info *qp_info;

	spinlock_t lock;	/* NOTE(review): appears to guard the lists below — confirm */
	struct list_head send_list;
	unsigned int sol_fc_send_count;
	struct list_head wait_list;
	unsigned int sol_fc_wait_count;
	struct delayed_work timed_work;
	unsigned long timeout;
	struct list_head local_list;
	struct work_struct local_work;
	struct list_head rmpp_list;
	unsigned int sol_fc_max;
	struct list_head backlog_list;

	refcount_t refcount;
	union {
		struct completion comp;
		struct rcu_head rcu;
	};
};

/* Entry in ib_mad_qp_info::snoop_table; comp signals removal completion. */
struct ib_mad_snoop_private {
	struct ib_mad_agent agent;
	struct ib_mad_qp_info *qp_info;
	int snoop_index;
	int mad_snoop_flags;
	struct completion comp;
};

/* Lifecycle of an outgoing MAD (ib_mad_send_wr_private::state). */
enum ib_mad_state {
	/* MAD is in the making and is not yet in any list */
	IB_MAD_STATE_INIT,
	/* MAD is in backlog list */
	IB_MAD_STATE_QUEUED,
	/*
	 * MAD was sent to the QP and is waiting for completion
	 * notification in send list.
	 */
	IB_MAD_STATE_SEND_START,
	/*
	 * MAD send completed successfully, waiting for a response
	 * in wait list.
	 */
	IB_MAD_STATE_WAIT_RESP,
	/*
	 * Response came early, before send completion notification,
	 * in send list.
	 */
	IB_MAD_STATE_EARLY_RESP,
	/* MAD was canceled while in wait or send list */
	IB_MAD_STATE_CANCELED,
	/* MAD processing completed, MAD in no list */
	IB_MAD_STATE_DONE
};

/*
 * Send-side work request: one in-flight (or queued) outgoing MAD,
 * owned by mad_agent_priv and linked into that agent's lists via
 * agent_list. state follows enum ib_mad_state above.
 */
struct ib_mad_send_wr_private {
	struct ib_mad_list_head mad_list;
	struct list_head agent_list;
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_buf send_buf;
	u64 header_mapping;
	u64 payload_mapping;
	struct ib_ud_wr send_wr;
	struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG];
	__be64 tid;
	unsigned long timeout;
	int max_retries;
	int retries_left;
	int retry;

	/* RMPP control */
	struct list_head rmpp_list;
	struct ib_rmpp_segment *last_ack_seg;
	struct ib_rmpp_segment *cur_seg;
	int last_ack;
	int seg_num;
	int newwin;
	int pad;

	enum ib_mad_state state;

	/* Solicited MAD flow control */
	bool is_solicited_fc;
};

/*
 * State-machine sanity checks for ib_mad_send_wr_private. They are
 * compiled out unless CONFIG_LOCKDEP is enabled; with lockdep on, a
 * MAD found in an unexpected state trips a WARN_ON.
 */
static inline void expect_mad_state(struct ib_mad_send_wr_private *mad_send_wr,
				    enum ib_mad_state expected_state)
{
	if (IS_ENABLED(CONFIG_LOCKDEP))
		WARN_ON(mad_send_wr->state != expected_state);
}

/* As expect_mad_state(), but either of two states is acceptable. */
static inline void expect_mad_state2(struct ib_mad_send_wr_private *mad_send_wr,
				     enum ib_mad_state expected_state1,
				     enum ib_mad_state expected_state2)
{
	if (IS_ENABLED(CONFIG_LOCKDEP))
		WARN_ON(mad_send_wr->state != expected_state1 &&
			mad_send_wr->state != expected_state2);
}

/* As expect_mad_state(), but any of three states is acceptable. */
static inline void expect_mad_state3(struct ib_mad_send_wr_private *mad_send_wr,
				     enum ib_mad_state expected_state1,
				     enum ib_mad_state expected_state2,
				     enum ib_mad_state expected_state3)
{
	if (IS_ENABLED(CONFIG_LOCKDEP))
		WARN_ON(mad_send_wr->state != expected_state1 &&
			mad_send_wr->state != expected_state2 &&
			mad_send_wr->state != expected_state3);
}

/* Inverse check: WARN (lockdep builds only) if the MAD IS in wrong_state. */
static inline void
not_expect_mad_state(struct ib_mad_send_wr_private *mad_send_wr,
		     enum ib_mad_state wrong_state)
{
	if (IS_ENABLED(CONFIG_LOCKDEP))
		WARN_ON(mad_send_wr->state == wrong_state);
}

/* Completion record for a locally-routed (loopback) MAD. */
struct ib_mad_local_private {
	struct list_head completion_list;
	struct ib_mad_private *mad_priv;
	struct ib_mad_agent_private *recv_mad_agent;
	struct ib_mad_send_wr_private *mad_send_wr;
	size_t return_wc_byte_len;
};

/*
 * Registration lookup tables: version -> class (or vendor class) ->
 * method -> agent. Indexed by the MAD header fields on receive.
 */
struct ib_mad_mgmt_method_table {
	struct ib_mad_agent_private *agent[IB_MGMT_MAX_METHODS];
};

struct ib_mad_mgmt_class_table {
	struct ib_mad_mgmt_method_table *method_table[MAX_MGMT_CLASS];
};

struct ib_mad_mgmt_vendor_class {
	u8 oui[MAX_MGMT_OUI][3];	/* vendor OUIs, parallel to method_table */
	struct ib_mad_mgmt_method_table *method_table[MAX_MGMT_OUI];
};

struct ib_mad_mgmt_vendor_class_table {
	struct ib_mad_mgmt_vendor_class *vendor_class[MAX_MGMT_VENDOR_RANGE2];
};

struct ib_mad_mgmt_version_table {
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_vendor_class_table *vendor;
};

/* One direction (send or receive) of a MAD QP's posted-buffer list. */
struct ib_mad_queue {
	spinlock_t lock;	/* NOTE(review): appears to guard list/count — confirm */
	struct list_head list;
	int count;
	int max_active;
	struct ib_mad_qp_info *qp_info;
};

/* Per-QP (QP0/QP1) state: its queues, overflow list, and snoop agents. */
struct ib_mad_qp_info {
	struct ib_mad_port_private *port_priv;
	struct ib_qp *qp;
	struct ib_mad_queue send_queue;
	struct ib_mad_queue recv_queue;
	struct list_head overflow_list;
	spinlock_t snoop_lock;
	struct ib_mad_snoop_private **snoop_table;
	int snoop_table_size;
	atomic_t snoop_count;
};

/*
 * Per-port MAD state: the special QPs (qp_info, IB_MAD_QPS_CORE of
 * them), the registration tables (version, guarded by reg_lock), and
 * the port's completion-handling workqueue.
 */
struct ib_mad_port_private {
	struct list_head port_list;
	struct ib_device *device;
	int port_num;
	struct ib_cq *cq;
	struct ib_pd *pd;

	spinlock_t reg_lock;
	struct ib_mad_mgmt_version_table version[MAX_MGMT_VERSION];
	struct workqueue_struct *wq;
	struct ib_mad_qp_info qp_info[IB_MAD_QPS_CORE];
};

/* Post an outgoing MAD work request to its QP. */
int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr);

/* Look up the outstanding send that a received MAD responds to. */
struct ib_mad_send_wr_private *
ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
		 const struct ib_mad_recv_wc *mad_recv_wc);

/* Deliver a send completion to the owning agent. */
void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
			     struct ib_mad_send_wc *mad_send_wc);

void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr);

void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
			  unsigned long timeout_ms);

/* Transition a send work request to new_state (see enum ib_mad_state). */
void change_mad_state(struct ib_mad_send_wr_private *mad_send_wr,
		      enum ib_mad_state new_state);

#endif /* __IB_MAD_PRIV_H__ */