Linux kernel mirror (git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git), at v4.4-rc6
#ifndef _RDS_IB_H
#define _RDS_IB_H

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "rds.h"
#include "rdma_transport.h"

#define RDS_FMR_1M_POOL_SIZE	(8192 / 2)
#define RDS_FMR_1M_MSG_SIZE	256
#define RDS_FMR_8K_MSG_SIZE	2
#define RDS_MR_8K_SCALE		(256 / (RDS_FMR_8K_MSG_SIZE + 1))
#define RDS_FMR_8K_POOL_SIZE	(RDS_MR_8K_SCALE * (8192 / 2))

#define RDS_IB_MAX_SGE		8
#define RDS_IB_RECV_SGE		2

#define RDS_IB_DEFAULT_RECV_WR	1024
#define RDS_IB_DEFAULT_SEND_WR	256

#define RDS_IB_DEFAULT_RETRY_COUNT	2

#define RDS_IB_SUPPORTED_PROTOCOLS	0x00000003	/* minor versions supported */

#define RDS_IB_RECYCLE_BATCH_COUNT	32

#define RDS_IB_WC_MAX		32
#define RDS_IB_SEND_OP		BIT_ULL(63)

extern struct rw_semaphore rds_ib_devices_lock;
extern struct list_head rds_ib_devices;

/*
 * IB posts RDS_FRAG_SIZE fragments of pages to the receive queues to
 * try and minimize the amount of memory tied up both in the device and
 * socket receive queues.
 */
struct rds_page_frag {
	struct list_head	f_item;
	struct list_head	f_cache_entry;
	struct scatterlist	f_sg;
};

struct rds_ib_incoming {
	struct list_head	ii_frags;
	struct list_head	ii_cache_entry;
	struct rds_incoming	ii_inc;
};

struct rds_ib_cache_head {
	struct list_head	*first;
	unsigned long		count;
};

struct rds_ib_refill_cache {
	struct rds_ib_cache_head __percpu *percpu;
	struct list_head	*xfer;
	struct list_head	*ready;
};

struct rds_ib_connect_private {
	/* Add new fields at the end, and don't permute existing fields. */
	__be32			dp_saddr;
	__be32			dp_daddr;
	u8			dp_protocol_major;
	u8			dp_protocol_minor;
	__be16			dp_protocol_minor_mask; /* bitmask */
	__be32			dp_reserved1;
	__be64			dp_ack_seq;
	__be32			dp_credit;	/* non-zero enables flow ctl */
};

struct rds_ib_send_work {
	void			*s_op;
	union {
		struct ib_send_wr	s_wr;
		struct ib_rdma_wr	s_rdma_wr;
		struct ib_atomic_wr	s_atomic_wr;
	};
	struct ib_sge		s_sge[RDS_IB_MAX_SGE];
	unsigned long		s_queued;
};

struct rds_ib_recv_work {
	struct rds_ib_incoming	*r_ibinc;
	struct rds_page_frag	*r_frag;
	struct ib_recv_wr	r_wr;
	struct ib_sge		r_sge[2];
};

struct rds_ib_work_ring {
	u32		w_nr;
	u32		w_alloc_ptr;
	u32		w_alloc_ctr;
	u32		w_free_ptr;
	atomic_t	w_free_ctr;
};
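/*
 * Illustrative sketch (not part of the original header): the ring fields
 * above suggest the usual producer/consumer accounting, where entries in
 * flight are the allocations minus the completions. The helper name
 * rds_ib_ring_used_sketch is hypothetical; the real accounting lives in
 * ib_ring.c.
 */
static inline u32 rds_ib_ring_used_sketch(struct rds_ib_work_ring *ring)
{
	/* w_free_ctr trails w_alloc_ctr; unsigned subtraction handles wrap */
	return ring->w_alloc_ctr - (u32) atomic_read(&ring->w_free_ctr);
}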
/* Rings are posted with all the allocations they'll need to queue the
 * incoming message to the receiving socket so this can't fail.
 * All fragments start with a header, so we can make sure we're not receiving
 * garbage, and we can tell a small 8 byte fragment from an ACK frame.
 */
struct rds_ib_ack_state {
	u64		ack_next;
	u64		ack_recv;
	unsigned int	ack_required:1;
	unsigned int	ack_next_valid:1;
	unsigned int	ack_recv_valid:1;
};

struct rds_ib_device;

struct rds_ib_connection {

	struct list_head	ib_node;
	struct rds_ib_device	*rds_ibdev;
	struct rds_connection	*conn;

	/* alphabet soup, IBTA style */
	struct rdma_cm_id	*i_cm_id;
	struct ib_pd		*i_pd;
	struct ib_cq		*i_send_cq;
	struct ib_cq		*i_recv_cq;
	struct ib_wc		i_send_wc[RDS_IB_WC_MAX];
	struct ib_wc		i_recv_wc[RDS_IB_WC_MAX];

	/* interrupt handling */
	struct tasklet_struct	i_send_tasklet;
	struct tasklet_struct	i_recv_tasklet;

	/* tx */
	struct rds_ib_work_ring	i_send_ring;
	struct rm_data_op	*i_data_op;
	struct rds_header	*i_send_hdrs;
	u64			i_send_hdrs_dma;
	struct rds_ib_send_work *i_sends;
	atomic_t		i_signaled_sends;

	/* rx */
	struct mutex		i_recv_mutex;
	struct rds_ib_work_ring	i_recv_ring;
	struct rds_ib_incoming	*i_ibinc;
	u32			i_recv_data_rem;
	struct rds_header	*i_recv_hdrs;
	u64			i_recv_hdrs_dma;
	struct rds_ib_recv_work *i_recvs;
	u64			i_ack_recv;	/* last ACK received */
	struct rds_ib_refill_cache i_cache_incs;
	struct rds_ib_refill_cache i_cache_frags;

	/* sending acks */
	unsigned long		i_ack_flags;
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_t		i_ack_next;	/* next ACK to send */
#else
	spinlock_t		i_ack_lock;	/* protect i_ack_next */
	u64			i_ack_next;	/* next ACK to send */
#endif
	struct rds_header	*i_ack;
	struct ib_send_wr	i_ack_wr;
	struct ib_sge		i_ack_sge;
	u64			i_ack_dma;
	unsigned long		i_ack_queued;

	/* Flow control related information
	 *
	 * Our algorithm uses a pair of variables that we need to access
	 * atomically - one for the send credits, and one for the posted
	 * recv credits we need to transfer to the remote.
	 * Rather than protect them using a slow spinlock, we put both into
	 * a single atomic_t and update it using cmpxchg.
	 */
	atomic_t		i_credits;

	/* Protocol version specific information */
	unsigned int		i_flowctl:1;	/* enable/disable flow ctl */

	/* Batched completions */
	unsigned int		i_unsignaled_wrs;
};

/* This assumes that atomic_t is at least 32 bits */
#define IB_GET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_GET_POST_CREDITS(v)	((v) >> 16)
#define IB_SET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_SET_POST_CREDITS(v)	((v) << 16)
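/*
 * Illustrative sketch (not part of the original header): one way the two
 * 16-bit counters packed into i_credits might be bumped with a single
 * cmpxchg loop, per the flow control comment in struct rds_ib_connection.
 * The helper name rds_ib_add_credits_sketch is hypothetical; the real
 * update logic lives in ib_send.c.
 */
static inline void rds_ib_add_credits_sketch(atomic_t *credits,
					     unsigned int send,
					     unsigned int posted)
{
	unsigned int oldval, newval;

	do {
		oldval = (unsigned int) atomic_read(credits);
		newval = IB_SET_SEND_CREDITS(IB_GET_SEND_CREDITS(oldval) + send) |
			 IB_SET_POST_CREDITS(IB_GET_POST_CREDITS(oldval) + posted);
	} while ((unsigned int) atomic_cmpxchg(credits, oldval, newval) != oldval);
}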
struct rds_ib_ipaddr {
	struct list_head	list;
	__be32			ipaddr;
	struct rcu_head		rcu;
};

enum {
	RDS_IB_MR_8K_POOL,
	RDS_IB_MR_1M_POOL,
};

struct rds_ib_device {
	struct list_head	list;
	struct list_head	ipaddr_list;
	struct list_head	conn_list;
	struct ib_device	*dev;
	struct ib_pd		*pd;
	unsigned int		max_fmrs;
	struct rds_ib_mr_pool	*mr_1m_pool;
	struct rds_ib_mr_pool	*mr_8k_pool;
	unsigned int		fmr_max_remaps;
	unsigned int		max_8k_fmrs;
	unsigned int		max_1m_fmrs;
	int			max_sge;
	unsigned int		max_wrs;
	unsigned int		max_initiator_depth;
	unsigned int		max_responder_resources;
	spinlock_t		spinlock;	/* protect the above */
	atomic_t		refcount;
	struct work_struct	free_work;
};

#define ibdev_to_node(ibdev) dev_to_node(ibdev->dma_device)
#define rdsibdev_to_node(rdsibdev) ibdev_to_node(rdsibdev->dev)

/* bits for i_ack_flags */
#define IB_ACK_IN_FLIGHT	0
#define IB_ACK_REQUESTED	1

/* Magic WR_ID for ACKs */
#define RDS_IB_ACK_WR_ID	(~(u64) 0)

struct rds_ib_statistics {
	uint64_t	s_ib_connect_raced;
	uint64_t	s_ib_listen_closed_stale;
	uint64_t	s_ib_evt_handler_call;
	uint64_t	s_ib_tasklet_call;
	uint64_t	s_ib_tx_cq_event;
	uint64_t	s_ib_tx_ring_full;
	uint64_t	s_ib_tx_throttle;
	uint64_t	s_ib_tx_sg_mapping_failure;
	uint64_t	s_ib_tx_stalled;
	uint64_t	s_ib_tx_credit_updates;
	uint64_t	s_ib_rx_cq_event;
	uint64_t	s_ib_rx_ring_empty;
	uint64_t	s_ib_rx_refill_from_cq;
	uint64_t	s_ib_rx_refill_from_thread;
	uint64_t	s_ib_rx_alloc_limit;
	uint64_t	s_ib_rx_credit_updates;
	uint64_t	s_ib_ack_sent;
	uint64_t	s_ib_ack_send_failure;
	uint64_t	s_ib_ack_send_delayed;
	uint64_t	s_ib_ack_send_piggybacked;
	uint64_t	s_ib_ack_received;
	uint64_t	s_ib_rdma_mr_8k_alloc;
	uint64_t	s_ib_rdma_mr_8k_free;
	uint64_t	s_ib_rdma_mr_8k_used;
	uint64_t	s_ib_rdma_mr_8k_pool_flush;
	uint64_t	s_ib_rdma_mr_8k_pool_wait;
	uint64_t	s_ib_rdma_mr_8k_pool_depleted;
	uint64_t	s_ib_rdma_mr_1m_alloc;
	uint64_t	s_ib_rdma_mr_1m_free;
	uint64_t	s_ib_rdma_mr_1m_used;
	uint64_t	s_ib_rdma_mr_1m_pool_flush;
	uint64_t	s_ib_rdma_mr_1m_pool_wait;
	uint64_t	s_ib_rdma_mr_1m_pool_depleted;
	uint64_t	s_ib_atomic_cswp;
	uint64_t	s_ib_atomic_fadd;
};

extern struct workqueue_struct *rds_ib_wq;
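/*
 * Illustrative sketch (not part of the original header): the refcount and
 * free_work fields above suggest the usual deferred-free pattern, roughly
 * as below. This hypothetical variant queues onto rds_ib_wq, declared
 * just above; see rds_ib_dev_put() in ib.c for the real implementation.
 */
static inline void rds_ib_dev_put_sketch(struct rds_ib_device *rds_ibdev)
{
	/* on the last put, tear the device down from workqueue context */
	if (atomic_dec_and_test(&rds_ibdev->refcount))
		queue_work(rds_ib_wq, &rds_ibdev->free_work);
}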
/*
 * Fake ib_dma_sync_sg_for_{cpu,device} as long as ib_verbs.h
 * doesn't define it.
 */
static inline void rds_ib_dma_sync_sg_for_cpu(struct ib_device *dev,
					      struct scatterlist *sglist,
					      unsigned int sg_dma_len,
					      int direction)
{
	struct scatterlist *sg;
	unsigned int i;

	for_each_sg(sglist, sg, sg_dma_len, i) {
		ib_dma_sync_single_for_cpu(dev,
					   ib_sg_dma_address(dev, sg),
					   ib_sg_dma_len(dev, sg),
					   direction);
	}
}
#define ib_dma_sync_sg_for_cpu	rds_ib_dma_sync_sg_for_cpu

static inline void rds_ib_dma_sync_sg_for_device(struct ib_device *dev,
						 struct scatterlist *sglist,
						 unsigned int sg_dma_len,
						 int direction)
{
	struct scatterlist *sg;
	unsigned int i;

	for_each_sg(sglist, sg, sg_dma_len, i) {
		ib_dma_sync_single_for_device(dev,
					      ib_sg_dma_address(dev, sg),
					      ib_sg_dma_len(dev, sg),
					      direction);
	}
}
#define ib_dma_sync_sg_for_device	rds_ib_dma_sync_sg_for_device
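/*
 * Illustrative usage (not part of the original header): before the CPU
 * reads received data out of a DMA-mapped scatterlist, the wrapper above
 * syncs it for CPU access. The helper name rds_example_sync_recv is
 * hypothetical, and the scatterlist is assumed to be already DMA-mapped.
 */
static inline void rds_example_sync_recv(struct ib_device *dev,
					 struct scatterlist *sg,
					 unsigned int dma_len)
{
	ib_dma_sync_sg_for_cpu(dev, sg, dma_len, DMA_FROM_DEVICE);
}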
/* ib.c */
extern struct rds_transport rds_ib_transport;
struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device);
void rds_ib_dev_put(struct rds_ib_device *rds_ibdev);
extern struct ib_client rds_ib_client;

extern unsigned int rds_ib_fmr_1m_pool_size;
extern unsigned int rds_ib_fmr_8k_pool_size;
extern unsigned int rds_ib_retry_count;

extern spinlock_t ib_nodev_conns_lock;
extern struct list_head ib_nodev_conns;

/* ib_cm.c */
int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp);
void rds_ib_conn_free(void *arg);
int rds_ib_conn_connect(struct rds_connection *conn);
void rds_ib_conn_shutdown(struct rds_connection *conn);
void rds_ib_state_change(struct sock *sk);
int rds_ib_listen_init(void);
void rds_ib_listen_stop(void);
void __rds_ib_conn_error(struct rds_connection *conn, const char *, ...);
int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
			     struct rdma_cm_event *event);
int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id);
void rds_ib_cm_connect_complete(struct rds_connection *conn,
				struct rdma_cm_event *event);

#define rds_ib_conn_error(conn, fmt...) \
	__rds_ib_conn_error(conn, KERN_WARNING "RDS/IB: " fmt)

/* ib_rdma.c */
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr);
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_destroy_nodev_conns(void);
struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_dev,
					     int npages);
void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo);
void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *);
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret);
void rds_ib_sync_mr(void *trans_private, int dir);
void rds_ib_free_mr(void *trans_private, int invalidate);
void rds_ib_flush_mrs(void);
int rds_ib_fmr_init(void);
void rds_ib_fmr_exit(void);

/* ib_recv.c */
int rds_ib_recv_init(void);
void rds_ib_recv_exit(void);
int rds_ib_recv(struct rds_connection *conn);
int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic);
void rds_ib_recv_free_caches(struct rds_ib_connection *ic);
void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp);
void rds_ib_inc_free(struct rds_incoming *inc);
int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc,
			     struct rds_ib_ack_state *state);
void rds_ib_recv_tasklet_fn(unsigned long data);
void rds_ib_recv_init_ring(struct rds_ib_connection *ic);
void rds_ib_recv_clear_ring(struct rds_ib_connection *ic);
void rds_ib_recv_init_ack(struct rds_ib_connection *ic);
void rds_ib_attempt_ack(struct rds_ib_connection *ic);
void rds_ib_ack_send_complete(struct rds_ib_connection *ic);
u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic);
void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required);

/* ib_ring.c */
void rds_ib_ring_init(struct rds_ib_work_ring *ring, u32 nr);
void rds_ib_ring_resize(struct rds_ib_work_ring *ring, u32 nr);
u32 rds_ib_ring_alloc(struct rds_ib_work_ring *ring, u32 val, u32 *pos);
void rds_ib_ring_free(struct rds_ib_work_ring *ring, u32 val);
void rds_ib_ring_unalloc(struct rds_ib_work_ring *ring, u32 val);
int rds_ib_ring_empty(struct rds_ib_work_ring *ring);
int rds_ib_ring_low(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_oldest(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest);
extern wait_queue_head_t rds_ib_ring_empty_wait;

/* ib_send.c */
void rds_ib_xmit_complete(struct rds_connection *conn);
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off);
void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc);
void rds_ib_send_init_ring(struct rds_ib_connection *ic);
void rds_ib_send_clear_ring(struct rds_ib_connection *ic);
int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op);
void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits);
void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted);
int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted,
			     u32 *adv_credits, int need_posted, int max_posted);
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op);
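/*
 * Illustrative sketch (not part of the original header): a shutdown path
 * could block on rds_ib_ring_empty_wait until both work rings drain,
 * roughly as below. The helper name rds_example_wait_rings_empty is
 * hypothetical; see rds_ib_conn_shutdown() in ib_cm.c for the real wait.
 */
static inline void rds_example_wait_rings_empty(struct rds_ib_connection *ic)
{
	wait_event(rds_ib_ring_empty_wait,
		   rds_ib_ring_empty(&ic->i_send_ring) &&
		   rds_ib_ring_empty(&ic->i_recv_ring));
}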
/* ib_stats.c */
DECLARE_PER_CPU(struct rds_ib_statistics, rds_ib_stats);
#define rds_ib_stats_inc(member) rds_stats_inc_which(rds_ib_stats, member)
unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter,
				    unsigned int avail);

/* ib_sysctl.c */
int rds_ib_sysctl_init(void);
void rds_ib_sysctl_exit(void);
extern unsigned long rds_ib_sysctl_max_send_wr;
extern unsigned long rds_ib_sysctl_max_recv_wr;
extern unsigned long rds_ib_sysctl_max_unsig_wrs;
extern unsigned long rds_ib_sysctl_max_unsig_bytes;
extern unsigned long rds_ib_sysctl_max_recv_allocation;
extern unsigned int rds_ib_sysctl_flow_control;

#endif
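/*
 * Illustrative usage (not part of the original header): data-path code
 * bumps the per-CPU counters in struct rds_ib_statistics through the
 * rds_ib_stats_inc() wrapper declared above, e.g.
 *
 *	rds_ib_stats_inc(s_ib_tx_ring_full);
 */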