Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
/*
 * Copyright (c) 2005 Intel Inc. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mad_rmpp.c 1921 2005-03-02 22:58:44Z sean.hefty $
 */

#include <linux/dma-mapping.h>

#include "mad_priv.h"
#include "mad_rmpp.h"

enum rmpp_state {
	RMPP_STATE_ACTIVE,
	RMPP_STATE_TIMEOUT,
	RMPP_STATE_COMPLETE
};

struct mad_rmpp_recv {
	struct ib_mad_agent_private *agent;
	struct list_head list;
	struct work_struct timeout_work;
	struct work_struct cleanup_work;
	wait_queue_head_t wait;
	enum rmpp_state state;
	spinlock_t lock;
	atomic_t refcount;

	struct ib_ah *ah;
	struct ib_mad_recv_wc *rmpp_wc;
	struct ib_mad_recv_buf *cur_seg_buf;
	int last_ack;
	int seg_num;
	int newwin;

	__be64 tid;
	u32 src_qp;
	u16 slid;
	u8 mgmt_class;
	u8 class_version;
	u8 method;
};

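/*
 * Drop the caller's reference, wait for any remaining references to be
 * released, then destroy the address handle and free the receive context.
 */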
static void destroy_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
{
	atomic_dec(&rmpp_recv->refcount);
	wait_event(rmpp_recv->wait, !atomic_read(&rmpp_recv->refcount));
	ib_destroy_ah(rmpp_recv->ah);
	kfree(rmpp_recv);
}

void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
{
	struct mad_rmpp_recv *rmpp_recv, *temp_rmpp_recv;
	unsigned long flags;

	spin_lock_irqsave(&agent->lock, flags);
	list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
		cancel_delayed_work(&rmpp_recv->timeout_work);
		cancel_delayed_work(&rmpp_recv->cleanup_work);
	}
	spin_unlock_irqrestore(&agent->lock, flags);

	flush_workqueue(agent->qp_info->port_priv->wq);

	list_for_each_entry_safe(rmpp_recv, temp_rmpp_recv,
				 &agent->rmpp_list, list) {
		list_del(&rmpp_recv->list);
		if (rmpp_recv->state != RMPP_STATE_COMPLETE)
			ib_free_recv_mad(rmpp_recv->rmpp_wc);
		destroy_rmpp_recv(rmpp_recv);
	}
}

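/*
 * Return the offset of the RMPP data area within the MAD, which varies
 * with the management class.
 */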
static int data_offset(u8 mgmt_class)
{
	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
		return offsetof(struct ib_sa_mad, data);
	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return offsetof(struct ib_vendor_mad, data);
	else
		return offsetof(struct ib_rmpp_mad, data);
}

static void format_ack(struct ib_rmpp_mad *ack,
		       struct ib_rmpp_mad *data,
		       struct mad_rmpp_recv *rmpp_recv)
{
	unsigned long flags;

	memcpy(&ack->mad_hdr, &data->mad_hdr,
	       data_offset(data->mad_hdr.mgmt_class));

	ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
	ack->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ACK;
	ib_set_rmpp_flags(&ack->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	spin_lock_irqsave(&rmpp_recv->lock, flags);
	rmpp_recv->last_ack = rmpp_recv->seg_num;
	ack->rmpp_hdr.seg_num = cpu_to_be32(rmpp_recv->seg_num);
	ack->rmpp_hdr.paylen_newwin = cpu_to_be32(rmpp_recv->newwin);
	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
}

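/* Allocate, format, and post an RMPP ACK for the given receive. */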
static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
		     struct ib_mad_recv_wc *recv_wc)
{
	struct ib_mad_send_buf *msg;
	struct ib_send_wr *bad_send_wr;
	int hdr_len, ret;

	hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr);
	msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
				 recv_wc->wc->pkey_index, rmpp_recv->ah, 1,
				 hdr_len, sizeof(struct ib_rmpp_mad) - hdr_len,
				 GFP_KERNEL);
	if (!msg)
		return;

	format_ack((struct ib_rmpp_mad *) msg->mad,
		   (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);
	ret = ib_post_send_mad(&rmpp_recv->agent->agent, &msg->send_wr,
			       &bad_send_wr);
	if (ret)
		ib_free_send_mad(msg);
}

static int alloc_response_msg(struct ib_mad_agent *agent,
			      struct ib_mad_recv_wc *recv_wc,
			      struct ib_mad_send_buf **msg)
{
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;
	int hdr_len;

	ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc,
				  recv_wc->recv_buf.grh, agent->port_num);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr);
	m = ib_create_send_mad(agent, recv_wc->wc->src_qp,
			       recv_wc->wc->pkey_index, ah, 1, hdr_len,
			       sizeof(struct ib_rmpp_mad) - hdr_len,
			       GFP_KERNEL);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}
	*msg = m;
	return 0;
}

static void free_msg(struct ib_mad_send_buf *msg)
{
	ib_destroy_ah(msg->send_wr.wr.ud.ah);
	ib_free_send_mad(msg);
}

static void nack_recv(struct ib_mad_agent_private *agent,
		      struct ib_mad_recv_wc *recv_wc, u8 rmpp_status)
{
	struct ib_mad_send_buf *msg;
	struct ib_rmpp_mad *rmpp_mad;
	struct ib_send_wr *bad_send_wr;
	int ret;

	ret = alloc_response_msg(&agent->agent, recv_wc, &msg);
	if (ret)
		return;

	rmpp_mad = (struct ib_rmpp_mad *) msg->mad;
	memcpy(rmpp_mad, recv_wc->recv_buf.mad,
	       data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class));

	rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
	rmpp_mad->rmpp_hdr.rmpp_version = IB_MGMT_RMPP_VERSION;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ABORT;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
	rmpp_mad->rmpp_hdr.rmpp_status = rmpp_status;
	rmpp_mad->rmpp_hdr.seg_num = 0;
	rmpp_mad->rmpp_hdr.paylen_newwin = 0;

	ret = ib_post_send_mad(&agent->agent, &msg->send_wr, &bad_send_wr);
	if (ret)
		free_msg(msg);
}

static void recv_timeout_handler(void *data)
{
	struct mad_rmpp_recv *rmpp_recv = data;
	struct ib_mad_recv_wc *rmpp_wc;
	unsigned long flags;

	spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
	if (rmpp_recv->state != RMPP_STATE_ACTIVE) {
		spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
		return;
	}
	rmpp_recv->state = RMPP_STATE_TIMEOUT;
	list_del(&rmpp_recv->list);
	spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);

	rmpp_wc = rmpp_recv->rmpp_wc;
	nack_recv(rmpp_recv->agent, rmpp_wc, IB_MGMT_RMPP_STATUS_T2L);
	destroy_rmpp_recv(rmpp_recv);
	ib_free_recv_mad(rmpp_wc);
}

static void recv_cleanup_handler(void *data)
{
	struct mad_rmpp_recv *rmpp_recv = data;
	unsigned long flags;

	spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
	list_del(&rmpp_recv->list);
	spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
	destroy_rmpp_recv(rmpp_recv);
}

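/*
 * Allocate and initialize a receive context for a new RMPP transfer.
 * The context is created with a single reference held by the caller.
 */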
static struct mad_rmpp_recv *
create_rmpp_recv(struct ib_mad_agent_private *agent,
		 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_mad_hdr *mad_hdr;

	rmpp_recv = kmalloc(sizeof *rmpp_recv, GFP_KERNEL);
	if (!rmpp_recv)
		return NULL;

	rmpp_recv->ah = ib_create_ah_from_wc(agent->agent.qp->pd,
					     mad_recv_wc->wc,
					     mad_recv_wc->recv_buf.grh,
					     agent->agent.port_num);
	if (IS_ERR(rmpp_recv->ah))
		goto error;

	rmpp_recv->agent = agent;
	init_waitqueue_head(&rmpp_recv->wait);
	INIT_WORK(&rmpp_recv->timeout_work, recv_timeout_handler, rmpp_recv);
	INIT_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler, rmpp_recv);
	spin_lock_init(&rmpp_recv->lock);
	rmpp_recv->state = RMPP_STATE_ACTIVE;
	atomic_set(&rmpp_recv->refcount, 1);

	rmpp_recv->rmpp_wc = mad_recv_wc;
	rmpp_recv->cur_seg_buf = &mad_recv_wc->recv_buf;
	rmpp_recv->newwin = 1;
	rmpp_recv->seg_num = 1;
	rmpp_recv->last_ack = 0;

	mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;
	rmpp_recv->tid = mad_hdr->tid;
	rmpp_recv->src_qp = mad_recv_wc->wc->src_qp;
	rmpp_recv->slid = mad_recv_wc->wc->slid;
	rmpp_recv->mgmt_class = mad_hdr->mgmt_class;
	rmpp_recv->class_version = mad_hdr->class_version;
	rmpp_recv->method = mad_hdr->method;
	return rmpp_recv;

error:	kfree(rmpp_recv);
	return NULL;
}

static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
{
	if (atomic_dec_and_test(&rmpp_recv->refcount))
		wake_up(&rmpp_recv->wait);
}

static struct mad_rmpp_recv *
find_rmpp_recv(struct ib_mad_agent_private *agent,
	       struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_mad_hdr *mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;

	list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
		if (rmpp_recv->tid == mad_hdr->tid &&
		    rmpp_recv->src_qp == mad_recv_wc->wc->src_qp &&
		    rmpp_recv->slid == mad_recv_wc->wc->slid &&
		    rmpp_recv->mgmt_class == mad_hdr->mgmt_class &&
		    rmpp_recv->class_version == mad_hdr->class_version &&
		    rmpp_recv->method == mad_hdr->method)
			return rmpp_recv;
	}
	return NULL;
}

static struct mad_rmpp_recv *
acquire_rmpp_recv(struct ib_mad_agent_private *agent,
		  struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	unsigned long flags;

	spin_lock_irqsave(&agent->lock, flags);
	rmpp_recv = find_rmpp_recv(agent, mad_recv_wc);
	if (rmpp_recv)
		atomic_inc(&rmpp_recv->refcount);
	spin_unlock_irqrestore(&agent->lock, flags);
	return rmpp_recv;
}

static struct mad_rmpp_recv *
insert_rmpp_recv(struct ib_mad_agent_private *agent,
		 struct mad_rmpp_recv *rmpp_recv)
{
	struct mad_rmpp_recv *cur_rmpp_recv;

	cur_rmpp_recv = find_rmpp_recv(agent, rmpp_recv->rmpp_wc);
	if (!cur_rmpp_recv)
		list_add_tail(&rmpp_recv->list, &agent->rmpp_list);

	return cur_rmpp_recv;
}

static inline int get_last_flag(struct ib_mad_recv_buf *seg)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *) seg->mad;
	return ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_LAST;
}

static inline int get_seg_num(struct ib_mad_recv_buf *seg)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *) seg->mad;
	return be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
}

static inline struct ib_mad_recv_buf * get_next_seg(struct list_head *rmpp_list,
						    struct ib_mad_recv_buf *seg)
{
	if (seg->list.next == rmpp_list)
		return NULL;

	return container_of(seg->list.next, struct ib_mad_recv_buf, list);
}

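/* Receive window size: one eighth of the receive queue depth, minimum 1. */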
static inline int window_size(struct ib_mad_agent_private *agent)
{
	return max(agent->qp_info->recv_queue.max_active >> 3, 1);
}

static struct ib_mad_recv_buf * find_seg_location(struct list_head *rmpp_list,
						  int seg_num)
{
	struct ib_mad_recv_buf *seg_buf;
	int cur_seg_num;

	list_for_each_entry_reverse(seg_buf, rmpp_list, list) {
		cur_seg_num = get_seg_num(seg_buf);
		if (seg_num > cur_seg_num)
			return seg_buf;
		if (seg_num == cur_seg_num)
			break;
	}
	return NULL;
}

static void update_seg_num(struct mad_rmpp_recv *rmpp_recv,
			   struct ib_mad_recv_buf *new_buf)
{
	struct list_head *rmpp_list = &rmpp_recv->rmpp_wc->rmpp_list;

	while (new_buf && (get_seg_num(new_buf) == rmpp_recv->seg_num + 1)) {
		rmpp_recv->cur_seg_buf = new_buf;
		rmpp_recv->seg_num++;
		new_buf = get_next_seg(rmpp_list, new_buf);
	}
}

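/*
 * Compute the length of the reassembled MAD from the number of segments
 * received and the payload length reported in the final segment.
 */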
static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv)
{
	struct ib_rmpp_mad *rmpp_mad;
	int hdr_size, data_size, pad;

	rmpp_mad = (struct ib_rmpp_mad *)rmpp_recv->cur_seg_buf->mad;

	hdr_size = data_offset(rmpp_mad->mad_hdr.mgmt_class);
	data_size = sizeof(struct ib_rmpp_mad) - hdr_size;
	pad = data_size - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
	if (pad > data_size || pad < 0)
		pad = 0;

	return hdr_size + rmpp_recv->seg_num * data_size - pad;
}

static struct ib_mad_recv_wc * complete_rmpp(struct mad_rmpp_recv *rmpp_recv)
{
	struct ib_mad_recv_wc *rmpp_wc;

	ack_recv(rmpp_recv, rmpp_recv->rmpp_wc);
	if (rmpp_recv->seg_num > 1)
		cancel_delayed_work(&rmpp_recv->timeout_work);

	rmpp_wc = rmpp_recv->rmpp_wc;
	rmpp_wc->mad_len = get_mad_len(rmpp_recv);
	/* 10 seconds until we can find the packet lifetime */
	queue_delayed_work(rmpp_recv->agent->qp_info->port_priv->wq,
			   &rmpp_recv->cleanup_work, msecs_to_jiffies(10000));
	return rmpp_wc;
}

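/*
 * Copy a segmented RMPP receive into a single caller-supplied buffer.
 * The first segment contributes the entire MAD; later segments contribute
 * only their data area, and the last segment only the bytes remaining.
 */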
void ib_coalesce_recv_mad(struct ib_mad_recv_wc *mad_recv_wc, void *buf)
{
	struct ib_mad_recv_buf *seg_buf;
	struct ib_rmpp_mad *rmpp_mad;
	void *data;
	int size, len, offset;
	u8 flags;

	len = mad_recv_wc->mad_len;
	if (len <= sizeof(struct ib_mad)) {
		memcpy(buf, mad_recv_wc->recv_buf.mad, len);
		return;
	}

	offset = data_offset(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class);

	list_for_each_entry(seg_buf, &mad_recv_wc->rmpp_list, list) {
		rmpp_mad = (struct ib_rmpp_mad *)seg_buf->mad;
		flags = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr);

		if (flags & IB_MGMT_RMPP_FLAG_FIRST) {
			data = rmpp_mad;
			size = sizeof(*rmpp_mad);
		} else {
			data = (void *) rmpp_mad + offset;
			if (flags & IB_MGMT_RMPP_FLAG_LAST)
				size = len;
			else
				size = sizeof(*rmpp_mad) - offset;
		}

		memcpy(buf, data, size);
		len -= size;
		buf += size;
	}
}
EXPORT_SYMBOL(ib_coalesce_recv_mad);

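/*
 * Handle a DATA segment for a transfer already in progress: insert the
 * segment in order, advance the expected segment number, ACK when the
 * receive window is exhausted, and complete the receive on the last segment.
 */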
static struct ib_mad_recv_wc *
continue_rmpp(struct ib_mad_agent_private *agent,
	      struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_mad_recv_buf *prev_buf;
	struct ib_mad_recv_wc *done_wc;
	int seg_num;
	unsigned long flags;

	rmpp_recv = acquire_rmpp_recv(agent, mad_recv_wc);
	if (!rmpp_recv)
		goto drop1;

	seg_num = get_seg_num(&mad_recv_wc->recv_buf);

	spin_lock_irqsave(&rmpp_recv->lock, flags);
	if ((rmpp_recv->state == RMPP_STATE_TIMEOUT) ||
	    (seg_num > rmpp_recv->newwin))
		goto drop3;

	if ((seg_num <= rmpp_recv->last_ack) ||
	    (rmpp_recv->state == RMPP_STATE_COMPLETE)) {
		spin_unlock_irqrestore(&rmpp_recv->lock, flags);
		ack_recv(rmpp_recv, mad_recv_wc);
		goto drop2;
	}

	prev_buf = find_seg_location(&rmpp_recv->rmpp_wc->rmpp_list, seg_num);
	if (!prev_buf)
		goto drop3;

	done_wc = NULL;
	list_add(&mad_recv_wc->recv_buf.list, &prev_buf->list);
	if (rmpp_recv->cur_seg_buf == prev_buf) {
		update_seg_num(rmpp_recv, &mad_recv_wc->recv_buf);
		if (get_last_flag(rmpp_recv->cur_seg_buf)) {
			rmpp_recv->state = RMPP_STATE_COMPLETE;
			spin_unlock_irqrestore(&rmpp_recv->lock, flags);
			done_wc = complete_rmpp(rmpp_recv);
			goto out;
		} else if (rmpp_recv->seg_num == rmpp_recv->newwin) {
			rmpp_recv->newwin += window_size(agent);
			spin_unlock_irqrestore(&rmpp_recv->lock, flags);
			ack_recv(rmpp_recv, mad_recv_wc);
			goto out;
		}
	}
	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
out:
	deref_rmpp_recv(rmpp_recv);
	return done_wc;

drop3:	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
drop2:	deref_rmpp_recv(rmpp_recv);
drop1:	ib_free_recv_mad(mad_recv_wc);
	return NULL;
}

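/*
 * Handle the first DATA segment of a transfer.  If a context for the same
 * sender and TID already exists, this is a duplicate first MAD and is
 * handed to continue_rmpp() instead.
 */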
static struct ib_mad_recv_wc *
start_rmpp(struct ib_mad_agent_private *agent,
	   struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	unsigned long flags;

	rmpp_recv = create_rmpp_recv(agent, mad_recv_wc);
	if (!rmpp_recv) {
		ib_free_recv_mad(mad_recv_wc);
		return NULL;
	}

	spin_lock_irqsave(&agent->lock, flags);
	if (insert_rmpp_recv(agent, rmpp_recv)) {
		spin_unlock_irqrestore(&agent->lock, flags);
		/* duplicate first MAD */
		destroy_rmpp_recv(rmpp_recv);
		return continue_rmpp(agent, mad_recv_wc);
	}
	atomic_inc(&rmpp_recv->refcount);

	if (get_last_flag(&mad_recv_wc->recv_buf)) {
		rmpp_recv->state = RMPP_STATE_COMPLETE;
		spin_unlock_irqrestore(&agent->lock, flags);
		complete_rmpp(rmpp_recv);
	} else {
		spin_unlock_irqrestore(&agent->lock, flags);
		/* 40 seconds until we can find the packet lifetimes */
		queue_delayed_work(agent->qp_info->port_priv->wq,
				   &rmpp_recv->timeout_work,
				   msecs_to_jiffies(40000));
		rmpp_recv->newwin += window_size(agent);
		ack_recv(rmpp_recv, mad_recv_wc);
		mad_recv_wc = NULL;
	}
	deref_rmpp_recv(rmpp_recv);
	return mad_recv_wc;
}

static inline u64 get_seg_addr(struct ib_mad_send_wr_private *mad_send_wr)
{
	return mad_send_wr->sg_list[0].addr + mad_send_wr->data_offset +
	       (sizeof(struct ib_rmpp_mad) - mad_send_wr->data_offset) *
	       (mad_send_wr->seg_num - 1);
}

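/*
 * Format and post the next outgoing DATA segment.  The first segment
 * advertises the total payload length and the last segment its own payload
 * length; intermediate segments use a second SGE pointing at the segment data.
 */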
static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_mad *rmpp_mad;
	int timeout;

	rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
	rmpp_mad->rmpp_hdr.seg_num = cpu_to_be32(mad_send_wr->seg_num);

	if (mad_send_wr->seg_num == 1) {
		rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_FIRST;
		rmpp_mad->rmpp_hdr.paylen_newwin =
			cpu_to_be32(mad_send_wr->total_seg *
				    (sizeof(struct ib_rmpp_mad) -
				     offsetof(struct ib_rmpp_mad, data)) -
				    mad_send_wr->pad);
		mad_send_wr->sg_list[0].length = sizeof(struct ib_rmpp_mad);
	} else {
		mad_send_wr->send_wr.num_sge = 2;
		mad_send_wr->sg_list[0].length = mad_send_wr->data_offset;
		mad_send_wr->sg_list[1].addr = get_seg_addr(mad_send_wr);
		mad_send_wr->sg_list[1].length = sizeof(struct ib_rmpp_mad) -
						 mad_send_wr->data_offset;
		mad_send_wr->sg_list[1].lkey = mad_send_wr->sg_list[0].lkey;
		rmpp_mad->rmpp_hdr.paylen_newwin = 0;
	}

	if (mad_send_wr->seg_num == mad_send_wr->total_seg) {
		rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_LAST;
		rmpp_mad->rmpp_hdr.paylen_newwin =
			cpu_to_be32(sizeof(struct ib_rmpp_mad) -
				    offsetof(struct ib_rmpp_mad, data) -
				    mad_send_wr->pad);
	}

	/* 2 seconds for an ACK until we can find the packet lifetime */
	timeout = mad_send_wr->send_wr.wr.ud.timeout_ms;
	if (!timeout || timeout > 2000)
		mad_send_wr->timeout = msecs_to_jiffies(2000);
	mad_send_wr->seg_num++;
	return ib_send_mad(mad_send_wr);
}

static void abort_send(struct ib_mad_agent_private *agent, __be64 tid,
		       u8 rmpp_status)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc wc;
	unsigned long flags;

	spin_lock_irqsave(&agent->lock, flags);
	mad_send_wr = ib_find_send_mad(agent, tid);
	if (!mad_send_wr)
		goto out;	/* Unmatched send */

	if ((mad_send_wr->last_ack == mad_send_wr->total_seg) ||
	    (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
		goto out;	/* Send is already done */

	ib_mark_mad_done(mad_send_wr);
	spin_unlock_irqrestore(&agent->lock, flags);

	wc.status = IB_WC_REM_ABORT_ERR;
	wc.vendor_err = rmpp_status;
	wc.wr_id = mad_send_wr->wr_id;
	ib_mad_complete_send_wr(mad_send_wr, &wc);
	return;
out:
	spin_unlock_irqrestore(&agent->lock, flags);
}

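/*
 * Process an incoming ACK: validate the segment number against the window,
 * update last_ack and newwin for the matching send, complete the send once
 * all segments are acknowledged and no response is expected, or transmit
 * the next segment if the window has opened.
 */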
static void process_rmpp_ack(struct ib_mad_agent_private *agent,
			     struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_rmpp_mad *rmpp_mad;
	unsigned long flags;
	int seg_num, newwin, ret;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
	if (rmpp_mad->rmpp_hdr.rmpp_status) {
		abort_send(agent, rmpp_mad->mad_hdr.tid,
			   IB_MGMT_RMPP_STATUS_BAD_STATUS);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		return;
	}

	seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
	newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
	if (newwin < seg_num) {
		abort_send(agent, rmpp_mad->mad_hdr.tid,
			   IB_MGMT_RMPP_STATUS_W2S);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
		return;
	}

	spin_lock_irqsave(&agent->lock, flags);
	mad_send_wr = ib_find_send_mad(agent, rmpp_mad->mad_hdr.tid);
	if (!mad_send_wr)
		goto out;	/* Unmatched ACK */

	if ((mad_send_wr->last_ack == mad_send_wr->total_seg) ||
	    (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
		goto out;	/* Send is already done */

	if (seg_num > mad_send_wr->total_seg || seg_num > mad_send_wr->newwin) {
		spin_unlock_irqrestore(&agent->lock, flags);
		abort_send(agent, rmpp_mad->mad_hdr.tid,
			   IB_MGMT_RMPP_STATUS_S2B);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
		return;
	}

	if (newwin < mad_send_wr->newwin || seg_num < mad_send_wr->last_ack)
		goto out;	/* Old ACK */

	if (seg_num > mad_send_wr->last_ack) {
		mad_send_wr->last_ack = seg_num;
		mad_send_wr->retries = mad_send_wr->send_wr.wr.ud.retries;
	}
	mad_send_wr->newwin = newwin;
	if (mad_send_wr->last_ack == mad_send_wr->total_seg) {
		/* If no response is expected, the ACK completes the send */
		if (!mad_send_wr->send_wr.wr.ud.timeout_ms) {
			struct ib_mad_send_wc wc;

			ib_mark_mad_done(mad_send_wr);
			spin_unlock_irqrestore(&agent->lock, flags);

			wc.status = IB_WC_SUCCESS;
			wc.vendor_err = 0;
			wc.wr_id = mad_send_wr->wr_id;
			ib_mad_complete_send_wr(mad_send_wr, &wc);
			return;
		}
		if (mad_send_wr->refcount == 1)
			ib_reset_mad_timeout(mad_send_wr, mad_send_wr->
					     send_wr.wr.ud.timeout_ms);
	} else if (mad_send_wr->refcount == 1 &&
		   mad_send_wr->seg_num < mad_send_wr->newwin &&
		   mad_send_wr->seg_num <= mad_send_wr->total_seg) {
		/* Send failure will just result in a timeout/retry */
		ret = send_next_seg(mad_send_wr);
		if (ret)
			goto out;

		mad_send_wr->refcount++;
		list_del(&mad_send_wr->agent_list);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->send_list);
	}
out:
	spin_unlock_irqrestore(&agent->lock, flags);
}

static struct ib_mad_recv_wc *
process_rmpp_data(struct ib_mad_agent_private *agent,
		  struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_hdr *rmpp_hdr;
	u8 rmpp_status;

	rmpp_hdr = &((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr;

	if (rmpp_hdr->rmpp_status) {
		rmpp_status = IB_MGMT_RMPP_STATUS_BAD_STATUS;
		goto bad;
	}

	if (rmpp_hdr->seg_num == __constant_htonl(1)) {
		if (!(ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST)) {
			rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
			goto bad;
		}
		return start_rmpp(agent, mad_recv_wc);
	} else {
		if (ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST) {
			rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
			goto bad;
		}
		return continue_rmpp(agent, mad_recv_wc);
	}
bad:
	nack_recv(agent, mad_recv_wc, rmpp_status);
	ib_free_recv_mad(mad_recv_wc);
	return NULL;
}

static void process_rmpp_stop(struct ib_mad_agent_private *agent,
			      struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;

	if (rmpp_mad->rmpp_hdr.rmpp_status != IB_MGMT_RMPP_STATUS_RESX) {
		abort_send(agent, rmpp_mad->mad_hdr.tid,
			   IB_MGMT_RMPP_STATUS_BAD_STATUS);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
	} else
		abort_send(agent, rmpp_mad->mad_hdr.tid,
			   rmpp_mad->rmpp_hdr.rmpp_status);
}

static void process_rmpp_abort(struct ib_mad_agent_private *agent,
			       struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;

	if (rmpp_mad->rmpp_hdr.rmpp_status < IB_MGMT_RMPP_STATUS_ABORT_MIN ||
	    rmpp_mad->rmpp_hdr.rmpp_status > IB_MGMT_RMPP_STATUS_ABORT_MAX) {
		abort_send(agent, rmpp_mad->mad_hdr.tid,
			   IB_MGMT_RMPP_STATUS_BAD_STATUS);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
	} else
		abort_send(agent, rmpp_mad->mad_hdr.tid,
			   rmpp_mad->rmpp_hdr.rmpp_status);
}

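/*
 * Entry point for received MADs.  MADs without the RMPP active flag are
 * returned to the caller unchanged; RMPP DATA, ACK, STOP, and ABORT packets
 * are dispatched to their handlers.  A work completion is returned only
 * when a complete MAD has been reassembled.
 */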
struct ib_mad_recv_wc *
ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
			struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
	if (!(rmpp_mad->rmpp_hdr.rmpp_rtime_flags & IB_MGMT_RMPP_FLAG_ACTIVE))
		return mad_recv_wc;

	if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION) {
		abort_send(agent, rmpp_mad->mad_hdr.tid,
			   IB_MGMT_RMPP_STATUS_UNV);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
		goto out;
	}

	switch (rmpp_mad->rmpp_hdr.rmpp_type) {
	case IB_MGMT_RMPP_TYPE_DATA:
		return process_rmpp_data(agent, mad_recv_wc);
	case IB_MGMT_RMPP_TYPE_ACK:
		process_rmpp_ack(agent, mad_recv_wc);
		break;
	case IB_MGMT_RMPP_TYPE_STOP:
		process_rmpp_stop(agent, mad_recv_wc);
		break;
	case IB_MGMT_RMPP_TYPE_ABORT:
		process_rmpp_abort(agent, mad_recv_wc);
		break;
	default:
		abort_send(agent, rmpp_mad->mad_hdr.tid,
			   IB_MGMT_RMPP_STATUS_BADT);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
		break;
	}
out:
	ib_free_recv_mad(mad_recv_wc);
	return NULL;
}

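/*
 * Entry point for sends.  Non-RMPP sends are left to the caller; for RMPP
 * DATA MADs the segmentation state is initialized and the first segment
 * is posted.
 */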
int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_mad *rmpp_mad;
	int i, total_len, ret;

	rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
	      IB_MGMT_RMPP_FLAG_ACTIVE))
		return IB_RMPP_RESULT_UNHANDLED;

	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA)
		return IB_RMPP_RESULT_INTERNAL;

	if (mad_send_wr->send_wr.num_sge > 1)
		return -EINVAL;		/* TODO: support num_sge > 1 */

	mad_send_wr->seg_num = 1;
	mad_send_wr->newwin = 1;
	mad_send_wr->data_offset = data_offset(rmpp_mad->mad_hdr.mgmt_class);

	total_len = 0;
	for (i = 0; i < mad_send_wr->send_wr.num_sge; i++)
		total_len += mad_send_wr->send_wr.sg_list[i].length;

	mad_send_wr->total_seg = (total_len - mad_send_wr->data_offset) /
			(sizeof(struct ib_rmpp_mad) - mad_send_wr->data_offset);
	mad_send_wr->pad = total_len - offsetof(struct ib_rmpp_mad, data) -
			   be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);

	/* We need to wait for the final ACK even if there isn't a response */
	mad_send_wr->refcount += (mad_send_wr->timeout == 0);
	ret = send_next_seg(mad_send_wr);
	if (!ret)
		return IB_RMPP_RESULT_CONSUMED;
	return ret;
}

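/*
 * Handle a send completion for an RMPP MAD: free internally generated
 * ACK/STOP/ABORT messages, and for DATA segments either post the next
 * segment or wait for further ACKs.
 */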
int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
			    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_rmpp_mad *rmpp_mad;
	struct ib_mad_send_buf *msg;
	int ret;

	rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
	      IB_MGMT_RMPP_FLAG_ACTIVE))
		return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */

	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) {
		msg = (struct ib_mad_send_buf *) (unsigned long)
		      mad_send_wc->wr_id;
		if (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_ACK)
			ib_free_send_mad(msg);
		else
			free_msg(msg);
		return IB_RMPP_RESULT_INTERNAL; /* ACK, STOP, or ABORT */
	}

	if (mad_send_wc->status != IB_WC_SUCCESS ||
	    mad_send_wr->status != IB_WC_SUCCESS)
		return IB_RMPP_RESULT_PROCESSED; /* Canceled or send error */

	if (!mad_send_wr->timeout)
		return IB_RMPP_RESULT_PROCESSED; /* Response received */

	if (mad_send_wr->last_ack == mad_send_wr->total_seg) {
		mad_send_wr->timeout =
			msecs_to_jiffies(mad_send_wr->send_wr.wr.ud.timeout_ms);
		return IB_RMPP_RESULT_PROCESSED; /* Send done */
	}

	if (mad_send_wr->seg_num > mad_send_wr->newwin ||
	    mad_send_wr->seg_num > mad_send_wr->total_seg)
		return IB_RMPP_RESULT_PROCESSED; /* Wait for ACK */

	ret = send_next_seg(mad_send_wr);
	if (ret) {
		mad_send_wc->status = IB_WC_GENERAL_ERR;
		return IB_RMPP_RESULT_PROCESSED;
	}
	return IB_RMPP_RESULT_CONSUMED;
}

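/* Retry an RMPP send by resuming from the first unacknowledged segment. */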
int ib_retry_rmpp(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
	      IB_MGMT_RMPP_FLAG_ACTIVE))
		return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */

	if (mad_send_wr->last_ack == mad_send_wr->total_seg)
		return IB_RMPP_RESULT_PROCESSED;

	mad_send_wr->seg_num = mad_send_wr->last_ack + 1;
	ret = send_next_seg(mad_send_wr);
	if (ret)
		return IB_RMPP_RESULT_PROCESSED;

	return IB_RMPP_RESULT_CONSUMED;
}