/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2006 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/idr.h>
#include <linux/workqueue.h>
#include <uapi/linux/if_ether.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_cache.h>
#include <rdma/rdma_netlink.h>
#include <net/netlink.h>
#include <uapi/rdma/ib_user_sa.h>
#include <rdma/ib_marshall.h>
#include <rdma/ib_addr.h>
#include "sa.h"
#include "core_priv.h"

#define IB_SA_LOCAL_SVC_TIMEOUT_MIN		100
#define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT		2000
#define IB_SA_LOCAL_SVC_TIMEOUT_MAX		200000
#define IB_SA_CPI_MAX_RETRY_CNT			3
#define IB_SA_CPI_RETRY_WAIT			1000	/* msecs */
static int sa_local_svc_timeout_ms = IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT;

struct ib_sa_sm_ah {
	struct ib_ah *ah;
	struct kref ref;
	u16 pkey_index;
	u8 src_path_mask;
};

enum rdma_class_port_info_type {
	RDMA_CLASS_PORT_INFO_IB,
	RDMA_CLASS_PORT_INFO_OPA
};

struct rdma_class_port_info {
	enum rdma_class_port_info_type type;
	union {
		struct ib_class_port_info ib;
		struct opa_class_port_info opa;
	};
};

struct ib_sa_classport_cache {
	bool valid;
	int retry_cnt;
	struct rdma_class_port_info data;
};

struct ib_sa_port {
	struct ib_mad_agent *agent;
	struct ib_sa_sm_ah *sm_ah;
	struct work_struct update_task;
	struct ib_sa_classport_cache classport_info;
	struct delayed_work ib_cpi_work;
	spinlock_t classport_lock; /* protects class port info set */
	spinlock_t ah_lock;
	u8 port_num;
};

struct ib_sa_device {
	int start_port, end_port;
	struct ib_event_handler event_handler;
	struct ib_sa_port port[0];
};

struct ib_sa_query {
	void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
	void (*release)(struct ib_sa_query *);
	struct ib_sa_client *client;
	struct ib_sa_port *port;
	struct ib_mad_send_buf *mad_buf;
	struct ib_sa_sm_ah *sm_ah;
	int id;
	u32 flags;
	struct list_head list; /* Local svc request list */
	u32 seq; /* Local svc request sequence number */
	unsigned long timeout; /* Local svc timeout */
	u8 path_use; /* How will the pathrecord be used */
};

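/*
 * ib_sa_query.flags bits: ENABLE_LOCAL_SERVICE routes the query through
 * the netlink resolver before falling back to a MAD, CANCEL marks a
 * queued netlink request for the timeout handler, and QUERY_OPA selects
 * OPA-format MADs instead of IB.
 */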
#define IB_SA_ENABLE_LOCAL_SERVICE	0x00000001
#define IB_SA_CANCEL			0x00000002
#define IB_SA_QUERY_OPA			0x00000004

struct ib_sa_service_query {
	void (*callback)(int, struct ib_sa_service_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_path_query {
	void (*callback)(int, struct sa_path_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
	struct sa_path_rec *conv_pr;
};

struct ib_sa_guidinfo_query {
	void (*callback)(int, struct ib_sa_guidinfo_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_classport_info_query {
	void (*callback)(void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_mcmember_query {
	void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

static LIST_HEAD(ib_nl_request_list);
static DEFINE_SPINLOCK(ib_nl_request_lock);
static atomic_t ib_nl_sa_request_seq;
static struct workqueue_struct *ib_nl_wq;
static struct delayed_work ib_nl_timed_work;
static const struct nla_policy ib_nl_policy[LS_NLA_TYPE_MAX] = {
	[LS_NLA_TYPE_PATH_RECORD]	= {.type = NLA_BINARY,
		.len = sizeof(struct ib_path_rec_data)},
	[LS_NLA_TYPE_TIMEOUT]		= {.type = NLA_U32},
	[LS_NLA_TYPE_SERVICE_ID]	= {.type = NLA_U64},
	[LS_NLA_TYPE_DGID]		= {.type = NLA_BINARY,
		.len = sizeof(struct rdma_nla_ls_gid)},
	[LS_NLA_TYPE_SGID]		= {.type = NLA_BINARY,
		.len = sizeof(struct rdma_nla_ls_gid)},
	[LS_NLA_TYPE_TCLASS]		= {.type = NLA_U8},
	[LS_NLA_TYPE_PKEY]		= {.type = NLA_U16},
	[LS_NLA_TYPE_QOS_CLASS]		= {.type = NLA_U16},
};

static void ib_sa_add_one(struct ib_device *device);
static void ib_sa_remove_one(struct ib_device *device, void *client_data);

static struct ib_client sa_client = {
	.name	= "sa",
	.add	= ib_sa_add_one,
	.remove	= ib_sa_remove_one
};

static DEFINE_SPINLOCK(idr_lock);
static DEFINE_IDR(query_idr);

static DEFINE_SPINLOCK(tid_lock);
static u32 tid;
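/* Low 32 bits of the 64-bit MAD transaction ID; the agent's hi_tid
 * supplies the upper half (see init_mad()). */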

#define PATH_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct sa_path_rec, field), \
	.struct_size_bytes   = sizeof((struct sa_path_rec *)0)->field, \
	.field_name          = "sa_path_rec:" #field

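/*
 * Pack/unpack layout of an IB PathRecord attribute: offset_words and
 * offset_bits locate each field within the big-endian wire image.
 */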
static const struct ib_field path_rec_table[] = {
	{ PATH_REC_FIELD(service_id),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 64 },
	{ PATH_REC_FIELD(dgid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ PATH_REC_FIELD(sgid),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ PATH_REC_FIELD(ib.dlid),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ PATH_REC_FIELD(ib.slid),
	  .offset_words = 10,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ PATH_REC_FIELD(ib.raw_traffic),
	  .offset_words = 11,
	  .offset_bits  = 0,
	  .size_bits    = 1 },
	{ RESERVED,
	  .offset_words = 11,
	  .offset_bits  = 1,
	  .size_bits    = 3 },
	{ PATH_REC_FIELD(flow_label),
	  .offset_words = 11,
	  .offset_bits  = 4,
	  .size_bits    = 20 },
	{ PATH_REC_FIELD(hop_limit),
	  .offset_words = 11,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ PATH_REC_FIELD(traffic_class),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 8 },
	{ PATH_REC_FIELD(reversible),
	  .offset_words = 12,
	  .offset_bits  = 8,
	  .size_bits    = 1 },
	{ PATH_REC_FIELD(numb_path),
	  .offset_words = 12,
	  .offset_bits  = 9,
	  .size_bits    = 7 },
	{ PATH_REC_FIELD(pkey),
	  .offset_words = 12,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ PATH_REC_FIELD(qos_class),
	  .offset_words = 13,
	  .offset_bits  = 0,
	  .size_bits    = 12 },
	{ PATH_REC_FIELD(sl),
	  .offset_words = 13,
	  .offset_bits  = 12,
	  .size_bits    = 4 },
	{ PATH_REC_FIELD(mtu_selector),
	  .offset_words = 13,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ PATH_REC_FIELD(mtu),
	  .offset_words = 13,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ PATH_REC_FIELD(rate_selector),
	  .offset_words = 13,
	  .offset_bits  = 24,
	  .size_bits    = 2 },
	{ PATH_REC_FIELD(rate),
	  .offset_words = 13,
	  .offset_bits  = 26,
	  .size_bits    = 6 },
	{ PATH_REC_FIELD(packet_life_time_selector),
	  .offset_words = 14,
	  .offset_bits  = 0,
	  .size_bits    = 2 },
	{ PATH_REC_FIELD(packet_life_time),
	  .offset_words = 14,
	  .offset_bits  = 2,
	  .size_bits    = 6 },
	{ PATH_REC_FIELD(preference),
	  .offset_words = 14,
	  .offset_bits  = 8,
	  .size_bits    = 8 },
	{ RESERVED,
	  .offset_words = 14,
	  .offset_bits  = 16,
	  .size_bits    = 48 },
};

#define OPA_PATH_REC_FIELD(field) \
	.struct_offset_bytes = \
		offsetof(struct sa_path_rec, field), \
	.struct_size_bytes = \
		sizeof((struct sa_path_rec *)0)->field, \
	.field_name = "sa_path_rec:" #field

static const struct ib_field opa_path_rec_table[] = {
	{ OPA_PATH_REC_FIELD(service_id),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 64 },
	{ OPA_PATH_REC_FIELD(dgid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ OPA_PATH_REC_FIELD(sgid),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ OPA_PATH_REC_FIELD(opa.dlid),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_PATH_REC_FIELD(opa.slid),
	  .offset_words = 11,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_PATH_REC_FIELD(opa.raw_traffic),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 1 },
	{ RESERVED,
	  .offset_words = 12,
	  .offset_bits  = 1,
	  .size_bits    = 3 },
	{ OPA_PATH_REC_FIELD(flow_label),
	  .offset_words = 12,
	  .offset_bits  = 4,
	  .size_bits    = 20 },
	{ OPA_PATH_REC_FIELD(hop_limit),
	  .offset_words = 12,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ OPA_PATH_REC_FIELD(traffic_class),
	  .offset_words = 13,
	  .offset_bits  = 0,
	  .size_bits    = 8 },
	{ OPA_PATH_REC_FIELD(reversible),
	  .offset_words = 13,
	  .offset_bits  = 8,
	  .size_bits    = 1 },
	{ OPA_PATH_REC_FIELD(numb_path),
	  .offset_words = 13,
	  .offset_bits  = 9,
	  .size_bits    = 7 },
	{ OPA_PATH_REC_FIELD(pkey),
	  .offset_words = 13,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ OPA_PATH_REC_FIELD(opa.l2_8B),
	  .offset_words = 14,
	  .offset_bits  = 0,
	  .size_bits    = 1 },
	{ OPA_PATH_REC_FIELD(opa.l2_10B),
	  .offset_words = 14,
	  .offset_bits  = 1,
	  .size_bits    = 1 },
	{ OPA_PATH_REC_FIELD(opa.l2_9B),
	  .offset_words = 14,
	  .offset_bits  = 2,
	  .size_bits    = 1 },
	{ OPA_PATH_REC_FIELD(opa.l2_16B),
	  .offset_words = 14,
	  .offset_bits  = 3,
	  .size_bits    = 1 },
	{ RESERVED,
	  .offset_words = 14,
	  .offset_bits  = 4,
	  .size_bits    = 2 },
	{ OPA_PATH_REC_FIELD(opa.qos_type),
	  .offset_words = 14,
	  .offset_bits  = 6,
	  .size_bits    = 2 },
	{ OPA_PATH_REC_FIELD(opa.qos_priority),
	  .offset_words = 14,
	  .offset_bits  = 8,
	  .size_bits    = 8 },
	{ RESERVED,
	  .offset_words = 14,
	  .offset_bits  = 16,
	  .size_bits    = 3 },
	{ OPA_PATH_REC_FIELD(sl),
	  .offset_words = 14,
	  .offset_bits  = 19,
	  .size_bits    = 5 },
	{ RESERVED,
	  .offset_words = 14,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ OPA_PATH_REC_FIELD(mtu_selector),
	  .offset_words = 15,
	  .offset_bits  = 0,
	  .size_bits    = 2 },
	{ OPA_PATH_REC_FIELD(mtu),
	  .offset_words = 15,
	  .offset_bits  = 2,
	  .size_bits    = 6 },
	{ OPA_PATH_REC_FIELD(rate_selector),
	  .offset_words = 15,
	  .offset_bits  = 8,
	  .size_bits    = 2 },
	{ OPA_PATH_REC_FIELD(rate),
	  .offset_words = 15,
	  .offset_bits  = 10,
	  .size_bits    = 6 },
	{ OPA_PATH_REC_FIELD(packet_life_time_selector),
	  .offset_words = 15,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ OPA_PATH_REC_FIELD(packet_life_time),
	  .offset_words = 15,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ OPA_PATH_REC_FIELD(preference),
	  .offset_words = 15,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
};

#define MCMEMBER_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field), \
	.struct_size_bytes   = sizeof((struct ib_sa_mcmember_rec *) 0)->field, \
	.field_name          = "sa_mcmember_rec:" #field

static const struct ib_field mcmember_rec_table[] = {
	{ MCMEMBER_REC_FIELD(mgid),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ MCMEMBER_REC_FIELD(port_gid),
	  .offset_words = 4,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ MCMEMBER_REC_FIELD(qkey),
	  .offset_words = 8,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ MCMEMBER_REC_FIELD(mlid),
	  .offset_words = 9,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ MCMEMBER_REC_FIELD(mtu_selector),
	  .offset_words = 9,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ MCMEMBER_REC_FIELD(mtu),
	  .offset_words = 9,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ MCMEMBER_REC_FIELD(traffic_class),
	  .offset_words = 9,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ MCMEMBER_REC_FIELD(pkey),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ MCMEMBER_REC_FIELD(rate_selector),
	  .offset_words = 10,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ MCMEMBER_REC_FIELD(rate),
	  .offset_words = 10,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ MCMEMBER_REC_FIELD(packet_life_time_selector),
	  .offset_words = 10,
	  .offset_bits  = 24,
	  .size_bits    = 2 },
	{ MCMEMBER_REC_FIELD(packet_life_time),
	  .offset_words = 10,
	  .offset_bits  = 26,
	  .size_bits    = 6 },
	{ MCMEMBER_REC_FIELD(sl),
	  .offset_words = 11,
	  .offset_bits  = 0,
	  .size_bits    = 4 },
	{ MCMEMBER_REC_FIELD(flow_label),
	  .offset_words = 11,
	  .offset_bits  = 4,
	  .size_bits    = 20 },
	{ MCMEMBER_REC_FIELD(hop_limit),
	  .offset_words = 11,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ MCMEMBER_REC_FIELD(scope),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 4 },
	{ MCMEMBER_REC_FIELD(join_state),
	  .offset_words = 12,
	  .offset_bits  = 4,
	  .size_bits    = 4 },
	{ MCMEMBER_REC_FIELD(proxy_join),
	  .offset_words = 12,
	  .offset_bits  = 8,
	  .size_bits    = 1 },
	{ RESERVED,
	  .offset_words = 12,
	  .offset_bits  = 9,
	  .size_bits    = 23 },
};

#define SERVICE_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_service_rec, field), \
	.struct_size_bytes   = sizeof((struct ib_sa_service_rec *) 0)->field, \
	.field_name          = "sa_service_rec:" #field

static const struct ib_field service_rec_table[] = {
	{ SERVICE_REC_FIELD(id),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 64 },
	{ SERVICE_REC_FIELD(gid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ SERVICE_REC_FIELD(pkey),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ SERVICE_REC_FIELD(lease),
	  .offset_words = 7,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ SERVICE_REC_FIELD(key),
	  .offset_words = 8,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ SERVICE_REC_FIELD(name),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 64*8 },
	{ SERVICE_REC_FIELD(data8),
	  .offset_words = 28,
	  .offset_bits  = 0,
	  .size_bits    = 16*8 },
	{ SERVICE_REC_FIELD(data16),
	  .offset_words = 32,
	  .offset_bits  = 0,
	  .size_bits    = 8*16 },
	{ SERVICE_REC_FIELD(data32),
	  .offset_words = 36,
	  .offset_bits  = 0,
	  .size_bits    = 4*32 },
	{ SERVICE_REC_FIELD(data64),
	  .offset_words = 40,
	  .offset_bits  = 0,
	  .size_bits    = 2*64 },
};

#define CLASSPORTINFO_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_class_port_info, field), \
	.struct_size_bytes   = sizeof((struct ib_class_port_info *)0)->field, \
	.field_name          = "ib_class_port_info:" #field

static const struct ib_field ib_classport_info_rec_table[] = {
	{ CLASSPORTINFO_REC_FIELD(base_version),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 8 },
	{ CLASSPORTINFO_REC_FIELD(class_version),
	  .offset_words = 0,
	  .offset_bits  = 8,
	  .size_bits    = 8 },
	{ CLASSPORTINFO_REC_FIELD(capability_mask),
	  .offset_words = 0,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
	  .offset_words = 1,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ CLASSPORTINFO_REC_FIELD(redirect_gid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ CLASSPORTINFO_REC_FIELD(redirect_tcslfl),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ CLASSPORTINFO_REC_FIELD(redirect_lid),
	  .offset_words = 7,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ CLASSPORTINFO_REC_FIELD(redirect_pkey),
	  .offset_words = 7,
	  .offset_bits  = 16,
	  .size_bits    = 16 },

	{ CLASSPORTINFO_REC_FIELD(redirect_qp),
	  .offset_words = 8,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ CLASSPORTINFO_REC_FIELD(redirect_qkey),
	  .offset_words = 9,
	  .offset_bits  = 0,
	  .size_bits    = 32 },

	{ CLASSPORTINFO_REC_FIELD(trap_gid),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ CLASSPORTINFO_REC_FIELD(trap_tcslfl),
	  .offset_words = 14,
	  .offset_bits  = 0,
	  .size_bits    = 32 },

	{ CLASSPORTINFO_REC_FIELD(trap_lid),
	  .offset_words = 15,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ CLASSPORTINFO_REC_FIELD(trap_pkey),
	  .offset_words = 15,
	  .offset_bits  = 16,
	  .size_bits    = 16 },

	{ CLASSPORTINFO_REC_FIELD(trap_hlqp),
	  .offset_words = 16,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ CLASSPORTINFO_REC_FIELD(trap_qkey),
	  .offset_words = 17,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
};

#define OPA_CLASSPORTINFO_REC_FIELD(field) \
	.struct_offset_bytes = \
		offsetof(struct opa_class_port_info, field), \
	.struct_size_bytes = \
		sizeof((struct opa_class_port_info *)0)->field, \
	.field_name = "opa_class_port_info:" #field

static const struct ib_field opa_classport_info_rec_table[] = {
	{ OPA_CLASSPORTINFO_REC_FIELD(base_version),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 8 },
	{ OPA_CLASSPORTINFO_REC_FIELD(class_version),
	  .offset_words = 0,
	  .offset_bits  = 8,
	  .size_bits    = 8 },
	{ OPA_CLASSPORTINFO_REC_FIELD(cap_mask),
	  .offset_words = 0,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ OPA_CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
	  .offset_words = 1,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_gid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_tc_fl),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_lid),
	  .offset_words = 7,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_sl_qp),
	  .offset_words = 8,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_qkey),
	  .offset_words = 9,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_gid),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_tc_fl),
	  .offset_words = 14,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_lid),
	  .offset_words = 15,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_hl_qp),
	  .offset_words = 16,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_qkey),
	  .offset_words = 17,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_pkey),
	  .offset_words = 18,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ OPA_CLASSPORTINFO_REC_FIELD(redirect_pkey),
	  .offset_words = 18,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ OPA_CLASSPORTINFO_REC_FIELD(trap_sl_rsvd),
	  .offset_words = 19,
	  .offset_bits  = 0,
	  .size_bits    = 8 },
	{ RESERVED,
	  .offset_words = 19,
	  .offset_bits  = 8,
	  .size_bits    = 24 },
};

#define GUIDINFO_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field), \
	.struct_size_bytes   = sizeof((struct ib_sa_guidinfo_rec *) 0)->field, \
	.field_name          = "sa_guidinfo_rec:" #field

static const struct ib_field guidinfo_rec_table[] = {
	{ GUIDINFO_REC_FIELD(lid),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ GUIDINFO_REC_FIELD(block_num),
	  .offset_words = 0,
	  .offset_bits  = 16,
	  .size_bits    = 8 },
	{ GUIDINFO_REC_FIELD(res1),
	  .offset_words = 0,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ GUIDINFO_REC_FIELD(res2),
	  .offset_words = 1,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ GUIDINFO_REC_FIELD(guid_info_list),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 512 },
};

static inline void ib_sa_disable_local_svc(struct ib_sa_query *query)
{
	query->flags &= ~IB_SA_ENABLE_LOCAL_SERVICE;
}

static inline int ib_sa_query_cancelled(struct ib_sa_query *query)
{
	return (query->flags & IB_SA_CANCEL);
}

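/*
 * Translate the comp_mask-selected fields of the path record into
 * netlink attributes for the user-space resolver, preceded by the
 * rdma_ls_resolve_header family header.
 */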
static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
				     struct ib_sa_query *query)
{
	struct sa_path_rec *sa_rec = query->mad_buf->context[1];
	struct ib_sa_mad *mad = query->mad_buf->mad;
	ib_sa_comp_mask comp_mask = mad->sa_hdr.comp_mask;
	u16 val16;
	u64 val64;
	struct rdma_ls_resolve_header *header;

	query->mad_buf->context[1] = NULL;

	/* Construct the family header first */
	header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
	memcpy(header->device_name, query->port->agent->device->name,
	       LS_DEVICE_NAME_MAX);
	header->port_num = query->port->port_num;

	if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
	    sa_rec->reversible != 0)
		query->path_use = LS_RESOLVE_PATH_USE_GMP;
	else
		query->path_use = LS_RESOLVE_PATH_USE_UNIDIRECTIONAL;
	header->path_use = query->path_use;

	/* Now build the attributes */
	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) {
		val64 = be64_to_cpu(sa_rec->service_id);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID,
			sizeof(val64), &val64);
	}
	if (comp_mask & IB_SA_PATH_REC_DGID)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_DGID,
			sizeof(sa_rec->dgid), &sa_rec->dgid);
	if (comp_mask & IB_SA_PATH_REC_SGID)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SGID,
			sizeof(sa_rec->sgid), &sa_rec->sgid);
	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_TCLASS,
			sizeof(sa_rec->traffic_class), &sa_rec->traffic_class);

	if (comp_mask & IB_SA_PATH_REC_PKEY) {
		val16 = be16_to_cpu(sa_rec->pkey);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_PKEY,
			sizeof(val16), &val16);
	}
	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS) {
		val16 = be16_to_cpu(sa_rec->qos_class);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_QOS_CLASS,
			sizeof(val16), &val16);
	}
}

static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
{
	int len = 0;

	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID)
		len += nla_total_size(sizeof(u64));
	if (comp_mask & IB_SA_PATH_REC_DGID)
		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
	if (comp_mask & IB_SA_PATH_REC_SGID)
		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
		len += nla_total_size(sizeof(u8));
	if (comp_mask & IB_SA_PATH_REC_PKEY)
		len += nla_total_size(sizeof(u16));
	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS)
		len += nla_total_size(sizeof(u16));

	/*
	 * Make sure that at least some of the required comp_mask bits are
	 * set.
	 */
	if (WARN_ON(len == 0))
		return len;

	/* Add the family header */
	len += NLMSG_ALIGN(sizeof(struct rdma_ls_resolve_header));

	return len;
}

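/*
 * Build an RDMA_NL_LS_OP_RESOLVE request and multicast it to the
 * RDMA_NL_GROUP_LS listeners; returns a negative errno on setup
 * failure, the attribute length if the multicast succeeded, or 0 if
 * it did not.
 */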
static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
{
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	void *data;
	int ret = 0;
	struct ib_sa_mad *mad;
	int len;

	mad = query->mad_buf->mad;
	len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);
	if (len <= 0)
		return -EMSGSIZE;

	skb = nlmsg_new(len, gfp_mask);
	if (!skb)
		return -ENOMEM;

	/* Put nlmsg header only for now */
	data = ibnl_put_msg(skb, &nlh, query->seq, 0, RDMA_NL_LS,
			    RDMA_NL_LS_OP_RESOLVE, NLM_F_REQUEST);
	if (!data) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	/* Add attributes */
	ib_nl_set_path_rec_attrs(skb, query);

	/* Repair the nlmsg header length */
	nlmsg_end(skb, nlh);

	ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_LS, gfp_mask);
	if (!ret)
		ret = len;
	else
		ret = 0;

	return ret;
}

static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
{
	unsigned long flags;
	unsigned long delay;
	int ret;

	INIT_LIST_HEAD(&query->list);
	query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);

	/* Put the request on the list first. */
	spin_lock_irqsave(&ib_nl_request_lock, flags);
	delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
	query->timeout = delay + jiffies;
	list_add_tail(&query->list, &ib_nl_request_list);
	/* Start the timeout if this is the only request */
	if (ib_nl_request_list.next == &query->list)
		queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

	ret = ib_nl_send_msg(query, gfp_mask);
	if (ret <= 0) {
		ret = -EIO;
		/* Remove the request */
		spin_lock_irqsave(&ib_nl_request_lock, flags);
		list_del(&query->list);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
	} else {
		ret = 0;
	}

	return ret;
}

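/*
 * Flag a queued netlink request as cancelled and pull the timeout work
 * forward so the request completes promptly from the timeout path.
 */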
static int ib_nl_cancel_request(struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_sa_query *wait_query;
	int found = 0;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	list_for_each_entry(wait_query, &ib_nl_request_list, list) {
		/* Let the timeout take care of the callback */
		if (query == wait_query) {
			query->flags |= IB_SA_CANCEL;
			query->timeout = jiffies;
			list_move(&query->list, &ib_nl_request_list);
			found = 1;
			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work, 1);
			break;
		}
	}
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

	return found;
}

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc);

static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query,
					   const struct nlmsghdr *nlh)
{
	struct ib_mad_send_wc mad_send_wc;
	struct ib_sa_mad *mad = NULL;
	const struct nlattr *head, *curr;
	struct ib_path_rec_data *rec;
	int len, rem;
	u32 mask = 0;
	int status = -EIO;

	if (query->callback) {
		head = (const struct nlattr *) nlmsg_data(nlh);
		len = nlmsg_len(nlh);
		switch (query->path_use) {
		case LS_RESOLVE_PATH_USE_UNIDIRECTIONAL:
			mask = IB_PATH_PRIMARY | IB_PATH_OUTBOUND;
			break;

		case LS_RESOLVE_PATH_USE_ALL:
		case LS_RESOLVE_PATH_USE_GMP:
		default:
			mask = IB_PATH_PRIMARY | IB_PATH_GMP |
				IB_PATH_BIDIRECTIONAL;
			break;
		}
		nla_for_each_attr(curr, head, len, rem) {
			if (curr->nla_type == LS_NLA_TYPE_PATH_RECORD) {
				rec = nla_data(curr);
				/*
				 * Get the first one. In the future, we may
				 * need to get up to 6 pathrecords.
				 */
				if ((rec->flags & mask) == mask) {
					mad = query->mad_buf->mad;
					mad->mad_hdr.method |=
						IB_MGMT_METHOD_RESP;
					memcpy(mad->data, rec->path_rec,
					       sizeof(rec->path_rec));
					status = 0;
					break;
				}
			}
		}
		query->callback(query, status, mad);
	}

	mad_send_wc.send_buf = query->mad_buf;
	mad_send_wc.status = IB_WC_SUCCESS;
	send_handler(query->mad_buf->mad_agent, &mad_send_wc);
}

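/*
 * Delayed work: re-arms itself while the head of ib_nl_request_list is
 * still within its deadline, and hands expired requests back to the
 * MAD layer (or flushes them if they were cancelled).
 */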
static void ib_nl_request_timeout(struct work_struct *work)
{
	unsigned long flags;
	struct ib_sa_query *query;
	unsigned long delay;
	struct ib_mad_send_wc mad_send_wc;
	int ret;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	while (!list_empty(&ib_nl_request_list)) {
		query = list_entry(ib_nl_request_list.next,
				   struct ib_sa_query, list);

		if (time_after(query->timeout, jiffies)) {
			delay = query->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
			break;
		}

		list_del(&query->list);
		ib_sa_disable_local_svc(query);
		/* Hold the lock to protect against query cancellation */
		if (ib_sa_query_cancelled(query))
			ret = -1;
		else
			ret = ib_post_send_mad(query->mad_buf, NULL);
		if (ret) {
			mad_send_wc.send_buf = query->mad_buf;
			mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
			spin_unlock_irqrestore(&ib_nl_request_lock, flags);
			send_handler(query->port->agent, &mad_send_wc);
			spin_lock_irqsave(&ib_nl_request_lock, flags);
		}
	}
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);
}

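/*
 * Netlink handler for RDMA_NL_LS_OP_SET_TIMEOUT: clamps the requested
 * local-service timeout and rebases the deadlines of queued requests.
 */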
int ib_nl_handle_set_timeout(struct sk_buff *skb,
			     struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
	int timeout, delta, abs_delta;
	const struct nlattr *attr;
	unsigned long flags;
	struct ib_sa_query *query;
	long delay = 0;
	struct nlattr *tb[LS_NLA_TYPE_MAX];
	int ret;

	if (!(nlh->nlmsg_flags & NLM_F_REQUEST) ||
	    !(NETLINK_CB(skb).sk) ||
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
			nlmsg_len(nlh), ib_nl_policy, NULL);
	attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT];
	if (ret || !attr)
		goto settimeout_out;

	timeout = *(int *) nla_data(attr);
	if (timeout < IB_SA_LOCAL_SVC_TIMEOUT_MIN)
		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MIN;
	if (timeout > IB_SA_LOCAL_SVC_TIMEOUT_MAX)
		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MAX;

	delta = timeout - sa_local_svc_timeout_ms;
	if (delta < 0)
		abs_delta = -delta;
	else
		abs_delta = delta;

	if (delta != 0) {
		spin_lock_irqsave(&ib_nl_request_lock, flags);
		sa_local_svc_timeout_ms = timeout;
		list_for_each_entry(query, &ib_nl_request_list, list) {
			if (delta < 0 && abs_delta > query->timeout)
				query->timeout = 0;
			else
				query->timeout += delta;

			/* Get the new delay from the first entry */
			if (!delay) {
				delay = query->timeout - jiffies;
				if (delay <= 0)
					delay = 1;
			}
		}
		if (delay)
			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work,
					 (unsigned long)delay);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
	}

settimeout_out:
	return skb->len;
}

static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
{
	struct nlattr *tb[LS_NLA_TYPE_MAX];
	int ret;

	if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
		return 0;

	ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
			nlmsg_len(nlh), ib_nl_policy, NULL);
	if (ret)
		return 0;

	return 1;
}

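/*
 * Netlink handler for resolve responses: matches nlmsg_seq against the
 * pending list; failed resolutions fall back to the MAD path, good
 * ones complete through ib_nl_process_good_resolve_rsp().
 */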
int ib_nl_handle_resolve_resp(struct sk_buff *skb,
			      struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
	unsigned long flags;
	struct ib_sa_query *query;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_send_wc mad_send_wc;
	int found = 0;
	int ret;

	if ((nlh->nlmsg_flags & NLM_F_REQUEST) ||
	    !(NETLINK_CB(skb).sk) ||
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	list_for_each_entry(query, &ib_nl_request_list, list) {
		/*
		 * If the query is cancelled, let the timeout routine
		 * take care of it.
		 */
		if (nlh->nlmsg_seq == query->seq) {
			found = !ib_sa_query_cancelled(query);
			if (found)
				list_del(&query->list);
			break;
		}
	}

	if (!found) {
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		goto resp_out;
	}

	send_buf = query->mad_buf;

	if (!ib_nl_is_good_resolve_resp(nlh)) {
		/* if the result is a failure, send out the packet via IB */
		ib_sa_disable_local_svc(query);
		ret = ib_post_send_mad(query->mad_buf, NULL);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		if (ret) {
			mad_send_wc.send_buf = send_buf;
			mad_send_wc.status = IB_WC_GENERAL_ERR;
			send_handler(query->port->agent, &mad_send_wc);
		}
	} else {
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		ib_nl_process_good_resolve_rsp(query, nlh);
	}

resp_out:
	return skb->len;
}

static void free_sm_ah(struct kref *kref)
{
	struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);

	rdma_destroy_ah(sm_ah->ah);
	kfree(sm_ah);
}

void ib_sa_register_client(struct ib_sa_client *client)
{
	atomic_set(&client->users, 1);
	init_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_register_client);

void ib_sa_unregister_client(struct ib_sa_client *client)
{
	ib_sa_client_put(client);
	wait_for_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_unregister_client);

/**
 * ib_sa_cancel_query - try to cancel an SA query
 * @id: ID of query to cancel
 * @query: query pointer to cancel
 *
 * Try to cancel an SA query. If the id and query don't match up or
 * the query has already completed, nothing is done. Otherwise the
 * query is canceled and will complete with a status of -EINTR.
 */
void ib_sa_cancel_query(int id, struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_mad_agent *agent;
	struct ib_mad_send_buf *mad_buf;

	spin_lock_irqsave(&idr_lock, flags);
	if (idr_find(&query_idr, id) != query) {
		spin_unlock_irqrestore(&idr_lock, flags);
		return;
	}
	agent = query->port->agent;
	mad_buf = query->mad_buf;
	spin_unlock_irqrestore(&idr_lock, flags);

	/*
	 * If the query is still on the netlink request list, schedule
	 * it to be cancelled by the timeout routine. Otherwise, it has been
	 * sent to the MAD layer and has to be cancelled from there.
	 */
	if (!ib_nl_cancel_request(query))
		ib_cancel_mad(agent, mad_buf);
}
EXPORT_SYMBOL(ib_sa_cancel_query);

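/* SM SrcPathMask for the port; defaults to 0x7f when no SA device or
 * SM address handle is available. */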
static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
{
	struct ib_sa_device *sa_dev;
	struct ib_sa_port *port;
	unsigned long flags;
	u8 src_path_mask;

	sa_dev = ib_get_client_data(device, &sa_client);
	if (!sa_dev)
		return 0x7f;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	spin_lock_irqsave(&port->ah_lock, flags);
	src_path_mask = port->sm_ah ? port->sm_ah->src_path_mask : 0x7f;
	spin_unlock_irqrestore(&port->ah_lock, flags);

	return src_path_mask;
}

int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
			 struct sa_path_rec *rec,
			 struct rdma_ah_attr *ah_attr)
{
	int ret;
	u16 gid_index;
	int use_roce;
	struct net_device *ndev = NULL;

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->type = rdma_ah_find_type(device, port_num);

	rdma_ah_set_dlid(ah_attr, be32_to_cpu(sa_path_get_dlid(rec)));
	rdma_ah_set_sl(ah_attr, rec->sl);
	rdma_ah_set_path_bits(ah_attr, be32_to_cpu(sa_path_get_slid(rec)) &
			      get_src_path_mask(device, port_num));
	rdma_ah_set_port_num(ah_attr, port_num);
	rdma_ah_set_static_rate(ah_attr, rec->rate);
	use_roce = rdma_cap_eth_ah(device, port_num);

	if (use_roce) {
		struct net_device *idev;
		struct net_device *resolved_dev;
		struct rdma_dev_addr dev_addr = {
			.bound_dev_if = ((sa_path_get_ifindex(rec) >= 0) ?
					 sa_path_get_ifindex(rec) : 0),
			.net = sa_path_get_ndev(rec) ?
				sa_path_get_ndev(rec) :
				&init_net
		};
		union {
			struct sockaddr _sockaddr;
			struct sockaddr_in _sockaddr_in;
			struct sockaddr_in6 _sockaddr_in6;
		} sgid_addr, dgid_addr;

		if (!device->get_netdev)
			return -EOPNOTSUPP;

		rdma_gid2ip(&sgid_addr._sockaddr, &rec->sgid);
		rdma_gid2ip(&dgid_addr._sockaddr, &rec->dgid);

		/* validate the route */
		ret = rdma_resolve_ip_route(&sgid_addr._sockaddr,
					    &dgid_addr._sockaddr, &dev_addr);
		if (ret)
			return ret;

		if ((dev_addr.network == RDMA_NETWORK_IPV4 ||
		     dev_addr.network == RDMA_NETWORK_IPV6) &&
		    rec->rec_type != SA_PATH_REC_TYPE_ROCE_V2)
			return -EINVAL;

		idev = device->get_netdev(device, port_num);
		if (!idev)
			return -ENODEV;

		resolved_dev = dev_get_by_index(dev_addr.net,
						dev_addr.bound_dev_if);
		if (!resolved_dev) {
			/* dev_get_by_index() can fail; don't dereference NULL */
			dev_put(idev);
			return -ENODEV;
		}
		if (resolved_dev->flags & IFF_LOOPBACK) {
			dev_put(resolved_dev);
			resolved_dev = idev;
			dev_hold(resolved_dev);
		}
		ndev = ib_get_ndev_from_path(rec);
		rcu_read_lock();
		if ((ndev && ndev != resolved_dev) ||
		    (resolved_dev != idev &&
		     !rdma_is_upper_dev_rcu(idev, resolved_dev)))
			ret = -EHOSTUNREACH;
		rcu_read_unlock();
		dev_put(idev);
		dev_put(resolved_dev);
		if (ret) {
			if (ndev)
				dev_put(ndev);
			return ret;
		}
	}

	if (rec->hop_limit > 0 || use_roce) {
		enum ib_gid_type type = sa_conv_pathrec_to_gid_type(rec);

		ret = ib_find_cached_gid_by_port(device, &rec->sgid, type,
						 port_num, ndev, &gid_index);
		if (ret) {
			if (ndev)
				dev_put(ndev);
			return ret;
		}

		rdma_ah_set_grh(ah_attr, &rec->dgid,
				be32_to_cpu(rec->flow_label),
				gid_index, rec->hop_limit,
				rec->traffic_class);
		if (ndev)
			dev_put(ndev);
	}

	if (use_roce) {
		u8 *dmac = sa_path_get_dmac(rec);

		if (!dmac)
			return -EINVAL;
		memcpy(ah_attr->roce.dmac, dmac, ETH_ALEN);
	}

	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_path);

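/*
 * Grab a reference on the port's current SM address handle and
 * allocate a send buffer in the matching MAD format (OPA or IB base
 * version).
 */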
static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&query->port->ah_lock, flags);
	if (!query->port->sm_ah) {
		spin_unlock_irqrestore(&query->port->ah_lock, flags);
		return -EAGAIN;
	}
	kref_get(&query->port->sm_ah->ref);
	query->sm_ah = query->port->sm_ah;
	spin_unlock_irqrestore(&query->port->ah_lock, flags);

	query->mad_buf = ib_create_send_mad(query->port->agent, 1,
					    query->sm_ah->pkey_index,
					    0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
					    gfp_mask,
					    ((query->flags & IB_SA_QUERY_OPA) ?
					     OPA_MGMT_BASE_VERSION :
					     IB_MGMT_BASE_VERSION));
	if (IS_ERR(query->mad_buf)) {
		kref_put(&query->sm_ah->ref, free_sm_ah);
		return -ENOMEM;
	}

	query->mad_buf->ah = query->sm_ah->ah;

	return 0;
}

static void free_mad(struct ib_sa_query *query)
{
	ib_free_send_mad(query->mad_buf);
	kref_put(&query->sm_ah->ref, free_sm_ah);
}

static void init_mad(struct ib_sa_query *query, struct ib_mad_agent *agent)
{
	struct ib_sa_mad *mad = query->mad_buf->mad;
	unsigned long flags;

	memset(mad, 0, sizeof *mad);

	if (query->flags & IB_SA_QUERY_OPA) {
		mad->mad_hdr.base_version = OPA_MGMT_BASE_VERSION;
		mad->mad_hdr.class_version = OPA_SA_CLASS_VERSION;
	} else {
		mad->mad_hdr.base_version = IB_MGMT_BASE_VERSION;
		mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;
	}
	mad->mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
	spin_lock_irqsave(&tid_lock, flags);
	mad->mad_hdr.tid =
		cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++);
	spin_unlock_irqrestore(&tid_lock, flags);
}

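/*
 * Assign the query an IDR id, try the netlink local service first when
 * listeners are present, and otherwise post the MAD directly; returns
 * the query id or a negative errno.
 */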
static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
{
	bool preload = gfpflags_allow_blocking(gfp_mask);
	unsigned long flags;
	int ret, id;

	if (preload)
		idr_preload(gfp_mask);
	spin_lock_irqsave(&idr_lock, flags);

	id = idr_alloc(&query_idr, query, 0, 0, GFP_NOWAIT);

	spin_unlock_irqrestore(&idr_lock, flags);
	if (preload)
		idr_preload_end();
	if (id < 0)
		return id;

	query->mad_buf->timeout_ms = timeout_ms;
	query->mad_buf->context[0] = query;
	query->id = id;

	if ((query->flags & IB_SA_ENABLE_LOCAL_SERVICE) &&
	    (!(query->flags & IB_SA_QUERY_OPA))) {
		if (!ibnl_chk_listeners(RDMA_NL_GROUP_LS)) {
			if (!ib_nl_make_request(query, gfp_mask))
				return id;
		}
		ib_sa_disable_local_svc(query);
	}

	ret = ib_post_send_mad(query->mad_buf, NULL);
	if (ret) {
		spin_lock_irqsave(&idr_lock, flags);
		idr_remove(&query_idr, id);
		spin_unlock_irqrestore(&idr_lock, flags);
	}

	/*
	 * It's not safe to dereference query any more, because the
	 * send may already have completed and freed the query in
	 * another context.
	 */
	return ret ? ret : id;
}

void ib_sa_unpack_path(void *attribute, struct sa_path_rec *rec)
{
	ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec);
}
EXPORT_SYMBOL(ib_sa_unpack_path);

void ib_sa_pack_path(struct sa_path_rec *rec, void *attribute)
{
	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, attribute);
}
EXPORT_SYMBOL(ib_sa_pack_path);

static bool ib_sa_opa_pathrecord_support(struct ib_sa_client *client,
					 struct ib_device *device,
					 u8 port_num)
{
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	unsigned long flags;
	bool ret = false;

	if (!sa_dev)
		return ret;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	spin_lock_irqsave(&port->classport_lock, flags);
	if (!port->classport_info.valid)
		goto ret;

	if (port->classport_info.data.type == RDMA_CLASS_PORT_INFO_OPA)
		ret = opa_get_cpi_capmask2(&port->classport_info.data.opa) &
			OPA_CLASS_PORT_INFO_PR_SUPPORT;
ret:
	spin_unlock_irqrestore(&port->classport_lock, flags);
	return ret;
}

enum opa_pr_supported {
	PR_NOT_SUPPORTED,
	PR_OPA_SUPPORTED,
	PR_IB_SUPPORTED
};

/**
 * Check if the current PR query can be an OPA query.
 * Returns PR_NOT_SUPPORTED if a path record query is not
 * possible, PR_OPA_SUPPORTED if an OPA path record query
 * is possible and PR_IB_SUPPORTED if an IB path record
 * query is possible.
 */
static int opa_pr_query_possible(struct ib_sa_client *client,
				 struct ib_device *device,
				 u8 port_num,
				 struct sa_path_rec *rec)
{
	struct ib_port_attr port_attr;

	if (ib_query_port(device, port_num, &port_attr))
		return PR_NOT_SUPPORTED;

	if (ib_sa_opa_pathrecord_support(client, device, port_num))
		return PR_OPA_SUPPORTED;

	if (port_attr.lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
		return PR_NOT_SUPPORTED;
	else
		return PR_IB_SUPPORTED;
}

static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
				    int status,
				    struct ib_sa_mad *mad)
{
	struct ib_sa_path_query *query =
		container_of(sa_query, struct ib_sa_path_query, sa_query);

	if (mad) {
		struct sa_path_rec rec;

		if (sa_query->flags & IB_SA_QUERY_OPA) {
			ib_unpack(opa_path_rec_table,
				  ARRAY_SIZE(opa_path_rec_table),
				  mad->data, &rec);
			rec.rec_type = SA_PATH_REC_TYPE_OPA;
			query->callback(status, &rec, query->context);
		} else {
			ib_unpack(path_rec_table,
				  ARRAY_SIZE(path_rec_table),
				  mad->data, &rec);
			rec.rec_type = SA_PATH_REC_TYPE_IB;
			sa_path_set_ndev(&rec, NULL);
			sa_path_set_ifindex(&rec, 0);
			sa_path_set_dmac_zero(&rec);

			if (query->conv_pr) {
				struct sa_path_rec opa;

				memset(&opa, 0, sizeof(struct sa_path_rec));
				sa_convert_path_ib_to_opa(&opa, &rec);
				query->callback(status, &opa, query->context);
			} else {
				query->callback(status, &rec, query->context);
			}
		}
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
{
	struct ib_sa_path_query *query =
		container_of(sa_query, struct ib_sa_path_query, sa_query);

	kfree(query->conv_pr);
	kfree(query);
}

/**
 * ib_sa_path_rec_get - Start a Path get query
 * @client: SA client
 * @device: device to send query on
 * @port_num: port number to send query on
 * @rec: Path Record to send in query
 * @comp_mask: component mask to send in query
 * @timeout_ms: time to wait for response
 * @gfp_mask: GFP mask to use for internal allocations
 * @callback: function called when query completes, times out or is
 * canceled
 * @context: opaque user context passed to callback
 * @sa_query: query context, used to cancel query
 *
 * Send a Path Record Get query to the SA to look up a path. The
 * callback function will be called when the query completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query. The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_path_rec_get() is negative, it is an
 * error code. Otherwise it is a query ID that can be used to cancel
 * the query.
 */
int ib_sa_path_rec_get(struct ib_sa_client *client,
		       struct ib_device *device, u8 port_num,
		       struct sa_path_rec *rec,
		       ib_sa_comp_mask comp_mask,
		       int timeout_ms, gfp_t gfp_mask,
		       void (*callback)(int status,
					struct sa_path_rec *resp,
					void *context),
		       void *context,
		       struct ib_sa_query **sa_query)
{
	struct ib_sa_path_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	enum opa_pr_supported status;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	if ((rec->rec_type != SA_PATH_REC_TYPE_IB) &&
	    (rec->rec_type != SA_PATH_REC_TYPE_OPA))
		return -EINVAL;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	if (rec->rec_type == SA_PATH_REC_TYPE_OPA) {
		status = opa_pr_query_possible(client, device, port_num, rec);
		if (status == PR_NOT_SUPPORTED) {
			ret = -EINVAL;
			goto err1;
		} else if (status == PR_OPA_SUPPORTED) {
			query->sa_query.flags |= IB_SA_QUERY_OPA;
		} else {
			query->conv_pr =
				kmalloc(sizeof(*query->conv_pr), gfp_mask);
			if (!query->conv_pr) {
				ret = -ENOMEM;
				goto err1;
			}
		}
	}

	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err2;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback = callback;
	query->context = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
	query->sa_query.release = ib_sa_path_rec_release;
	mad->mad_hdr.method = IB_MGMT_METHOD_GET;
	mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_PATH_REC);
	mad->sa_hdr.comp_mask = comp_mask;

	if (query->sa_query.flags & IB_SA_QUERY_OPA) {
		ib_pack(opa_path_rec_table, ARRAY_SIZE(opa_path_rec_table),
			rec, mad->data);
	} else if (query->conv_pr) {
		sa_convert_path_opa_to_ib(query->conv_pr, rec);
		ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table),
			query->conv_pr, mad->data);
	} else {
		ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table),
			rec, mad->data);
	}

	*sa_query = &query->sa_query;

	query->sa_query.flags |= IB_SA_ENABLE_LOCAL_SERVICE;
	query->sa_query.mad_buf->context[1] = (query->conv_pr) ?
						query->conv_pr : rec;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err3;

	return ret;

err3:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);
err2:
	kfree(query->conv_pr);
err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_path_rec_get);

static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query,
				       int status,
				       struct ib_sa_mad *mad)
{
	struct ib_sa_service_query *query =
		container_of(sa_query, struct ib_sa_service_query, sa_query);

	if (mad) {
		struct ib_sa_service_rec rec;

		ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_service_query, sa_query));
}

/**
 * ib_sa_service_rec_query - Start Service Record operation
 * @client: SA client
 * @device: device to send request on
 * @port_num: port number to send request on
 * @method: SA method - should be get, set, or delete
 * @rec: Service Record to send in request
 * @comp_mask: component mask to send in request
 * @timeout_ms: time to wait for response
 * @gfp_mask: GFP mask to use for internal allocations
 * @callback: function called when request completes, times out or is
 * canceled
 * @context: opaque user context passed to callback
 * @sa_query: request context, used to cancel request
 *
 * Send a Service Record set/get/delete to the SA to register,
 * unregister or query a service record.
 * The callback function will be called when the request completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query. The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_service_rec_query() is negative, it is an
 * error code. Otherwise it is a request ID that can be used to cancel
 * the query.
 */
1741int ib_sa_service_rec_query(struct ib_sa_client *client,
1742 struct ib_device *device, u8 port_num, u8 method,
1743 struct ib_sa_service_rec *rec,
1744 ib_sa_comp_mask comp_mask,
1745 int timeout_ms, gfp_t gfp_mask,
1746 void (*callback)(int status,
1747 struct ib_sa_service_rec *resp,
1748 void *context),
1749 void *context,
1750 struct ib_sa_query **sa_query)
1751{
1752 struct ib_sa_service_query *query;
1753 struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
1754 struct ib_sa_port *port;
1755 struct ib_mad_agent *agent;
1756 struct ib_sa_mad *mad;
1757 int ret;
1758
1759 if (!sa_dev)
1760 return -ENODEV;
1761
1762 port = &sa_dev->port[port_num - sa_dev->start_port];
1763 agent = port->agent;
1764
1765 if (method != IB_MGMT_METHOD_GET &&
1766 method != IB_MGMT_METHOD_SET &&
1767 method != IB_SA_METHOD_DELETE)
1768 return -EINVAL;
1769
1770 query = kzalloc(sizeof(*query), gfp_mask);
1771 if (!query)
1772 return -ENOMEM;
1773
1774 query->sa_query.port = port;
1775 ret = alloc_mad(&query->sa_query, gfp_mask);
1776 if (ret)
1777 goto err1;
1778
1779 ib_sa_client_get(client);
1780 query->sa_query.client = client;
1781 query->callback = callback;
1782 query->context = context;
1783
1784 mad = query->sa_query.mad_buf->mad;
1785 init_mad(&query->sa_query, agent);
1786
1787 query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
1788 query->sa_query.release = ib_sa_service_rec_release;
1789 mad->mad_hdr.method = method;
1790 mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
1791 mad->sa_hdr.comp_mask = comp_mask;
1792
1793 ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
1794 rec, mad->data);
1795
1796 *sa_query = &query->sa_query;
1797
1798 ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
1799 if (ret < 0)
1800 goto err2;
1801
1802 return ret;
1803
1804err2:
1805 *sa_query = NULL;
1806 ib_sa_client_put(query->sa_query.client);
1807 free_mad(&query->sa_query);
1808
1809err1:
1810 kfree(query);
1811 return ret;
1812}
1813EXPORT_SYMBOL(ib_sa_service_rec_query);
1814
1815static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
1816 int status,
1817 struct ib_sa_mad *mad)
1818{
1819 struct ib_sa_mcmember_query *query =
1820 container_of(sa_query, struct ib_sa_mcmember_query, sa_query);
1821
1822 if (mad) {
1823 struct ib_sa_mcmember_rec rec;
1824
1825 ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
1826 mad->data, &rec);
1827 query->callback(status, &rec, query->context);
1828 } else
1829 query->callback(status, NULL, query->context);
1830}
1831
1832static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
1833{
1834 kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
1835}
1836
1837int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
1838 struct ib_device *device, u8 port_num,
1839 u8 method,
1840 struct ib_sa_mcmember_rec *rec,
1841 ib_sa_comp_mask comp_mask,
1842 int timeout_ms, gfp_t gfp_mask,
1843 void (*callback)(int status,
1844 struct ib_sa_mcmember_rec *resp,
1845 void *context),
1846 void *context,
1847 struct ib_sa_query **sa_query)
1848{
1849 struct ib_sa_mcmember_query *query;
1850 struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
1851 struct ib_sa_port *port;
1852 struct ib_mad_agent *agent;
1853 struct ib_sa_mad *mad;
1854 int ret;
1855
1856 if (!sa_dev)
1857 return -ENODEV;
1858
1859 port = &sa_dev->port[port_num - sa_dev->start_port];
1860 agent = port->agent;
1861
1862 query = kzalloc(sizeof(*query), gfp_mask);
1863 if (!query)
1864 return -ENOMEM;
1865
1866 query->sa_query.port = port;
1867 ret = alloc_mad(&query->sa_query, gfp_mask);
1868 if (ret)
1869 goto err1;
1870
1871 ib_sa_client_get(client);
1872 query->sa_query.client = client;
1873 query->callback = callback;
1874 query->context = context;
1875
1876 mad = query->sa_query.mad_buf->mad;
1877 init_mad(&query->sa_query, agent);
1878
1879 query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
1880 query->sa_query.release = ib_sa_mcmember_rec_release;
1881 mad->mad_hdr.method = method;
1882 mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
1883 mad->sa_hdr.comp_mask = comp_mask;
1884
1885 ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
1886 rec, mad->data);
1887
1888 *sa_query = &query->sa_query;
1889
1890 ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
1891 if (ret < 0)
1892 goto err2;
1893
1894 return ret;
1895
1896err2:
1897 *sa_query = NULL;
1898 ib_sa_client_put(query->sa_query.client);
1899 free_mad(&query->sa_query);
1900
1901err1:
1902 kfree(query);
1903 return ret;
1904}
1905
1906/* Support GuidInfoRecord */
1907static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
1908 int status,
1909 struct ib_sa_mad *mad)
1910{
1911 struct ib_sa_guidinfo_query *query =
1912 container_of(sa_query, struct ib_sa_guidinfo_query, sa_query);
1913
1914 if (mad) {
1915 struct ib_sa_guidinfo_rec rec;
1916
1917 ib_unpack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table),
1918 mad->data, &rec);
1919 query->callback(status, &rec, query->context);
1920 } else
1921 query->callback(status, NULL, query->context);
1922}
1923
1924static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query)
1925{
1926 kfree(container_of(sa_query, struct ib_sa_guidinfo_query, sa_query));
1927}
1928
int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
			      struct ib_device *device, u8 port_num,
			      struct ib_sa_guidinfo_rec *rec,
			      ib_sa_comp_mask comp_mask, u8 method,
			      int timeout_ms, gfp_t gfp_mask,
			      void (*callback)(int status,
					       struct ib_sa_guidinfo_rec *resp,
					       void *context),
			      void *context,
			      struct ib_sa_query **sa_query)
{
	struct ib_sa_guidinfo_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	if (method != IB_MGMT_METHOD_GET &&
	    method != IB_MGMT_METHOD_SET &&
	    method != IB_SA_METHOD_DELETE) {
		return -EINVAL;
	}

	port = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = callback ? ib_sa_guidinfo_rec_callback : NULL;
	query->sa_query.release  = ib_sa_guidinfo_rec_release;

	mad->mad_hdr.method   = method;
	mad->mad_hdr.attr_id  = cpu_to_be16(IB_SA_ATTR_GUID_INFO_REC);
	mad->sa_hdr.comp_mask = comp_mask;

	ib_pack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), rec,
		mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_guid_info_rec_query);

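/**
 * ib_sa_sendonly_fullmem_support - Check for SendOnlyFullMember support
 * @client: SA client (not used by this query)
 * @device: device whose SA to check
 * @port_num: port number to check
 *
 * Returns true only if a valid IB (not OPA) ClassPortInfo is cached for
 * the port and its CapabilityMask2 advertises
 * IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT.
 */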
bool ib_sa_sendonly_fullmem_support(struct ib_sa_client *client,
				    struct ib_device *device,
				    u8 port_num)
{
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	bool ret = false;
	unsigned long flags;

	if (!sa_dev)
		return ret;

	port = &sa_dev->port[port_num - sa_dev->start_port];

	spin_lock_irqsave(&port->classport_lock, flags);
	if ((port->classport_info.valid) &&
	    (port->classport_info.data.type == RDMA_CLASS_PORT_INFO_IB))
		ret = ib_get_cpi_capmask2(&port->classport_info.data.ib)
			& IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT;
	spin_unlock_irqrestore(&port->classport_lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_sa_sendonly_fullmem_support);

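/*
 * Context for running a ClassPortInfo query synchronously:
 * update_ib_cpi() waits on @done, and ib_classportinfo_cb() completes
 * it when the query finishes.
 */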
struct ib_classport_info_context {
	struct completion	done;
	struct ib_sa_query	*sa_query;
};

static void ib_classportinfo_cb(void *context)
{
	struct ib_classport_info_context *cb_ctx = context;

	complete(&cb_ctx->done);
}

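/*
 * Unpack a ClassPortInfo response (OPA or IB layout, depending on how
 * the query was sent) and, under classport_lock, install it in the
 * per-port cache if no valid entry is present yet.  The caller's
 * completion callback runs last in all cases, even when no MAD arrived.
 */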
static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
					      int status,
					      struct ib_sa_mad *mad)
{
	unsigned long flags;
	struct ib_sa_classport_info_query *query =
		container_of(sa_query, struct ib_sa_classport_info_query, sa_query);
	struct ib_sa_classport_cache *info = &sa_query->port->classport_info;

	if (mad) {
		if (sa_query->flags & IB_SA_QUERY_OPA) {
			struct opa_class_port_info rec;

			ib_unpack(opa_classport_info_rec_table,
				  ARRAY_SIZE(opa_classport_info_rec_table),
				  mad->data, &rec);

			spin_lock_irqsave(&sa_query->port->classport_lock,
					  flags);
			if (!status && !info->valid) {
				memcpy(&info->data.opa, &rec,
				       sizeof(info->data.opa));

				info->valid = true;
				info->data.type = RDMA_CLASS_PORT_INFO_OPA;
			}
			spin_unlock_irqrestore(&sa_query->port->classport_lock,
					       flags);

		} else {
			struct ib_class_port_info rec;

			ib_unpack(ib_classport_info_rec_table,
				  ARRAY_SIZE(ib_classport_info_rec_table),
				  mad->data, &rec);

			spin_lock_irqsave(&sa_query->port->classport_lock,
					  flags);
			if (!status && !info->valid) {
				memcpy(&info->data.ib, &rec,
				       sizeof(info->data.ib));

				info->valid = true;
				info->data.type = RDMA_CLASS_PORT_INFO_IB;
			}
			spin_unlock_irqrestore(&sa_query->port->classport_lock,
					       flags);
		}
	}
	query->callback(query->context);
}

static void ib_sa_classport_info_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_classport_info_query,
			   sa_query));
}

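/*
 * Send a ClassPortInfo GET to the SA.  The OPA wire format is selected
 * when the port supports OPA address handles; the response is cached by
 * ib_sa_classport_info_rec_callback().
 */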
static int ib_sa_classport_info_rec_query(struct ib_sa_port *port,
					  int timeout_ms,
					  void (*callback)(void *context),
					  void *context,
					  struct ib_sa_query **sa_query)
{
	struct ib_mad_agent *agent;
	struct ib_sa_classport_info_query *query;
	struct ib_sa_mad *mad;
	gfp_t gfp_mask = GFP_KERNEL;
	int ret;

	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	query->sa_query.flags |= rdma_cap_opa_ah(port->agent->device,
						 port->port_num) ?
				 IB_SA_QUERY_OPA : 0;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err_free;

	query->callback = callback;
	query->context = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(&query->sa_query, agent);

	query->sa_query.callback = ib_sa_classport_info_rec_callback;
	query->sa_query.release  = ib_sa_classport_info_rec_release;
	mad->mad_hdr.method   = IB_MGMT_METHOD_GET;
	mad->mad_hdr.attr_id  = cpu_to_be16(IB_SA_ATTR_CLASS_PORTINFO);
	mad->sa_hdr.comp_mask = 0;
	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err_free_mad;

	return ret;

err_free_mad:
	*sa_query = NULL;
	free_mad(&query->sa_query);

err_free:
	kfree(query);
	return ret;
}

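/*
 * Delayed work that (re)fetches ClassPortInfo for a port.  The query
 * runs synchronously via a completion; if the cache is still invalid
 * afterwards, the work requeues itself up to IB_SA_CPI_MAX_RETRY_CNT
 * times, IB_SA_CPI_RETRY_WAIT msecs apart.
 */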
static void update_ib_cpi(struct work_struct *work)
{
	struct ib_sa_port *port =
		container_of(work, struct ib_sa_port, ib_cpi_work.work);
	struct ib_classport_info_context *cb_context;
	unsigned long flags;
	int ret;

	/* If the classport info is already valid, there is nothing to do. */
	spin_lock_irqsave(&port->classport_lock, flags);
	if (port->classport_info.valid) {
		spin_unlock_irqrestore(&port->classport_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&port->classport_lock, flags);

	cb_context = kmalloc(sizeof(*cb_context), GFP_KERNEL);
	if (!cb_context)
		goto err_nomem;

	init_completion(&cb_context->done);

	ret = ib_sa_classport_info_rec_query(port, 3000,
					     ib_classportinfo_cb, cb_context,
					     &cb_context->sa_query);
	if (ret < 0)
		goto free_cb_err;
	wait_for_completion(&cb_context->done);
free_cb_err:
	kfree(cb_context);
	spin_lock_irqsave(&port->classport_lock, flags);

	/* If the classport info is still not valid, the query must have
	 * failed for some reason.  Retry issuing the query.
	 */
	if (!port->classport_info.valid) {
		port->classport_info.retry_cnt++;
		if (port->classport_info.retry_cnt <=
		    IB_SA_CPI_MAX_RETRY_CNT) {
			unsigned long delay =
				msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT);

			queue_delayed_work(ib_wq, &port->ib_cpi_work, delay);
		}
	}
	spin_unlock_irqrestore(&port->classport_lock, flags);

err_nomem:
	return;
}

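/*
 * MAD send completion handler.  A successful send needs no callback
 * here (the receive path already delivered the response); timeouts,
 * flushes and other errors map to -ETIMEDOUT, -EINTR and -EIO.  Either
 * way, the query is removed from the IDR and released.
 */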
static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
	unsigned long flags;

	if (query->callback)
		switch (mad_send_wc->status) {
		case IB_WC_SUCCESS:
			/* No callback -- the recv handler already invoked it */
			break;
		case IB_WC_RESP_TIMEOUT_ERR:
			query->callback(query, -ETIMEDOUT, NULL);
			break;
		case IB_WC_WR_FLUSH_ERR:
			query->callback(query, -EINTR, NULL);
			break;
		default:
			query->callback(query, -EIO, NULL);
			break;
		}

	spin_lock_irqsave(&idr_lock, flags);
	idr_remove(&query_idr, query->id);
	spin_unlock_irqrestore(&idr_lock, flags);

	free_mad(query);
	if (query->client)
		ib_sa_client_put(query->client);
	query->release(query);
}

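/*
 * MAD receive completion handler: hand the raw SA MAD to the query's
 * unpack callback.  A response carrying a non-zero MAD status is
 * reported as -EINVAL, a failed work completion as -EIO.
 */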
static void recv_handler(struct ib_mad_agent *mad_agent,
			 struct ib_mad_send_buf *send_buf,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_sa_query *query;

	if (!send_buf)
		return;

	query = send_buf->context[0];
	if (query->callback) {
		if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
			query->callback(query,
					mad_recv_wc->recv_buf.mad->mad_hdr.status ?
					-EINVAL : 0,
					(struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
		else
			query->callback(query, -EIO, NULL);
	}

	ib_free_recv_mad(mad_recv_wc);
}

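/*
 * Work item that refreshes the cached address handle to the subnet
 * manager: look up the current SM LID/SL and the full default P_Key
 * index, add a GRH when the port requires one, and swap the new AH in
 * under ah_lock.
 */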
static void update_sm_ah(struct work_struct *work)
{
	struct ib_sa_port *port =
		container_of(work, struct ib_sa_port, update_task);
	struct ib_sa_sm_ah *new_ah;
	struct ib_port_attr port_attr;
	struct rdma_ah_attr ah_attr;

	if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
		pr_warn("Couldn't query port\n");
		return;
	}

	new_ah = kmalloc(sizeof(*new_ah), GFP_KERNEL);
	if (!new_ah)
		return;

	kref_init(&new_ah->ref);
	new_ah->src_path_mask = (1 << port_attr.lmc) - 1;

	new_ah->pkey_index = 0;
	if (ib_find_pkey(port->agent->device, port->port_num,
			 IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
		pr_err("Couldn't find index for default PKey\n");

	memset(&ah_attr, 0, sizeof(ah_attr));
	ah_attr.type = rdma_ah_find_type(port->agent->device,
					 port->port_num);
	rdma_ah_set_dlid(&ah_attr, port_attr.sm_lid);
	rdma_ah_set_sl(&ah_attr, port_attr.sm_sl);
	rdma_ah_set_port_num(&ah_attr, port->port_num);
	if (port_attr.grh_required) {
		rdma_ah_set_ah_flags(&ah_attr, IB_AH_GRH);

		rdma_ah_set_subnet_prefix(&ah_attr,
					  cpu_to_be64(port_attr.subnet_prefix));
		rdma_ah_set_interface_id(&ah_attr,
					 cpu_to_be64(IB_SA_WELL_KNOWN_GUID));
	}

	new_ah->ah = rdma_create_ah(port->agent->qp->pd, &ah_attr);
	if (IS_ERR(new_ah->ah)) {
		pr_warn("Couldn't create new SM AH\n");
		kfree(new_ah);
		return;
	}

	spin_lock_irq(&port->ah_lock);
	if (port->sm_ah)
		kref_put(&port->sm_ah->ref, free_sm_ah);
	port->sm_ah = new_ah;
	spin_unlock_irq(&port->ah_lock);
}

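/*
 * Device event handler.  Any event that can invalidate the SM address
 * drops the cached SM AH and schedules update_sm_ah(); events that
 * imply a new or restarted SA additionally invalidate the cached
 * ClassPortInfo and schedule a delayed refetch.
 */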
static void ib_sa_event(struct ib_event_handler *handler,
			struct ib_event *event)
{
	if (event->event == IB_EVENT_PORT_ERR    ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE  ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE   ||
	    event->event == IB_EVENT_CLIENT_REREGISTER) {
		unsigned long flags;
		struct ib_sa_device *sa_dev =
			container_of(handler, typeof(*sa_dev), event_handler);
		u8 port_num = event->element.port_num - sa_dev->start_port;
		struct ib_sa_port *port = &sa_dev->port[port_num];

		if (!rdma_cap_ib_sa(handler->device, port->port_num))
			return;

		spin_lock_irqsave(&port->ah_lock, flags);
		if (port->sm_ah)
			kref_put(&port->sm_ah->ref, free_sm_ah);
		port->sm_ah = NULL;
		spin_unlock_irqrestore(&port->ah_lock, flags);

		if (event->event == IB_EVENT_SM_CHANGE ||
		    event->event == IB_EVENT_CLIENT_REREGISTER ||
		    event->event == IB_EVENT_LID_CHANGE ||
		    event->event == IB_EVENT_PORT_ACTIVE) {
			unsigned long delay =
				msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT);

			spin_lock_irqsave(&port->classport_lock, flags);
			port->classport_info.valid = false;
			port->classport_info.retry_cnt = 0;
			spin_unlock_irqrestore(&port->classport_lock, flags);
			queue_delayed_work(ib_wq,
					   &port->ib_cpi_work, delay);
		}
		queue_work(ib_wq, &sa_dev->port[port_num].update_task);
	}
}

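/*
 * Client hook for a newly registered device: allocate an ib_sa_port per
 * port, register a GSI MAD agent on each SA-capable port, then register
 * the event handler and prime the SM AH cache (see the comment in the
 * function body for the ordering rationale).
 */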
static void ib_sa_add_one(struct ib_device *device)
{
	struct ib_sa_device *sa_dev;
	int s, e, i;
	int count = 0;

	s = rdma_start_port(device);
	e = rdma_end_port(device);

	sa_dev = kzalloc(sizeof *sa_dev +
			 (e - s + 1) * sizeof (struct ib_sa_port),
			 GFP_KERNEL);
	if (!sa_dev)
		return;

	sa_dev->start_port = s;
	sa_dev->end_port   = e;

	for (i = 0; i <= e - s; ++i) {
		spin_lock_init(&sa_dev->port[i].ah_lock);
		if (!rdma_cap_ib_sa(device, i + 1))
			continue;

		sa_dev->port[i].sm_ah    = NULL;
		sa_dev->port[i].port_num = i + s;

		spin_lock_init(&sa_dev->port[i].classport_lock);
		sa_dev->port[i].classport_info.valid = false;

		sa_dev->port[i].agent =
			ib_register_mad_agent(device, i + s, IB_QPT_GSI,
					      NULL, 0, send_handler,
					      recv_handler, sa_dev, 0);
		if (IS_ERR(sa_dev->port[i].agent))
			goto err;

		INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);
		INIT_DELAYED_WORK(&sa_dev->port[i].ib_cpi_work,
				  update_ib_cpi);

		count++;
	}

	if (!count)
		goto free;

	ib_set_client_data(device, &sa_client, sa_dev);

	/*
	 * We register our event handler after everything is set up,
	 * and then update our cached info after the event handler is
	 * registered to avoid any problems if a port changes state
	 * during our initialization.
	 */

	INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
	if (ib_register_event_handler(&sa_dev->event_handler))
		goto err;

	for (i = 0; i <= e - s; ++i) {
		if (rdma_cap_ib_sa(device, i + 1))
			update_sm_ah(&sa_dev->port[i].update_task);
	}

	return;

err:
	while (--i >= 0) {
		if (rdma_cap_ib_sa(device, i + 1))
			ib_unregister_mad_agent(sa_dev->port[i].agent);
	}
free:
	kfree(sa_dev);
	return;
}

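/*
 * Tear-down counterpart of ib_sa_add_one(): stop event delivery, flush
 * pending work, then cancel the ClassPortInfo work, unregister the MAD
 * agent and drop the SM AH reference for each SA-capable port.
 */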
static void ib_sa_remove_one(struct ib_device *device, void *client_data)
{
	struct ib_sa_device *sa_dev = client_data;
	int i;

	if (!sa_dev)
		return;

	ib_unregister_event_handler(&sa_dev->event_handler);
	flush_workqueue(ib_wq);

	for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
		if (rdma_cap_ib_sa(device, i + 1)) {
			cancel_delayed_work_sync(&sa_dev->port[i].ib_cpi_work);
			ib_unregister_mad_agent(sa_dev->port[i].agent);
			if (sa_dev->port[i].sm_ah)
				kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
		}
	}

	kfree(sa_dev);
}

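/*
 * Bring up SA support at module init: seed the MAD transaction ID,
 * register as an IB client, initialize multicast handling and create
 * the ordered workqueue used for SA netlink request timeouts.
 */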
int ib_sa_init(void)
{
	int ret;

	get_random_bytes(&tid, sizeof tid);

	atomic_set(&ib_nl_sa_request_seq, 0);

	ret = ib_register_client(&sa_client);
	if (ret) {
		pr_err("Couldn't register ib_sa client\n");
		goto err1;
	}

	ret = mcast_init();
	if (ret) {
		pr_err("Couldn't initialize multicast handling\n");
		goto err2;
	}

	ib_nl_wq = alloc_ordered_workqueue("ib_nl_sa_wq", WQ_MEM_RECLAIM);
	if (!ib_nl_wq) {
		ret = -ENOMEM;
		goto err3;
	}

	INIT_DELAYED_WORK(&ib_nl_timed_work, ib_nl_request_timeout);

	return 0;

err3:
	mcast_cleanup();
err2:
	ib_unregister_client(&sa_client);
err1:
	return ret;
}

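/* Undo ib_sa_init() in roughly reverse order, then drop the query IDR. */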
void ib_sa_cleanup(void)
{
	cancel_delayed_work(&ib_nl_timed_work);
	flush_workqueue(ib_nl_wq);
	destroy_workqueue(ib_nl_wq);
	mcast_cleanup();
	ib_unregister_client(&sa_client);
	idr_destroy(&query_idr);
}