/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
 * Copyright (c) 2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/idr.h>
#include <linux/workqueue.h>
#include <uapi/linux/if_ether.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_cache.h>
#include <rdma/rdma_netlink.h>
#include <net/netlink.h>
#include <uapi/rdma/ib_user_sa.h>
#include <rdma/ib_marshall.h>
#include <rdma/ib_addr.h>
#include "sa.h"
#include "core_priv.h"
#define IB_SA_LOCAL_SVC_TIMEOUT_MIN		100
#define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT		2000
#define IB_SA_LOCAL_SVC_TIMEOUT_MAX		200000

static int sa_local_svc_timeout_ms = IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT;
struct ib_sa_sm_ah {
	struct ib_ah        *ah;
	struct kref          ref;
	u16		     pkey_index;
	u8		     src_path_mask;
};

struct ib_sa_port {
	struct ib_mad_agent *agent;
	struct ib_sa_sm_ah  *sm_ah;
	struct work_struct   update_task;
	spinlock_t           ah_lock;
	u8                   port_num;
};

struct ib_sa_device {
	int                     start_port, end_port;
	struct ib_event_handler event_handler;
	struct ib_sa_port port[0];
};
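/*
 * Common state for an outstanding SA query.  A query lives in
 * query_idr (its idr slot is the ID handed back to the caller) and,
 * while the netlink local service is resolving it, on
 * ib_nl_request_list as well.  When a response, timeout, or
 * cancellation arrives, ->callback() reports the status and
 * ->release() frees the type-specific wrapper that embeds this struct.
 */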
struct ib_sa_query {
	void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
	void (*release)(struct ib_sa_query *);
	struct ib_sa_client    *client;
	struct ib_sa_port      *port;
	struct ib_mad_send_buf *mad_buf;
	struct ib_sa_sm_ah     *sm_ah;
	int			id;
	u32			flags;
	struct list_head	list; /* Local svc request list */
	u32			seq; /* Local svc request sequence number */
	unsigned long		timeout; /* Local svc timeout */
	u8			path_use; /* How will the pathrecord be used */
};

#define IB_SA_ENABLE_LOCAL_SERVICE	0x00000001
#define IB_SA_CANCEL			0x00000002
struct ib_sa_service_query {
	void (*callback)(int, struct ib_sa_service_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_path_query {
	void (*callback)(int, struct ib_sa_path_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_guidinfo_query {
	void (*callback)(int, struct ib_sa_guidinfo_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_classport_info_query {
	void (*callback)(int, struct ib_class_port_info *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_mcmember_query {
	void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};
static LIST_HEAD(ib_nl_request_list);
static DEFINE_SPINLOCK(ib_nl_request_lock);
static atomic_t ib_nl_sa_request_seq;
static struct workqueue_struct *ib_nl_wq;
static struct delayed_work ib_nl_timed_work;
static const struct nla_policy ib_nl_policy[LS_NLA_TYPE_MAX] = {
	[LS_NLA_TYPE_PATH_RECORD]	= {.type = NLA_BINARY,
					   .len = sizeof(struct ib_path_rec_data)},
	[LS_NLA_TYPE_TIMEOUT]		= {.type = NLA_U32},
	[LS_NLA_TYPE_SERVICE_ID]	= {.type = NLA_U64},
	[LS_NLA_TYPE_DGID]		= {.type = NLA_BINARY,
					   .len = sizeof(struct rdma_nla_ls_gid)},
	[LS_NLA_TYPE_SGID]		= {.type = NLA_BINARY,
					   .len = sizeof(struct rdma_nla_ls_gid)},
	[LS_NLA_TYPE_TCLASS]		= {.type = NLA_U8},
	[LS_NLA_TYPE_PKEY]		= {.type = NLA_U16},
	[LS_NLA_TYPE_QOS_CLASS]		= {.type = NLA_U16},
};
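/*
 * Attributes arriving from the user-space resolver are validated
 * against this policy by nla_parse() before a response is trusted;
 * ib_nl_is_good_resolve_resp() below rejects anything that fails
 * parsing or carries RDMA_NL_LS_F_ERR.
 */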
static void ib_sa_add_one(struct ib_device *device);
static void ib_sa_remove_one(struct ib_device *device, void *client_data);

static struct ib_client sa_client = {
	.name   = "sa",
	.add    = ib_sa_add_one,
	.remove = ib_sa_remove_one
};

static DEFINE_SPINLOCK(idr_lock);
static DEFINE_IDR(query_idr);

static DEFINE_SPINLOCK(tid_lock);
static u32 tid;
#define PATH_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_path_rec, field),		\
	.struct_size_bytes   = sizeof ((struct ib_sa_path_rec *) 0)->field,	\
	.field_name          = "sa_path_rec:" #field
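/*
 * Each table below maps one struct field onto its wire position in the
 * corresponding SA attribute: .offset_words/.offset_bits locate the
 * field in the big-endian MAD data and .size_bits gives its width,
 * while the *_REC_FIELD() macros fill in where the field lives in the
 * host struct.  For example,
 *
 *	{ PATH_REC_FIELD(sl),
 *	  .offset_words = 13, .offset_bits = 12, .size_bits = 4 },
 *
 * tells ib_pack()/ib_unpack() to copy the 4-bit SL between
 * ib_sa_path_rec.sl and bits 12-15 of word 13 of the attribute.
 */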
static const struct ib_field path_rec_table[] = {
	{ PATH_REC_FIELD(service_id),
	  .offset_words = 0,  .offset_bits = 0,  .size_bits = 64 },
	{ PATH_REC_FIELD(dgid),
	  .offset_words = 2,  .offset_bits = 0,  .size_bits = 128 },
	{ PATH_REC_FIELD(sgid),
	  .offset_words = 6,  .offset_bits = 0,  .size_bits = 128 },
	{ PATH_REC_FIELD(dlid),
	  .offset_words = 10, .offset_bits = 0,  .size_bits = 16 },
	{ PATH_REC_FIELD(slid),
	  .offset_words = 10, .offset_bits = 16, .size_bits = 16 },
	{ PATH_REC_FIELD(raw_traffic),
	  .offset_words = 11, .offset_bits = 0,  .size_bits = 1 },
	{ RESERVED,
	  .offset_words = 11, .offset_bits = 1,  .size_bits = 3 },
	{ PATH_REC_FIELD(flow_label),
	  .offset_words = 11, .offset_bits = 4,  .size_bits = 20 },
	{ PATH_REC_FIELD(hop_limit),
	  .offset_words = 11, .offset_bits = 24, .size_bits = 8 },
	{ PATH_REC_FIELD(traffic_class),
	  .offset_words = 12, .offset_bits = 0,  .size_bits = 8 },
	{ PATH_REC_FIELD(reversible),
	  .offset_words = 12, .offset_bits = 8,  .size_bits = 1 },
	{ PATH_REC_FIELD(numb_path),
	  .offset_words = 12, .offset_bits = 9,  .size_bits = 7 },
	{ PATH_REC_FIELD(pkey),
	  .offset_words = 12, .offset_bits = 16, .size_bits = 16 },
	{ PATH_REC_FIELD(qos_class),
	  .offset_words = 13, .offset_bits = 0,  .size_bits = 12 },
	{ PATH_REC_FIELD(sl),
	  .offset_words = 13, .offset_bits = 12, .size_bits = 4 },
	{ PATH_REC_FIELD(mtu_selector),
	  .offset_words = 13, .offset_bits = 16, .size_bits = 2 },
	{ PATH_REC_FIELD(mtu),
	  .offset_words = 13, .offset_bits = 18, .size_bits = 6 },
	{ PATH_REC_FIELD(rate_selector),
	  .offset_words = 13, .offset_bits = 24, .size_bits = 2 },
	{ PATH_REC_FIELD(rate),
	  .offset_words = 13, .offset_bits = 26, .size_bits = 6 },
	{ PATH_REC_FIELD(packet_life_time_selector),
	  .offset_words = 14, .offset_bits = 0,  .size_bits = 2 },
	{ PATH_REC_FIELD(packet_life_time),
	  .offset_words = 14, .offset_bits = 2,  .size_bits = 6 },
	{ PATH_REC_FIELD(preference),
	  .offset_words = 14, .offset_bits = 8,  .size_bits = 8 },
	{ RESERVED,
	  .offset_words = 14, .offset_bits = 16, .size_bits = 48 },
};
#define MCMEMBER_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field),	\
	.struct_size_bytes   = sizeof ((struct ib_sa_mcmember_rec *) 0)->field,	\
	.field_name          = "sa_mcmember_rec:" #field
static const struct ib_field mcmember_rec_table[] = {
	{ MCMEMBER_REC_FIELD(mgid),
	  .offset_words = 0,  .offset_bits = 0,  .size_bits = 128 },
	{ MCMEMBER_REC_FIELD(port_gid),
	  .offset_words = 4,  .offset_bits = 0,  .size_bits = 128 },
	{ MCMEMBER_REC_FIELD(qkey),
	  .offset_words = 8,  .offset_bits = 0,  .size_bits = 32 },
	{ MCMEMBER_REC_FIELD(mlid),
	  .offset_words = 9,  .offset_bits = 0,  .size_bits = 16 },
	{ MCMEMBER_REC_FIELD(mtu_selector),
	  .offset_words = 9,  .offset_bits = 16, .size_bits = 2 },
	{ MCMEMBER_REC_FIELD(mtu),
	  .offset_words = 9,  .offset_bits = 18, .size_bits = 6 },
	{ MCMEMBER_REC_FIELD(traffic_class),
	  .offset_words = 9,  .offset_bits = 24, .size_bits = 8 },
	{ MCMEMBER_REC_FIELD(pkey),
	  .offset_words = 10, .offset_bits = 0,  .size_bits = 16 },
	{ MCMEMBER_REC_FIELD(rate_selector),
	  .offset_words = 10, .offset_bits = 16, .size_bits = 2 },
	{ MCMEMBER_REC_FIELD(rate),
	  .offset_words = 10, .offset_bits = 18, .size_bits = 6 },
	{ MCMEMBER_REC_FIELD(packet_life_time_selector),
	  .offset_words = 10, .offset_bits = 24, .size_bits = 2 },
	{ MCMEMBER_REC_FIELD(packet_life_time),
	  .offset_words = 10, .offset_bits = 26, .size_bits = 6 },
	{ MCMEMBER_REC_FIELD(sl),
	  .offset_words = 11, .offset_bits = 0,  .size_bits = 4 },
	{ MCMEMBER_REC_FIELD(flow_label),
	  .offset_words = 11, .offset_bits = 4,  .size_bits = 20 },
	{ MCMEMBER_REC_FIELD(hop_limit),
	  .offset_words = 11, .offset_bits = 24, .size_bits = 8 },
	{ MCMEMBER_REC_FIELD(scope),
	  .offset_words = 12, .offset_bits = 0,  .size_bits = 4 },
	{ MCMEMBER_REC_FIELD(join_state),
	  .offset_words = 12, .offset_bits = 4,  .size_bits = 4 },
	{ MCMEMBER_REC_FIELD(proxy_join),
	  .offset_words = 12, .offset_bits = 8,  .size_bits = 1 },
	{ RESERVED,
	  .offset_words = 12, .offset_bits = 9,  .size_bits = 23 },
};
#define SERVICE_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_service_rec, field),	\
	.struct_size_bytes   = sizeof ((struct ib_sa_service_rec *) 0)->field,	\
	.field_name          = "sa_service_rec:" #field
static const struct ib_field service_rec_table[] = {
	{ SERVICE_REC_FIELD(id),
	  .offset_words = 0,  .offset_bits = 0, .size_bits = 64 },
	{ SERVICE_REC_FIELD(gid),
	  .offset_words = 2,  .offset_bits = 0, .size_bits = 128 },
	{ SERVICE_REC_FIELD(pkey),
	  .offset_words = 6,  .offset_bits = 0, .size_bits = 16 },
	{ SERVICE_REC_FIELD(lease),
	  .offset_words = 7,  .offset_bits = 0, .size_bits = 32 },
	{ SERVICE_REC_FIELD(key),
	  .offset_words = 8,  .offset_bits = 0, .size_bits = 128 },
	{ SERVICE_REC_FIELD(name),
	  .offset_words = 12, .offset_bits = 0, .size_bits = 64*8 },
	{ SERVICE_REC_FIELD(data8),
	  .offset_words = 28, .offset_bits = 0, .size_bits = 16*8 },
	{ SERVICE_REC_FIELD(data16),
	  .offset_words = 32, .offset_bits = 0, .size_bits = 8*16 },
	{ SERVICE_REC_FIELD(data32),
	  .offset_words = 36, .offset_bits = 0, .size_bits = 4*32 },
	{ SERVICE_REC_FIELD(data64),
	  .offset_words = 40, .offset_bits = 0, .size_bits = 2*64 },
};
#define CLASSPORTINFO_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_class_port_info, field),	\
	.struct_size_bytes   = sizeof((struct ib_class_port_info *)0)->field,	\
	.field_name          = "ib_class_port_info:" #field
static const struct ib_field classport_info_rec_table[] = {
	{ CLASSPORTINFO_REC_FIELD(base_version),
	  .offset_words = 0,  .offset_bits = 0,  .size_bits = 8 },
	{ CLASSPORTINFO_REC_FIELD(class_version),
	  .offset_words = 0,  .offset_bits = 8,  .size_bits = 8 },
	{ CLASSPORTINFO_REC_FIELD(capability_mask),
	  .offset_words = 0,  .offset_bits = 16, .size_bits = 16 },
	{ CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
	  .offset_words = 1,  .offset_bits = 0,  .size_bits = 32 },
	{ CLASSPORTINFO_REC_FIELD(redirect_gid),
	  .offset_words = 2,  .offset_bits = 0,  .size_bits = 128 },
	{ CLASSPORTINFO_REC_FIELD(redirect_tcslfl),
	  .offset_words = 6,  .offset_bits = 0,  .size_bits = 32 },
	{ CLASSPORTINFO_REC_FIELD(redirect_lid),
	  .offset_words = 7,  .offset_bits = 0,  .size_bits = 16 },
	{ CLASSPORTINFO_REC_FIELD(redirect_pkey),
	  .offset_words = 7,  .offset_bits = 16, .size_bits = 16 },

	{ CLASSPORTINFO_REC_FIELD(redirect_qp),
	  .offset_words = 8,  .offset_bits = 0,  .size_bits = 32 },
	{ CLASSPORTINFO_REC_FIELD(redirect_qkey),
	  .offset_words = 9,  .offset_bits = 0,  .size_bits = 32 },

	{ CLASSPORTINFO_REC_FIELD(trap_gid),
	  .offset_words = 10, .offset_bits = 0,  .size_bits = 128 },
	{ CLASSPORTINFO_REC_FIELD(trap_tcslfl),
	  .offset_words = 14, .offset_bits = 0,  .size_bits = 32 },

	{ CLASSPORTINFO_REC_FIELD(trap_lid),
	  .offset_words = 15, .offset_bits = 0,  .size_bits = 16 },
	{ CLASSPORTINFO_REC_FIELD(trap_pkey),
	  .offset_words = 15, .offset_bits = 16, .size_bits = 16 },

	{ CLASSPORTINFO_REC_FIELD(trap_hlqp),
	  .offset_words = 16, .offset_bits = 0,  .size_bits = 32 },
	{ CLASSPORTINFO_REC_FIELD(trap_qkey),
	  .offset_words = 17, .offset_bits = 0,  .size_bits = 32 },
};
#define GUIDINFO_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field),	\
	.struct_size_bytes   = sizeof((struct ib_sa_guidinfo_rec *) 0)->field,	\
	.field_name          = "sa_guidinfo_rec:" #field
static const struct ib_field guidinfo_rec_table[] = {
	{ GUIDINFO_REC_FIELD(lid),
	  .offset_words = 0, .offset_bits = 0,  .size_bits = 16 },
	{ GUIDINFO_REC_FIELD(block_num),
	  .offset_words = 0, .offset_bits = 16, .size_bits = 8 },
	{ GUIDINFO_REC_FIELD(res1),
	  .offset_words = 0, .offset_bits = 24, .size_bits = 8 },
	{ GUIDINFO_REC_FIELD(res2),
	  .offset_words = 1, .offset_bits = 0,  .size_bits = 32 },
	{ GUIDINFO_REC_FIELD(guid_info_list),
	  .offset_words = 2, .offset_bits = 0,  .size_bits = 512 },
};
static inline void ib_sa_disable_local_svc(struct ib_sa_query *query)
{
	query->flags &= ~IB_SA_ENABLE_LOCAL_SERVICE;
}

static inline int ib_sa_query_cancelled(struct ib_sa_query *query)
{
	return (query->flags & IB_SA_CANCEL);
}
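/*
 * Local service (netlink) path resolution, in outline: send_mad()
 * first offers a path query to a user-space listener on the
 * RDMA_NL_GROUP_LS group via ib_nl_make_request().  The request waits
 * on ib_nl_request_list until ib_nl_handle_resolve_resp() matches a
 * response by sequence number, or until ib_nl_request_timeout() (or a
 * cancellation) reaps it, at which point the query falls back to a
 * plain SA MAD via ib_post_send_mad().
 */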
static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
				     struct ib_sa_query *query)
{
	struct ib_sa_path_rec *sa_rec = query->mad_buf->context[1];
	struct ib_sa_mad *mad = query->mad_buf->mad;
	ib_sa_comp_mask comp_mask = mad->sa_hdr.comp_mask;
	u16 val16;
	u64 val64;
	struct rdma_ls_resolve_header *header;

	query->mad_buf->context[1] = NULL;

	/* Construct the family header first */
	header = (struct rdma_ls_resolve_header *)
		skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
	memcpy(header->device_name, query->port->agent->device->name,
	       LS_DEVICE_NAME_MAX);
	header->port_num = query->port->port_num;

	if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
	    sa_rec->reversible != 0)
		query->path_use = LS_RESOLVE_PATH_USE_GMP;
	else
		query->path_use = LS_RESOLVE_PATH_USE_UNIDIRECTIONAL;
	header->path_use = query->path_use;

	/* Now build the attributes */
	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) {
		val64 = be64_to_cpu(sa_rec->service_id);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID,
			sizeof(val64), &val64);
	}
	if (comp_mask & IB_SA_PATH_REC_DGID)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_DGID,
			sizeof(sa_rec->dgid), &sa_rec->dgid);
	if (comp_mask & IB_SA_PATH_REC_SGID)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SGID,
			sizeof(sa_rec->sgid), &sa_rec->sgid);
	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_TCLASS,
			sizeof(sa_rec->traffic_class), &sa_rec->traffic_class);

	if (comp_mask & IB_SA_PATH_REC_PKEY) {
		val16 = be16_to_cpu(sa_rec->pkey);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_PKEY,
			sizeof(val16), &val16);
	}
	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS) {
		val16 = be16_to_cpu(sa_rec->qos_class);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_QOS_CLASS,
			sizeof(val16), &val16);
	}
}
static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
{
	int len = 0;

	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID)
		len += nla_total_size(sizeof(u64));
	if (comp_mask & IB_SA_PATH_REC_DGID)
		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
	if (comp_mask & IB_SA_PATH_REC_SGID)
		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
		len += nla_total_size(sizeof(u8));
	if (comp_mask & IB_SA_PATH_REC_PKEY)
		len += nla_total_size(sizeof(u16));
	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS)
		len += nla_total_size(sizeof(u16));

	/*
	 * Make sure that at least some of the required comp_mask bits are
	 * set.
	 */
	if (WARN_ON(len == 0))
		return 0;

	/* Add the family header */
	len += NLMSG_ALIGN(sizeof(struct rdma_ls_resolve_header));

	return len;
}
static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
{
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	void *data;
	int ret = 0;
	struct ib_sa_mad *mad;
	int len;

	mad = query->mad_buf->mad;
	len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);
	if (len <= 0)
		return -EMSGSIZE;

	skb = nlmsg_new(len, gfp_mask);
	if (!skb)
		return -ENOMEM;

	/* Put nlmsg header only for now */
	data = ibnl_put_msg(skb, &nlh, query->seq, 0, RDMA_NL_LS,
			    RDMA_NL_LS_OP_RESOLVE, NLM_F_REQUEST);
	if (!data) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	/* Add attributes */
	ib_nl_set_path_rec_attrs(skb, query);

	/* Repair the nlmsg header length */
	nlmsg_end(skb, nlh);

	ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_LS, gfp_mask);
	if (!ret)
		ret = len;
	else
		ret = 0;

	return ret;
}
static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
{
	unsigned long flags;
	unsigned long delay;
	int ret;

	INIT_LIST_HEAD(&query->list);
	query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);

	/* Put the request on the list first.*/
	spin_lock_irqsave(&ib_nl_request_lock, flags);
	delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
	query->timeout = delay + jiffies;
	list_add_tail(&query->list, &ib_nl_request_list);
	/* Start the timeout if this is the only request */
	if (ib_nl_request_list.next == &query->list)
		queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

	ret = ib_nl_send_msg(query, gfp_mask);
	if (ret <= 0) {
		ret = -EIO;
		/* Remove the request */
		spin_lock_irqsave(&ib_nl_request_lock, flags);
		list_del(&query->list);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
	} else {
		ret = 0;
	}

	return ret;
}
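/*
 * Cancelling a netlink-tracked query does not complete it directly:
 * the query is flagged IB_SA_CANCEL, its timeout is pulled in to
 * "now", and the delayed work is kicked so that
 * ib_nl_request_timeout() delivers the completion.  The return value
 * tells ib_sa_cancel_query() whether the query was still on the
 * netlink list (1) or must instead be cancelled at the MAD layer (0).
 */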
static int ib_nl_cancel_request(struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_sa_query *wait_query;
	int found = 0;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	list_for_each_entry(wait_query, &ib_nl_request_list, list) {
		/* Let the timeout to take care of the callback */
		if (query == wait_query) {
			query->flags |= IB_SA_CANCEL;
			query->timeout = jiffies;
			list_move(&query->list, &ib_nl_request_list);
			found = 1;
			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work, 1);
			break;
		}
	}
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

	return found;
}
static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc);
static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query,
					   const struct nlmsghdr *nlh)
{
	struct ib_mad_send_wc mad_send_wc;
	struct ib_sa_mad *mad = NULL;
	const struct nlattr *head, *curr;
	struct ib_path_rec_data  *rec;
	int len, rem;
	u32 mask = 0;
	int status = -EIO;

	if (query->callback) {
		head = (const struct nlattr *) nlmsg_data(nlh);
		len = nlmsg_len(nlh);
		switch (query->path_use) {
		case LS_RESOLVE_PATH_USE_UNIDIRECTIONAL:
			mask = IB_PATH_PRIMARY | IB_PATH_OUTBOUND;
			break;

		case LS_RESOLVE_PATH_USE_ALL:
		case LS_RESOLVE_PATH_USE_GMP:
		default:
			mask = IB_PATH_PRIMARY | IB_PATH_GMP |
				IB_PATH_BIDIRECTIONAL;
			break;
		}
		nla_for_each_attr(curr, head, len, rem) {
			if (curr->nla_type == LS_NLA_TYPE_PATH_RECORD) {
				rec = nla_data(curr);
				/*
				 * Get the first one. In the future, we may
				 * need to get up to 6 pathrecords.
				 */
				if ((rec->flags & mask) == mask) {
					mad = query->mad_buf->mad;
					mad->mad_hdr.method |=
						IB_MGMT_METHOD_RESP;
					memcpy(mad->data, rec->path_rec,
					       sizeof(rec->path_rec));
					status = 0;
					break;
				}
			}
		}
		query->callback(query, status, mad);
	}

	mad_send_wc.send_buf = query->mad_buf;
	mad_send_wc.status = IB_WC_SUCCESS;
	send_handler(query->mad_buf->mad_agent, &mad_send_wc);
}
static void ib_nl_request_timeout(struct work_struct *work)
{
	unsigned long flags;
	struct ib_sa_query *query;
	unsigned long delay;
	struct ib_mad_send_wc mad_send_wc;
	int ret;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	while (!list_empty(&ib_nl_request_list)) {
		query = list_entry(ib_nl_request_list.next,
				   struct ib_sa_query, list);

		if (time_after(query->timeout, jiffies)) {
			delay = query->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
			break;
		}

		list_del(&query->list);
		ib_sa_disable_local_svc(query);
		/* Hold the lock to protect against query cancellation */
		if (ib_sa_query_cancelled(query))
			ret = -1;
		else
			ret = ib_post_send_mad(query->mad_buf, NULL);
		if (ret) {
			mad_send_wc.send_buf = query->mad_buf;
			mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
			spin_unlock_irqrestore(&ib_nl_request_lock, flags);
			send_handler(query->port->agent, &mad_send_wc);
			spin_lock_irqsave(&ib_nl_request_lock, flags);
		}
	}
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);
}
int ib_nl_handle_set_timeout(struct sk_buff *skb,
			     struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
	int timeout, delta, abs_delta;
	const struct nlattr *attr;
	unsigned long flags;
	struct ib_sa_query *query;
	long delay = 0;
	struct nlattr *tb[LS_NLA_TYPE_MAX];
	int ret;

	if (!(nlh->nlmsg_flags & NLM_F_REQUEST) ||
	    !(NETLINK_CB(skb).sk) ||
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
			nlmsg_len(nlh), ib_nl_policy);
	attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT];
	if (ret || !attr)
		goto settimeout_out;

	timeout = *(int *) nla_data(attr);
	if (timeout < IB_SA_LOCAL_SVC_TIMEOUT_MIN)
		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MIN;
	if (timeout > IB_SA_LOCAL_SVC_TIMEOUT_MAX)
		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MAX;

	delta = timeout - sa_local_svc_timeout_ms;
	if (delta < 0)
		abs_delta = -delta;
	else
		abs_delta = delta;

	if (delta != 0) {
		spin_lock_irqsave(&ib_nl_request_lock, flags);
		sa_local_svc_timeout_ms = timeout;
		list_for_each_entry(query, &ib_nl_request_list, list) {
			if (delta < 0 && abs_delta > query->timeout)
				query->timeout = 0;
			else
				query->timeout += delta;

			/* Get the new delay from the first entry */
			if (!delay) {
				delay = query->timeout - jiffies;
				if (delay <= 0)
					delay = 1;
			}
		}
		if (delay)
			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work,
					 (unsigned long)delay);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
	}

settimeout_out:
	return skb->len;
}
static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
{
	struct nlattr *tb[LS_NLA_TYPE_MAX];
	int ret;

	if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
		return 0;

	ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
			nlmsg_len(nlh), ib_nl_policy);
	if (ret)
		return 0;

	return 1;
}
int ib_nl_handle_resolve_resp(struct sk_buff *skb,
			      struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
	unsigned long flags;
	struct ib_sa_query *query;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_send_wc mad_send_wc;
	int found = 0;
	int ret;

	if ((nlh->nlmsg_flags & NLM_F_REQUEST) ||
	    !(NETLINK_CB(skb).sk) ||
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	list_for_each_entry(query, &ib_nl_request_list, list) {
		/*
		 * If the query is cancelled, let the timeout routine
		 * take care of it.
		 */
		if (nlh->nlmsg_seq == query->seq) {
			found = !ib_sa_query_cancelled(query);
			if (found)
				list_del(&query->list);
			break;
		}
	}

	if (!found) {
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		goto resp_out;
	}

	send_buf = query->mad_buf;

	if (!ib_nl_is_good_resolve_resp(nlh)) {
		/* if the result is a failure, send out the packet via IB */
		ib_sa_disable_local_svc(query);
		ret = ib_post_send_mad(query->mad_buf, NULL);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		if (ret) {
			mad_send_wc.send_buf = send_buf;
			mad_send_wc.status = IB_WC_GENERAL_ERR;
			send_handler(query->port->agent, &mad_send_wc);
		}
	} else {
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		ib_nl_process_good_resolve_rsp(query, nlh);
	}

resp_out:
	return skb->len;
}
static void free_sm_ah(struct kref *kref)
{
	struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);

	ib_destroy_ah(sm_ah->ah);
	kfree(sm_ah);
}
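/*
 * Each port caches a kref-counted address handle to the subnet
 * manager (port->sm_ah).  update_sm_ah() rebuilds it from the current
 * PortInfo whenever a port event invalidates it; queries take a
 * reference in alloc_mad(), so an AH stays alive for MADs already in
 * flight even after the cache moves on to a new one.
 */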
static void update_sm_ah(struct work_struct *work)
{
	struct ib_sa_port *port =
		container_of(work, struct ib_sa_port, update_task);
	struct ib_sa_sm_ah *new_ah;
	struct ib_port_attr port_attr;
	struct ib_ah_attr   ah_attr;

	if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
		pr_warn("Couldn't query port\n");
		return;
	}

	new_ah = kmalloc(sizeof *new_ah, GFP_KERNEL);
	if (!new_ah)
		return;

	kref_init(&new_ah->ref);
	new_ah->src_path_mask = (1 << port_attr.lmc) - 1;

	new_ah->pkey_index = 0;
	if (ib_find_pkey(port->agent->device, port->port_num,
			 IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
		pr_err("Couldn't find index for default PKey\n");

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid     = port_attr.sm_lid;
	ah_attr.sl       = port_attr.sm_sl;
	ah_attr.port_num = port->port_num;
	if (port_attr.grh_required) {
		ah_attr.ah_flags = IB_AH_GRH;
		ah_attr.grh.dgid.global.subnet_prefix = cpu_to_be64(port_attr.subnet_prefix);
		ah_attr.grh.dgid.global.interface_id = cpu_to_be64(IB_SA_WELL_KNOWN_GUID);
	}

	new_ah->ah = ib_create_ah(port->agent->qp->pd, &ah_attr);
	if (IS_ERR(new_ah->ah)) {
		pr_warn("Couldn't create new SM AH\n");
		kfree(new_ah);
		return;
	}

	spin_lock_irq(&port->ah_lock);
	if (port->sm_ah)
		kref_put(&port->sm_ah->ref, free_sm_ah);
	port->sm_ah = new_ah;
	spin_unlock_irq(&port->ah_lock);
}
static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event)
{
	if (event->event == IB_EVENT_PORT_ERR    ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE  ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE   ||
	    event->event == IB_EVENT_CLIENT_REREGISTER) {
		unsigned long flags;
		struct ib_sa_device *sa_dev =
			container_of(handler, typeof(*sa_dev), event_handler);
		struct ib_sa_port *port =
			&sa_dev->port[event->element.port_num - sa_dev->start_port];

		if (!rdma_cap_ib_sa(handler->device, port->port_num))
			return;

		spin_lock_irqsave(&port->ah_lock, flags);
		if (port->sm_ah)
			kref_put(&port->sm_ah->ref, free_sm_ah);
		port->sm_ah = NULL;
		spin_unlock_irqrestore(&port->ah_lock, flags);

		queue_work(ib_wq, &sa_dev->port[event->element.port_num -
						sa_dev->start_port].update_task);
	}
}
void ib_sa_register_client(struct ib_sa_client *client)
{
	atomic_set(&client->users, 1);
	init_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_register_client);

void ib_sa_unregister_client(struct ib_sa_client *client)
{
	ib_sa_client_put(client);
	wait_for_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_unregister_client);
/**
 * ib_sa_cancel_query - try to cancel an SA query
 * @id:ID of query to cancel
 * @query:query pointer to cancel
 *
 * Try to cancel an SA query.  If the id and query don't match up or
 * the query has already completed, nothing is done.  Otherwise the
 * query is canceled and will complete with a status of -EINTR.
 */
void ib_sa_cancel_query(int id, struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_mad_agent *agent;
	struct ib_mad_send_buf *mad_buf;

	spin_lock_irqsave(&idr_lock, flags);
	if (idr_find(&query_idr, id) != query) {
		spin_unlock_irqrestore(&idr_lock, flags);
		return;
	}
	agent = query->port->agent;
	mad_buf = query->mad_buf;
	spin_unlock_irqrestore(&idr_lock, flags);

	/*
	 * If the query is still on the netlink request list, schedule
	 * it to be cancelled by the timeout routine. Otherwise, it has been
	 * sent to the MAD layer and has to be cancelled from there.
	 */
	if (!ib_nl_cancel_request(query))
		ib_cancel_mad(agent, mad_buf);
}
EXPORT_SYMBOL(ib_sa_cancel_query);
static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
{
	struct ib_sa_device *sa_dev;
	struct ib_sa_port   *port;
	unsigned long flags;
	u8 src_path_mask;

	sa_dev = ib_get_client_data(device, &sa_client);
	if (!sa_dev)
		return 0x7f;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	spin_lock_irqsave(&port->ah_lock, flags);
	src_path_mask = port->sm_ah ? port->sm_ah->src_path_mask : 0x7f;
	spin_unlock_irqrestore(&port->ah_lock, flags);

	return src_path_mask;
}
int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
			 struct ib_sa_path_rec *rec, struct ib_ah_attr *ah_attr)
{
	int ret;
	u16 gid_index;
	int use_roce;
	struct net_device *ndev = NULL;

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->dlid = be16_to_cpu(rec->dlid);
	ah_attr->sl = rec->sl;
	ah_attr->src_path_bits = be16_to_cpu(rec->slid) &
				 get_src_path_mask(device, port_num);
	ah_attr->port_num = port_num;
	ah_attr->static_rate = rec->rate;

	use_roce = rdma_cap_eth_ah(device, port_num);

	if (use_roce) {
		struct net_device *idev;
		struct net_device *resolved_dev;
		struct rdma_dev_addr dev_addr = {.bound_dev_if = rec->ifindex,
						 .net = rec->net ? rec->net :
							&init_net};
		union {
			struct sockaddr     _sockaddr;
			struct sockaddr_in  _sockaddr_in;
			struct sockaddr_in6 _sockaddr_in6;
		} sgid_addr, dgid_addr;

		if (!device->get_netdev)
			return -EOPNOTSUPP;

		rdma_gid2ip(&sgid_addr._sockaddr, &rec->sgid);
		rdma_gid2ip(&dgid_addr._sockaddr, &rec->dgid);

		/* validate the route */
		ret = rdma_resolve_ip_route(&sgid_addr._sockaddr,
					    &dgid_addr._sockaddr, &dev_addr);
		if (ret)
			return ret;

		if ((dev_addr.network == RDMA_NETWORK_IPV4 ||
		     dev_addr.network == RDMA_NETWORK_IPV6) &&
		    rec->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
			return -EINVAL;

		idev = device->get_netdev(device, port_num);
		if (!idev)
			return -ENODEV;

		resolved_dev = dev_get_by_index(dev_addr.net,
						dev_addr.bound_dev_if);
		if (resolved_dev->flags & IFF_LOOPBACK) {
			dev_put(resolved_dev);
			resolved_dev = idev;
			dev_hold(resolved_dev);
		}
		ndev = ib_get_ndev_from_path(rec);
		rcu_read_lock();
		if ((ndev && ndev != resolved_dev) ||
		    (resolved_dev != idev &&
		     !rdma_is_upper_dev_rcu(idev, resolved_dev)))
			ret = -EHOSTUNREACH;
		rcu_read_unlock();
		dev_put(idev);
		dev_put(resolved_dev);
		if (ret) {
			if (ndev)
				dev_put(ndev);
			return ret;
		}
	}

	if (rec->hop_limit > 0 || use_roce) {
		ah_attr->ah_flags = IB_AH_GRH;
		ah_attr->grh.dgid = rec->dgid;

		ret = ib_find_cached_gid_by_port(device, &rec->sgid,
						 rec->gid_type, port_num, ndev,
						 &gid_index);
		if (ret) {
			if (ndev)
				dev_put(ndev);
			return ret;
		}

		ah_attr->grh.sgid_index    = gid_index;
		ah_attr->grh.flow_label    = be32_to_cpu(rec->flow_label);
		ah_attr->grh.hop_limit     = rec->hop_limit;
		ah_attr->grh.traffic_class = rec->traffic_class;
		if (ndev)
			dev_put(ndev);
	}

	if (use_roce)
		memcpy(ah_attr->dmac, rec->dmac, ETH_ALEN);

	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_path);
static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&query->port->ah_lock, flags);
	if (!query->port->sm_ah) {
		spin_unlock_irqrestore(&query->port->ah_lock, flags);
		return -EAGAIN;
	}
	kref_get(&query->port->sm_ah->ref);
	query->sm_ah = query->port->sm_ah;
	spin_unlock_irqrestore(&query->port->ah_lock, flags);

	query->mad_buf = ib_create_send_mad(query->port->agent, 1,
					    query->sm_ah->pkey_index,
					    0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
					    gfp_mask,
					    IB_MGMT_BASE_VERSION);
	if (IS_ERR(query->mad_buf)) {
		kref_put(&query->sm_ah->ref, free_sm_ah);
		return -ENOMEM;
	}

	query->mad_buf->ah = query->sm_ah->ah;

	return 0;
}
static void free_mad(struct ib_sa_query *query)
{
	ib_free_send_mad(query->mad_buf);
	kref_put(&query->sm_ah->ref, free_sm_ah);
}
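/*
 * Transaction IDs pair an agent-unique high word (agent->hi_tid) with
 * a locally incremented low word, so responses can be demultiplexed
 * back to the issuing agent.  The low word comes from the global
 * 'tid' counter, seeded randomly at module init and protected by
 * tid_lock.
 */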
static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent)
{
	unsigned long flags;

	memset(mad, 0, sizeof *mad);

	mad->mad_hdr.base_version  = IB_MGMT_BASE_VERSION;
	mad->mad_hdr.mgmt_class    = IB_MGMT_CLASS_SUBN_ADM;
	mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;

	spin_lock_irqsave(&tid_lock, flags);
	mad->mad_hdr.tid           =
		cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++);
	spin_unlock_irqrestore(&tid_lock, flags);
}
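/*
 * send_mad() makes the query visible: it allocates the caller-visible
 * query ID in query_idr, then tries the netlink local service first
 * (when IB_SA_ENABLE_LOCAL_SERVICE is set and a listener exists) and
 * only falls back to posting the MAD directly.  On failure the idr
 * slot is released so a dead ID is never returned.
 */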
static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
{
	bool preload = gfpflags_allow_blocking(gfp_mask);
	unsigned long flags;
	int ret, id;

	if (preload)
		idr_preload(gfp_mask);
	spin_lock_irqsave(&idr_lock, flags);

	id = idr_alloc(&query_idr, query, 0, 0, GFP_NOWAIT);

	spin_unlock_irqrestore(&idr_lock, flags);
	if (preload)
		idr_preload_end();
	if (id < 0)
		return id;

	query->mad_buf->timeout_ms  = timeout_ms;
	query->mad_buf->context[0] = query;
	query->id = id;

	if (query->flags & IB_SA_ENABLE_LOCAL_SERVICE) {
		if (!ibnl_chk_listeners(RDMA_NL_GROUP_LS)) {
			if (!ib_nl_make_request(query, gfp_mask))
				return id;
		}
		ib_sa_disable_local_svc(query);
	}

	ret = ib_post_send_mad(query->mad_buf, NULL);
	if (ret) {
		spin_lock_irqsave(&idr_lock, flags);
		idr_remove(&query_idr, id);
		spin_unlock_irqrestore(&idr_lock, flags);
	}

	/*
	 * It's not safe to dereference query any more, because the
	 * send may already have completed and freed the query in
	 * another context instead.
	 */
	return ret ? ret : id;
}
void ib_sa_unpack_path(void *attribute, struct ib_sa_path_rec *rec)
{
	ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec);
}
EXPORT_SYMBOL(ib_sa_unpack_path);

void ib_sa_pack_path(struct ib_sa_path_rec *rec, void *attribute)
{
	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, attribute);
}
EXPORT_SYMBOL(ib_sa_pack_path);
static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
				    int status,
				    struct ib_sa_mad *mad)
{
	struct ib_sa_path_query *query =
		container_of(sa_query, struct ib_sa_path_query, sa_query);

	if (mad) {
		struct ib_sa_path_rec rec;

		ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
			  mad->data, &rec);
		rec.net = NULL;
		rec.ifindex = 0;
		rec.gid_type = IB_GID_TYPE_IB;
		eth_zero_addr(rec.dmac);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_path_query, sa_query));
}
/**
 * ib_sa_path_rec_get - Start a Path get query
 * @client:SA client
 * @device:device to send query on
 * @port_num: port number to send query on
 * @rec:Path Record to send in query
 * @comp_mask:component mask to send in query
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when query completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @sa_query:query context, used to cancel query
 *
 * Send a Path Record Get query to the SA to look up a path.  The
 * callback function will be called when the query completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query.  The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_path_rec_get() is negative, it is an
 * error code.  Otherwise it is a query ID that can be used to cancel
 * the query.
 */
int ib_sa_path_rec_get(struct ib_sa_client *client,
		       struct ib_device *device, u8 port_num,
		       struct ib_sa_path_rec *rec,
		       ib_sa_comp_mask comp_mask,
		       int timeout_ms, gfp_t gfp_mask,
		       void (*callback)(int status,
					struct ib_sa_path_rec *resp,
					void *context),
		       void *context,
		       struct ib_sa_query **sa_query)
{
	struct ib_sa_path_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port   *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port     = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
	query->sa_query.release  = ib_sa_path_rec_release;
	mad->mad_hdr.method	 = IB_MGMT_METHOD_GET;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_PATH_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, mad->data);

	*sa_query = &query->sa_query;

	query->sa_query.flags |= IB_SA_ENABLE_LOCAL_SERVICE;
	query->sa_query.mad_buf->context[1] = rec;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_path_rec_get);
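/*
 * Example usage (an illustrative sketch only -- the client, handler,
 * and variable names here are hypothetical, not part of this file): a
 * consumer resolves a path between two GIDs roughly as follows.
 *
 *	static void my_path_handler(int status,
 *				    struct ib_sa_path_rec *resp,
 *				    void *context)
 *	{
 *		if (!status)
 *			pr_info("resolved path, SL %d\n", resp->sl);
 *	}
 *
 *	memset(&rec, 0, sizeof(rec));
 *	rec.sgid      = local_gid;
 *	rec.dgid      = remote_gid;
 *	rec.numb_path = 1;
 *	id = ib_sa_path_rec_get(&my_sa_client, device, port_num, &rec,
 *				IB_SA_PATH_REC_SGID | IB_SA_PATH_REC_DGID |
 *				IB_SA_PATH_REC_NUMB_PATH,
 *				1000, GFP_KERNEL,
 *				my_path_handler, my_ctx, &query);
 *
 * A negative id is an error; otherwise the caller may later invoke
 * ib_sa_cancel_query(id, query) while the query is still pending.
 */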
static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query,
				       int status,
				       struct ib_sa_mad *mad)
{
	struct ib_sa_service_query *query =
		container_of(sa_query, struct ib_sa_service_query, sa_query);

	if (mad) {
		struct ib_sa_service_rec rec;

		ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_service_query, sa_query));
}
/**
 * ib_sa_service_rec_query - Start Service Record operation
 * @client:SA client
 * @device:device to send request on
 * @port_num: port number to send request on
 * @method:SA method - should be get, set, or delete
 * @rec:Service Record to send in request
 * @comp_mask:component mask to send in request
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when request completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @sa_query:request context, used to cancel request
 *
 * Send a Service Record set/get/delete to the SA to register,
 * unregister or query a service record.
 * The callback function will be called when the request completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query.  The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_service_rec_query() is negative, it is an
 * error code.  Otherwise it is a request ID that can be used to cancel
 * the query.
 */
int ib_sa_service_rec_query(struct ib_sa_client *client,
			    struct ib_device *device, u8 port_num, u8 method,
			    struct ib_sa_service_rec *rec,
			    ib_sa_comp_mask comp_mask,
			    int timeout_ms, gfp_t gfp_mask,
			    void (*callback)(int status,
					     struct ib_sa_service_rec *resp,
					     void *context),
			    void *context,
			    struct ib_sa_query **sa_query)
{
	struct ib_sa_service_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port   *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	if (method != IB_MGMT_METHOD_GET &&
	    method != IB_MGMT_METHOD_SET &&
	    method != IB_SA_METHOD_DELETE)
		return -EINVAL;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port     = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
	query->sa_query.release  = ib_sa_service_rec_release;
	mad->mad_hdr.method	 = method;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
		rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_service_rec_query);
static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
					int status,
					struct ib_sa_mad *mad)
{
	struct ib_sa_mcmember_query *query =
		container_of(sa_query, struct ib_sa_mcmember_query, sa_query);

	if (mad) {
		struct ib_sa_mcmember_rec rec;

		ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
}
int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
			     struct ib_device *device, u8 port_num,
			     u8 method,
			     struct ib_sa_mcmember_rec *rec,
			     ib_sa_comp_mask comp_mask,
			     int timeout_ms, gfp_t gfp_mask,
			     void (*callback)(int status,
					      struct ib_sa_mcmember_rec *resp,
					      void *context),
			     void *context,
			     struct ib_sa_query **sa_query)
{
	struct ib_sa_mcmember_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port   *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port     = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
	query->sa_query.release  = ib_sa_mcmember_rec_release;
	mad->mad_hdr.method	 = method;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
		rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
/* Support GuidInfoRecord */
static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
					int status,
					struct ib_sa_mad *mad)
{
	struct ib_sa_guidinfo_query *query =
		container_of(sa_query, struct ib_sa_guidinfo_query, sa_query);

	if (mad) {
		struct ib_sa_guidinfo_rec rec;

		ib_unpack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_guidinfo_query, sa_query));
}
int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
			      struct ib_device *device, u8 port_num,
			      struct ib_sa_guidinfo_rec *rec,
			      ib_sa_comp_mask comp_mask, u8 method,
			      int timeout_ms, gfp_t gfp_mask,
			      void (*callback)(int status,
					       struct ib_sa_guidinfo_rec *resp,
					       void *context),
			      void *context,
			      struct ib_sa_query **sa_query)
{
	struct ib_sa_guidinfo_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	if (method != IB_MGMT_METHOD_GET &&
	    method != IB_MGMT_METHOD_SET &&
	    method != IB_SA_METHOD_DELETE) {
		return -EINVAL;
	}

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_guidinfo_rec_callback : NULL;
	query->sa_query.release  = ib_sa_guidinfo_rec_release;

	mad->mad_hdr.method	 = method;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_GUID_INFO_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), rec,
		mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_guid_info_rec_query);
/* Support get SA ClassPortInfo */
static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
					      int status,
					      struct ib_sa_mad *mad)
{
	struct ib_sa_classport_info_query *query =
		container_of(sa_query, struct ib_sa_classport_info_query, sa_query);

	if (mad) {
		struct ib_class_port_info rec;

		ib_unpack(classport_info_rec_table,
			  ARRAY_SIZE(classport_info_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_portclass_info_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_classport_info_query,
			   sa_query));
}
int ib_sa_classport_info_rec_query(struct ib_sa_client *client,
				   struct ib_device *device, u8 port_num,
				   int timeout_ms, gfp_t gfp_mask,
				   void (*callback)(int status,
						    struct ib_class_port_info *resp,
						    void *context),
				   void *context,
				   struct ib_sa_query **sa_query)
{
	struct ib_sa_classport_info_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_classport_info_rec_callback : NULL;

	query->sa_query.release  = ib_sa_portclass_info_rec_release;
	/* support GET only */
	mad->mad_hdr.method	 = IB_MGMT_METHOD_GET;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_CLASS_PORTINFO);
	mad->sa_hdr.comp_mask	 = 0;
	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_classport_info_rec_query);
static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
	unsigned long flags;

	if (query->callback)
		switch (mad_send_wc->status) {
		case IB_WC_SUCCESS:
			/* No callback -- already got recv */
			break;
		case IB_WC_RESP_TIMEOUT_ERR:
			query->callback(query, -ETIMEDOUT, NULL);
			break;
		case IB_WC_WR_FLUSH_ERR:
			query->callback(query, -EINTR, NULL);
			break;
		default:
			query->callback(query, -EIO, NULL);
			break;
		}

	spin_lock_irqsave(&idr_lock, flags);
	idr_remove(&query_idr, query->id);
	spin_unlock_irqrestore(&idr_lock, flags);

	free_mad(query);
	ib_sa_client_put(query->client);
	query->release(query);
}
*mad_agent
,
1840 struct ib_mad_send_buf
*send_buf
,
1841 struct ib_mad_recv_wc
*mad_recv_wc
)
1843 struct ib_sa_query
*query
;
1848 query
= send_buf
->context
[0];
1849 if (query
->callback
) {
1850 if (mad_recv_wc
->wc
->status
== IB_WC_SUCCESS
)
1851 query
->callback(query
,
1852 mad_recv_wc
->recv_buf
.mad
->mad_hdr
.status
?
1854 (struct ib_sa_mad
*) mad_recv_wc
->recv_buf
.mad
);
1856 query
->callback(query
, -EIO
, NULL
);
1859 ib_free_recv_mad(mad_recv_wc
);
static void ib_sa_add_one(struct ib_device *device)
{
	struct ib_sa_device *sa_dev;
	int s, e, i;
	int count = 0;

	s = rdma_start_port(device);
	e = rdma_end_port(device);

	sa_dev = kzalloc(sizeof *sa_dev +
			 (e - s + 1) * sizeof (struct ib_sa_port),
			 GFP_KERNEL);
	if (!sa_dev)
		return;

	sa_dev->start_port = s;
	sa_dev->end_port   = e;

	for (i = 0; i <= e - s; ++i) {
		spin_lock_init(&sa_dev->port[i].ah_lock);
		if (!rdma_cap_ib_sa(device, i + 1))
			continue;

		sa_dev->port[i].sm_ah    = NULL;
		sa_dev->port[i].port_num = i + s;

		sa_dev->port[i].agent =
			ib_register_mad_agent(device, i + s, IB_QPT_GSI,
					      NULL, 0, send_handler,
					      recv_handler, sa_dev, 0);
		if (IS_ERR(sa_dev->port[i].agent))
			goto err;

		INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);

		count++;
	}

	if (!count)
		goto free;

	ib_set_client_data(device, &sa_client, sa_dev);

	/*
	 * We register our event handler after everything is set up,
	 * and then update our cached info after the event handler is
	 * registered to avoid any problems if a port changes state
	 * during our initialization.
	 */

	INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
	if (ib_register_event_handler(&sa_dev->event_handler))
		goto err;

	for (i = 0; i <= e - s; ++i) {
		if (rdma_cap_ib_sa(device, i + 1))
			update_sm_ah(&sa_dev->port[i].update_task);
	}

	return;

err:
	while (--i >= 0) {
		if (rdma_cap_ib_sa(device, i + 1))
			ib_unregister_mad_agent(sa_dev->port[i].agent);
	}
free:
	kfree(sa_dev);
	return;
}
static void ib_sa_remove_one(struct ib_device *device, void *client_data)
{
	struct ib_sa_device *sa_dev = client_data;
	int i;

	if (!sa_dev)
		return;

	ib_unregister_event_handler(&sa_dev->event_handler);

	flush_workqueue(ib_wq);

	for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
		if (rdma_cap_ib_sa(device, i + 1)) {
			ib_unregister_mad_agent(sa_dev->port[i].agent);
			if (sa_dev->port[i].sm_ah)
				kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
		}
	}

	kfree(sa_dev);
}
int ib_sa_init(void)
{
	int ret;

	get_random_bytes(&tid, sizeof tid);

	atomic_set(&ib_nl_sa_request_seq, 0);

	ret = ib_register_client(&sa_client);
	if (ret) {
		pr_err("Couldn't register ib_sa client\n");
		goto err1;
	}

	ret = mcast_init();
	if (ret) {
		pr_err("Couldn't initialize multicast handling\n");
		goto err2;
	}

	ib_nl_wq = create_singlethread_workqueue("ib_nl_sa_wq");
	if (!ib_nl_wq) {
		ret = -ENOMEM;
		goto err3;
	}

	INIT_DELAYED_WORK(&ib_nl_timed_work, ib_nl_request_timeout);

	return 0;

err3:
	mcast_cleanup();
err2:
	ib_unregister_client(&sa_client);
err1:
	return ret;
}
void ib_sa_cleanup(void)
{
	cancel_delayed_work(&ib_nl_timed_work);
	flush_workqueue(ib_nl_wq);
	destroy_workqueue(ib_nl_wq);
	mcast_cleanup();
	ib_unregister_client(&sa_client);
	idr_destroy(&query_idr);
}