/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
 * Copyright (c) 2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/idr.h>
#include <linux/workqueue.h>
#include <uapi/linux/if_ether.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_cache.h>
#include <rdma/rdma_netlink.h>
#include <net/netlink.h>
#include <uapi/rdma/ib_user_sa.h>
#include <rdma/ib_marshall.h>
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand subnet administration query support");
MODULE_LICENSE("Dual BSD/GPL");
#define IB_SA_LOCAL_SVC_TIMEOUT_MIN		100
#define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT		2000
#define IB_SA_LOCAL_SVC_TIMEOUT_MAX		200000
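/*
 * Current local service timeout in milliseconds; writable at runtime via
 * the RDMA_NL_LS_OP_SET_TIMEOUT netlink operation, which clamps it to the
 * MIN/MAX bounds above (see ib_nl_handle_set_timeout()).
 */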
static int sa_local_svc_timeout_ms = IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT;
struct ib_sa_sm_ah {
	struct ib_ah        *ah;
	struct kref          ref;
	u16		     pkey_index;
	u8		     src_path_mask;
};

struct ib_sa_port {
	struct ib_mad_agent *agent;
	struct ib_sa_sm_ah  *sm_ah;
	struct work_struct   update_task;
	spinlock_t           ah_lock;
	u8                   port_num;
};

struct ib_sa_device {
	int                     start_port, end_port;
	struct ib_event_handler event_handler;
	struct ib_sa_port       port[0];
};
struct ib_sa_query {
	void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
	void (*release)(struct ib_sa_query *);
	struct ib_sa_client    *client;
	struct ib_sa_port      *port;
	struct ib_mad_send_buf *mad_buf;
	struct ib_sa_sm_ah     *sm_ah;
	int			id;
	u32			flags;
	struct list_head	list; /* Local svc request list */
	u32			seq; /* Local svc request sequence number */
	unsigned long		timeout; /* Local svc timeout */
	u8			path_use; /* How will the pathrecord be used */
};
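/*
 * ib_sa_query.flags values: IB_SA_ENABLE_LOCAL_SERVICE routes the query
 * through the netlink local service before falling back to a MAD, and
 * IB_SA_CANCEL marks a queued netlink request so that the timeout routine
 * completes it with -EINTR.
 */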
#define IB_SA_ENABLE_LOCAL_SERVICE	0x00000001
#define IB_SA_CANCEL			0x00000002
struct ib_sa_service_query {
	void (*callback)(int, struct ib_sa_service_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_path_query {
	void (*callback)(int, struct ib_sa_path_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_guidinfo_query {
	void (*callback)(int, struct ib_sa_guidinfo_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_mcmember_query {
	void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};
static LIST_HEAD(ib_nl_request_list);
static DEFINE_SPINLOCK(ib_nl_request_lock);
static atomic_t ib_nl_sa_request_seq;
static struct workqueue_struct *ib_nl_wq;
static struct delayed_work ib_nl_timed_work;
static const struct nla_policy ib_nl_policy[LS_NLA_TYPE_MAX] = {
	[LS_NLA_TYPE_PATH_RECORD]	= {.type = NLA_BINARY,
					   .len = sizeof(struct ib_path_rec_data)},
	[LS_NLA_TYPE_TIMEOUT]		= {.type = NLA_U32},
	[LS_NLA_TYPE_SERVICE_ID]	= {.type = NLA_U64},
	[LS_NLA_TYPE_DGID]		= {.type = NLA_BINARY,
					   .len = sizeof(struct rdma_nla_ls_gid)},
	[LS_NLA_TYPE_SGID]		= {.type = NLA_BINARY,
					   .len = sizeof(struct rdma_nla_ls_gid)},
	[LS_NLA_TYPE_TCLASS]		= {.type = NLA_U8},
	[LS_NLA_TYPE_PKEY]		= {.type = NLA_U16},
	[LS_NLA_TYPE_QOS_CLASS]		= {.type = NLA_U16},
};
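/*
 * The policy above is handed to nla_parse() so that netlink messages from
 * the local service are validated before any attribute payload is used.
 */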
static void ib_sa_add_one(struct ib_device *device);
static void ib_sa_remove_one(struct ib_device *device, void *client_data);

static struct ib_client sa_client = {
	.name   = "sa",
	.add    = ib_sa_add_one,
	.remove = ib_sa_remove_one
};

static DEFINE_SPINLOCK(idr_lock);
static DEFINE_IDR(query_idr);

static DEFINE_SPINLOCK(tid_lock);
static u32 tid;
#define PATH_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_path_rec, field),		\
	.struct_size_bytes   = sizeof ((struct ib_sa_path_rec *) 0)->field,	\
	.field_name          = "sa_path_rec:" #field

/*
 * The per-entry .offset_words/.offset_bits/.size_bits wire-layout
 * initializers (and the RESERVED filler entries) are elided from this
 * listing.
 */
static const struct ib_field path_rec_table[] = {
	{ PATH_REC_FIELD(service_id), /* ... */ },
	{ PATH_REC_FIELD(dgid), /* ... */ },
	{ PATH_REC_FIELD(sgid), /* ... */ },
	{ PATH_REC_FIELD(dlid), /* ... */ },
	{ PATH_REC_FIELD(slid), /* ... */ },
	{ PATH_REC_FIELD(raw_traffic), /* ... */ },
	{ PATH_REC_FIELD(flow_label), /* ... */ },
	{ PATH_REC_FIELD(hop_limit), /* ... */ },
	{ PATH_REC_FIELD(traffic_class), /* ... */ },
	{ PATH_REC_FIELD(reversible), /* ... */ },
	{ PATH_REC_FIELD(numb_path), /* ... */ },
	{ PATH_REC_FIELD(pkey), /* ... */ },
	{ PATH_REC_FIELD(qos_class), /* ... */ },
	{ PATH_REC_FIELD(sl), /* ... */ },
	{ PATH_REC_FIELD(mtu_selector), /* ... */ },
	{ PATH_REC_FIELD(mtu), /* ... */ },
	{ PATH_REC_FIELD(rate_selector), /* ... */ },
	{ PATH_REC_FIELD(rate), /* ... */ },
	{ PATH_REC_FIELD(packet_life_time_selector), /* ... */ },
	{ PATH_REC_FIELD(packet_life_time), /* ... */ },
	{ PATH_REC_FIELD(preference), /* ... */ },
};
#define MCMEMBER_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field),	\
	.struct_size_bytes   = sizeof ((struct ib_sa_mcmember_rec *) 0)->field,	\
	.field_name          = "sa_mcmember_rec:" #field

/* Per-entry wire-layout initializers elided, as in path_rec_table above. */
static const struct ib_field mcmember_rec_table[] = {
	{ MCMEMBER_REC_FIELD(mgid), /* ... */ },
	{ MCMEMBER_REC_FIELD(port_gid), /* ... */ },
	{ MCMEMBER_REC_FIELD(qkey), /* ... */ },
	{ MCMEMBER_REC_FIELD(mlid), /* ... */ },
	{ MCMEMBER_REC_FIELD(mtu_selector), /* ... */ },
	{ MCMEMBER_REC_FIELD(mtu), /* ... */ },
	{ MCMEMBER_REC_FIELD(traffic_class), /* ... */ },
	{ MCMEMBER_REC_FIELD(pkey), /* ... */ },
	{ MCMEMBER_REC_FIELD(rate_selector), /* ... */ },
	{ MCMEMBER_REC_FIELD(rate), /* ... */ },
	{ MCMEMBER_REC_FIELD(packet_life_time_selector), /* ... */ },
	{ MCMEMBER_REC_FIELD(packet_life_time), /* ... */ },
	{ MCMEMBER_REC_FIELD(sl), /* ... */ },
	{ MCMEMBER_REC_FIELD(flow_label), /* ... */ },
	{ MCMEMBER_REC_FIELD(hop_limit), /* ... */ },
	{ MCMEMBER_REC_FIELD(scope), /* ... */ },
	{ MCMEMBER_REC_FIELD(join_state), /* ... */ },
	{ MCMEMBER_REC_FIELD(proxy_join), /* ... */ },
};
#define SERVICE_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_service_rec, field),	\
	.struct_size_bytes   = sizeof ((struct ib_sa_service_rec *) 0)->field,	\
	.field_name          = "sa_service_rec:" #field

/* Per-entry wire-layout initializers elided, as in path_rec_table above. */
static const struct ib_field service_rec_table[] = {
	{ SERVICE_REC_FIELD(id), /* ... */ },
	{ SERVICE_REC_FIELD(gid), /* ... */ },
	{ SERVICE_REC_FIELD(pkey), /* ... */ },
	{ SERVICE_REC_FIELD(lease), /* ... */ },
	{ SERVICE_REC_FIELD(key), /* ... */ },
	{ SERVICE_REC_FIELD(name), /* ... */ },
	{ SERVICE_REC_FIELD(data8), /* ... */ },
	{ SERVICE_REC_FIELD(data16), /* ... */ },
	{ SERVICE_REC_FIELD(data32), /* ... */ },
	{ SERVICE_REC_FIELD(data64), /* ... */ },
};
#define GUIDINFO_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field),	\
	.struct_size_bytes   = sizeof((struct ib_sa_guidinfo_rec *) 0)->field,	\
	.field_name          = "sa_guidinfo_rec:" #field

/* Per-entry wire-layout initializers elided, as in path_rec_table above. */
static const struct ib_field guidinfo_rec_table[] = {
	{ GUIDINFO_REC_FIELD(lid), /* ... */ },
	{ GUIDINFO_REC_FIELD(block_num), /* ... */ },
	{ GUIDINFO_REC_FIELD(res1), /* ... */ },
	{ GUIDINFO_REC_FIELD(res2), /* ... */ },
	{ GUIDINFO_REC_FIELD(guid_info_list), /* ... */ },
};
static inline void ib_sa_disable_local_svc(struct ib_sa_query *query)
{
	query->flags &= ~IB_SA_ENABLE_LOCAL_SERVICE;
}

static inline int ib_sa_query_cancelled(struct ib_sa_query *query)
{
	return (query->flags & IB_SA_CANCEL);
}
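/*
 * Build the RDMA_NL_LS_OP_RESOLVE payload: a family header naming the
 * device, port and intended path use, followed by one netlink attribute
 * per component-mask bit set in the path record being resolved.
 */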
static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
				     struct ib_sa_query *query)
{
	struct ib_sa_path_rec *sa_rec = query->mad_buf->context[1];
	struct ib_sa_mad *mad = query->mad_buf->mad;
	ib_sa_comp_mask comp_mask = mad->sa_hdr.comp_mask;
	u16 val16;
	u64 val64;
	struct rdma_ls_resolve_header *header;

	query->mad_buf->context[1] = NULL;

	/* Construct the family header first */
	header = (struct rdma_ls_resolve_header *)
		skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
	memcpy(header->device_name, query->port->agent->device->name,
	       LS_DEVICE_NAME_MAX);
	header->port_num = query->port->port_num;

	if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
	    sa_rec->reversible != 0)
		query->path_use = LS_RESOLVE_PATH_USE_GMP;
	else
		query->path_use = LS_RESOLVE_PATH_USE_UNIDIRECTIONAL;
	header->path_use = query->path_use;

	/* Now build the attributes */
	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) {
		val64 = be64_to_cpu(sa_rec->service_id);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID,
			sizeof(val64), &val64);
	}
	if (comp_mask & IB_SA_PATH_REC_DGID)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_DGID,
			sizeof(sa_rec->dgid), &sa_rec->dgid);
	if (comp_mask & IB_SA_PATH_REC_SGID)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SGID,
			sizeof(sa_rec->sgid), &sa_rec->sgid);
	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_TCLASS,
			sizeof(sa_rec->traffic_class), &sa_rec->traffic_class);
	if (comp_mask & IB_SA_PATH_REC_PKEY) {
		val16 = be16_to_cpu(sa_rec->pkey);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_PKEY,
			sizeof(val16), &val16);
	}
	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS) {
		val16 = be16_to_cpu(sa_rec->qos_class);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_QOS_CLASS,
			sizeof(val16), &val16);
	}
}
static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
{
	int len = 0;

	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID)
		len += nla_total_size(sizeof(u64));
	if (comp_mask & IB_SA_PATH_REC_DGID)
		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
	if (comp_mask & IB_SA_PATH_REC_SGID)
		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
		len += nla_total_size(sizeof(u8));
	if (comp_mask & IB_SA_PATH_REC_PKEY)
		len += nla_total_size(sizeof(u16));
	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS)
		len += nla_total_size(sizeof(u16));

	/*
	 * Make sure that at least some of the required comp_mask bits are
	 * set.
	 */
	if (WARN_ON(len == 0))
		return -EINVAL;

	/* Add the family header */
	len += NLMSG_ALIGN(sizeof(struct rdma_ls_resolve_header));

	return len;
}
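/*
 * Returns the positive message length on success and 0 or a negative errno
 * on failure; the caller treats anything <= 0 as "fall back to a MAD".
 */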
static int ib_nl_send_msg(struct ib_sa_query *query)
{
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	void *data;
	int ret = 0;
	struct ib_sa_mad *mad;
	int len;

	mad = query->mad_buf->mad;
	len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);
	if (len <= 0)
		return -EMSGSIZE;

	skb = nlmsg_new(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* Put nlmsg header only for now */
	data = ibnl_put_msg(skb, &nlh, query->seq, 0, RDMA_NL_LS,
			    RDMA_NL_LS_OP_RESOLVE, NLM_F_REQUEST);
	if (!data) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/* Add attributes */
	ib_nl_set_path_rec_attrs(skb, query);

	/* Repair the nlmsg header length */
	nlmsg_end(skb, nlh);

	ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_LS, GFP_KERNEL);
	if (!ret)
		ret = len;
	else
		ret = 0;

	return ret;
}
static int ib_nl_make_request(struct ib_sa_query *query)
{
	unsigned long flags;
	unsigned long delay;
	int ret;

	INIT_LIST_HEAD(&query->list);
	query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	ret = ib_nl_send_msg(query);
	if (ret <= 0) {
		ret = -EIO;
		goto request_out;
	} else {
		ret = 0;
	}

	delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
	query->timeout = delay + jiffies;
	list_add_tail(&query->list, &ib_nl_request_list);
	/* Start the timeout if this is the only request */
	if (ib_nl_request_list.next == &query->list)
		queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);

request_out:
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

	return ret;
}
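/*
 * Returns nonzero if the query was still on the netlink request list and
 * has now been flagged for cancellation (the timeout routine will complete
 * it); returns 0 if it already left the list and must be cancelled at the
 * MAD layer instead.
 */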
static int ib_nl_cancel_request(struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_sa_query *wait_query;
	int found = 0;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	list_for_each_entry(wait_query, &ib_nl_request_list, list) {
		/* Let the timeout take care of the callback */
		if (query == wait_query) {
			query->flags |= IB_SA_CANCEL;
			query->timeout = jiffies;
			list_move(&query->list, &ib_nl_request_list);
			found = 1;
			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work, 1);
			break;
		}
	}
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

	return found;
}
static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc);
static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query,
					   const struct nlmsghdr *nlh)
{
	struct ib_mad_send_wc mad_send_wc;
	struct ib_sa_mad *mad = NULL;
	const struct nlattr *head, *curr;
	struct ib_path_rec_data *rec;
	int len, rem;
	u32 mask = 0;
	int status = -EIO;

	if (query->callback) {
		head = (const struct nlattr *) nlmsg_data(nlh);
		len = nlmsg_len(nlh);
		switch (query->path_use) {
		case LS_RESOLVE_PATH_USE_UNIDIRECTIONAL:
			mask = IB_PATH_PRIMARY | IB_PATH_OUTBOUND;
			break;

		case LS_RESOLVE_PATH_USE_ALL:
		case LS_RESOLVE_PATH_USE_GMP:
		default:
			mask = IB_PATH_PRIMARY | IB_PATH_GMP |
				IB_PATH_BIDIRECTIONAL;
			break;
		}
		nla_for_each_attr(curr, head, len, rem) {
			if (curr->nla_type == LS_NLA_TYPE_PATH_RECORD) {
				rec = nla_data(curr);
				/*
				 * Get the first one. In the future, we may
				 * need to get up to 6 pathrecords.
				 */
				if ((rec->flags & mask) == mask) {
					mad = query->mad_buf->mad;
					mad->mad_hdr.method |=
						IB_MGMT_METHOD_RESP;
					memcpy(mad->data, rec->path_rec,
					       sizeof(rec->path_rec));
					status = 0;
					break;
				}
			}
		}
		query->callback(query, status, mad);
	}

	mad_send_wc.send_buf = query->mad_buf;
	mad_send_wc.status = IB_WC_SUCCESS;
	send_handler(query->mad_buf->mad_agent, &mad_send_wc);
}
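/*
 * Delayed-work handler that expires queued netlink requests in order:
 * expired queries fall back to the MAD path unless they were cancelled,
 * in which case they complete through send_handler() with a flush status.
 */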
static void ib_nl_request_timeout(struct work_struct *work)
{
	unsigned long flags;
	struct ib_sa_query *query;
	unsigned long delay;
	struct ib_mad_send_wc mad_send_wc;
	int ret;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	while (!list_empty(&ib_nl_request_list)) {
		query = list_entry(ib_nl_request_list.next,
				   struct ib_sa_query, list);

		if (time_after(query->timeout, jiffies)) {
			delay = query->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
			break;
		}

		list_del(&query->list);
		ib_sa_disable_local_svc(query);
		/* Hold the lock to protect against query cancellation */
		if (ib_sa_query_cancelled(query))
			ret = -1;
		else
			ret = ib_post_send_mad(query->mad_buf, NULL);
		if (ret) {
			mad_send_wc.send_buf = query->mad_buf;
			mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
			spin_unlock_irqrestore(&ib_nl_request_lock, flags);
			send_handler(query->port->agent, &mad_send_wc);
			spin_lock_irqsave(&ib_nl_request_lock, flags);
		}
	}
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);
}
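/*
 * Netlink handler for RDMA_NL_LS_OP_SET_TIMEOUT: clamps the requested
 * timeout to the IB_SA_LOCAL_SVC_TIMEOUT_{MIN,MAX} bounds and shifts the
 * deadline of every request already queued by the resulting delta.
 */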
static int ib_nl_handle_set_timeout(struct sk_buff *skb,
				    struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
	int timeout, delta, abs_delta;
	const struct nlattr *attr;
	unsigned long flags;
	struct ib_sa_query *query;
	long delay = 0;
	struct nlattr *tb[LS_NLA_TYPE_MAX];
	int ret;

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
			nlmsg_len(nlh), ib_nl_policy);
	attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT];
	if (ret || !attr)
		goto settimeout_out;

	timeout = *(int *) nla_data(attr);
	if (timeout < IB_SA_LOCAL_SVC_TIMEOUT_MIN)
		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MIN;
	if (timeout > IB_SA_LOCAL_SVC_TIMEOUT_MAX)
		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MAX;

	delta = timeout - sa_local_svc_timeout_ms;
	if (delta < 0)
		abs_delta = -delta;
	else
		abs_delta = delta;

	if (delta != 0) {
		spin_lock_irqsave(&ib_nl_request_lock, flags);
		sa_local_svc_timeout_ms = timeout;
		list_for_each_entry(query, &ib_nl_request_list, list) {
			if (delta < 0 && abs_delta > query->timeout)
				query->timeout = 0;
			else
				query->timeout += delta;

			/* Get the new delay from the first entry */
			if (!delay) {
				delay = query->timeout - jiffies;
				if (delay <= 0)
					delay = 1;
			}
		}
		if (delay)
			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work,
					 (unsigned long)delay);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
	}

settimeout_out:
	return skb->len;
}
static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
{
	struct nlattr *tb[LS_NLA_TYPE_MAX];
	int ret;

	if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
		return 0;

	ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
			nlmsg_len(nlh), ib_nl_policy);
	if (ret)
		return 0;

	return 1;
}
static int ib_nl_handle_resolve_resp(struct sk_buff *skb,
				     struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
	unsigned long flags;
	struct ib_sa_query *query;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_send_wc mad_send_wc;
	int found = 0;
	int ret;

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	list_for_each_entry(query, &ib_nl_request_list, list) {
		/*
		 * If the query is cancelled, let the timeout routine
		 * take care of it.
		 */
		if (nlh->nlmsg_seq == query->seq) {
			found = !ib_sa_query_cancelled(query);
			if (found)
				list_del(&query->list);
			break;
		}
	}

	if (!found) {
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		goto resp_out;
	}

	send_buf = query->mad_buf;

	if (!ib_nl_is_good_resolve_resp(nlh)) {
		/* if the result is a failure, send out the packet via IB */
		ib_sa_disable_local_svc(query);
		ret = ib_post_send_mad(query->mad_buf, NULL);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		if (ret) {
			mad_send_wc.send_buf = send_buf;
			mad_send_wc.status = IB_WC_GENERAL_ERR;
			send_handler(query->port->agent, &mad_send_wc);
		}
	} else {
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		ib_nl_process_good_resolve_rsp(query, nlh);
	}

resp_out:
	return skb->len;
}
static struct ibnl_client_cbs ib_sa_cb_table[] = {
	[RDMA_NL_LS_OP_RESOLVE] = {
		.dump = ib_nl_handle_resolve_resp,
		.module = THIS_MODULE },
	[RDMA_NL_LS_OP_SET_TIMEOUT] = {
		.dump = ib_nl_handle_set_timeout,
		.module = THIS_MODULE },
};
static void free_sm_ah(struct kref *kref)
{
	struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);

	ib_destroy_ah(sm_ah->ah);
	kfree(sm_ah);
}
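/*
 * Worker that refreshes the cached subnet manager address handle after a
 * port event: query the port, build a new AH toward the SM, and swap it in
 * under ah_lock, dropping the reference on the old one.
 */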
static void update_sm_ah(struct work_struct *work)
{
	struct ib_sa_port *port =
		container_of(work, struct ib_sa_port, update_task);
	struct ib_sa_sm_ah *new_ah;
	struct ib_port_attr port_attr;
	struct ib_ah_attr   ah_attr;

	if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
		printk(KERN_WARNING "Couldn't query port\n");
		return;
	}

	new_ah = kmalloc(sizeof *new_ah, GFP_KERNEL);
	if (!new_ah) {
		printk(KERN_WARNING "Couldn't allocate new SM AH\n");
		return;
	}

	kref_init(&new_ah->ref);
	new_ah->src_path_mask = (1 << port_attr.lmc) - 1;

	new_ah->pkey_index = 0;
	if (ib_find_pkey(port->agent->device, port->port_num,
			 IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
		printk(KERN_ERR "Couldn't find index for default PKey\n");

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid     = port_attr.sm_lid;
	ah_attr.sl       = port_attr.sm_sl;
	ah_attr.port_num = port->port_num;

	new_ah->ah = ib_create_ah(port->agent->qp->pd, &ah_attr);
	if (IS_ERR(new_ah->ah)) {
		printk(KERN_WARNING "Couldn't create new SM AH\n");
		kfree(new_ah);
		return;
	}

	spin_lock_irq(&port->ah_lock);
	if (port->sm_ah)
		kref_put(&port->sm_ah->ref, free_sm_ah);
	port->sm_ah = new_ah;
	spin_unlock_irq(&port->ah_lock);
}
static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event)
{
	if (event->event == IB_EVENT_PORT_ERR    ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE  ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE   ||
	    event->event == IB_EVENT_CLIENT_REREGISTER) {
		unsigned long flags;
		struct ib_sa_device *sa_dev =
			container_of(handler, typeof(*sa_dev), event_handler);
		struct ib_sa_port *port =
			&sa_dev->port[event->element.port_num - sa_dev->start_port];

		if (!rdma_cap_ib_sa(handler->device, port->port_num))
			return;

		spin_lock_irqsave(&port->ah_lock, flags);
		if (port->sm_ah)
			kref_put(&port->sm_ah->ref, free_sm_ah);
		port->sm_ah = NULL;
		spin_unlock_irqrestore(&port->ah_lock, flags);

		queue_work(ib_wq, &sa_dev->port[event->element.port_num -
						sa_dev->start_port].update_task);
	}
}
void ib_sa_register_client(struct ib_sa_client *client)
{
	atomic_set(&client->users, 1);
	init_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_register_client);
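/*
 * SA clients are reference counted: every outstanding query holds a
 * reference on its client, and ib_sa_unregister_client() below blocks on
 * the completion until the last query drops it.
 */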
void ib_sa_unregister_client(struct ib_sa_client *client)
{
	ib_sa_client_put(client);
	wait_for_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_unregister_client);
/**
 * ib_sa_cancel_query - try to cancel an SA query
 * @id:ID of query to cancel
 * @query:query pointer to cancel
 *
 * Try to cancel an SA query.  If the id and query don't match up or
 * the query has already completed, nothing is done.  Otherwise the
 * query is canceled and will complete with a status of -EINTR.
 */
void ib_sa_cancel_query(int id, struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_mad_agent *agent;
	struct ib_mad_send_buf *mad_buf;

	spin_lock_irqsave(&idr_lock, flags);
	if (idr_find(&query_idr, id) != query) {
		spin_unlock_irqrestore(&idr_lock, flags);
		return;
	}
	agent = query->port->agent;
	mad_buf = query->mad_buf;
	spin_unlock_irqrestore(&idr_lock, flags);

	/*
	 * If the query is still on the netlink request list, schedule
	 * it to be cancelled by the timeout routine. Otherwise, it has been
	 * sent to the MAD layer and has to be cancelled from there.
	 */
	if (!ib_nl_cancel_request(query))
		ib_cancel_mad(agent, mad_buf);
}
EXPORT_SYMBOL(ib_sa_cancel_query);
static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
{
	struct ib_sa_device *sa_dev;
	struct ib_sa_port   *port;
	unsigned long flags;
	u8 src_path_mask;

	sa_dev = ib_get_client_data(device, &sa_client);
	if (!sa_dev)
		return 0x7f;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	spin_lock_irqsave(&port->ah_lock, flags);
	src_path_mask = port->sm_ah ? port->sm_ah->src_path_mask : 0x7f;
	spin_unlock_irqrestore(&port->ah_lock, flags);

	return src_path_mask;
}
int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
			 struct ib_sa_path_rec *rec, struct ib_ah_attr *ah_attr)
{
	int ret;
	u16 gid_index;
	int force_grh;

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->dlid = be16_to_cpu(rec->dlid);
	ah_attr->sl = rec->sl;
	ah_attr->src_path_bits = be16_to_cpu(rec->slid) &
				 get_src_path_mask(device, port_num);
	ah_attr->port_num = port_num;
	ah_attr->static_rate = rec->rate;

	force_grh = rdma_cap_eth_ah(device, port_num);

	if (rec->hop_limit > 1 || force_grh) {
		ah_attr->ah_flags = IB_AH_GRH;
		ah_attr->grh.dgid = rec->dgid;

		ret = ib_find_cached_gid(device, &rec->sgid, &port_num,
					 &gid_index);
		if (ret)
			return ret;

		ah_attr->grh.sgid_index    = gid_index;
		ah_attr->grh.flow_label    = be32_to_cpu(rec->flow_label);
		ah_attr->grh.hop_limit     = rec->hop_limit;
		ah_attr->grh.traffic_class = rec->traffic_class;
	}
	if (force_grh) {
		memcpy(ah_attr->dmac, rec->dmac, ETH_ALEN);
		ah_attr->vlan_id = rec->vlan_id;
	} else {
		ah_attr->vlan_id = 0xffff;
	}

	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_path);
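/*
 * Grab a reference on the port's current SM address handle and allocate
 * the query's send MAD against it; fails with -EAGAIN when no SM AH has
 * been cached yet.
 */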
static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&query->port->ah_lock, flags);
	if (!query->port->sm_ah) {
		spin_unlock_irqrestore(&query->port->ah_lock, flags);
		return -EAGAIN;
	}
	kref_get(&query->port->sm_ah->ref);
	query->sm_ah = query->port->sm_ah;
	spin_unlock_irqrestore(&query->port->ah_lock, flags);

	query->mad_buf = ib_create_send_mad(query->port->agent, 1,
					    query->sm_ah->pkey_index,
					    0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
					    gfp_mask,
					    IB_MGMT_BASE_VERSION);
	if (IS_ERR(query->mad_buf)) {
		kref_put(&query->sm_ah->ref, free_sm_ah);
		return -ENOMEM;
	}

	query->mad_buf->ah = query->sm_ah->ah;

	return 0;
}
static void free_mad(struct ib_sa_query *query)
{
	ib_free_send_mad(query->mad_buf);
	kref_put(&query->sm_ah->ref, free_sm_ah);
}
static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent)
{
	unsigned long flags;

	memset(mad, 0, sizeof *mad);

	mad->mad_hdr.base_version  = IB_MGMT_BASE_VERSION;
	mad->mad_hdr.mgmt_class    = IB_MGMT_CLASS_SUBN_ADM;
	mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;

	spin_lock_irqsave(&tid_lock, flags);
	mad->mad_hdr.tid           =
		cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++);
	spin_unlock_irqrestore(&tid_lock, flags);
}
static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
{
	bool preload = gfpflags_allow_blocking(gfp_mask);
	unsigned long flags;
	int ret, id;

	if (preload)
		idr_preload(gfp_mask);
	spin_lock_irqsave(&idr_lock, flags);

	id = idr_alloc(&query_idr, query, 0, 0, GFP_NOWAIT);

	spin_unlock_irqrestore(&idr_lock, flags);
	if (preload)
		idr_preload_end();
	if (id < 0)
		return id;

	query->mad_buf->timeout_ms  = timeout_ms;
	query->mad_buf->context[0] = query;
	query->id = id;

	if (query->flags & IB_SA_ENABLE_LOCAL_SERVICE) {
		if (!ibnl_chk_listeners(RDMA_NL_GROUP_LS)) {
			if (!ib_nl_make_request(query))
				return id;
		}
		ib_sa_disable_local_svc(query);
	}

	ret = ib_post_send_mad(query->mad_buf, NULL);
	if (ret) {
		spin_lock_irqsave(&idr_lock, flags);
		idr_remove(&query_idr, id);
		spin_unlock_irqrestore(&idr_lock, flags);
	}

	/*
	 * It's not safe to dereference query any more, because the
	 * send may already have completed and freed the query in
	 * another context.
	 */
	return ret ? ret : id;
}
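/*
 * Helpers converting between the on-the-wire SA path record layout
 * described by path_rec_table and struct ib_sa_path_rec.
 */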
void ib_sa_unpack_path(void *attribute, struct ib_sa_path_rec *rec)
{
	ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec);
}
EXPORT_SYMBOL(ib_sa_unpack_path);

void ib_sa_pack_path(struct ib_sa_path_rec *rec, void *attribute)
{
	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, attribute);
}
EXPORT_SYMBOL(ib_sa_pack_path);
static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
				    int status,
				    struct ib_sa_mad *mad)
{
	struct ib_sa_path_query *query =
		container_of(sa_query, struct ib_sa_path_query, sa_query);

	if (mad) {
		struct ib_sa_path_rec rec;

		ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
			  mad->data, &rec);
		rec.vlan_id = 0xffff;
		memset(rec.dmac, 0, ETH_ALEN);
		memset(rec.smac, 0, ETH_ALEN);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_path_query, sa_query));
}
/**
 * ib_sa_path_rec_get - Start a Path get query
 * @client:SA client
 * @device:device to send query on
 * @port_num: port number to send query on
 * @rec:Path Record to send in query
 * @comp_mask:component mask to send in query
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when query completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @sa_query:query context, used to cancel query
 *
 * Send a Path Record Get query to the SA to look up a path.  The
 * callback function will be called when the query completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query.  The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_path_rec_get() is negative, it is an
 * error code.  Otherwise it is a query ID that can be used to cancel
 * the query.
 */
int ib_sa_path_rec_get(struct ib_sa_client *client,
		       struct ib_device *device, u8 port_num,
		       struct ib_sa_path_rec *rec,
		       ib_sa_comp_mask comp_mask,
		       int timeout_ms, gfp_t gfp_mask,
		       void (*callback)(int status,
					struct ib_sa_path_rec *resp,
					void *context),
		       void *context,
		       struct ib_sa_query **sa_query)
{
	struct ib_sa_path_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port   *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port     = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
	query->sa_query.release  = ib_sa_path_rec_release;
	mad->mad_hdr.method	 = IB_MGMT_METHOD_GET;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_PATH_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, mad->data);

	*sa_query = &query->sa_query;

	query->sa_query.flags |= IB_SA_ENABLE_LOCAL_SERVICE;
	query->sa_query.mad_buf->context[1] = rec;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_path_rec_get);
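/*
 * A minimal usage sketch (illustrative only -- my_client, my_callback and
 * the chosen mask bits are hypothetical, not part of this file):
 *
 *	static void my_callback(int status, struct ib_sa_path_rec *resp,
 *				void *context)
 *	{
 *		if (!status)
 *			pr_info("path resolved, dlid 0x%x\n",
 *				be16_to_cpu(resp->dlid));
 *	}
 *
 *	ib_sa_register_client(&my_client);
 *	id = ib_sa_path_rec_get(&my_client, device, port_num, &rec,
 *				IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID,
 *				1000, GFP_KERNEL, my_callback, NULL,
 *				&sa_query);
 *	if (id >= 0 && need_to_abort)
 *		ib_sa_cancel_query(id, sa_query);
 */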
static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query,
				       int status,
				       struct ib_sa_mad *mad)
{
	struct ib_sa_service_query *query =
		container_of(sa_query, struct ib_sa_service_query, sa_query);

	if (mad) {
		struct ib_sa_service_rec rec;

		ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_service_query, sa_query));
}
/**
 * ib_sa_service_rec_query - Start Service Record operation
 * @client:SA client
 * @device:device to send request on
 * @port_num: port number to send request on
 * @method:SA method - should be get, set, or delete
 * @rec:Service Record to send in request
 * @comp_mask:component mask to send in request
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when request completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @sa_query:request context, used to cancel request
 *
 * Send a Service Record set/get/delete to the SA to register,
 * unregister or query a service record.
 * The callback function will be called when the request completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query.  The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_service_rec_query() is negative, it is an
 * error code.  Otherwise it is a request ID that can be used to cancel
 * the query.
 */
int ib_sa_service_rec_query(struct ib_sa_client *client,
			    struct ib_device *device, u8 port_num, u8 method,
			    struct ib_sa_service_rec *rec,
			    ib_sa_comp_mask comp_mask,
			    int timeout_ms, gfp_t gfp_mask,
			    void (*callback)(int status,
					     struct ib_sa_service_rec *resp,
					     void *context),
			    void *context,
			    struct ib_sa_query **sa_query)
{
	struct ib_sa_service_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port   *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	if (method != IB_MGMT_METHOD_GET &&
	    method != IB_MGMT_METHOD_SET &&
	    method != IB_SA_METHOD_DELETE)
		return -EINVAL;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port     = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
	query->sa_query.release  = ib_sa_service_rec_release;
	mad->mad_hdr.method	 = method;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
		rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_service_rec_query);
static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
					int status,
					struct ib_sa_mad *mad)
{
	struct ib_sa_mcmember_query *query =
		container_of(sa_query, struct ib_sa_mcmember_query, sa_query);

	if (mad) {
		struct ib_sa_mcmember_rec rec;

		ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
}
int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
			     struct ib_device *device, u8 port_num,
			     u8 method,
			     struct ib_sa_mcmember_rec *rec,
			     ib_sa_comp_mask comp_mask,
			     int timeout_ms, gfp_t gfp_mask,
			     void (*callback)(int status,
					      struct ib_sa_mcmember_rec *resp,
					      void *context),
			     void *context,
			     struct ib_sa_query **sa_query)
{
	struct ib_sa_mcmember_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port   *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port     = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
	query->sa_query.release  = ib_sa_mcmember_rec_release;
	mad->mad_hdr.method	 = method;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
		rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_mcmember_rec_query);
/* Support GuidInfoRecord */
static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
					int status,
					struct ib_sa_mad *mad)
{
	struct ib_sa_guidinfo_query *query =
		container_of(sa_query, struct ib_sa_guidinfo_query, sa_query);

	if (mad) {
		struct ib_sa_guidinfo_rec rec;

		ib_unpack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_guidinfo_query, sa_query));
}
int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
			      struct ib_device *device, u8 port_num,
			      struct ib_sa_guidinfo_rec *rec,
			      ib_sa_comp_mask comp_mask, u8 method,
			      int timeout_ms, gfp_t gfp_mask,
			      void (*callback)(int status,
					       struct ib_sa_guidinfo_rec *resp,
					       void *context),
			      void *context,
			      struct ib_sa_query **sa_query)
{
	struct ib_sa_guidinfo_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	if (method != IB_MGMT_METHOD_GET &&
	    method != IB_MGMT_METHOD_SET &&
	    method != IB_SA_METHOD_DELETE) {
		return -EINVAL;
	}

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_guidinfo_rec_callback : NULL;
	query->sa_query.release  = ib_sa_guidinfo_rec_release;

	mad->mad_hdr.method	 = method;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_GUID_INFO_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), rec,
		mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_guid_info_rec_query);
static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
	unsigned long flags;

	if (query->callback)
		switch (mad_send_wc->status) {
		case IB_WC_SUCCESS:
			/* No callback -- already got recv */
			break;
		case IB_WC_RESP_TIMEOUT_ERR:
			query->callback(query, -ETIMEDOUT, NULL);
			break;
		case IB_WC_WR_FLUSH_ERR:
			query->callback(query, -EINTR, NULL);
			break;
		default:
			query->callback(query, -EIO, NULL);
			break;
		}

	spin_lock_irqsave(&idr_lock, flags);
	idr_remove(&query_idr, query->id);
	spin_unlock_irqrestore(&idr_lock, flags);

	free_mad(query);
	ib_sa_client_put(query->client);
	query->release(query);
}
static void recv_handler(struct ib_mad_agent *mad_agent,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_sa_query *query;
	struct ib_mad_send_buf *mad_buf;

	mad_buf = (void *) (unsigned long) mad_recv_wc->wc->wr_id;
	query = mad_buf->context[0];

	if (query->callback) {
		if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
			query->callback(query,
					mad_recv_wc->recv_buf.mad->mad_hdr.status ?
					-EINVAL : 0,
					(struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
		else
			query->callback(query, -EIO, NULL);
	}

	ib_free_recv_mad(mad_recv_wc);
}
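/*
 * Per-device setup: allocate an ib_sa_port for every port, register a GSI
 * MAD agent on each SA-capable one, then install the event handler and
 * prime the SM address handles.
 */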
static void ib_sa_add_one(struct ib_device *device)
{
	struct ib_sa_device *sa_dev;
	int s, e, i;
	int count = 0;

	s = rdma_start_port(device);
	e = rdma_end_port(device);

	sa_dev = kzalloc(sizeof *sa_dev +
			 (e - s + 1) * sizeof (struct ib_sa_port),
			 GFP_KERNEL);
	if (!sa_dev)
		return;

	sa_dev->start_port = s;
	sa_dev->end_port   = e;

	for (i = 0; i <= e - s; ++i) {
		spin_lock_init(&sa_dev->port[i].ah_lock);
		if (!rdma_cap_ib_sa(device, i + 1))
			continue;

		sa_dev->port[i].sm_ah    = NULL;
		sa_dev->port[i].port_num = i + s;

		sa_dev->port[i].agent =
			ib_register_mad_agent(device, i + s, IB_QPT_GSI,
					      NULL, 0, send_handler,
					      recv_handler, sa_dev, 0);
		if (IS_ERR(sa_dev->port[i].agent))
			goto err;

		INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);

		count++;
	}

	if (!count)
		goto free;

	ib_set_client_data(device, &sa_client, sa_dev);

	/*
	 * We register our event handler after everything is set up,
	 * and then update our cached info after the event handler is
	 * registered to avoid any problems if a port changes state
	 * during our initialization.
	 */

	INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
	if (ib_register_event_handler(&sa_dev->event_handler))
		goto err;

	for (i = 0; i <= e - s; ++i) {
		if (rdma_cap_ib_sa(device, i + 1))
			update_sm_ah(&sa_dev->port[i].update_task);
	}

	return;

err:
	while (--i >= 0) {
		if (rdma_cap_ib_sa(device, i + 1))
			ib_unregister_mad_agent(sa_dev->port[i].agent);
	}
free:
	kfree(sa_dev);
	return;
}
static void ib_sa_remove_one(struct ib_device *device, void *client_data)
{
	struct ib_sa_device *sa_dev = client_data;
	int i;

	if (!sa_dev)
		return;

	ib_unregister_event_handler(&sa_dev->event_handler);

	flush_workqueue(ib_wq);

	for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
		if (rdma_cap_ib_sa(device, i + 1)) {
			ib_unregister_mad_agent(sa_dev->port[i].agent);
			if (sa_dev->port[i].sm_ah)
				kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
		}
	}

	kfree(sa_dev);
}
static int __init ib_sa_init(void)
{
	int ret;

	get_random_bytes(&tid, sizeof tid);

	atomic_set(&ib_nl_sa_request_seq, 0);

	ret = ib_register_client(&sa_client);
	if (ret) {
		printk(KERN_ERR "Couldn't register ib_sa client\n");
		goto err1;
	}

	ret = mcast_init();
	if (ret) {
		printk(KERN_ERR "Couldn't initialize multicast handling\n");
		goto err2;
	}

	ib_nl_wq = create_singlethread_workqueue("ib_nl_sa_wq");
	if (!ib_nl_wq) {
		ret = -ENOMEM;
		goto err3;
	}

	if (ibnl_add_client(RDMA_NL_LS, RDMA_NL_LS_NUM_OPS,
			    ib_sa_cb_table)) {
		pr_err("Failed to add netlink callback\n");
		ret = -EINVAL;
		goto err4;
	}
	INIT_DELAYED_WORK(&ib_nl_timed_work, ib_nl_request_timeout);

	return 0;

err4:
	destroy_workqueue(ib_nl_wq);
err3:
	mcast_cleanup();
err2:
	ib_unregister_client(&sa_client);
err1:
	return ret;
}
static void __exit ib_sa_cleanup(void)
{
	ibnl_remove_client(RDMA_NL_LS);
	cancel_delayed_work(&ib_nl_timed_work);
	flush_workqueue(ib_nl_wq);
	destroy_workqueue(ib_nl_wq);
	mcast_cleanup();
	ib_unregister_client(&sa_client);
	idr_destroy(&query_idr);
}

module_init(ib_sa_init);
module_exit(ib_sa_cleanup);