/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: sa_query.c 2811 2005-07-06 18:11:43Z halr $
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include <rdma/ib_pack.h>
#include <rdma/ib_sa.h>
#include <rdma/ib_cache.h>

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand subnet administration query support");
MODULE_LICENSE("Dual BSD/GPL");

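/*
 * Per-port and per-device state for the SA client.  Each port caches
 * an address handle for the subnet manager (sm_ah) that is refreshed
 * from a workqueue, and each outstanding request embeds a common
 * struct ib_sa_query header.
 */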
struct ib_sa_sm_ah {
	struct ib_ah        *ah;
	struct kref          ref;
};

struct ib_sa_port {
	struct ib_mad_agent *agent;
	struct ib_sa_sm_ah  *sm_ah;
	struct work_struct   update_task;
	spinlock_t           ah_lock;
	u8                   port_num;
};

struct ib_sa_device {
	int                     start_port, end_port;
	struct ib_event_handler event_handler;
	struct ib_sa_port       port[0];
};

struct ib_sa_query {
	void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
	void (*release)(struct ib_sa_query *);
	struct ib_sa_port      *port;
	struct ib_mad_send_buf *mad_buf;
	struct ib_sa_sm_ah     *sm_ah;
	int                     id;
};

struct ib_sa_service_query {
	void (*callback)(int, struct ib_sa_service_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_path_query {
	void (*callback)(int, struct ib_sa_path_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_mcmember_query {
	void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

static void ib_sa_add_one(struct ib_device *device);
static void ib_sa_remove_one(struct ib_device *device);

static struct ib_client sa_client = {
	.name   = "sa",
	.add    = ib_sa_add_one,
	.remove = ib_sa_remove_one
};

static spinlock_t idr_lock;
static DEFINE_IDR(query_idr);

static spinlock_t tid_lock;
static u32 tid;

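/*
 * The ib_field tables below drive ib_pack()/ib_unpack(), which convert
 * between the host-endian record structs from <rdma/ib_sa.h> and the
 * big-endian attribute layout carried in the MAD data.
 */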
#define PATH_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_path_rec, field),		\
	.struct_size_bytes   = sizeof ((struct ib_sa_path_rec *) 0)->field,	\
	.field_name          = "sa_path_rec:" #field

static const struct ib_field path_rec_table[] = {
	{ PATH_REC_FIELD(dgid), /* ... */ },
	{ PATH_REC_FIELD(sgid), /* ... */ },
	{ PATH_REC_FIELD(dlid), /* ... */ },
	{ PATH_REC_FIELD(slid), /* ... */ },
	{ PATH_REC_FIELD(raw_traffic), /* ... */ },
	{ PATH_REC_FIELD(flow_label), /* ... */ },
	{ PATH_REC_FIELD(hop_limit), /* ... */ },
	{ PATH_REC_FIELD(traffic_class), /* ... */ },
	{ PATH_REC_FIELD(reversible), /* ... */ },
	{ PATH_REC_FIELD(numb_path), /* ... */ },
	{ PATH_REC_FIELD(pkey), /* ... */ },
	{ PATH_REC_FIELD(sl), /* ... */ },
	{ PATH_REC_FIELD(mtu_selector), /* ... */ },
	{ PATH_REC_FIELD(mtu), /* ... */ },
	{ PATH_REC_FIELD(rate_selector), /* ... */ },
	{ PATH_REC_FIELD(rate), /* ... */ },
	{ PATH_REC_FIELD(packet_life_time_selector), /* ... */ },
	{ PATH_REC_FIELD(packet_life_time), /* ... */ },
	{ PATH_REC_FIELD(preference), /* ... */ },
};

#define MCMEMBER_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field),	\
	.struct_size_bytes   = sizeof ((struct ib_sa_mcmember_rec *) 0)->field,	\
	.field_name          = "sa_mcmember_rec:" #field

static const struct ib_field mcmember_rec_table[] = {
	{ MCMEMBER_REC_FIELD(mgid), /* ... */ },
	{ MCMEMBER_REC_FIELD(port_gid), /* ... */ },
	{ MCMEMBER_REC_FIELD(qkey), /* ... */ },
	{ MCMEMBER_REC_FIELD(mlid), /* ... */ },
	{ MCMEMBER_REC_FIELD(mtu_selector), /* ... */ },
	{ MCMEMBER_REC_FIELD(mtu), /* ... */ },
	{ MCMEMBER_REC_FIELD(traffic_class), /* ... */ },
	{ MCMEMBER_REC_FIELD(pkey), /* ... */ },
	{ MCMEMBER_REC_FIELD(rate_selector), /* ... */ },
	{ MCMEMBER_REC_FIELD(rate), /* ... */ },
	{ MCMEMBER_REC_FIELD(packet_life_time_selector), /* ... */ },
	{ MCMEMBER_REC_FIELD(packet_life_time), /* ... */ },
	{ MCMEMBER_REC_FIELD(sl), /* ... */ },
	{ MCMEMBER_REC_FIELD(flow_label), /* ... */ },
	{ MCMEMBER_REC_FIELD(hop_limit), /* ... */ },
	{ MCMEMBER_REC_FIELD(scope), /* ... */ },
	{ MCMEMBER_REC_FIELD(join_state), /* ... */ },
	{ MCMEMBER_REC_FIELD(proxy_join), /* ... */ },
};

#define SERVICE_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_service_rec, field),	\
	.struct_size_bytes   = sizeof ((struct ib_sa_service_rec *) 0)->field,	\
	.field_name          = "sa_service_rec:" #field

static const struct ib_field service_rec_table[] = {
	{ SERVICE_REC_FIELD(id), /* ... */ },
	{ SERVICE_REC_FIELD(gid), /* ... */ },
	{ SERVICE_REC_FIELD(pkey), /* ... */ },
	{ SERVICE_REC_FIELD(lease), /* ... */ },
	{ SERVICE_REC_FIELD(key), /* ... */ },
	{ SERVICE_REC_FIELD(name), /* ... */ },
	{ SERVICE_REC_FIELD(data8), /* ... */ },
	{ SERVICE_REC_FIELD(data16), /* ... */ },
	{ SERVICE_REC_FIELD(data32), /* ... */ },
	{ SERVICE_REC_FIELD(data64), /* ... */ },
};

static void free_sm_ah(struct kref *kref)
{
	struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);

	ib_destroy_ah(sm_ah->ah);
	kfree(sm_ah);
}

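/*
 * Rebuild the cached SM address handle for a port.  Runs from the
 * workqueue, so it may sleep in ib_query_port() and kmalloc().
 */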
static void update_sm_ah(void *port_ptr)
{
	struct ib_sa_port *port = port_ptr;
	struct ib_sa_sm_ah *new_ah, *old_ah;
	struct ib_port_attr port_attr;
	struct ib_ah_attr   ah_attr;

	if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
		printk(KERN_WARNING "Couldn't query port\n");
		return;
	}

	new_ah = kmalloc(sizeof *new_ah, GFP_KERNEL);
	if (!new_ah) {
		printk(KERN_WARNING "Couldn't allocate new SM AH\n");
		return;
	}

	kref_init(&new_ah->ref);

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid     = port_attr.sm_lid;
	ah_attr.sl       = port_attr.sm_sl;
	ah_attr.port_num = port->port_num;

	new_ah->ah = ib_create_ah(port->agent->qp->pd, &ah_attr);
	if (IS_ERR(new_ah->ah)) {
		printk(KERN_WARNING "Couldn't create new SM AH\n");
		kfree(new_ah);
		return;
	}

	spin_lock_irq(&port->ah_lock);
	old_ah = port->sm_ah;
	port->sm_ah = new_ah;
	spin_unlock_irq(&port->ah_lock);

	if (old_ah)
		kref_put(&old_ah->ref, free_sm_ah);
}

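/*
 * Any event that may have changed the SM (port state, LID, P_Key table
 * or the SM itself) schedules update_sm_ah() for the affected port.
 */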
static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event)
{
	if (event->event == IB_EVENT_PORT_ERR    ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE  ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE) {
		struct ib_sa_device *sa_dev;
		sa_dev = container_of(handler, typeof(*sa_dev), event_handler);

		schedule_work(&sa_dev->port[event->element.port_num -
					    sa_dev->start_port].update_task);
	}
}

/**
 * ib_sa_cancel_query - try to cancel an SA query
 * @id:ID of query to cancel
 * @query:query pointer to cancel
 *
 * Try to cancel an SA query.  If the id and query don't match up or
 * the query has already completed, nothing is done.  Otherwise the
 * query is canceled and will complete with a status of -EINTR.
 */
void ib_sa_cancel_query(int id, struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_mad_agent *agent;
	struct ib_mad_send_buf *mad_buf;

	spin_lock_irqsave(&idr_lock, flags);
	if (idr_find(&query_idr, id) != query) {
		spin_unlock_irqrestore(&idr_lock, flags);
		return;
	}
	agent = query->port->agent;
	mad_buf = query->mad_buf;
	spin_unlock_irqrestore(&idr_lock, flags);

	ib_cancel_mad(agent, mad_buf);
}
EXPORT_SYMBOL(ib_sa_cancel_query);

int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
			 struct ib_sa_path_rec *rec, struct ib_ah_attr *ah_attr)
{
	int ret;
	u16 gid_index;

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->dlid = be16_to_cpu(rec->dlid);
	ah_attr->sl = rec->sl;
	ah_attr->src_path_bits = be16_to_cpu(rec->slid) & 0x7f;
	ah_attr->port_num = port_num;

	if (rec->hop_limit > 1) {
		ah_attr->ah_flags = IB_AH_GRH;
		ah_attr->grh.dgid = rec->dgid;

		ret = ib_find_cached_gid(device, &rec->sgid, &port_num,
					 &gid_index);
		if (ret)
			return ret;

		ah_attr->grh.sgid_index    = gid_index;
		ah_attr->grh.flow_label    = be32_to_cpu(rec->flow_label);
		ah_attr->grh.hop_limit     = rec->hop_limit;
		ah_attr->grh.traffic_class = rec->traffic_class;
	}
	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_path);

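/*
 * Fill in the common MAD header for an SA request.  The transaction ID
 * combines the MAD agent's hi_tid with a global counter.
 */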
static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent)
{
	unsigned long flags;

	memset(mad, 0, sizeof *mad);

	mad->mad_hdr.base_version  = IB_MGMT_BASE_VERSION;
	mad->mad_hdr.mgmt_class    = IB_MGMT_CLASS_SUBN_ADM;
	mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;

	spin_lock_irqsave(&tid_lock, flags);
	mad->mad_hdr.tid =
		cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++);
	spin_unlock_irqrestore(&tid_lock, flags);
}

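/*
 * Register the query in query_idr (so it can be canceled), pin the
 * port's current SM address handle, and post the MAD.  Once the MAD is
 * posted, the query may be freed by a completion at any time, so on
 * success only the returned ID may be used to refer to it.
 */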
static int send_mad(struct ib_sa_query *query, int timeout_ms)
{
	unsigned long flags;
	int ret, id;

retry:
	if (!idr_pre_get(&query_idr, GFP_ATOMIC))
		return -ENOMEM;
	spin_lock_irqsave(&idr_lock, flags);
	ret = idr_get_new(&query_idr, query, &id);
	spin_unlock_irqrestore(&idr_lock, flags);
	if (ret == -EAGAIN)
		goto retry;
	if (ret)
		return ret;

	query->mad_buf->timeout_ms = timeout_ms;
	query->mad_buf->context[0] = query;
	query->id = id;

	spin_lock_irqsave(&query->port->ah_lock, flags);
	kref_get(&query->port->sm_ah->ref);
	query->sm_ah = query->port->sm_ah;
	spin_unlock_irqrestore(&query->port->ah_lock, flags);

	query->mad_buf->ah = query->sm_ah->ah;

	ret = ib_post_send_mad(query->mad_buf, NULL);
	if (ret) {
		spin_lock_irqsave(&idr_lock, flags);
		idr_remove(&query_idr, id);
		spin_unlock_irqrestore(&idr_lock, flags);

		kref_put(&query->sm_ah->ref, free_sm_ah);
	}

	/*
	 * It's not safe to dereference query any more, because the
	 * send may already have completed and freed the query in
	 * another context.
	 */
	return ret ? ret : id;
}

static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
				    int status,
				    struct ib_sa_mad *mad)
{
	struct ib_sa_path_query *query =
		container_of(sa_query, struct ib_sa_path_query, sa_query);

	if (mad) {
		struct ib_sa_path_rec rec;

		ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_path_query, sa_query));
}

/**
 * ib_sa_path_rec_get - Start a Path get query
 * @device:device to send query on
 * @port_num: port number to send query on
 * @rec:Path Record to send in query
 * @comp_mask:component mask to send in query
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when query completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @sa_query:query context, used to cancel query
 *
 * Send a Path Record Get query to the SA to look up a path.  The
 * callback function will be called when the query completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query.  The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_path_rec_get() is negative, it is an
 * error code.  Otherwise it is a query ID that can be used to cancel
 * the query.
 */
int ib_sa_path_rec_get(struct ib_device *device, u8 port_num,
		       struct ib_sa_path_rec *rec,
		       ib_sa_comp_mask comp_mask,
		       int timeout_ms, gfp_t gfp_mask,
		       void (*callback)(int status,
					struct ib_sa_path_rec *resp,
					void *context),
		       void *context,
		       struct ib_sa_query **sa_query)
{
	struct ib_sa_path_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port   *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kmalloc(sizeof *query, gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.mad_buf = ib_create_send_mad(agent, 1, 0,
						     0, IB_MGMT_SA_HDR,
						     IB_MGMT_SA_DATA, gfp_mask);
	if (!query->sa_query.mad_buf) {
		ret = -ENOMEM;
		goto err1;
	}

	query->callback = callback;
	query->context  = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
	query->sa_query.release  = ib_sa_path_rec_release;
	query->sa_query.port     = port;
	mad->mad_hdr.method	 = IB_MGMT_METHOD_GET;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_PATH_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_free_send_mad(query->sa_query.mad_buf);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_path_rec_get);

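/*
 * Example usage (a minimal sketch, not part of this driver): a consumer
 * with a partially filled-in path record might start a query like
 * this, where "path_done" and "my_context" are the caller's own names:
 *
 *	static void path_done(int status, struct ib_sa_path_rec *resp,
 *			      void *context)
 *	{
 *		... resp is only valid when status == 0 ...
 *	}
 *
 *	id = ib_sa_path_rec_get(device, port_num, &rec,
 *				IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID,
 *				1000, GFP_KERNEL,
 *				path_done, my_context, &sa_query);
 *
 * A negative id is an error code; otherwise ib_sa_cancel_query(id,
 * sa_query) can abort the request before it completes.
 */
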
static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query,
				       int status,
				       struct ib_sa_mad *mad)
{
	struct ib_sa_service_query *query =
		container_of(sa_query, struct ib_sa_service_query, sa_query);

	if (mad) {
		struct ib_sa_service_rec rec;

		ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_service_query, sa_query));
}

/**
 * ib_sa_service_rec_query - Start Service Record operation
 * @device:device to send request on
 * @port_num: port number to send request on
 * @method:SA method - should be get, set, or delete
 * @rec:Service Record to send in request
 * @comp_mask:component mask to send in request
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when request completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @sa_query:request context, used to cancel request
 *
 * Send a Service Record set/get/delete to the SA to register,
 * unregister or query a service record.
 * The callback function will be called when the request completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query.  The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_service_rec_query() is negative, it is an
 * error code.  Otherwise it is a request ID that can be used to cancel
 * the request.
 */
int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, u8 method,
			    struct ib_sa_service_rec *rec,
			    ib_sa_comp_mask comp_mask,
			    int timeout_ms, gfp_t gfp_mask,
			    void (*callback)(int status,
					     struct ib_sa_service_rec *resp,
					     void *context),
			    void *context,
			    struct ib_sa_query **sa_query)
{
	struct ib_sa_service_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port   *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	if (method != IB_MGMT_METHOD_GET &&
	    method != IB_MGMT_METHOD_SET &&
	    method != IB_SA_METHOD_DELETE)
		return -EINVAL;

	query = kmalloc(sizeof *query, gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.mad_buf = ib_create_send_mad(agent, 1, 0,
						     0, IB_MGMT_SA_HDR,
						     IB_MGMT_SA_DATA, gfp_mask);
	if (!query->sa_query.mad_buf) {
		ret = -ENOMEM;
		goto err1;
	}

	query->callback = callback;
	query->context  = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
	query->sa_query.release  = ib_sa_service_rec_release;
	query->sa_query.port     = port;
	mad->mad_hdr.method	 = method;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
		rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_free_send_mad(query->sa_query.mad_buf);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_service_rec_query);

static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
					int status,
					struct ib_sa_mad *mad)
{
	struct ib_sa_mcmember_query *query =
		container_of(sa_query, struct ib_sa_mcmember_query, sa_query);

	if (mad) {
		struct ib_sa_mcmember_rec rec;

		ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
}

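/*
 * ib_sa_mcmember_rec_query() follows the same pattern as the service
 * record query above: a set/get/delete MCMemberRecord request joins,
 * looks up, or leaves a multicast group, with the same callback status
 * and return value conventions.
 */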
int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num,
			     u8 method,
			     struct ib_sa_mcmember_rec *rec,
			     ib_sa_comp_mask comp_mask,
			     int timeout_ms, gfp_t gfp_mask,
			     void (*callback)(int status,
					      struct ib_sa_mcmember_rec *resp,
					      void *context),
			     void *context,
			     struct ib_sa_query **sa_query)
{
	struct ib_sa_mcmember_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port   *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kmalloc(sizeof *query, gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.mad_buf = ib_create_send_mad(agent, 1, 0,
						     0, IB_MGMT_SA_HDR,
						     IB_MGMT_SA_DATA, gfp_mask);
	if (!query->sa_query.mad_buf) {
		ret = -ENOMEM;
		goto err1;
	}

	query->callback = callback;
	query->context  = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
	query->sa_query.release  = ib_sa_mcmember_rec_release;
	query->sa_query.port     = port;
	mad->mad_hdr.method	 = method;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
		rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_free_send_mad(query->sa_query.mad_buf);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_mcmember_rec_query);

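/*
 * MAD-layer send completion: report timeouts, flushes and errors to
 * the consumer (a successful response is reported from recv_handler
 * instead), then drop the query's IDR entry, SM AH reference and
 * memory.
 */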
static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
	unsigned long flags;

	if (query->callback)
		switch (mad_send_wc->status) {
		case IB_WC_SUCCESS:
			/* No callback -- already got recv */
			break;
		case IB_WC_RESP_TIMEOUT_ERR:
			query->callback(query, -ETIMEDOUT, NULL);
			break;
		case IB_WC_WR_FLUSH_ERR:
			query->callback(query, -EINTR, NULL);
			break;
		default:
			query->callback(query, -EIO, NULL);
			break;
		}

	spin_lock_irqsave(&idr_lock, flags);
	idr_remove(&query_idr, query->id);
	spin_unlock_irqrestore(&idr_lock, flags);

	ib_free_send_mad(mad_send_wc->send_buf);
	kref_put(&query->sm_ah->ref, free_sm_ah);
	query->release(query);
}

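/*
 * MAD-layer receive completion: hand the response MAD (or an error
 * status) to the consumer callback.  A non-zero status in the MAD
 * header is reported as -EINVAL.
 */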
static void recv_handler(struct ib_mad_agent *mad_agent,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_sa_query *query;
	struct ib_mad_send_buf *mad_buf;

	mad_buf = (void *) (unsigned long) mad_recv_wc->wc->wr_id;
	query = mad_buf->context[0];

	if (query->callback) {
		if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
			query->callback(query,
					mad_recv_wc->recv_buf.mad->mad_hdr.status ?
					-EINVAL : 0,
					(struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
		else
			query->callback(query, -EIO, NULL);
	}

	ib_free_recv_mad(mad_recv_wc);
}

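/*
 * Client hooks: set up per-port MAD agents and SM address handles when
 * a device is added, and tear them down again on removal.
 */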
static void ib_sa_add_one(struct ib_device *device)
{
	struct ib_sa_device *sa_dev;
	int s, e, i;

	if (device->node_type == IB_NODE_SWITCH)
		s = e = 0;
	else {
		s = 1;
		e = device->phys_port_cnt;
	}

	sa_dev = kmalloc(sizeof *sa_dev +
			 (e - s + 1) * sizeof (struct ib_sa_port),
			 GFP_KERNEL);
	if (!sa_dev)
		return;

	sa_dev->start_port = s;
	sa_dev->end_port   = e;

	for (i = 0; i <= e - s; ++i) {
		sa_dev->port[i].sm_ah    = NULL;
		sa_dev->port[i].port_num = i + s;
		spin_lock_init(&sa_dev->port[i].ah_lock);

		sa_dev->port[i].agent =
			ib_register_mad_agent(device, i + s, IB_QPT_GSI,
					      NULL, 0, send_handler,
					      recv_handler, sa_dev);
		if (IS_ERR(sa_dev->port[i].agent))
			goto err;

		INIT_WORK(&sa_dev->port[i].update_task,
			  update_sm_ah, &sa_dev->port[i]);
	}

	ib_set_client_data(device, &sa_client, sa_dev);

	/*
	 * We register our event handler after everything is set up,
	 * and then update our cached info after the event handler is
	 * registered to avoid any problems if a port changes state
	 * during our initialization.
	 */
	INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
	if (ib_register_event_handler(&sa_dev->event_handler))
		goto err;

	for (i = 0; i <= e - s; ++i)
		update_sm_ah(&sa_dev->port[i]);

	return;

err:
	while (--i >= 0)
		ib_unregister_mad_agent(sa_dev->port[i].agent);

	kfree(sa_dev);

	return;
}

static void ib_sa_remove_one(struct ib_device *device)
{
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	int i;

	if (!sa_dev)
		return;

	ib_unregister_event_handler(&sa_dev->event_handler);

	flush_scheduled_work();

	for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
		ib_unregister_mad_agent(sa_dev->port[i].agent);
		kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
	}

	kfree(sa_dev);
}

static int __init ib_sa_init(void)
{
	int ret;

	spin_lock_init(&idr_lock);
	spin_lock_init(&tid_lock);

	get_random_bytes(&tid, sizeof tid);

	ret = ib_register_client(&sa_client);
	if (ret)
		printk(KERN_ERR "Couldn't register ib_sa client\n");

	return ret;
}

static void __exit ib_sa_cleanup(void)
{
	ib_unregister_client(&sa_client);
	idr_destroy(&query_idr);
}

module_init(ib_sa_init);
module_exit(ib_sa_cleanup);