/*
 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mad.c 2817 2005-07-07 11:29:26Z halr $
 */
#include <linux/dma-mapping.h>

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("kernel IB MAD API");
MODULE_AUTHOR("Hal Rosenstock");
MODULE_AUTHOR("Sean Hefty");
kmem_cache_t *ib_mad_cache;

static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;

static spinlock_t ib_mad_port_list_lock;
/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
					struct ib_mad *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(void *data);
static void local_completions(void *data);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);
/*
 * Returns a ib_mad_port_private structure or NULL for a device/port
 * Assumes ib_mad_port_list_lock is being held
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;

	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
		if (entry->device == device && entry->port_num == port_num)
			return entry;
	}
	return NULL;
}
/*
 * Wrapper function to return a ib_mad_port_private structure or NULL
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	entry = __ib_get_mad_port(device, port_num);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	return entry;
}
static inline u8 convert_mgmt_class(u8 mgmt_class)
{
	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
		0 : mgmt_class;
}

static int get_spl_qp_index(enum ib_qp_type qp_type)
{
	switch (qp_type) {
	case IB_QPT_SMI:
		return 0;
	case IB_QPT_GSI:
		return 1;
	default:
		return -1;
	}
}

static int vendor_class_index(u8 mgmt_class)
{
	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}

static int is_vendor_class(u8 mgmt_class)
{
	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return 0;
	return 1;
}

static int is_vendor_oui(char *oui)
{
	if (oui[0] || oui[1] || oui[2])
		return 1;
	return 0;
}
static int is_vendor_method_in_use(
		struct ib_mad_mgmt_vendor_class *vendor_class,
		struct ib_mad_reg_req *mad_reg_req)
{
	struct ib_mad_mgmt_method_table *method;
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++) {
		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
			method = vendor_class->method_table[i];
			if (method && method_in_use(&method, mad_reg_req))
				return 1;
			break;
		}
	}
	return 0;
}
/*
 * ib_register_mad_agent - Register to send/receive MADs
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   u8 rmpp_version,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_reg_req *reg_req = NULL;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	struct ib_mad_mgmt_method_table *method;
	int ret2, qpn;
	unsigned long flags;
	u8 mgmt_class, vclass;

	/* Validate parameters */
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1)
		goto error1;

	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION)
		goto error1;

	/* Validate MAD registration request if supplied */
	if (mad_reg_req) {
		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION)
			goto error1;
		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
			/*
			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
			 * one in this range currently allowed
			 */
			if (mad_reg_req->mgmt_class !=
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
				goto error1;
		} else if (mad_reg_req->mgmt_class == 0) {
			/*
			 * Class 0 is reserved in IBA and is used for
			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
			 */
			goto error1;
		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
			/*
			 * If class is in "new" vendor range,
			 * ensure supplied OUI is not zero
			 */
			if (!is_vendor_oui(mad_reg_req->oui))
				goto error1;
		}
		/* Make sure class supplied is consistent with QP type */
		if (qp_type == IB_QPT_SMI) {
			if ((mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
			    (mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
				goto error1;
		} else {
			if ((mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
			    (mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
				goto error1;
		}
	} else {
		/* No registration request supplied */
		if (!send_handler)
			goto error1;
	}

	/* Validate device and port */
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}

	/* Allocate structures */
	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
	if (!mad_agent_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
						 IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(mad_agent_priv->agent.mr)) {
		ret = ERR_PTR(-ENOMEM);
		goto error2;
	}

	if (mad_reg_req) {
		reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL);
		if (!reg_req) {
			ret = ERR_PTR(-ENOMEM);
			goto error3;
		}
		/* Make a copy of the MAD registration request */
		memcpy(reg_req, mad_reg_req, sizeof *reg_req);
	}

	/* Now, fill in the various structures */
	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
	mad_agent_priv->reg_req = reg_req;
	mad_agent_priv->agent.rmpp_version = rmpp_version;
	mad_agent_priv->agent.device = device;
	mad_agent_priv->agent.recv_handler = recv_handler;
	mad_agent_priv->agent.send_handler = send_handler;
	mad_agent_priv->agent.context = context;
	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_agent_priv->agent.port_num = port_num;

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;

	/*
	 * Make sure MAD registration (if supplied)
	 * is non overlapping with any existing ones
	 */
	if (mad_reg_req) {
		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
		if (!is_vendor_class(mgmt_class)) {
			class = port_priv->version[mad_reg_req->
						   mgmt_class_version].class;
			if (class) {
				method = class->method_table[mgmt_class];
				if (method && method_in_use(&method,
							    mad_reg_req))
					goto error4;
			}
			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
						  mgmt_class);
		} else {
			/* "New" vendor class range */
			vendor = port_priv->version[mad_reg_req->
						    mgmt_class_version].vendor;
			if (vendor) {
				vclass = vendor_class_index(mgmt_class);
				vendor_class = vendor->vendor_class[vclass];
				if (vendor_class &&
				    is_vendor_method_in_use(vendor_class,
							    mad_reg_req))
					goto error4;
			}
			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
		}
		if (ret2) {
			ret = ERR_PTR(ret2);
			goto error4;
		}
	}

	/* Add mad agent into port's agent list */
	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	spin_lock_init(&mad_agent_priv->lock);
	INIT_LIST_HEAD(&mad_agent_priv->send_list);
	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
	INIT_LIST_HEAD(&mad_agent_priv->done_list);
	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
	INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv);
	INIT_LIST_HEAD(&mad_agent_priv->local_list);
	INIT_WORK(&mad_agent_priv->local_work, local_completions,
		  mad_agent_priv);
	atomic_set(&mad_agent_priv->refcount, 1);
	init_waitqueue_head(&mad_agent_priv->wait);

	return &mad_agent_priv->agent;

error4:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	kfree(reg_req);
error3:
	ib_dereg_mr(mad_agent_priv->agent.mr);
error2:
	kfree(mad_agent_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_agent);
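
/*
 * Illustrative sketch (not part of the original file): a minimal example of
 * how a client might register a MAD agent on QP1.  The handler names and the
 * management class chosen here are placeholders for the example only; a real
 * caller supplies its own.
 */
#if 0
static void my_send_handler(struct ib_mad_agent *agent,
			    struct ib_mad_send_wc *wc)
{
	/* completion processing for sends posted by this client */
}

static void my_recv_handler(struct ib_mad_agent *agent,
			    struct ib_mad_recv_wc *wc)
{
	/* process the received MAD, then return its buffers */
	ib_free_recv_mad(wc);
}

static struct ib_mad_agent *example_register(struct ib_device *device,
					     u8 port_num)
{
	struct ib_mad_reg_req reg_req;

	memset(&reg_req, 0, sizeof reg_req);
	reg_req.mgmt_class = IB_MGMT_CLASS_PERF_MGMT;	/* example class */
	reg_req.mgmt_class_version = 1;
	set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);

	return ib_register_mad_agent(device, port_num, IB_QPT_GSI,
				     &reg_req, 0, my_send_handler,
				     my_recv_handler, NULL);
}
#endif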
static inline int is_snooping_sends(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(/*IB_MAD_SNOOP_POSTED_SENDS |
		   IB_MAD_SNOOP_RMPP_SENDS |*/
		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(IB_MAD_SNOOP_RECVS /*|
		 IB_MAD_SNOOP_RMPP_RECVS*/));
}
static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
				struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_snoop_private **new_snoop_table;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	/* Check for empty slot in array. */
	for (i = 0; i < qp_info->snoop_table_size; i++)
		if (!qp_info->snoop_table[i])
			break;

	if (i == qp_info->snoop_table_size) {
		/* Grow the table by one entry. */
		new_snoop_table = kmalloc(sizeof mad_snoop_priv *
					  (qp_info->snoop_table_size + 1),
					  GFP_ATOMIC);
		if (!new_snoop_table) {
			i = -ENOMEM;
			goto out;
		}
		if (qp_info->snoop_table) {
			memcpy(new_snoop_table, qp_info->snoop_table,
			       sizeof mad_snoop_priv *
			       qp_info->snoop_table_size);
			kfree(qp_info->snoop_table);
		}
		qp_info->snoop_table = new_snoop_table;
		qp_info->snoop_table_size++;
	}
	qp_info->snoop_table[i] = mad_snoop_priv;
	atomic_inc(&qp_info->snoop_count);
out:
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
	return i;
}
struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   int mad_snoop_flags,
					   ib_mad_snoop_handler snoop_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret;
	struct ib_mad_snoop_private *mad_snoop_priv;
	int qpn;

	/* Validate parameters */
	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}
	/* Allocate structures */
	mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
	if (!mad_snoop_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	/* Now, fill in the various structures */
	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
	mad_snoop_priv->agent.device = device;
	mad_snoop_priv->agent.recv_handler = recv_handler;
	mad_snoop_priv->agent.snoop_handler = snoop_handler;
	mad_snoop_priv->agent.context = context;
	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_snoop_priv->agent.port_num = port_num;
	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
	init_waitqueue_head(&mad_snoop_priv->wait);
	mad_snoop_priv->snoop_index = register_snoop_agent(
						&port_priv->qp_info[qpn],
						mad_snoop_priv);
	if (mad_snoop_priv->snoop_index < 0) {
		ret = ERR_PTR(mad_snoop_priv->snoop_index);
		goto error2;
	}

	atomic_set(&mad_snoop_priv->refcount, 1);
	return &mad_snoop_priv->agent;

error2:
	kfree(mad_snoop_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_snoop);
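
/*
 * Illustrative sketch (not from the original source): snooping receive
 * traffic on a port's QP0.  The handler name is a placeholder.
 */
#if 0
static void my_snoop_recv(struct ib_mad_agent *agent,
			  struct ib_mad_recv_wc *wc)
{
	/* observe the MAD; ownership stays with the normal consumer */
}

static struct ib_mad_agent *example_snoop(struct ib_device *device,
					  u8 port_num)
{
	return ib_register_mad_snoop(device, port_num, IB_QPT_SMI,
				     IB_MAD_SNOOP_RECVS, NULL,
				     my_snoop_recv, NULL);
}
#endif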
static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	/* Note that we could still be handling received MADs */

	/*
	 * Canceling all sends results in dropping received response
	 * MADs, preventing us from queuing additional work
	 */
	cancel_mads(mad_agent_priv);
	port_priv = mad_agent_priv->qp_info->port_priv;
	cancel_delayed_work(&mad_agent_priv->timed_work);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	remove_mad_reg_req(mad_agent_priv);
	list_del(&mad_agent_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	flush_workqueue(port_priv->wq);
	ib_cancel_rmpp_recvs(mad_agent_priv);

	atomic_dec(&mad_agent_priv->refcount);
	wait_event(mad_agent_priv->wait,
		   !atomic_read(&mad_agent_priv->refcount));

	kfree(mad_agent_priv->reg_req);
	ib_dereg_mr(mad_agent_priv->agent.mr);
	kfree(mad_agent_priv);
}

static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_qp_info *qp_info;
	unsigned long flags;

	qp_info = mad_snoop_priv->qp_info;
	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
	atomic_dec(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

	atomic_dec(&mad_snoop_priv->refcount);
	wait_event(mad_snoop_priv->wait,
		   !atomic_read(&mad_snoop_priv->refcount));

	kfree(mad_snoop_priv);
}

/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* If the TID is zero, the agent can only snoop. */
	if (mad_agent->hi_tid) {
		mad_agent_priv = container_of(mad_agent,
					      struct ib_mad_agent_private,
					      agent);
		unregister_mad_agent(mad_agent_priv);
	} else {
		mad_snoop_priv = container_of(mad_agent,
					      struct ib_mad_snoop_private,
					      agent);
		unregister_mad_snoop(mad_snoop_priv);
	}
	return 0;
}
EXPORT_SYMBOL(ib_unregister_mad_agent);
static inline int response_mad(struct ib_mad *mad)
{
	/* Trap represses are responses although response bit is reset */
	return ((mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
		(mad->mad_hdr.method & IB_MGMT_METHOD_RESP));
}
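
/*
 * For illustration (values from the IBA spec / ib_mad.h): a response method
 * is the request method with the high "R" bit set, e.g. Get (0x01) |
 * IB_MGMT_METHOD_RESP (0x80) == GetResp (0x81), so the bitwise test above
 * matches it.  TrapRepress (0x07) carries no R bit, which is why it is
 * special-cased.
 */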
static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
	struct ib_mad_queue *mad_queue;
	unsigned long flags;

	BUG_ON(!mad_list->mad_queue);
	mad_queue = mad_list->mad_queue;
	spin_lock_irqsave(&mad_queue->lock, flags);
	list_del(&mad_list->list);
	spin_unlock_irqrestore(&mad_queue->lock, flags);
}
578 static void snoop_send(struct ib_mad_qp_info
*qp_info
,
579 struct ib_mad_send_buf
*send_buf
,
580 struct ib_mad_send_wc
*mad_send_wc
,
583 struct ib_mad_snoop_private
*mad_snoop_priv
;
587 spin_lock_irqsave(&qp_info
->snoop_lock
, flags
);
588 for (i
= 0; i
< qp_info
->snoop_table_size
; i
++) {
589 mad_snoop_priv
= qp_info
->snoop_table
[i
];
590 if (!mad_snoop_priv
||
591 !(mad_snoop_priv
->mad_snoop_flags
& mad_snoop_flags
))
594 atomic_inc(&mad_snoop_priv
->refcount
);
595 spin_unlock_irqrestore(&qp_info
->snoop_lock
, flags
);
596 mad_snoop_priv
->agent
.snoop_handler(&mad_snoop_priv
->agent
,
597 send_buf
, mad_send_wc
);
598 if (atomic_dec_and_test(&mad_snoop_priv
->refcount
))
599 wake_up(&mad_snoop_priv
->wait
);
600 spin_lock_irqsave(&qp_info
->snoop_lock
, flags
);
602 spin_unlock_irqrestore(&qp_info
->snoop_lock
, flags
);
605 static void snoop_recv(struct ib_mad_qp_info
*qp_info
,
606 struct ib_mad_recv_wc
*mad_recv_wc
,
609 struct ib_mad_snoop_private
*mad_snoop_priv
;
613 spin_lock_irqsave(&qp_info
->snoop_lock
, flags
);
614 for (i
= 0; i
< qp_info
->snoop_table_size
; i
++) {
615 mad_snoop_priv
= qp_info
->snoop_table
[i
];
616 if (!mad_snoop_priv
||
617 !(mad_snoop_priv
->mad_snoop_flags
& mad_snoop_flags
))
620 atomic_inc(&mad_snoop_priv
->refcount
);
621 spin_unlock_irqrestore(&qp_info
->snoop_lock
, flags
);
622 mad_snoop_priv
->agent
.recv_handler(&mad_snoop_priv
->agent
,
624 if (atomic_dec_and_test(&mad_snoop_priv
->refcount
))
625 wake_up(&mad_snoop_priv
->wait
);
626 spin_lock_irqsave(&qp_info
->snoop_lock
, flags
);
628 spin_unlock_irqrestore(&qp_info
->snoop_lock
, flags
);
static void build_smp_wc(u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
			 struct ib_wc *wc)
{
	memset(wc, 0, sizeof *wc);
	wc->wr_id = wr_id;
	wc->status = IB_WC_SUCCESS;
	wc->opcode = IB_WC_RECV;
	wc->pkey_index = pkey_index;
	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
	wc->slid = slid;
	wc->dlid_path_bits = 0;
	wc->port_num = port_num;
}
649 * Return 0 if SMP is to be sent
650 * Return 1 if SMP was consumed locally (whether or not solicited)
651 * Return < 0 if error
653 static int handle_outgoing_dr_smp(struct ib_mad_agent_private
*mad_agent_priv
,
654 struct ib_mad_send_wr_private
*mad_send_wr
)
657 struct ib_smp
*smp
= mad_send_wr
->send_buf
.mad
;
659 struct ib_mad_local_private
*local
;
660 struct ib_mad_private
*mad_priv
;
661 struct ib_mad_port_private
*port_priv
;
662 struct ib_mad_agent_private
*recv_mad_agent
= NULL
;
663 struct ib_device
*device
= mad_agent_priv
->agent
.device
;
664 u8 port_num
= mad_agent_priv
->agent
.port_num
;
666 struct ib_send_wr
*send_wr
= &mad_send_wr
->send_wr
;
669 * Directed route handling starts if the initial LID routed part of
670 * a request or the ending LID routed part of a response is empty.
671 * If we are at the start of the LID routed part, don't update the
672 * hop_ptr or hop_cnt. See section 14.2.2, Vol 1 IB spec.
674 if ((ib_get_smp_direction(smp
) ? smp
->dr_dlid
: smp
->dr_slid
) ==
676 !smi_handle_dr_smp_send(smp
, device
->node_type
, port_num
)) {
678 printk(KERN_ERR PFX
"Invalid directed route\n");
681 /* Check to post send on QP or process locally */
682 ret
= smi_check_local_dr_smp(smp
, device
, port_num
);
683 if (!ret
|| !device
->process_mad
)
686 local
= kmalloc(sizeof *local
, GFP_ATOMIC
);
689 printk(KERN_ERR PFX
"No memory for ib_mad_local_private\n");
692 local
->mad_priv
= NULL
;
693 local
->recv_mad_agent
= NULL
;
694 mad_priv
= kmem_cache_alloc(ib_mad_cache
, GFP_ATOMIC
);
697 printk(KERN_ERR PFX
"No memory for local response MAD\n");
702 build_smp_wc(send_wr
->wr_id
, be16_to_cpu(smp
->dr_slid
),
703 send_wr
->wr
.ud
.pkey_index
,
704 send_wr
->wr
.ud
.port_num
, &mad_wc
);
706 /* No GRH for DR SMP */
707 ret
= device
->process_mad(device
, 0, port_num
, &mad_wc
, NULL
,
708 (struct ib_mad
*)smp
,
709 (struct ib_mad
*)&mad_priv
->mad
);
712 case IB_MAD_RESULT_SUCCESS
| IB_MAD_RESULT_REPLY
:
713 if (response_mad(&mad_priv
->mad
.mad
) &&
714 mad_agent_priv
->agent
.recv_handler
) {
715 local
->mad_priv
= mad_priv
;
716 local
->recv_mad_agent
= mad_agent_priv
;
718 * Reference MAD agent until receive
719 * side of local completion handled
721 atomic_inc(&mad_agent_priv
->refcount
);
723 kmem_cache_free(ib_mad_cache
, mad_priv
);
725 case IB_MAD_RESULT_SUCCESS
| IB_MAD_RESULT_CONSUMED
:
726 kmem_cache_free(ib_mad_cache
, mad_priv
);
728 case IB_MAD_RESULT_SUCCESS
:
729 /* Treat like an incoming receive MAD */
730 port_priv
= ib_get_mad_port(mad_agent_priv
->agent
.device
,
731 mad_agent_priv
->agent
.port_num
);
733 mad_priv
->mad
.mad
.mad_hdr
.tid
=
734 ((struct ib_mad
*)smp
)->mad_hdr
.tid
;
735 recv_mad_agent
= find_mad_agent(port_priv
,
738 if (!port_priv
|| !recv_mad_agent
) {
739 kmem_cache_free(ib_mad_cache
, mad_priv
);
744 local
->mad_priv
= mad_priv
;
745 local
->recv_mad_agent
= recv_mad_agent
;
748 kmem_cache_free(ib_mad_cache
, mad_priv
);
754 local
->mad_send_wr
= mad_send_wr
;
755 /* Reference MAD agent until send side of local completion handled */
756 atomic_inc(&mad_agent_priv
->refcount
);
757 /* Queue local completion to local list */
758 spin_lock_irqsave(&mad_agent_priv
->lock
, flags
);
759 list_add_tail(&local
->completion_list
, &mad_agent_priv
->local_list
);
760 spin_unlock_irqrestore(&mad_agent_priv
->lock
, flags
);
761 queue_work(mad_agent_priv
->qp_info
->port_priv
->wq
,
762 &mad_agent_priv
->local_work
);
static int get_buf_length(int hdr_len, int data_len)
{
	int seg_size, pad;

	seg_size = sizeof(struct ib_mad) - hdr_len;
	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		if (pad == seg_size)
			pad = 0;
	} else
		pad = 0;
	return hdr_len + data_len + pad;
}
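
/*
 * Worked example (illustrative): with a 256-byte struct ib_mad, an RMPP
 * header length of 36 gives seg_size = 220.  For data_len = 500,
 * 500 % 220 = 60, so pad = 160 and the buffer length is 36 + 500 + 160;
 * if data_len were an exact multiple of 220, pad would be forced to 0.
 */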
struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
					    u32 remote_qpn, u16 pkey_index,
					    int rmpp_active,
					    int hdr_len, int data_len,
					    gfp_t gfp_mask)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	int buf_size;
	void *buf;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);
	buf_size = get_buf_length(hdr_len, data_len);

	if ((!mad_agent->rmpp_version &&
	     (rmpp_active || buf_size > sizeof(struct ib_mad))) ||
	    (!rmpp_active && buf_size > sizeof(struct ib_mad)))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *mad_send_wr + buf_size, gfp_mask);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	mad_send_wr = buf + buf_size;
	mad_send_wr->send_buf.mad = buf;

	mad_send_wr->mad_agent_priv = mad_agent_priv;
	mad_send_wr->sg_list[0].length = buf_size;
	mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;

	mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
	mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
	mad_send_wr->send_wr.num_sge = 1;
	mad_send_wr->send_wr.opcode = IB_WR_SEND;
	mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
	mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
	mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
	mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;

	if (rmpp_active) {
		struct ib_rmpp_mad *rmpp_mad = mad_send_wr->send_buf.mad;
		rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(hdr_len -
						   IB_MGMT_RMPP_HDR + data_len);
		rmpp_mad->rmpp_hdr.rmpp_version = mad_agent->rmpp_version;
		rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
		ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr,
				  IB_MGMT_RMPP_FLAG_ACTIVE);
	}

	mad_send_wr->send_buf.mad_agent = mad_agent;
	atomic_inc(&mad_agent_priv->refcount);
	return &mad_send_wr->send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);
void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;

	mad_agent_priv = container_of(send_buf->mad_agent,
				      struct ib_mad_agent_private, agent);
	kfree(send_buf->mad);

	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		wake_up(&mad_agent_priv->wait);
}
EXPORT_SYMBOL(ib_free_send_mad);
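
/*
 * Illustrative sketch (not part of the original file): allocating, posting
 * and releasing a send buffer.  The address handle "ah" and the destination
 * QPN/P_Key index are assumed to have been set up elsewhere by the caller;
 * error handling is abbreviated.
 */
#if 0
static int example_send(struct ib_mad_agent *agent, struct ib_ah *ah,
			u32 remote_qpn, u16 pkey_index)
{
	struct ib_mad_send_buf *msg, *bad;
	struct ib_mad_hdr *hdr;
	int ret;

	msg = ib_create_send_mad(agent, remote_qpn, pkey_index, 0,
				 IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
				 GFP_KERNEL);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	hdr = msg->mad;		/* fill in the MAD header and payload here */
	msg->ah = ah;
	msg->timeout_ms = 1000;	/* wait up to 1s for a response */
	msg->retries = 2;

	ret = ib_post_send_mad(msg, &bad);
	if (ret)
		ib_free_send_mad(msg);	/* send handler frees it otherwise */
	return ret;
}
#endif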
851 int ib_send_mad(struct ib_mad_send_wr_private
*mad_send_wr
)
853 struct ib_mad_qp_info
*qp_info
;
854 struct list_head
*list
;
855 struct ib_send_wr
*bad_send_wr
;
856 struct ib_mad_agent
*mad_agent
;
861 /* Set WR ID to find mad_send_wr upon completion */
862 qp_info
= mad_send_wr
->mad_agent_priv
->qp_info
;
863 mad_send_wr
->send_wr
.wr_id
= (unsigned long)&mad_send_wr
->mad_list
;
864 mad_send_wr
->mad_list
.mad_queue
= &qp_info
->send_queue
;
866 mad_agent
= mad_send_wr
->send_buf
.mad_agent
;
867 sge
= mad_send_wr
->sg_list
;
868 sge
->addr
= dma_map_single(mad_agent
->device
->dma_device
,
869 mad_send_wr
->send_buf
.mad
, sge
->length
,
871 pci_unmap_addr_set(mad_send_wr
, mapping
, sge
->addr
);
873 spin_lock_irqsave(&qp_info
->send_queue
.lock
, flags
);
874 if (qp_info
->send_queue
.count
< qp_info
->send_queue
.max_active
) {
875 ret
= ib_post_send(mad_agent
->qp
, &mad_send_wr
->send_wr
,
877 list
= &qp_info
->send_queue
.list
;
880 list
= &qp_info
->overflow_list
;
884 qp_info
->send_queue
.count
++;
885 list_add_tail(&mad_send_wr
->mad_list
.list
, list
);
887 spin_unlock_irqrestore(&qp_info
->send_queue
.lock
, flags
);
889 dma_unmap_single(mad_agent
->device
->dma_device
,
890 pci_unmap_addr(mad_send_wr
, mapping
),
891 sge
->length
, DMA_TO_DEVICE
);
897 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
898 * with the registered client
900 int ib_post_send_mad(struct ib_mad_send_buf
*send_buf
,
901 struct ib_mad_send_buf
**bad_send_buf
)
903 struct ib_mad_agent_private
*mad_agent_priv
;
904 struct ib_mad_send_buf
*next_send_buf
;
905 struct ib_mad_send_wr_private
*mad_send_wr
;
909 /* Walk list of send WRs and post each on send list */
910 for (; send_buf
; send_buf
= next_send_buf
) {
912 mad_send_wr
= container_of(send_buf
,
913 struct ib_mad_send_wr_private
,
915 mad_agent_priv
= mad_send_wr
->mad_agent_priv
;
917 if (!send_buf
->mad_agent
->send_handler
||
918 (send_buf
->timeout_ms
&&
919 !send_buf
->mad_agent
->recv_handler
)) {
925 * Save pointer to next work request to post in case the
926 * current one completes, and the user modifies the work
927 * request associated with the completion
929 next_send_buf
= send_buf
->next
;
930 mad_send_wr
->send_wr
.wr
.ud
.ah
= send_buf
->ah
;
932 if (((struct ib_mad_hdr
*) send_buf
->mad
)->mgmt_class
==
933 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
) {
934 ret
= handle_outgoing_dr_smp(mad_agent_priv
,
936 if (ret
< 0) /* error */
938 else if (ret
== 1) /* locally consumed */
942 mad_send_wr
->tid
= ((struct ib_mad_hdr
*) send_buf
->mad
)->tid
;
943 /* Timeout will be updated after send completes */
944 mad_send_wr
->timeout
= msecs_to_jiffies(send_buf
->timeout_ms
);
945 mad_send_wr
->retries
= send_buf
->retries
;
946 /* Reference for work request to QP + response */
947 mad_send_wr
->refcount
= 1 + (mad_send_wr
->timeout
> 0);
948 mad_send_wr
->status
= IB_WC_SUCCESS
;
950 /* Reference MAD agent until send completes */
951 atomic_inc(&mad_agent_priv
->refcount
);
952 spin_lock_irqsave(&mad_agent_priv
->lock
, flags
);
953 list_add_tail(&mad_send_wr
->agent_list
,
954 &mad_agent_priv
->send_list
);
955 spin_unlock_irqrestore(&mad_agent_priv
->lock
, flags
);
957 if (mad_agent_priv
->agent
.rmpp_version
) {
958 ret
= ib_send_rmpp_mad(mad_send_wr
);
959 if (ret
>= 0 && ret
!= IB_RMPP_RESULT_CONSUMED
)
960 ret
= ib_send_mad(mad_send_wr
);
962 ret
= ib_send_mad(mad_send_wr
);
964 /* Fail send request */
965 spin_lock_irqsave(&mad_agent_priv
->lock
, flags
);
966 list_del(&mad_send_wr
->agent_list
);
967 spin_unlock_irqrestore(&mad_agent_priv
->lock
, flags
);
968 atomic_dec(&mad_agent_priv
->refcount
);
975 *bad_send_buf
= send_buf
;
978 EXPORT_SYMBOL(ib_post_send_mad
);
981 * ib_free_recv_mad - Returns data buffers used to receive
982 * a MAD to the access layer
984 void ib_free_recv_mad(struct ib_mad_recv_wc
*mad_recv_wc
)
986 struct ib_mad_recv_buf
*mad_recv_buf
, *temp_recv_buf
;
987 struct ib_mad_private_header
*mad_priv_hdr
;
988 struct ib_mad_private
*priv
;
989 struct list_head free_list
;
991 INIT_LIST_HEAD(&free_list
);
992 list_splice_init(&mad_recv_wc
->rmpp_list
, &free_list
);
994 list_for_each_entry_safe(mad_recv_buf
, temp_recv_buf
,
996 mad_recv_wc
= container_of(mad_recv_buf
, struct ib_mad_recv_wc
,
998 mad_priv_hdr
= container_of(mad_recv_wc
,
999 struct ib_mad_private_header
,
1001 priv
= container_of(mad_priv_hdr
, struct ib_mad_private
,
1003 kmem_cache_free(ib_mad_cache
, priv
);
1006 EXPORT_SYMBOL(ib_free_recv_mad
);
1008 struct ib_mad_agent
*ib_redirect_mad_qp(struct ib_qp
*qp
,
1010 ib_mad_send_handler send_handler
,
1011 ib_mad_recv_handler recv_handler
,
1014 return ERR_PTR(-EINVAL
); /* XXX: for now */
1016 EXPORT_SYMBOL(ib_redirect_mad_qp
);
1018 int ib_process_mad_wc(struct ib_mad_agent
*mad_agent
,
1021 printk(KERN_ERR PFX
"ib_process_mad_wc() not implemented yet\n");
1024 EXPORT_SYMBOL(ib_process_mad_wc
);
1026 static int method_in_use(struct ib_mad_mgmt_method_table
**method
,
1027 struct ib_mad_reg_req
*mad_reg_req
)
1031 for (i
= find_first_bit(mad_reg_req
->method_mask
, IB_MGMT_MAX_METHODS
);
1032 i
< IB_MGMT_MAX_METHODS
;
1033 i
= find_next_bit(mad_reg_req
->method_mask
, IB_MGMT_MAX_METHODS
,
1035 if ((*method
)->agent
[i
]) {
1036 printk(KERN_ERR PFX
"Method %d already in use\n", i
);
1043 static int allocate_method_table(struct ib_mad_mgmt_method_table
**method
)
1045 /* Allocate management method table */
1046 *method
= kzalloc(sizeof **method
, GFP_ATOMIC
);
1048 printk(KERN_ERR PFX
"No memory for "
1049 "ib_mad_mgmt_method_table\n");
1057 * Check to see if there are any methods still in use
1059 static int check_method_table(struct ib_mad_mgmt_method_table
*method
)
1063 for (i
= 0; i
< IB_MGMT_MAX_METHODS
; i
++)
1064 if (method
->agent
[i
])
1070 * Check to see if there are any method tables for this class still in use
1072 static int check_class_table(struct ib_mad_mgmt_class_table
*class)
1076 for (i
= 0; i
< MAX_MGMT_CLASS
; i
++)
1077 if (class->method_table
[i
])
1082 static int check_vendor_class(struct ib_mad_mgmt_vendor_class
*vendor_class
)
1086 for (i
= 0; i
< MAX_MGMT_OUI
; i
++)
1087 if (vendor_class
->method_table
[i
])
1092 static int find_vendor_oui(struct ib_mad_mgmt_vendor_class
*vendor_class
,
1097 for (i
= 0; i
< MAX_MGMT_OUI
; i
++)
1098 /* Is there matching OUI for this vendor class ? */
1099 if (!memcmp(vendor_class
->oui
[i
], oui
, 3))
1105 static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table
*vendor
)
1109 for (i
= 0; i
< MAX_MGMT_VENDOR_RANGE2
; i
++)
1110 if (vendor
->vendor_class
[i
])
1116 static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table
*method
,
1117 struct ib_mad_agent_private
*agent
)
1121 /* Remove any methods for this mad agent */
1122 for (i
= 0; i
< IB_MGMT_MAX_METHODS
; i
++) {
1123 if (method
->agent
[i
] == agent
) {
1124 method
->agent
[i
] = NULL
;
1129 static int add_nonoui_reg_req(struct ib_mad_reg_req
*mad_reg_req
,
1130 struct ib_mad_agent_private
*agent_priv
,
1133 struct ib_mad_port_private
*port_priv
;
1134 struct ib_mad_mgmt_class_table
**class;
1135 struct ib_mad_mgmt_method_table
**method
;
1138 port_priv
= agent_priv
->qp_info
->port_priv
;
1139 class = &port_priv
->version
[mad_reg_req
->mgmt_class_version
].class;
1141 /* Allocate management class table for "new" class version */
1142 *class = kzalloc(sizeof **class, GFP_ATOMIC
);
1144 printk(KERN_ERR PFX
"No memory for "
1145 "ib_mad_mgmt_class_table\n");
1150 /* Allocate method table for this management class */
1151 method
= &(*class)->method_table
[mgmt_class
];
1152 if ((ret
= allocate_method_table(method
)))
1155 method
= &(*class)->method_table
[mgmt_class
];
1157 /* Allocate method table for this management class */
1158 if ((ret
= allocate_method_table(method
)))
1163 /* Now, make sure methods are not already in use */
1164 if (method_in_use(method
, mad_reg_req
))
1167 /* Finally, add in methods being registered */
1168 for (i
= find_first_bit(mad_reg_req
->method_mask
,
1169 IB_MGMT_MAX_METHODS
);
1170 i
< IB_MGMT_MAX_METHODS
;
1171 i
= find_next_bit(mad_reg_req
->method_mask
, IB_MGMT_MAX_METHODS
,
1173 (*method
)->agent
[i
] = agent_priv
;
1178 /* Remove any methods for this mad agent */
1179 remove_methods_mad_agent(*method
, agent_priv
);
1180 /* Now, check to see if there are any methods in use */
1181 if (!check_method_table(*method
)) {
1182 /* If not, release management method table */
1195 static int add_oui_reg_req(struct ib_mad_reg_req
*mad_reg_req
,
1196 struct ib_mad_agent_private
*agent_priv
)
1198 struct ib_mad_port_private
*port_priv
;
1199 struct ib_mad_mgmt_vendor_class_table
**vendor_table
;
1200 struct ib_mad_mgmt_vendor_class_table
*vendor
= NULL
;
1201 struct ib_mad_mgmt_vendor_class
*vendor_class
= NULL
;
1202 struct ib_mad_mgmt_method_table
**method
;
1203 int i
, ret
= -ENOMEM
;
1206 /* "New" vendor (with OUI) class */
1207 vclass
= vendor_class_index(mad_reg_req
->mgmt_class
);
1208 port_priv
= agent_priv
->qp_info
->port_priv
;
1209 vendor_table
= &port_priv
->version
[
1210 mad_reg_req
->mgmt_class_version
].vendor
;
1211 if (!*vendor_table
) {
1212 /* Allocate mgmt vendor class table for "new" class version */
1213 vendor
= kzalloc(sizeof *vendor
, GFP_ATOMIC
);
1215 printk(KERN_ERR PFX
"No memory for "
1216 "ib_mad_mgmt_vendor_class_table\n");
1220 *vendor_table
= vendor
;
1222 if (!(*vendor_table
)->vendor_class
[vclass
]) {
1223 /* Allocate table for this management vendor class */
1224 vendor_class
= kzalloc(sizeof *vendor_class
, GFP_ATOMIC
);
1225 if (!vendor_class
) {
1226 printk(KERN_ERR PFX
"No memory for "
1227 "ib_mad_mgmt_vendor_class\n");
1231 (*vendor_table
)->vendor_class
[vclass
] = vendor_class
;
1233 for (i
= 0; i
< MAX_MGMT_OUI
; i
++) {
1234 /* Is there matching OUI for this vendor class ? */
1235 if (!memcmp((*vendor_table
)->vendor_class
[vclass
]->oui
[i
],
1236 mad_reg_req
->oui
, 3)) {
1237 method
= &(*vendor_table
)->vendor_class
[
1238 vclass
]->method_table
[i
];
1243 for (i
= 0; i
< MAX_MGMT_OUI
; i
++) {
1244 /* OUI slot available ? */
1245 if (!is_vendor_oui((*vendor_table
)->vendor_class
[
1247 method
= &(*vendor_table
)->vendor_class
[
1248 vclass
]->method_table
[i
];
1250 /* Allocate method table for this OUI */
1251 if ((ret
= allocate_method_table(method
)))
1253 memcpy((*vendor_table
)->vendor_class
[vclass
]->oui
[i
],
1254 mad_reg_req
->oui
, 3);
1258 printk(KERN_ERR PFX
"All OUI slots in use\n");
1262 /* Now, make sure methods are not already in use */
1263 if (method_in_use(method
, mad_reg_req
))
1266 /* Finally, add in methods being registered */
1267 for (i
= find_first_bit(mad_reg_req
->method_mask
,
1268 IB_MGMT_MAX_METHODS
);
1269 i
< IB_MGMT_MAX_METHODS
;
1270 i
= find_next_bit(mad_reg_req
->method_mask
, IB_MGMT_MAX_METHODS
,
1272 (*method
)->agent
[i
] = agent_priv
;
1277 /* Remove any methods for this mad agent */
1278 remove_methods_mad_agent(*method
, agent_priv
);
1279 /* Now, check to see if there are any methods in use */
1280 if (!check_method_table(*method
)) {
1281 /* If not, release management method table */
1288 (*vendor_table
)->vendor_class
[vclass
] = NULL
;
1289 kfree(vendor_class
);
1293 *vendor_table
= NULL
;
1300 static void remove_mad_reg_req(struct ib_mad_agent_private
*agent_priv
)
1302 struct ib_mad_port_private
*port_priv
;
1303 struct ib_mad_mgmt_class_table
*class;
1304 struct ib_mad_mgmt_method_table
*method
;
1305 struct ib_mad_mgmt_vendor_class_table
*vendor
;
1306 struct ib_mad_mgmt_vendor_class
*vendor_class
;
1311 * Was MAD registration request supplied
1312 * with original registration ?
1314 if (!agent_priv
->reg_req
) {
1318 port_priv
= agent_priv
->qp_info
->port_priv
;
1319 mgmt_class
= convert_mgmt_class(agent_priv
->reg_req
->mgmt_class
);
1320 class = port_priv
->version
[
1321 agent_priv
->reg_req
->mgmt_class_version
].class;
1325 method
= class->method_table
[mgmt_class
];
1327 /* Remove any methods for this mad agent */
1328 remove_methods_mad_agent(method
, agent_priv
);
1329 /* Now, check to see if there are any methods still in use */
1330 if (!check_method_table(method
)) {
1331 /* If not, release management method table */
1333 class->method_table
[mgmt_class
] = NULL
;
1334 /* Any management classes left ? */
1335 if (!check_class_table(class)) {
1336 /* If not, release management class table */
1339 agent_priv
->reg_req
->
1340 mgmt_class_version
].class = NULL
;
1346 if (!is_vendor_class(mgmt_class
))
1349 /* normalize mgmt_class to vendor range 2 */
1350 mgmt_class
= vendor_class_index(agent_priv
->reg_req
->mgmt_class
);
1351 vendor
= port_priv
->version
[
1352 agent_priv
->reg_req
->mgmt_class_version
].vendor
;
1357 vendor_class
= vendor
->vendor_class
[mgmt_class
];
1359 index
= find_vendor_oui(vendor_class
, agent_priv
->reg_req
->oui
);
1362 method
= vendor_class
->method_table
[index
];
1364 /* Remove any methods for this mad agent */
1365 remove_methods_mad_agent(method
, agent_priv
);
1367 * Now, check to see if there are
1368 * any methods still in use
1370 if (!check_method_table(method
)) {
1371 /* If not, release management method table */
1373 vendor_class
->method_table
[index
] = NULL
;
1374 memset(vendor_class
->oui
[index
], 0, 3);
1375 /* Any OUIs left ? */
1376 if (!check_vendor_class(vendor_class
)) {
1377 /* If not, release vendor class table */
1378 kfree(vendor_class
);
1379 vendor
->vendor_class
[mgmt_class
] = NULL
;
1380 /* Any other vendor classes left ? */
1381 if (!check_vendor_table(vendor
)) {
1384 agent_priv
->reg_req
->
1385 mgmt_class_version
].
1397 static struct ib_mad_agent_private
*
1398 find_mad_agent(struct ib_mad_port_private
*port_priv
,
1401 struct ib_mad_agent_private
*mad_agent
= NULL
;
1402 unsigned long flags
;
1404 spin_lock_irqsave(&port_priv
->reg_lock
, flags
);
1405 if (response_mad(mad
)) {
1407 struct ib_mad_agent_private
*entry
;
1410 * Routing is based on high 32 bits of transaction ID
1413 hi_tid
= be64_to_cpu(mad
->mad_hdr
.tid
) >> 32;
1414 list_for_each_entry(entry
, &port_priv
->agent_list
, agent_list
) {
1415 if (entry
->agent
.hi_tid
== hi_tid
) {
1421 struct ib_mad_mgmt_class_table
*class;
1422 struct ib_mad_mgmt_method_table
*method
;
1423 struct ib_mad_mgmt_vendor_class_table
*vendor
;
1424 struct ib_mad_mgmt_vendor_class
*vendor_class
;
1425 struct ib_vendor_mad
*vendor_mad
;
1429 * Routing is based on version, class, and method
1430 * For "newer" vendor MADs, also based on OUI
1432 if (mad
->mad_hdr
.class_version
>= MAX_MGMT_VERSION
)
1434 if (!is_vendor_class(mad
->mad_hdr
.mgmt_class
)) {
1435 class = port_priv
->version
[
1436 mad
->mad_hdr
.class_version
].class;
1439 method
= class->method_table
[convert_mgmt_class(
1440 mad
->mad_hdr
.mgmt_class
)];
1442 mad_agent
= method
->agent
[mad
->mad_hdr
.method
&
1443 ~IB_MGMT_METHOD_RESP
];
1445 vendor
= port_priv
->version
[
1446 mad
->mad_hdr
.class_version
].vendor
;
1449 vendor_class
= vendor
->vendor_class
[vendor_class_index(
1450 mad
->mad_hdr
.mgmt_class
)];
1453 /* Find matching OUI */
1454 vendor_mad
= (struct ib_vendor_mad
*)mad
;
1455 index
= find_vendor_oui(vendor_class
, vendor_mad
->oui
);
1458 method
= vendor_class
->method_table
[index
];
1460 mad_agent
= method
->agent
[mad
->mad_hdr
.method
&
1461 ~IB_MGMT_METHOD_RESP
];
1467 if (mad_agent
->agent
.recv_handler
)
1468 atomic_inc(&mad_agent
->refcount
);
1470 printk(KERN_NOTICE PFX
"No receive handler for client "
1472 &mad_agent
->agent
, port_priv
->port_num
);
1477 spin_unlock_irqrestore(&port_priv
->reg_lock
, flags
);
1482 static int validate_mad(struct ib_mad
*mad
, u32 qp_num
)
1486 /* Make sure MAD base version is understood */
1487 if (mad
->mad_hdr
.base_version
!= IB_MGMT_BASE_VERSION
) {
1488 printk(KERN_ERR PFX
"MAD received with unsupported base "
1489 "version %d\n", mad
->mad_hdr
.base_version
);
1493 /* Filter SMI packets sent to other than QP0 */
1494 if ((mad
->mad_hdr
.mgmt_class
== IB_MGMT_CLASS_SUBN_LID_ROUTED
) ||
1495 (mad
->mad_hdr
.mgmt_class
== IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
)) {
1499 /* Filter GSI packets sent to QP0 */
1508 static int is_data_mad(struct ib_mad_agent_private
*mad_agent_priv
,
1509 struct ib_mad_hdr
*mad_hdr
)
1511 struct ib_rmpp_mad
*rmpp_mad
;
1513 rmpp_mad
= (struct ib_rmpp_mad
*)mad_hdr
;
1514 return !mad_agent_priv
->agent
.rmpp_version
||
1515 !(ib_get_rmpp_flags(&rmpp_mad
->rmpp_hdr
) &
1516 IB_MGMT_RMPP_FLAG_ACTIVE
) ||
1517 (rmpp_mad
->rmpp_hdr
.rmpp_type
== IB_MGMT_RMPP_TYPE_DATA
);
1520 struct ib_mad_send_wr_private
*
1521 ib_find_send_mad(struct ib_mad_agent_private
*mad_agent_priv
, __be64 tid
)
1523 struct ib_mad_send_wr_private
*mad_send_wr
;
1525 list_for_each_entry(mad_send_wr
, &mad_agent_priv
->wait_list
,
1527 if (mad_send_wr
->tid
== tid
)
1532 * It's possible to receive the response before we've
1533 * been notified that the send has completed
1535 list_for_each_entry(mad_send_wr
, &mad_agent_priv
->send_list
,
1537 if (is_data_mad(mad_agent_priv
, mad_send_wr
->send_buf
.mad
) &&
1538 mad_send_wr
->tid
== tid
&& mad_send_wr
->timeout
) {
1539 /* Verify request has not been canceled */
1540 return (mad_send_wr
->status
== IB_WC_SUCCESS
) ?
1547 void ib_mark_mad_done(struct ib_mad_send_wr_private
*mad_send_wr
)
1549 mad_send_wr
->timeout
= 0;
1550 if (mad_send_wr
->refcount
== 1) {
1551 list_del(&mad_send_wr
->agent_list
);
1552 list_add_tail(&mad_send_wr
->agent_list
,
1553 &mad_send_wr
->mad_agent_priv
->done_list
);
1557 static void ib_mad_complete_recv(struct ib_mad_agent_private
*mad_agent_priv
,
1558 struct ib_mad_recv_wc
*mad_recv_wc
)
1560 struct ib_mad_send_wr_private
*mad_send_wr
;
1561 struct ib_mad_send_wc mad_send_wc
;
1562 unsigned long flags
;
1565 INIT_LIST_HEAD(&mad_recv_wc
->rmpp_list
);
1566 list_add(&mad_recv_wc
->recv_buf
.list
, &mad_recv_wc
->rmpp_list
);
1567 if (mad_agent_priv
->agent
.rmpp_version
) {
1568 mad_recv_wc
= ib_process_rmpp_recv_wc(mad_agent_priv
,
1571 if (atomic_dec_and_test(&mad_agent_priv
->refcount
))
1572 wake_up(&mad_agent_priv
->wait
);
1577 /* Complete corresponding request */
1578 if (response_mad(mad_recv_wc
->recv_buf
.mad
)) {
1579 tid
= mad_recv_wc
->recv_buf
.mad
->mad_hdr
.tid
;
1580 spin_lock_irqsave(&mad_agent_priv
->lock
, flags
);
1581 mad_send_wr
= ib_find_send_mad(mad_agent_priv
, tid
);
1583 spin_unlock_irqrestore(&mad_agent_priv
->lock
, flags
);
1584 ib_free_recv_mad(mad_recv_wc
);
1585 if (atomic_dec_and_test(&mad_agent_priv
->refcount
))
1586 wake_up(&mad_agent_priv
->wait
);
1589 ib_mark_mad_done(mad_send_wr
);
1590 spin_unlock_irqrestore(&mad_agent_priv
->lock
, flags
);
1592 /* Defined behavior is to complete response before request */
1593 mad_recv_wc
->wc
->wr_id
= (unsigned long) &mad_send_wr
->send_buf
;
1594 mad_agent_priv
->agent
.recv_handler(&mad_agent_priv
->agent
,
1596 atomic_dec(&mad_agent_priv
->refcount
);
1598 mad_send_wc
.status
= IB_WC_SUCCESS
;
1599 mad_send_wc
.vendor_err
= 0;
1600 mad_send_wc
.send_buf
= &mad_send_wr
->send_buf
;
1601 ib_mad_complete_send_wr(mad_send_wr
, &mad_send_wc
);
1603 mad_agent_priv
->agent
.recv_handler(&mad_agent_priv
->agent
,
1605 if (atomic_dec_and_test(&mad_agent_priv
->refcount
))
1606 wake_up(&mad_agent_priv
->wait
);
1610 static void ib_mad_recv_done_handler(struct ib_mad_port_private
*port_priv
,
1613 struct ib_mad_qp_info
*qp_info
;
1614 struct ib_mad_private_header
*mad_priv_hdr
;
1615 struct ib_mad_private
*recv
, *response
;
1616 struct ib_mad_list_head
*mad_list
;
1617 struct ib_mad_agent_private
*mad_agent
;
1619 response
= kmem_cache_alloc(ib_mad_cache
, GFP_KERNEL
);
1621 printk(KERN_ERR PFX
"ib_mad_recv_done_handler no memory "
1622 "for response buffer\n");
1624 mad_list
= (struct ib_mad_list_head
*)(unsigned long)wc
->wr_id
;
1625 qp_info
= mad_list
->mad_queue
->qp_info
;
1626 dequeue_mad(mad_list
);
1628 mad_priv_hdr
= container_of(mad_list
, struct ib_mad_private_header
,
1630 recv
= container_of(mad_priv_hdr
, struct ib_mad_private
, header
);
1631 dma_unmap_single(port_priv
->device
->dma_device
,
1632 pci_unmap_addr(&recv
->header
, mapping
),
1633 sizeof(struct ib_mad_private
) -
1634 sizeof(struct ib_mad_private_header
),
1637 /* Setup MAD receive work completion from "normal" work completion */
1638 recv
->header
.wc
= *wc
;
1639 recv
->header
.recv_wc
.wc
= &recv
->header
.wc
;
1640 recv
->header
.recv_wc
.mad_len
= sizeof(struct ib_mad
);
1641 recv
->header
.recv_wc
.recv_buf
.mad
= &recv
->mad
.mad
;
1642 recv
->header
.recv_wc
.recv_buf
.grh
= &recv
->grh
;
1644 if (atomic_read(&qp_info
->snoop_count
))
1645 snoop_recv(qp_info
, &recv
->header
.recv_wc
, IB_MAD_SNOOP_RECVS
);
1648 if (!validate_mad(&recv
->mad
.mad
, qp_info
->qp
->qp_num
))
1651 if (recv
->mad
.mad
.mad_hdr
.mgmt_class
==
1652 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
) {
1653 if (!smi_handle_dr_smp_recv(&recv
->mad
.smp
,
1654 port_priv
->device
->node_type
,
1655 port_priv
->port_num
,
1656 port_priv
->device
->phys_port_cnt
))
1658 if (!smi_check_forward_dr_smp(&recv
->mad
.smp
))
1660 if (!smi_handle_dr_smp_send(&recv
->mad
.smp
,
1661 port_priv
->device
->node_type
,
1662 port_priv
->port_num
))
1664 if (!smi_check_local_dr_smp(&recv
->mad
.smp
,
1666 port_priv
->port_num
))
1671 /* Give driver "right of first refusal" on incoming MAD */
1672 if (port_priv
->device
->process_mad
) {
1676 printk(KERN_ERR PFX
"No memory for response MAD\n");
1678 * Is it better to assume that
1679 * it wouldn't be processed ?
1684 ret
= port_priv
->device
->process_mad(port_priv
->device
, 0,
1685 port_priv
->port_num
,
1688 &response
->mad
.mad
);
1689 if (ret
& IB_MAD_RESULT_SUCCESS
) {
1690 if (ret
& IB_MAD_RESULT_CONSUMED
)
1692 if (ret
& IB_MAD_RESULT_REPLY
) {
1693 agent_send_response(&response
->mad
.mad
,
1696 port_priv
->port_num
,
1697 qp_info
->qp
->qp_num
);
1703 mad_agent
= find_mad_agent(port_priv
, &recv
->mad
.mad
);
1705 ib_mad_complete_recv(mad_agent
, &recv
->header
.recv_wc
);
1707 * recv is freed up in error cases in ib_mad_complete_recv
1708 * or via recv_handler in ib_mad_complete_recv()
1714 /* Post another receive request for this QP */
1716 ib_mad_post_receive_mads(qp_info
, response
);
1718 kmem_cache_free(ib_mad_cache
, recv
);
1720 ib_mad_post_receive_mads(qp_info
, recv
);
1723 static void adjust_timeout(struct ib_mad_agent_private
*mad_agent_priv
)
1725 struct ib_mad_send_wr_private
*mad_send_wr
;
1726 unsigned long delay
;
1728 if (list_empty(&mad_agent_priv
->wait_list
)) {
1729 cancel_delayed_work(&mad_agent_priv
->timed_work
);
1731 mad_send_wr
= list_entry(mad_agent_priv
->wait_list
.next
,
1732 struct ib_mad_send_wr_private
,
1735 if (time_after(mad_agent_priv
->timeout
,
1736 mad_send_wr
->timeout
)) {
1737 mad_agent_priv
->timeout
= mad_send_wr
->timeout
;
1738 cancel_delayed_work(&mad_agent_priv
->timed_work
);
1739 delay
= mad_send_wr
->timeout
- jiffies
;
1740 if ((long)delay
<= 0)
1742 queue_delayed_work(mad_agent_priv
->qp_info
->
1744 &mad_agent_priv
->timed_work
, delay
);
1749 static void wait_for_response(struct ib_mad_send_wr_private
*mad_send_wr
)
1751 struct ib_mad_agent_private
*mad_agent_priv
;
1752 struct ib_mad_send_wr_private
*temp_mad_send_wr
;
1753 struct list_head
*list_item
;
1754 unsigned long delay
;
1756 mad_agent_priv
= mad_send_wr
->mad_agent_priv
;
1757 list_del(&mad_send_wr
->agent_list
);
1759 delay
= mad_send_wr
->timeout
;
1760 mad_send_wr
->timeout
+= jiffies
;
1763 list_for_each_prev(list_item
, &mad_agent_priv
->wait_list
) {
1764 temp_mad_send_wr
= list_entry(list_item
,
1765 struct ib_mad_send_wr_private
,
1767 if (time_after(mad_send_wr
->timeout
,
1768 temp_mad_send_wr
->timeout
))
1773 list_item
= &mad_agent_priv
->wait_list
;
1774 list_add(&mad_send_wr
->agent_list
, list_item
);
1776 /* Reschedule a work item if we have a shorter timeout */
1777 if (mad_agent_priv
->wait_list
.next
== &mad_send_wr
->agent_list
) {
1778 cancel_delayed_work(&mad_agent_priv
->timed_work
);
1779 queue_delayed_work(mad_agent_priv
->qp_info
->port_priv
->wq
,
1780 &mad_agent_priv
->timed_work
, delay
);
1784 void ib_reset_mad_timeout(struct ib_mad_send_wr_private
*mad_send_wr
,
1787 mad_send_wr
->timeout
= msecs_to_jiffies(timeout_ms
);
1788 wait_for_response(mad_send_wr
);
1792 * Process a send work completion
1794 void ib_mad_complete_send_wr(struct ib_mad_send_wr_private
*mad_send_wr
,
1795 struct ib_mad_send_wc
*mad_send_wc
)
1797 struct ib_mad_agent_private
*mad_agent_priv
;
1798 unsigned long flags
;
1801 mad_agent_priv
= mad_send_wr
->mad_agent_priv
;
1802 spin_lock_irqsave(&mad_agent_priv
->lock
, flags
);
1803 if (mad_agent_priv
->agent
.rmpp_version
) {
1804 ret
= ib_process_rmpp_send_wc(mad_send_wr
, mad_send_wc
);
1805 if (ret
== IB_RMPP_RESULT_CONSUMED
)
1808 ret
= IB_RMPP_RESULT_UNHANDLED
;
1810 if (mad_send_wc
->status
!= IB_WC_SUCCESS
&&
1811 mad_send_wr
->status
== IB_WC_SUCCESS
) {
1812 mad_send_wr
->status
= mad_send_wc
->status
;
1813 mad_send_wr
->refcount
-= (mad_send_wr
->timeout
> 0);
1816 if (--mad_send_wr
->refcount
> 0) {
1817 if (mad_send_wr
->refcount
== 1 && mad_send_wr
->timeout
&&
1818 mad_send_wr
->status
== IB_WC_SUCCESS
) {
1819 wait_for_response(mad_send_wr
);
1824 /* Remove send from MAD agent and notify client of completion */
1825 list_del(&mad_send_wr
->agent_list
);
1826 adjust_timeout(mad_agent_priv
);
1827 spin_unlock_irqrestore(&mad_agent_priv
->lock
, flags
);
1829 if (mad_send_wr
->status
!= IB_WC_SUCCESS
)
1830 mad_send_wc
->status
= mad_send_wr
->status
;
1831 if (ret
== IB_RMPP_RESULT_INTERNAL
)
1832 ib_rmpp_send_handler(mad_send_wc
);
1834 mad_agent_priv
->agent
.send_handler(&mad_agent_priv
->agent
,
1837 /* Release reference on agent taken when sending */
1838 if (atomic_dec_and_test(&mad_agent_priv
->refcount
))
1839 wake_up(&mad_agent_priv
->wait
);
1842 spin_unlock_irqrestore(&mad_agent_priv
->lock
, flags
);
1845 static void ib_mad_send_done_handler(struct ib_mad_port_private
*port_priv
,
1848 struct ib_mad_send_wr_private
*mad_send_wr
, *queued_send_wr
;
1849 struct ib_mad_list_head
*mad_list
;
1850 struct ib_mad_qp_info
*qp_info
;
1851 struct ib_mad_queue
*send_queue
;
1852 struct ib_send_wr
*bad_send_wr
;
1853 struct ib_mad_send_wc mad_send_wc
;
1854 unsigned long flags
;
1857 mad_list
= (struct ib_mad_list_head
*)(unsigned long)wc
->wr_id
;
1858 mad_send_wr
= container_of(mad_list
, struct ib_mad_send_wr_private
,
1860 send_queue
= mad_list
->mad_queue
;
1861 qp_info
= send_queue
->qp_info
;
1864 dma_unmap_single(mad_send_wr
->send_buf
.mad_agent
->device
->dma_device
,
1865 pci_unmap_addr(mad_send_wr
, mapping
),
1866 mad_send_wr
->sg_list
[0].length
, DMA_TO_DEVICE
);
1867 queued_send_wr
= NULL
;
1868 spin_lock_irqsave(&send_queue
->lock
, flags
);
1869 list_del(&mad_list
->list
);
1871 /* Move queued send to the send queue */
1872 if (send_queue
->count
-- > send_queue
->max_active
) {
1873 mad_list
= container_of(qp_info
->overflow_list
.next
,
1874 struct ib_mad_list_head
, list
);
1875 queued_send_wr
= container_of(mad_list
,
1876 struct ib_mad_send_wr_private
,
1878 list_del(&mad_list
->list
);
1879 list_add_tail(&mad_list
->list
, &send_queue
->list
);
1881 spin_unlock_irqrestore(&send_queue
->lock
, flags
);
1883 mad_send_wc
.send_buf
= &mad_send_wr
->send_buf
;
1884 mad_send_wc
.status
= wc
->status
;
1885 mad_send_wc
.vendor_err
= wc
->vendor_err
;
1886 if (atomic_read(&qp_info
->snoop_count
))
1887 snoop_send(qp_info
, &mad_send_wr
->send_buf
, &mad_send_wc
,
1888 IB_MAD_SNOOP_SEND_COMPLETIONS
);
1889 ib_mad_complete_send_wr(mad_send_wr
, &mad_send_wc
);
1891 if (queued_send_wr
) {
1892 ret
= ib_post_send(qp_info
->qp
, &queued_send_wr
->send_wr
,
1895 printk(KERN_ERR PFX
"ib_post_send failed: %d\n", ret
);
1896 mad_send_wr
= queued_send_wr
;
1897 wc
->status
= IB_WC_LOC_QP_OP_ERR
;
1903 static void mark_sends_for_retry(struct ib_mad_qp_info
*qp_info
)
1905 struct ib_mad_send_wr_private
*mad_send_wr
;
1906 struct ib_mad_list_head
*mad_list
;
1907 unsigned long flags
;
1909 spin_lock_irqsave(&qp_info
->send_queue
.lock
, flags
);
1910 list_for_each_entry(mad_list
, &qp_info
->send_queue
.list
, list
) {
1911 mad_send_wr
= container_of(mad_list
,
1912 struct ib_mad_send_wr_private
,
1914 mad_send_wr
->retry
= 1;
1916 spin_unlock_irqrestore(&qp_info
->send_queue
.lock
, flags
);
1919 static void mad_error_handler(struct ib_mad_port_private
*port_priv
,
1922 struct ib_mad_list_head
*mad_list
;
1923 struct ib_mad_qp_info
*qp_info
;
1924 struct ib_mad_send_wr_private
*mad_send_wr
;
1927 /* Determine if failure was a send or receive */
1928 mad_list
= (struct ib_mad_list_head
*)(unsigned long)wc
->wr_id
;
1929 qp_info
= mad_list
->mad_queue
->qp_info
;
1930 if (mad_list
->mad_queue
== &qp_info
->recv_queue
)
1932 * Receive errors indicate that the QP has entered the error
1933 * state - error handling/shutdown code will cleanup
1938 * Send errors will transition the QP to SQE - move
1939 * QP to RTS and repost flushed work requests
1941 mad_send_wr
= container_of(mad_list
, struct ib_mad_send_wr_private
,
1943 if (wc
->status
== IB_WC_WR_FLUSH_ERR
) {
1944 if (mad_send_wr
->retry
) {
1946 struct ib_send_wr
*bad_send_wr
;
1948 mad_send_wr
->retry
= 0;
1949 ret
= ib_post_send(qp_info
->qp
, &mad_send_wr
->send_wr
,
1952 ib_mad_send_done_handler(port_priv
, wc
);
1954 ib_mad_send_done_handler(port_priv
, wc
);
1956 struct ib_qp_attr
*attr
;
1958 /* Transition QP to RTS and fail offending send */
1959 attr
= kmalloc(sizeof *attr
, GFP_KERNEL
);
1961 attr
->qp_state
= IB_QPS_RTS
;
1962 attr
->cur_qp_state
= IB_QPS_SQE
;
1963 ret
= ib_modify_qp(qp_info
->qp
, attr
,
1964 IB_QP_STATE
| IB_QP_CUR_STATE
);
1967 printk(KERN_ERR PFX
"mad_error_handler - "
1968 "ib_modify_qp to RTS : %d\n", ret
);
1970 mark_sends_for_retry(qp_info
);
1972 ib_mad_send_done_handler(port_priv
, wc
);
1977 * IB MAD completion callback
1979 static void ib_mad_completion_handler(void *data
)
1981 struct ib_mad_port_private
*port_priv
;
1984 port_priv
= (struct ib_mad_port_private
*)data
;
1985 ib_req_notify_cq(port_priv
->cq
, IB_CQ_NEXT_COMP
);
1987 while (ib_poll_cq(port_priv
->cq
, 1, &wc
) == 1) {
1988 if (wc
.status
== IB_WC_SUCCESS
) {
1989 switch (wc
.opcode
) {
1991 ib_mad_send_done_handler(port_priv
, &wc
);
1994 ib_mad_recv_done_handler(port_priv
, &wc
);
2001 mad_error_handler(port_priv
, &wc
);
2005 static void cancel_mads(struct ib_mad_agent_private
*mad_agent_priv
)
2007 unsigned long flags
;
2008 struct ib_mad_send_wr_private
*mad_send_wr
, *temp_mad_send_wr
;
2009 struct ib_mad_send_wc mad_send_wc
;
2010 struct list_head cancel_list
;
2012 INIT_LIST_HEAD(&cancel_list
);
2014 spin_lock_irqsave(&mad_agent_priv
->lock
, flags
);
2015 list_for_each_entry_safe(mad_send_wr
, temp_mad_send_wr
,
2016 &mad_agent_priv
->send_list
, agent_list
) {
2017 if (mad_send_wr
->status
== IB_WC_SUCCESS
) {
2018 mad_send_wr
->status
= IB_WC_WR_FLUSH_ERR
;
2019 mad_send_wr
->refcount
-= (mad_send_wr
->timeout
> 0);
2023 /* Empty wait list to prevent receives from finding a request */
2024 list_splice_init(&mad_agent_priv
->wait_list
, &cancel_list
);
2025 /* Empty local completion list as well */
2026 list_splice_init(&mad_agent_priv
->local_list
, &cancel_list
);
2027 spin_unlock_irqrestore(&mad_agent_priv
->lock
, flags
);
2029 /* Report all cancelled requests */
2030 mad_send_wc
.status
= IB_WC_WR_FLUSH_ERR
;
2031 mad_send_wc
.vendor_err
= 0;
2033 list_for_each_entry_safe(mad_send_wr
, temp_mad_send_wr
,
2034 &cancel_list
, agent_list
) {
2035 mad_send_wc
.send_buf
= &mad_send_wr
->send_buf
;
2036 list_del(&mad_send_wr
->agent_list
);
2037 mad_agent_priv
->agent
.send_handler(&mad_agent_priv
->agent
,
2039 atomic_dec(&mad_agent_priv
->refcount
);
2043 static struct ib_mad_send_wr_private
*
2044 find_send_wr(struct ib_mad_agent_private
*mad_agent_priv
,
2045 struct ib_mad_send_buf
*send_buf
)
2047 struct ib_mad_send_wr_private
*mad_send_wr
;
2049 list_for_each_entry(mad_send_wr
, &mad_agent_priv
->wait_list
,
2051 if (&mad_send_wr
->send_buf
== send_buf
)
2055 list_for_each_entry(mad_send_wr
, &mad_agent_priv
->send_list
,
2057 if (is_data_mad(mad_agent_priv
, mad_send_wr
->send_buf
.mad
) &&
2058 &mad_send_wr
->send_buf
== send_buf
)
2064 int ib_modify_mad(struct ib_mad_agent
*mad_agent
,
2065 struct ib_mad_send_buf
*send_buf
, u32 timeout_ms
)
2067 struct ib_mad_agent_private
*mad_agent_priv
;
2068 struct ib_mad_send_wr_private
*mad_send_wr
;
2069 unsigned long flags
;
2072 mad_agent_priv
= container_of(mad_agent
, struct ib_mad_agent_private
,
2074 spin_lock_irqsave(&mad_agent_priv
->lock
, flags
);
2075 mad_send_wr
= find_send_wr(mad_agent_priv
, send_buf
);
2076 if (!mad_send_wr
|| mad_send_wr
->status
!= IB_WC_SUCCESS
) {
2077 spin_unlock_irqrestore(&mad_agent_priv
->lock
, flags
);
2081 active
= (!mad_send_wr
->timeout
|| mad_send_wr
->refcount
> 1);
2083 mad_send_wr
->status
= IB_WC_WR_FLUSH_ERR
;
2084 mad_send_wr
->refcount
-= (mad_send_wr
->timeout
> 0);
2087 mad_send_wr
->send_buf
.timeout_ms
= timeout_ms
;
2089 mad_send_wr
->timeout
= msecs_to_jiffies(timeout_ms
);
2091 ib_reset_mad_timeout(mad_send_wr
, timeout_ms
);
2093 spin_unlock_irqrestore(&mad_agent_priv
->lock
, flags
);
2096 EXPORT_SYMBOL(ib_modify_mad
);
2098 void ib_cancel_mad(struct ib_mad_agent
*mad_agent
,
2099 struct ib_mad_send_buf
*send_buf
)
2101 ib_modify_mad(mad_agent
, send_buf
, 0);
2103 EXPORT_SYMBOL(ib_cancel_mad
);
static void local_completions(void *data)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_local_private *local;
	struct ib_mad_agent_private *recv_mad_agent;
	unsigned long flags;
	int recv = 0;
	struct ib_wc wc;
	struct ib_mad_send_wc mad_send_wc;

	mad_agent_priv = (struct ib_mad_agent_private *)data;

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->local_list)) {
		local = list_entry(mad_agent_priv->local_list.next,
				   struct ib_mad_local_private,
				   completion_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		if (local->mad_priv) {
			recv_mad_agent = local->recv_mad_agent;
			if (!recv_mad_agent) {
				printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
				goto local_send_completion;
			}

			recv = 1;
			/*
			 * Defined behavior is to complete response
			 * before request
			 */
			build_smp_wc((unsigned long) local->mad_send_wr,
				     be16_to_cpu(IB_LID_PERMISSIVE),
				     0, recv_mad_agent->agent.port_num, &wc);

			local->mad_priv->header.recv_wc.wc = &wc;
			local->mad_priv->header.recv_wc.mad_len =
						sizeof(struct ib_mad);
			INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
			list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
				 &local->mad_priv->header.recv_wc.rmpp_list);
			local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
			local->mad_priv->header.recv_wc.recv_buf.mad =
						&local->mad_priv->mad.mad;
			if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
				snoop_recv(recv_mad_agent->qp_info,
					   &local->mad_priv->header.recv_wc,
					   IB_MAD_SNOOP_RECVS);
			recv_mad_agent->agent.recv_handler(
						&recv_mad_agent->agent,
						&local->mad_priv->header.recv_wc);
			spin_lock_irqsave(&recv_mad_agent->lock, flags);
			atomic_dec(&recv_mad_agent->refcount);
			spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
		}

local_send_completion:
		/* Complete send */
		mad_send_wc.status = IB_WC_SUCCESS;
		mad_send_wc.vendor_err = 0;
		mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
		if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
			snoop_send(mad_agent_priv->qp_info,
				   &local->mad_send_wr->send_buf,
				   &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		list_del(&local->completion_list);
		atomic_dec(&mad_agent_priv->refcount);
		if (!recv)
			kmem_cache_free(ib_mad_cache, local->mad_priv);
		kfree(local);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
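/*
 * Resend a request that has timed out, provided it still has retries
 * left.  Returns 0 if the send was requeued on the agent's send list.
 */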
static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret;

	if (!mad_send_wr->retries--)
		return -ETIMEDOUT;

	mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);

	if (mad_send_wr->mad_agent_priv->agent.rmpp_version) {
		ret = ib_retry_rmpp(mad_send_wr);
		switch (ret) {
		case IB_RMPP_RESULT_UNHANDLED:
			ret = ib_send_mad(mad_send_wr);
			break;
		case IB_RMPP_RESULT_CONSUMED:
			ret = 0;
			break;
		default:
			ret = -ECOMM;
			break;
		}
	} else
		ret = ib_send_mad(mad_send_wr);

	if (!ret) {
		mad_send_wr->refcount++;
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->send_list);
	}
	return ret;
}
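/*
 * Work handler that scans the agent's wait list, retries sends whose
 * timeout has expired, and reports IB_WC_RESP_TIMEOUT_ERR for requests
 * that cannot be retried.
 */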
static void timeout_sends(void *data)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags, delay;

	mad_agent_priv = (struct ib_mad_agent_private *)data;
	mad_send_wc.vendor_err = 0;

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->wait_list)) {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_send_wr->timeout, jiffies)) {
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(mad_agent_priv->qp_info->
					   port_priv->wq,
					   &mad_agent_priv->timed_work, delay);
			break;
		}

		list_del(&mad_send_wr->agent_list);
		if (mad_send_wr->status == IB_WC_SUCCESS &&
		    !retry_send(mad_send_wr))
			continue;

		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (mad_send_wr->status == IB_WC_SUCCESS)
			mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
		else
			mad_send_wc.status = mad_send_wr->status;
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		atomic_dec(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
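/*
 * CQ completion callback: defer completion processing to the port's
 * single-threaded workqueue.
 */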
static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
{
	struct ib_mad_port_private *port_priv = cq->cq_context;

	queue_work(port_priv->wq, &port_priv->work);
}
/*
 * Allocate receive MADs and post receive WRs for them
 */
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad)
{
	unsigned long flags;
	int post, ret;
	struct ib_mad_private *mad_priv;
	struct ib_sge sg_list;
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct ib_mad_queue *recv_queue = &qp_info->recv_queue;

	/* Initialize common scatter list fields */
	sg_list.length = sizeof *mad_priv - sizeof mad_priv->header;
	sg_list.lkey = (*qp_info->port_priv->mr).lkey;

	/* Initialize common receive WR fields */
	recv_wr.next = NULL;
	recv_wr.sg_list = &sg_list;
	recv_wr.num_sge = 1;

	do {
		/* Allocate and map receive buffer */
		if (mad) {
			mad_priv = mad;
			mad = NULL;
		} else {
			mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
			if (!mad_priv) {
				printk(KERN_ERR PFX "No memory for receive buffer\n");
				ret = -ENOMEM;
				break;
			}
		}
		sg_list.addr = dma_map_single(qp_info->port_priv->
						device->dma_device,
					      &mad_priv->grh,
					      sizeof *mad_priv -
						sizeof mad_priv->header,
					      DMA_FROM_DEVICE);
		pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr);
		recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
		mad_priv->header.mad_list.mad_queue = recv_queue;

		/* Post receive WR */
		spin_lock_irqsave(&recv_queue->lock, flags);
		post = (++recv_queue->count < recv_queue->max_active);
		list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
		spin_unlock_irqrestore(&recv_queue->lock, flags);
		ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
		if (ret) {
			spin_lock_irqsave(&recv_queue->lock, flags);
			list_del(&mad_priv->header.mad_list.list);
			recv_queue->count--;
			spin_unlock_irqrestore(&recv_queue->lock, flags);
			dma_unmap_single(qp_info->port_priv->device->dma_device,
					 pci_unmap_addr(&mad_priv->header,
							mapping),
					 sizeof *mad_priv -
					   sizeof mad_priv->header,
					 DMA_FROM_DEVICE);
			kmem_cache_free(ib_mad_cache, mad_priv);
			printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
			break;
		}
	} while (post);

	return ret;
}
/*
 * Return all the posted receive MADs
 */
static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv;
	struct ib_mad_list_head *mad_list;

	while (!list_empty(&qp_info->recv_queue.list)) {

		mad_list = list_entry(qp_info->recv_queue.list.next,
				      struct ib_mad_list_head, list);
		mad_priv_hdr = container_of(mad_list,
					    struct ib_mad_private_header,
					    mad_list);
		recv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);

		/* Remove from posted receive MAD list */
		list_del(&mad_list->list);

		dma_unmap_single(qp_info->port_priv->device->dma_device,
				 pci_unmap_addr(&recv->header, mapping),
				 sizeof(struct ib_mad_private) -
				 sizeof(struct ib_mad_private_header),
				 DMA_FROM_DEVICE);
		kmem_cache_free(ib_mad_cache, recv);
	}

	qp_info->recv_queue.count = 0;
}
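/*
 * Start the port: move the special QPs through INIT/RTR/RTS, request
 * CQ notification, and post the initial receive MADs.
 */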
static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
{
	int ret, i;
	struct ib_qp_attr *attr;
	struct ib_qp *qp;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr) {
		printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n");
		return -ENOMEM;
	}

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		qp = port_priv->qp_info[i].qp;
		/*
		 * PKey index for QP1 is irrelevant but
		 * one is needed for the Reset to Init transition
		 */
		attr->qp_state = IB_QPS_INIT;
		attr->pkey_index = 0;
		attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE |
					     IB_QP_PKEY_INDEX | IB_QP_QKEY);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't change QP%d state to "
			       "INIT: %d\n", i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTR;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't change QP%d state to "
			       "RTR: %d\n", i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTS;
		attr->sq_psn = IB_MAD_SEND_Q_PSN;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't change QP%d state to "
			       "RTS: %d\n", i, ret);
			goto out;
		}
	}

	ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
	if (ret) {
		printk(KERN_ERR PFX "Failed to request completion "
		       "notification: %d\n", ret);
		goto out;
	}

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't post receive WRs\n");
			goto out;
		}
	}
out:
	kfree(attr);
	return ret;
}
static void qp_event_handler(struct ib_event *event, void *qp_context)
{
	struct ib_mad_qp_info *qp_info = qp_context;

	/* It's worse than that! He's dead, Jim! */
	printk(KERN_ERR PFX "Fatal error (%d) on MAD QP (%d)\n",
		event->event, qp_info->qp->qp_num);
}
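/* Initialize a send or receive MAD queue */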
static void init_mad_queue(struct ib_mad_qp_info *qp_info,
			   struct ib_mad_queue *mad_queue)
{
	mad_queue->qp_info = qp_info;
	mad_queue->count = 0;
	spin_lock_init(&mad_queue->lock);
	INIT_LIST_HEAD(&mad_queue->list);
}
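/* Initialize the QP information structure for one of the special QPs */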
static void init_mad_qp(struct ib_mad_port_private *port_priv,
			struct ib_mad_qp_info *qp_info)
{
	qp_info->port_priv = port_priv;
	init_mad_queue(qp_info, &qp_info->send_queue);
	init_mad_queue(qp_info, &qp_info->recv_queue);
	INIT_LIST_HEAD(&qp_info->overflow_list);
	spin_lock_init(&qp_info->snoop_lock);
	qp_info->snoop_table = NULL;
	qp_info->snoop_table_size = 0;
	atomic_set(&qp_info->snoop_count, 0);
}
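/* Create the special QP (SMI or GSI) for a port */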
static int create_mad_qp(struct ib_mad_qp_info *qp_info,
			 enum ib_qp_type qp_type)
{
	struct ib_qp_init_attr qp_init_attr;
	int ret;

	memset(&qp_init_attr, 0, sizeof qp_init_attr);
	qp_init_attr.send_cq = qp_info->port_priv->cq;
	qp_init_attr.recv_cq = qp_info->port_priv->cq;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.cap.max_send_wr = IB_MAD_QP_SEND_SIZE;
	qp_init_attr.cap.max_recv_wr = IB_MAD_QP_RECV_SIZE;
	qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
	qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
	qp_init_attr.qp_type = qp_type;
	qp_init_attr.port_num = qp_info->port_priv->port_num;
	qp_init_attr.qp_context = qp_info;
	qp_init_attr.event_handler = qp_event_handler;
	qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
	if (IS_ERR(qp_info->qp)) {
		printk(KERN_ERR PFX "Couldn't create ib_mad QP%d\n",
		       get_spl_qp_index(qp_type));
		ret = PTR_ERR(qp_info->qp);
		goto error;
	}
	/* Use minimum queue sizes unless the CQ is resized */
	qp_info->send_queue.max_active = IB_MAD_QP_SEND_SIZE;
	qp_info->recv_queue.max_active = IB_MAD_QP_RECV_SIZE;
	return 0;

error:
	return ret;
}
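/* Destroy the special QP and free its snoop table */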
static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
{
	ib_destroy_qp(qp_info->qp);
	kfree(qp_info->snoop_table);
}
/*
 * Open the port
 * Create the QP, PD, MR, and CQ if needed
 */
static int ib_mad_port_open(struct ib_device *device,
			    int port_num)
{
	int ret, cq_size;
	struct ib_mad_port_private *port_priv;
	unsigned long flags;
	char name[sizeof "ib_mad123"];

	/* Create new device info */
	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
	if (!port_priv) {
		printk(KERN_ERR PFX "No memory for ib_mad_port_private\n");
		return -ENOMEM;
	}
	port_priv->device = device;
	port_priv->port_num = port_num;
	spin_lock_init(&port_priv->reg_lock);
	INIT_LIST_HEAD(&port_priv->agent_list);
	init_mad_qp(port_priv, &port_priv->qp_info[0]);
	init_mad_qp(port_priv, &port_priv->qp_info[1]);

	cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2;
	port_priv->cq = ib_create_cq(port_priv->device,
				     ib_mad_thread_completion_handler,
				     NULL, port_priv, cq_size);
	if (IS_ERR(port_priv->cq)) {
		printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
		ret = PTR_ERR(port_priv->cq);
		goto error3;
	}

	port_priv->pd = ib_alloc_pd(device);
	if (IS_ERR(port_priv->pd)) {
		printk(KERN_ERR PFX "Couldn't create ib_mad PD\n");
		ret = PTR_ERR(port_priv->pd);
		goto error4;
	}

	port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(port_priv->mr)) {
		printk(KERN_ERR PFX "Couldn't get ib_mad DMA MR\n");
		ret = PTR_ERR(port_priv->mr);
		goto error5;
	}

	ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
	if (ret)
		goto error6;
	ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
	if (ret)
		goto error7;

	snprintf(name, sizeof name, "ib_mad%d", port_num);
	port_priv->wq = create_singlethread_workqueue(name);
	if (!port_priv->wq) {
		ret = -ENOMEM;
		goto error8;
	}
	INIT_WORK(&port_priv->work, ib_mad_completion_handler, port_priv);

	ret = ib_mad_port_start(port_priv);
	if (ret) {
		printk(KERN_ERR PFX "Couldn't start port\n");
		goto error9;
	}

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_add_tail(&port_priv->port_list, &ib_mad_port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
	return 0;

error9:
	destroy_workqueue(port_priv->wq);
error8:
	destroy_mad_qp(&port_priv->qp_info[1]);
error7:
	destroy_mad_qp(&port_priv->qp_info[0]);
error6:
	ib_dereg_mr(port_priv->mr);
error5:
	ib_dealloc_pd(port_priv->pd);
error4:
	ib_destroy_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
error3:
	kfree(port_priv);

	return ret;
}
/*
 * Close the port
 * If there are no classes using the port, free the port
 * resources (CQ, MR, PD, QP) and remove the port's info structure
 */
static int ib_mad_port_close(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	port_priv = __ib_get_mad_port(device, port_num);
	if (port_priv == NULL) {
		spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
		printk(KERN_ERR PFX "Port %d not found\n", port_num);
		return -ENODEV;
	}
	list_del(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	/* Stop processing completions. */
	flush_workqueue(port_priv->wq);
	destroy_workqueue(port_priv->wq);
	destroy_mad_qp(&port_priv->qp_info[1]);
	destroy_mad_qp(&port_priv->qp_info[0]);
	ib_dereg_mr(port_priv->mr);
	ib_dealloc_pd(port_priv->pd);
	ib_destroy_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
	/* XXX: Handle deallocation of MAD registration tables */

	kfree(port_priv);

	return 0;
}
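/*
 * IB client add callback: open MAD and agent services on every port of
 * the new device (port 0 only for a switch).
 */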
static void ib_mad_init_device(struct ib_device *device)
{
	int start, end, i;

	if (device->node_type == IB_NODE_SWITCH) {
		start = 0;
		end   = 0;
	} else {
		start = 1;
		end   = device->phys_port_cnt;
	}

	for (i = start; i <= end; i++) {
		if (ib_mad_port_open(device, i)) {
			printk(KERN_ERR PFX "Couldn't open %s port %d\n",
			       device->name, i);
			goto error;
		}
		if (ib_agent_port_open(device, i)) {
			printk(KERN_ERR PFX "Couldn't open %s port %d "
			       "for agents\n",
			       device->name, i);
			goto error_agent;
		}
	}
	return;

error_agent:
	if (ib_mad_port_close(device, i))
		printk(KERN_ERR PFX "Couldn't close %s port %d\n",
		       device->name, i);

error:
	i--;

	while (i >= start) {
		if (ib_agent_port_close(device, i))
			printk(KERN_ERR PFX "Couldn't close %s port %d "
			       "for agents\n",
			       device->name, i);
		if (ib_mad_port_close(device, i))
			printk(KERN_ERR PFX "Couldn't close %s port %d\n",
			       device->name, i);
		i--;
	}
}
static void ib_mad_remove_device(struct ib_device *device)
{
	int i, num_ports, cur_port;

	if (device->node_type == IB_NODE_SWITCH) {
		num_ports = 1;
		cur_port = 0;
	} else {
		num_ports = device->phys_port_cnt;
		cur_port = 1;
	}
	for (i = 0; i < num_ports; i++, cur_port++) {
		if (ib_agent_port_close(device, cur_port))
			printk(KERN_ERR PFX "Couldn't close %s port %d "
			       "for agents\n",
			       device->name, cur_port);
		if (ib_mad_port_close(device, cur_port))
			printk(KERN_ERR PFX "Couldn't close %s port %d\n",
			       device->name, cur_port);
	}
}
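/*
 * The MAD layer registers as an IB client so that it is notified when
 * devices are added to and removed from the core.
 */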
static struct ib_client mad_client = {
	.name   = "mad",
	.add = ib_mad_init_device,
	.remove = ib_mad_remove_device
};
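/* Module initialization: create the MAD cache and register the IB client */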
static int __init ib_mad_init_module(void)
{
	int ret;

	spin_lock_init(&ib_mad_port_list_lock);

	ib_mad_cache = kmem_cache_create("ib_mad",
					 sizeof(struct ib_mad_private),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL,
					 NULL);
	if (!ib_mad_cache) {
		printk(KERN_ERR PFX "Couldn't create ib_mad cache\n");
		ret = -ENOMEM;
		goto error1;
	}

	INIT_LIST_HEAD(&ib_mad_port_list);

	if (ib_register_client(&mad_client)) {
		printk(KERN_ERR PFX "Couldn't register ib_mad client\n");
		ret = -EINVAL;
		goto error2;
	}

	return 0;

error2:
	kmem_cache_destroy(ib_mad_cache);
error1:
	return ret;
}
static void __exit ib_mad_cleanup_module(void)
{
	ib_unregister_client(&mad_client);

	if (kmem_cache_destroy(ib_mad_cache)) {
		printk(KERN_DEBUG PFX "Failed to destroy ib_mad cache\n");
	}
}

module_init(ib_mad_init_module);
module_exit(ib_mad_cleanup_module);