/*
 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mad.c 5596 2006-03-03 01:00:07Z sean.hefty $
 */
#include <linux/dma-mapping.h>
#include <rdma/ib_cache.h>

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("kernel IB MAD API");
MODULE_AUTHOR("Hal Rosenstock");
MODULE_AUTHOR("Sean Hefty");

static kmem_cache_t *ib_mad_cache;

static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;

static spinlock_t ib_mad_port_list_lock;

/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(struct work_struct *work);
static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);

/*
 * Returns a ib_mad_port_private structure or NULL for a device/port
 * Assumes ib_mad_port_list_lock is being held
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
	struct ib_mad_port_private *entry;

	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
		if (entry->device == device && entry->port_num == port_num)

/*
 * Wrapper function to return a ib_mad_port_private structure or NULL
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
	struct ib_mad_port_private *entry;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	entry = __ib_get_mad_port(device, port_num);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

static inline u8 convert_mgmt_class(u8 mgmt_class)
	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?

static int get_spl_qp_index(enum ib_qp_type qp_type)

static int vendor_class_index(u8 mgmt_class)
	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;

static int is_vendor_class(u8 mgmt_class)
	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))

static int is_vendor_oui(char *oui)
	if (oui[0] || oui[1] || oui[2])

static int is_vendor_method_in_use(
				struct ib_mad_mgmt_vendor_class *vendor_class,
				struct ib_mad_reg_req *mad_reg_req)
	struct ib_mad_mgmt_method_table *method;

	for (i = 0; i < MAX_MGMT_OUI; i++) {
		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
			method = vendor_class->method_table[i];
			if (method_in_use(&method, mad_reg_req))

int ib_response_mad(struct ib_mad *mad)
	return ((mad->mad_hdr.method & IB_MGMT_METHOD_RESP) ||
		(mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
		((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_BM) &&
		 (mad->mad_hdr.attr_mod & IB_BM_ATTR_MOD_RESP)));
EXPORT_SYMBOL(ib_response_mad);
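
/*
 * Usage sketch (illustrative only, not part of this file): a client's
 * recv_handler can call ib_response_mad() to tell solicited responses
 * from unsolicited requests before deciding whether to generate a
 * reply.  The handler name below is hypothetical; ib_response_mad()
 * and ib_free_recv_mad() are the real entry points.
 *
 *	static void example_recv_handler(struct ib_mad_agent *agent,
 *					 struct ib_mad_recv_wc *wc)
 *	{
 *		if (!ib_response_mad(wc->recv_buf.mad))
 *			printk(KERN_INFO "unsolicited MAD, class 0x%x\n",
 *			       wc->recv_buf.mad->mad_hdr.mgmt_class);
 *		ib_free_recv_mad(wc);
 *	}
 */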
/*
 * ib_register_mad_agent - Register to send/receive MADs
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_reg_req *reg_req = NULL;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	struct ib_mad_mgmt_method_table *method;
	u8 mgmt_class, vclass;

	/* Validate parameters */
	qpn = get_spl_qp_index(qp_type);
	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION)

	/* Validate MAD registration request if supplied */
	if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION)
	if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
		/*
		 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
		 * one in this range currently allowed
		 */
		if (mad_reg_req->mgmt_class !=
		    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
	} else if (mad_reg_req->mgmt_class == 0) {
		/*
		 * Class 0 is reserved in IBA and is used for
		 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
		 */
	} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
		/*
		 * If class is in "new" vendor range,
		 * ensure supplied OUI is not zero
		 */
		if (!is_vendor_oui(mad_reg_req->oui))
	/* Make sure class supplied is consistent with RMPP */
	if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
	/* Make sure class supplied is consistent with QP type */
	if (qp_type == IB_QPT_SMI) {
		if ((mad_reg_req->mgmt_class !=
		     IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
		    (mad_reg_req->mgmt_class !=
		     IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
		if ((mad_reg_req->mgmt_class ==
		     IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
		    (mad_reg_req->mgmt_class ==
		     IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))

	/* No registration request supplied */

	/* Validate device and port */
	port_priv = ib_get_mad_port(device, port_num);
		ret = ERR_PTR(-ENODEV);

	/* Allocate structures */
	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
	if (!mad_agent_priv) {
		ret = ERR_PTR(-ENOMEM);

	mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
						 IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(mad_agent_priv->agent.mr)) {
		ret = ERR_PTR(-ENOMEM);

		reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL);
			ret = ERR_PTR(-ENOMEM);
		/* Make a copy of the MAD registration request */
		memcpy(reg_req, mad_reg_req, sizeof *reg_req);

	/* Now, fill in the various structures */
	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
	mad_agent_priv->reg_req = reg_req;
	mad_agent_priv->agent.rmpp_version = rmpp_version;
	mad_agent_priv->agent.device = device;
	mad_agent_priv->agent.recv_handler = recv_handler;
	mad_agent_priv->agent.send_handler = send_handler;
	mad_agent_priv->agent.context = context;
	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_agent_priv->agent.port_num = port_num;

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;

	/*
	 * Make sure MAD registration (if supplied)
	 * is non overlapping with any existing ones
	 */
		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
		if (!is_vendor_class(mgmt_class)) {
			class = port_priv->version[mad_reg_req->
						   mgmt_class_version].class;
				method = class->method_table[mgmt_class];
					if (method_in_use(&method,
			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
			/* "New" vendor class range */
			vendor = port_priv->version[mad_reg_req->
						    mgmt_class_version].vendor;
				vclass = vendor_class_index(mgmt_class);
				vendor_class = vendor->vendor_class[vclass];
					if (is_vendor_method_in_use(
			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);

	/* Add mad agent into port's agent list */
	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	spin_lock_init(&mad_agent_priv->lock);
	INIT_LIST_HEAD(&mad_agent_priv->send_list);
	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
	INIT_LIST_HEAD(&mad_agent_priv->done_list);
	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
	INIT_LIST_HEAD(&mad_agent_priv->local_list);
	INIT_WORK(&mad_agent_priv->local_work, local_completions);
	atomic_set(&mad_agent_priv->refcount, 1);
	init_completion(&mad_agent_priv->comp);

	return &mad_agent_priv->agent;

	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	ib_dereg_mr(mad_agent_priv->agent.mr);
	kfree(mad_agent_priv);
EXPORT_SYMBOL(ib_register_mad_agent);
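
/*
 * Registration sketch (illustrative, not part of the driver): a client
 * that wants to receive Performance Management MADs on the GSI QP
 * might register roughly as follows.  The handler names and error
 * handling are hypothetical; the ib_mad_reg_req fields and the
 * ib_register_mad_agent() call match the API implemented above.
 *
 *	struct ib_mad_reg_req req = {
 *		.mgmt_class	    = IB_MGMT_CLASS_PERF_MGMT,
 *		.mgmt_class_version = 1,
 *	};
 *	struct ib_mad_agent *agent;
 *
 *	set_bit(IB_MGMT_METHOD_GET, req.method_mask);
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI,
 *				      &req, 0, example_send_handler,
 *				      example_recv_handler, NULL);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 */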
static inline int is_snooping_sends(int mad_snoop_flags)
	return (mad_snoop_flags &
		(/*IB_MAD_SNOOP_POSTED_SENDS |
		   IB_MAD_SNOOP_RMPP_SENDS |*/
		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));

static inline int is_snooping_recvs(int mad_snoop_flags)
	return (mad_snoop_flags &
		(IB_MAD_SNOOP_RECVS /*|
		 IB_MAD_SNOOP_RMPP_RECVS*/));

static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
				struct ib_mad_snoop_private *mad_snoop_priv)
	struct ib_mad_snoop_private **new_snoop_table;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	/* Check for empty slot in array. */
	for (i = 0; i < qp_info->snoop_table_size; i++)
		if (!qp_info->snoop_table[i])

	if (i == qp_info->snoop_table_size) {
		new_snoop_table = kmalloc(sizeof mad_snoop_priv *
					  qp_info->snoop_table_size + 1,
		if (!new_snoop_table) {
		if (qp_info->snoop_table) {
			memcpy(new_snoop_table, qp_info->snoop_table,
			       sizeof mad_snoop_priv *
			       qp_info->snoop_table_size);
			kfree(qp_info->snoop_table);
		qp_info->snoop_table = new_snoop_table;
		qp_info->snoop_table_size++;
	qp_info->snoop_table[i] = mad_snoop_priv;
	atomic_inc(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
					   enum ib_qp_type qp_type,
					   ib_mad_snoop_handler snoop_handler,
					   ib_mad_recv_handler recv_handler,
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* Validate parameters */
	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
		ret = ERR_PTR(-EINVAL);
	qpn = get_spl_qp_index(qp_type);
		ret = ERR_PTR(-EINVAL);
	port_priv = ib_get_mad_port(device, port_num);
		ret = ERR_PTR(-ENODEV);
	/* Allocate structures */
	mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
	if (!mad_snoop_priv) {
		ret = ERR_PTR(-ENOMEM);

	/* Now, fill in the various structures */
	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
	mad_snoop_priv->agent.device = device;
	mad_snoop_priv->agent.recv_handler = recv_handler;
	mad_snoop_priv->agent.snoop_handler = snoop_handler;
	mad_snoop_priv->agent.context = context;
	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_snoop_priv->agent.port_num = port_num;
	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
	init_completion(&mad_snoop_priv->comp);
	mad_snoop_priv->snoop_index = register_snoop_agent(
						&port_priv->qp_info[qpn],
	if (mad_snoop_priv->snoop_index < 0) {
		ret = ERR_PTR(mad_snoop_priv->snoop_index);

	atomic_set(&mad_snoop_priv->refcount, 1);
	return &mad_snoop_priv->agent;

	kfree(mad_snoop_priv);
EXPORT_SYMBOL(ib_register_mad_snoop);

static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		complete(&mad_agent_priv->comp);

static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
	if (atomic_dec_and_test(&mad_snoop_priv->refcount))
		complete(&mad_snoop_priv->comp);

static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
	struct ib_mad_port_private *port_priv;

	/* Note that we could still be handling received MADs */

	/*
	 * Canceling all sends results in dropping received response
	 * MADs, preventing us from queuing additional work
	 */
	cancel_mads(mad_agent_priv);
	port_priv = mad_agent_priv->qp_info->port_priv;
	cancel_delayed_work(&mad_agent_priv->timed_work);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	remove_mad_reg_req(mad_agent_priv);
	list_del(&mad_agent_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	flush_workqueue(port_priv->wq);
	ib_cancel_rmpp_recvs(mad_agent_priv);

	deref_mad_agent(mad_agent_priv);
	wait_for_completion(&mad_agent_priv->comp);

	kfree(mad_agent_priv->reg_req);
	ib_dereg_mr(mad_agent_priv->agent.mr);
	kfree(mad_agent_priv);

static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
	struct ib_mad_qp_info *qp_info;

	qp_info = mad_snoop_priv->qp_info;
	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
	atomic_dec(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

	deref_snoop_agent(mad_snoop_priv);
	wait_for_completion(&mad_snoop_priv->comp);

	kfree(mad_snoop_priv);

/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* If the TID is zero, the agent can only snoop. */
	if (mad_agent->hi_tid) {
		mad_agent_priv = container_of(mad_agent,
					      struct ib_mad_agent_private,
		unregister_mad_agent(mad_agent_priv);
		mad_snoop_priv = container_of(mad_agent,
					      struct ib_mad_snoop_private,
		unregister_mad_snoop(mad_snoop_priv);
EXPORT_SYMBOL(ib_unregister_mad_agent);

static void dequeue_mad(struct ib_mad_list_head *mad_list)
	struct ib_mad_queue *mad_queue;

	BUG_ON(!mad_list->mad_queue);
	mad_queue = mad_list->mad_queue;
	spin_lock_irqsave(&mad_queue->lock, flags);
	list_del(&mad_list->list);
	spin_unlock_irqrestore(&mad_queue->lock, flags);

static void snoop_send(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_send_buf *send_buf,
		       struct ib_mad_send_wc *mad_send_wc,
	struct ib_mad_snoop_private *mad_snoop_priv;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
						    send_buf, mad_send_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

static void snoop_recv(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_recv_wc *mad_recv_wc,
	struct ib_mad_snoop_private *mad_snoop_priv;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

static void build_smp_wc(u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
	memset(wc, 0, sizeof *wc);
	wc->status = IB_WC_SUCCESS;
	wc->opcode = IB_WC_RECV;
	wc->pkey_index = pkey_index;
	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
	wc->dlid_path_bits = 0;
	wc->port_num = port_num;

/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
				  struct ib_mad_send_wr_private *mad_send_wr)
	struct ib_smp *smp = mad_send_wr->send_buf.mad;
	struct ib_mad_local_private *local;
	struct ib_mad_private *mad_priv;
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent_private *recv_mad_agent = NULL;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num = mad_agent_priv->agent.port_num;
	struct ib_send_wr *send_wr = &mad_send_wr->send_wr;

	/*
	 * Directed route handling starts if the initial LID routed part of
	 * a request or the ending LID routed part of a response is empty.
	 * If we are at the start of the LID routed part, don't update the
	 * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
	 */
	if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
	    !smi_handle_dr_smp_send(smp, device->node_type, port_num)) {
		printk(KERN_ERR PFX "Invalid directed route\n");
	/* Check to post send on QP or process locally */
	ret = smi_check_local_smp(smp, device);

	local = kmalloc(sizeof *local, GFP_ATOMIC);
		printk(KERN_ERR PFX "No memory for ib_mad_local_private\n");
	local->mad_priv = NULL;
	local->recv_mad_agent = NULL;
	mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
		printk(KERN_ERR PFX "No memory for local response MAD\n");

	build_smp_wc(send_wr->wr_id, be16_to_cpu(smp->dr_slid),
		     send_wr->wr.ud.pkey_index,
		     send_wr->wr.ud.port_num, &mad_wc);

	/* No GRH for DR SMP */
	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
				  (struct ib_mad *)smp,
				  (struct ib_mad *)&mad_priv->mad);
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
		if (ib_response_mad(&mad_priv->mad.mad) &&
		    mad_agent_priv->agent.recv_handler) {
			local->mad_priv = mad_priv;
			local->recv_mad_agent = mad_agent_priv;
			/*
			 * Reference MAD agent until receive
			 * side of local completion handled
			 */
			atomic_inc(&mad_agent_priv->refcount);
			kmem_cache_free(ib_mad_cache, mad_priv);
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
		kmem_cache_free(ib_mad_cache, mad_priv);
	case IB_MAD_RESULT_SUCCESS:
		/* Treat like an incoming receive MAD */
		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
					    mad_agent_priv->agent.port_num);
			mad_priv->mad.mad.mad_hdr.tid =
				((struct ib_mad *)smp)->mad_hdr.tid;
			recv_mad_agent = find_mad_agent(port_priv,
		if (!port_priv || !recv_mad_agent) {
			kmem_cache_free(ib_mad_cache, mad_priv);
		local->mad_priv = mad_priv;
		local->recv_mad_agent = recv_mad_agent;
		kmem_cache_free(ib_mad_cache, mad_priv);

	local->mad_send_wr = mad_send_wr;
	/* Reference MAD agent until send side of local completion handled */
	atomic_inc(&mad_agent_priv->refcount);
	/* Queue local completion to local list */
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->local_work);

static int get_pad_size(int hdr_len, int data_len)
	seg_size = sizeof(struct ib_mad) - hdr_len;
	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		return pad == seg_size ? 0 : pad;
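
/*
 * Worked example (illustrative): for an SA MAD, hdr_len is
 * IB_MGMT_SA_HDR (56 bytes), so seg_size = 256 - 56 = 200 bytes of
 * payload per MAD.  A data_len of 420 bytes gives
 * pad = 200 - (420 % 200) = 180, i.e. the last RMPP segment is padded
 * out to a full 200-byte data area.  When data_len is an exact
 * multiple of seg_size, the modulo is 0, pad computes to seg_size,
 * and the function returns 0 instead.
 */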
static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
	struct ib_rmpp_segment *s, *t;

	list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {

static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
	struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
	struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
	struct ib_rmpp_segment *seg = NULL;
	int left, seg_size, pad;

	send_buf->seg_size = sizeof (struct ib_mad) - send_buf->hdr_len;
	seg_size = send_buf->seg_size;

	/* Allocate data segments. */
	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
			printk(KERN_ERR "alloc_send_rmpp_segs: RMPP mem "
			       "alloc failed for len %zd, gfp %#x\n",
			       sizeof (*seg) + seg_size, gfp_mask);
			free_send_rmpp_list(send_wr);
		seg->num = ++send_buf->seg_count;
		list_add_tail(&seg->list, &send_wr->rmpp_list);

	/* Zero any padding */
		memset(seg->data + seg_size - pad, 0, pad);

	rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
					struct ib_rmpp_segment, list);
	send_wr->last_ack_seg = send_wr->cur_seg;

struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
					    u32 remote_qpn, u16 pkey_index,
					    int hdr_len, int data_len,
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	int pad, message_size, ret, size;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
	pad = get_pad_size(hdr_len, data_len);
	message_size = hdr_len + data_len + pad;

	if ((!mad_agent->rmpp_version &&
	     (rmpp_active || message_size > sizeof(struct ib_mad))) ||
	    (!rmpp_active && message_size > sizeof(struct ib_mad)))
		return ERR_PTR(-EINVAL);

	size = rmpp_active ? hdr_len : sizeof(struct ib_mad);
	buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
		return ERR_PTR(-ENOMEM);

	mad_send_wr = buf + size;
	INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
	mad_send_wr->send_buf.mad = buf;
	mad_send_wr->send_buf.hdr_len = hdr_len;
	mad_send_wr->send_buf.data_len = data_len;
	mad_send_wr->pad = pad;

	mad_send_wr->mad_agent_priv = mad_agent_priv;
	mad_send_wr->sg_list[0].length = hdr_len;
	mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;
	mad_send_wr->sg_list[1].length = sizeof(struct ib_mad) - hdr_len;
	mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey;

	mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
	mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
	mad_send_wr->send_wr.num_sge = 2;
	mad_send_wr->send_wr.opcode = IB_WR_SEND;
	mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
	mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
	mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
	mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;

		ret = alloc_send_rmpp_list(mad_send_wr, gfp_mask);

	mad_send_wr->send_buf.mad_agent = mad_agent;
	atomic_inc(&mad_agent_priv->refcount);
	return &mad_send_wr->send_buf;
EXPORT_SYMBOL(ib_create_send_mad);
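
/*
 * Allocation sketch (illustrative, not part of the driver): a client
 * sending a single, non-RMPP SA query could allocate and fill a send
 * buffer roughly as follows.  The agent, AH, timeout and retry values
 * are hypothetical placeholders; the ib_create_send_mad(),
 * ib_post_send_mad() and ib_free_send_mad() calls and the send_buf
 * fields match the code above.
 *
 *	struct ib_mad_send_buf *msg;
 *
 *	msg = ib_create_send_mad(agent, 1, 0, 0, IB_MGMT_SA_HDR,
 *				 IB_MGMT_SA_DATA, GFP_KERNEL);
 *	if (IS_ERR(msg))
 *		return PTR_ERR(msg);
 *	msg->ah	        = ah;
 *	msg->timeout_ms = 1000;
 *	msg->retries    = 2;
 *	... fill msg->mad with the request ...
 *	ret = ib_post_send_mad(msg, NULL);
 *	if (ret)
 *		ib_free_send_mad(msg);
 */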
int ib_get_mad_data_offset(u8 mgmt_class)
	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
		return IB_MGMT_SA_HDR;
	else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
		 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
		 (mgmt_class == IB_MGMT_CLASS_BIS))
		return IB_MGMT_DEVICE_HDR;
	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return IB_MGMT_VENDOR_HDR;
		return IB_MGMT_MAD_HDR;
EXPORT_SYMBOL(ib_get_mad_data_offset);
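
/*
 * Usage note (illustrative): the value returned here is the hdr_len a
 * client typically passes to ib_create_send_mad(), so the data area
 * starts right after the class-specific header, e.g.:
 *
 *	hdr_len = ib_get_mad_data_offset(IB_MGMT_CLASS_SUBN_ADM);
 *	msg = ib_create_send_mad(agent, 1, 0, rmpp_active,
 *				 hdr_len, data_len, GFP_KERNEL);
 *
 * agent, rmpp_active and data_len are placeholders for the caller's
 * values.
 */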
int ib_is_mad_class_rmpp(u8 mgmt_class)
	if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_BIS) ||
	    ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
	     (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
EXPORT_SYMBOL(ib_is_mad_class_rmpp);

void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
	struct ib_mad_send_wr_private *mad_send_wr;
	struct list_head *list;

	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
	list = &mad_send_wr->cur_seg->list;

	if (mad_send_wr->cur_seg->num < seg_num) {
		list_for_each_entry(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
	} else if (mad_send_wr->cur_seg->num > seg_num) {
		list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
	return mad_send_wr->cur_seg->data;
EXPORT_SYMBOL(ib_get_rmpp_segment);

static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
	if (mad_send_wr->send_buf.seg_count)
		return ib_get_rmpp_segment(&mad_send_wr->send_buf,
					   mad_send_wr->seg_num);
		return mad_send_wr->send_buf.mad +
		       mad_send_wr->send_buf.hdr_len;

void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;

	mad_agent_priv = container_of(send_buf->mad_agent,
				      struct ib_mad_agent_private, agent);
	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,

	free_send_rmpp_list(mad_send_wr);
	kfree(send_buf->mad);
	deref_mad_agent(mad_agent_priv);
EXPORT_SYMBOL(ib_free_send_mad);

int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
	struct ib_mad_qp_info *qp_info;
	struct list_head *list;
	struct ib_send_wr *bad_send_wr;
	struct ib_mad_agent *mad_agent;

	/* Set WR ID to find mad_send_wr upon completion */
	qp_info = mad_send_wr->mad_agent_priv->qp_info;
	mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;

	mad_agent = mad_send_wr->send_buf.mad_agent;
	sge = mad_send_wr->sg_list;
	sge[0].addr = dma_map_single(mad_agent->device->dma_device,
				     mad_send_wr->send_buf.mad,
	pci_unmap_addr_set(mad_send_wr, header_mapping, sge[0].addr);

	sge[1].addr = dma_map_single(mad_agent->device->dma_device,
				     ib_get_payload(mad_send_wr),
	pci_unmap_addr_set(mad_send_wr, payload_mapping, sge[1].addr);

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr,
		list = &qp_info->send_queue.list;
		list = &qp_info->overflow_list;
		qp_info->send_queue.count++;
		list_add_tail(&mad_send_wr->mad_list.list, list);
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
		dma_unmap_single(mad_agent->device->dma_device,
				 pci_unmap_addr(mad_send_wr, header_mapping),
				 sge[0].length, DMA_TO_DEVICE);
		dma_unmap_single(mad_agent->device->dma_device,
				 pci_unmap_addr(mad_send_wr, payload_mapping),
				 sge[1].length, DMA_TO_DEVICE);

/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 * with the registered client
 */
int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
		     struct ib_mad_send_buf **bad_send_buf)
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_buf *next_send_buf;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;

	/* Walk list of send WRs and post each on send list */
	for (; send_buf; send_buf = next_send_buf) {

		mad_send_wr = container_of(send_buf,
					   struct ib_mad_send_wr_private,
		mad_agent_priv = mad_send_wr->mad_agent_priv;

		if (!send_buf->mad_agent->send_handler ||
		    (send_buf->timeout_ms &&
		     !send_buf->mad_agent->recv_handler)) {

		if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
			if (mad_agent_priv->agent.rmpp_version) {

		/*
		 * Save pointer to next work request to post in case the
		 * current one completes, and the user modifies the work
		 * request associated with the completion
		 */
		next_send_buf = send_buf->next;
		mad_send_wr->send_wr.wr.ud.ah = send_buf->ah;

		if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
		    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
			ret = handle_outgoing_dr_smp(mad_agent_priv,
			if (ret < 0)		/* error */
			else if (ret == 1)	/* locally consumed */

		mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
		/* Timeout will be updated after send completes */
		mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
		mad_send_wr->retries = send_buf->retries;
		/* Reference for work request to QP + response */
		mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
		mad_send_wr->status = IB_WC_SUCCESS;

		/* Reference MAD agent until send completes */
		atomic_inc(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_agent_priv->send_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (mad_agent_priv->agent.rmpp_version) {
			ret = ib_send_rmpp_mad(mad_send_wr);
			if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
				ret = ib_send_mad(mad_send_wr);
			ret = ib_send_mad(mad_send_wr);
			/* Fail send request */
			spin_lock_irqsave(&mad_agent_priv->lock, flags);
			list_del(&mad_send_wr->agent_list);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			atomic_dec(&mad_agent_priv->refcount);
		*bad_send_buf = send_buf;
EXPORT_SYMBOL(ib_post_send_mad);
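
/*
 * Completion sketch (illustrative): once a posted buffer completes (or
 * times out waiting for a response), the core invokes the agent's
 * send_handler, and the client is expected to release the buffer
 * there.  The handler name is hypothetical; ib_free_send_mad() is the
 * real call.
 *
 *	static void example_send_handler(struct ib_mad_agent *agent,
 *					 struct ib_mad_send_wc *wc)
 *	{
 *		if (wc->status != IB_WC_SUCCESS)
 *			printk(KERN_WARNING "MAD send failed: %d\n",
 *			       wc->status);
 *		ib_free_send_mad(wc->send_buf);
 *	}
 */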
1131 * ib_free_recv_mad - Returns data buffers used to receive
1132 * a MAD to the access layer
1134 void ib_free_recv_mad(struct ib_mad_recv_wc
*mad_recv_wc
)
1136 struct ib_mad_recv_buf
*mad_recv_buf
, *temp_recv_buf
;
1137 struct ib_mad_private_header
*mad_priv_hdr
;
1138 struct ib_mad_private
*priv
;
1139 struct list_head free_list
;
1141 INIT_LIST_HEAD(&free_list
);
1142 list_splice_init(&mad_recv_wc
->rmpp_list
, &free_list
);
1144 list_for_each_entry_safe(mad_recv_buf
, temp_recv_buf
,
1146 mad_recv_wc
= container_of(mad_recv_buf
, struct ib_mad_recv_wc
,
1148 mad_priv_hdr
= container_of(mad_recv_wc
,
1149 struct ib_mad_private_header
,
1151 priv
= container_of(mad_priv_hdr
, struct ib_mad_private
,
1153 kmem_cache_free(ib_mad_cache
, priv
);
1156 EXPORT_SYMBOL(ib_free_recv_mad
);
1158 struct ib_mad_agent
*ib_redirect_mad_qp(struct ib_qp
*qp
,
1160 ib_mad_send_handler send_handler
,
1161 ib_mad_recv_handler recv_handler
,
1164 return ERR_PTR(-EINVAL
); /* XXX: for now */
1166 EXPORT_SYMBOL(ib_redirect_mad_qp
);
1168 int ib_process_mad_wc(struct ib_mad_agent
*mad_agent
,
1171 printk(KERN_ERR PFX
"ib_process_mad_wc() not implemented yet\n");
1174 EXPORT_SYMBOL(ib_process_mad_wc
);
1176 static int method_in_use(struct ib_mad_mgmt_method_table
**method
,
1177 struct ib_mad_reg_req
*mad_reg_req
)
1181 for (i
= find_first_bit(mad_reg_req
->method_mask
, IB_MGMT_MAX_METHODS
);
1182 i
< IB_MGMT_MAX_METHODS
;
1183 i
= find_next_bit(mad_reg_req
->method_mask
, IB_MGMT_MAX_METHODS
,
1185 if ((*method
)->agent
[i
]) {
1186 printk(KERN_ERR PFX
"Method %d already in use\n", i
);
1193 static int allocate_method_table(struct ib_mad_mgmt_method_table
**method
)
1195 /* Allocate management method table */
1196 *method
= kzalloc(sizeof **method
, GFP_ATOMIC
);
1198 printk(KERN_ERR PFX
"No memory for "
1199 "ib_mad_mgmt_method_table\n");
1207 * Check to see if there are any methods still in use
1209 static int check_method_table(struct ib_mad_mgmt_method_table
*method
)
1213 for (i
= 0; i
< IB_MGMT_MAX_METHODS
; i
++)
1214 if (method
->agent
[i
])
1220 * Check to see if there are any method tables for this class still in use
1222 static int check_class_table(struct ib_mad_mgmt_class_table
*class)
1226 for (i
= 0; i
< MAX_MGMT_CLASS
; i
++)
1227 if (class->method_table
[i
])
1232 static int check_vendor_class(struct ib_mad_mgmt_vendor_class
*vendor_class
)
1236 for (i
= 0; i
< MAX_MGMT_OUI
; i
++)
1237 if (vendor_class
->method_table
[i
])
1242 static int find_vendor_oui(struct ib_mad_mgmt_vendor_class
*vendor_class
,
1247 for (i
= 0; i
< MAX_MGMT_OUI
; i
++)
1248 /* Is there matching OUI for this vendor class ? */
1249 if (!memcmp(vendor_class
->oui
[i
], oui
, 3))
1255 static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table
*vendor
)
1259 for (i
= 0; i
< MAX_MGMT_VENDOR_RANGE2
; i
++)
1260 if (vendor
->vendor_class
[i
])
1266 static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table
*method
,
1267 struct ib_mad_agent_private
*agent
)
1271 /* Remove any methods for this mad agent */
1272 for (i
= 0; i
< IB_MGMT_MAX_METHODS
; i
++) {
1273 if (method
->agent
[i
] == agent
) {
1274 method
->agent
[i
] = NULL
;
1279 static int add_nonoui_reg_req(struct ib_mad_reg_req
*mad_reg_req
,
1280 struct ib_mad_agent_private
*agent_priv
,
1283 struct ib_mad_port_private
*port_priv
;
1284 struct ib_mad_mgmt_class_table
**class;
1285 struct ib_mad_mgmt_method_table
**method
;
1288 port_priv
= agent_priv
->qp_info
->port_priv
;
1289 class = &port_priv
->version
[mad_reg_req
->mgmt_class_version
].class;
1291 /* Allocate management class table for "new" class version */
1292 *class = kzalloc(sizeof **class, GFP_ATOMIC
);
1294 printk(KERN_ERR PFX
"No memory for "
1295 "ib_mad_mgmt_class_table\n");
1300 /* Allocate method table for this management class */
1301 method
= &(*class)->method_table
[mgmt_class
];
1302 if ((ret
= allocate_method_table(method
)))
1305 method
= &(*class)->method_table
[mgmt_class
];
1307 /* Allocate method table for this management class */
1308 if ((ret
= allocate_method_table(method
)))
1313 /* Now, make sure methods are not already in use */
1314 if (method_in_use(method
, mad_reg_req
))
1317 /* Finally, add in methods being registered */
1318 for (i
= find_first_bit(mad_reg_req
->method_mask
,
1319 IB_MGMT_MAX_METHODS
);
1320 i
< IB_MGMT_MAX_METHODS
;
1321 i
= find_next_bit(mad_reg_req
->method_mask
, IB_MGMT_MAX_METHODS
,
1323 (*method
)->agent
[i
] = agent_priv
;
1328 /* Remove any methods for this mad agent */
1329 remove_methods_mad_agent(*method
, agent_priv
);
1330 /* Now, check to see if there are any methods in use */
1331 if (!check_method_table(*method
)) {
1332 /* If not, release management method table */
1345 static int add_oui_reg_req(struct ib_mad_reg_req
*mad_reg_req
,
1346 struct ib_mad_agent_private
*agent_priv
)
1348 struct ib_mad_port_private
*port_priv
;
1349 struct ib_mad_mgmt_vendor_class_table
**vendor_table
;
1350 struct ib_mad_mgmt_vendor_class_table
*vendor
= NULL
;
1351 struct ib_mad_mgmt_vendor_class
*vendor_class
= NULL
;
1352 struct ib_mad_mgmt_method_table
**method
;
1353 int i
, ret
= -ENOMEM
;
1356 /* "New" vendor (with OUI) class */
1357 vclass
= vendor_class_index(mad_reg_req
->mgmt_class
);
1358 port_priv
= agent_priv
->qp_info
->port_priv
;
1359 vendor_table
= &port_priv
->version
[
1360 mad_reg_req
->mgmt_class_version
].vendor
;
1361 if (!*vendor_table
) {
1362 /* Allocate mgmt vendor class table for "new" class version */
1363 vendor
= kzalloc(sizeof *vendor
, GFP_ATOMIC
);
1365 printk(KERN_ERR PFX
"No memory for "
1366 "ib_mad_mgmt_vendor_class_table\n");
1370 *vendor_table
= vendor
;
1372 if (!(*vendor_table
)->vendor_class
[vclass
]) {
1373 /* Allocate table for this management vendor class */
1374 vendor_class
= kzalloc(sizeof *vendor_class
, GFP_ATOMIC
);
1375 if (!vendor_class
) {
1376 printk(KERN_ERR PFX
"No memory for "
1377 "ib_mad_mgmt_vendor_class\n");
1381 (*vendor_table
)->vendor_class
[vclass
] = vendor_class
;
1383 for (i
= 0; i
< MAX_MGMT_OUI
; i
++) {
1384 /* Is there matching OUI for this vendor class ? */
1385 if (!memcmp((*vendor_table
)->vendor_class
[vclass
]->oui
[i
],
1386 mad_reg_req
->oui
, 3)) {
1387 method
= &(*vendor_table
)->vendor_class
[
1388 vclass
]->method_table
[i
];
1393 for (i
= 0; i
< MAX_MGMT_OUI
; i
++) {
1394 /* OUI slot available ? */
1395 if (!is_vendor_oui((*vendor_table
)->vendor_class
[
1397 method
= &(*vendor_table
)->vendor_class
[
1398 vclass
]->method_table
[i
];
1400 /* Allocate method table for this OUI */
1401 if ((ret
= allocate_method_table(method
)))
1403 memcpy((*vendor_table
)->vendor_class
[vclass
]->oui
[i
],
1404 mad_reg_req
->oui
, 3);
1408 printk(KERN_ERR PFX
"All OUI slots in use\n");
1412 /* Now, make sure methods are not already in use */
1413 if (method_in_use(method
, mad_reg_req
))
1416 /* Finally, add in methods being registered */
1417 for (i
= find_first_bit(mad_reg_req
->method_mask
,
1418 IB_MGMT_MAX_METHODS
);
1419 i
< IB_MGMT_MAX_METHODS
;
1420 i
= find_next_bit(mad_reg_req
->method_mask
, IB_MGMT_MAX_METHODS
,
1422 (*method
)->agent
[i
] = agent_priv
;
1427 /* Remove any methods for this mad agent */
1428 remove_methods_mad_agent(*method
, agent_priv
);
1429 /* Now, check to see if there are any methods in use */
1430 if (!check_method_table(*method
)) {
1431 /* If not, release management method table */
1438 (*vendor_table
)->vendor_class
[vclass
] = NULL
;
1439 kfree(vendor_class
);
1443 *vendor_table
= NULL
;
1450 static void remove_mad_reg_req(struct ib_mad_agent_private
*agent_priv
)
1452 struct ib_mad_port_private
*port_priv
;
1453 struct ib_mad_mgmt_class_table
*class;
1454 struct ib_mad_mgmt_method_table
*method
;
1455 struct ib_mad_mgmt_vendor_class_table
*vendor
;
1456 struct ib_mad_mgmt_vendor_class
*vendor_class
;
1461 * Was MAD registration request supplied
1462 * with original registration ?
1464 if (!agent_priv
->reg_req
) {
1468 port_priv
= agent_priv
->qp_info
->port_priv
;
1469 mgmt_class
= convert_mgmt_class(agent_priv
->reg_req
->mgmt_class
);
1470 class = port_priv
->version
[
1471 agent_priv
->reg_req
->mgmt_class_version
].class;
1475 method
= class->method_table
[mgmt_class
];
1477 /* Remove any methods for this mad agent */
1478 remove_methods_mad_agent(method
, agent_priv
);
1479 /* Now, check to see if there are any methods still in use */
1480 if (!check_method_table(method
)) {
1481 /* If not, release management method table */
1483 class->method_table
[mgmt_class
] = NULL
;
1484 /* Any management classes left ? */
1485 if (!check_class_table(class)) {
1486 /* If not, release management class table */
1489 agent_priv
->reg_req
->
1490 mgmt_class_version
].class = NULL
;
1496 if (!is_vendor_class(mgmt_class
))
1499 /* normalize mgmt_class to vendor range 2 */
1500 mgmt_class
= vendor_class_index(agent_priv
->reg_req
->mgmt_class
);
1501 vendor
= port_priv
->version
[
1502 agent_priv
->reg_req
->mgmt_class_version
].vendor
;
1507 vendor_class
= vendor
->vendor_class
[mgmt_class
];
1509 index
= find_vendor_oui(vendor_class
, agent_priv
->reg_req
->oui
);
1512 method
= vendor_class
->method_table
[index
];
1514 /* Remove any methods for this mad agent */
1515 remove_methods_mad_agent(method
, agent_priv
);
1517 * Now, check to see if there are
1518 * any methods still in use
1520 if (!check_method_table(method
)) {
1521 /* If not, release management method table */
1523 vendor_class
->method_table
[index
] = NULL
;
1524 memset(vendor_class
->oui
[index
], 0, 3);
1525 /* Any OUIs left ? */
1526 if (!check_vendor_class(vendor_class
)) {
1527 /* If not, release vendor class table */
1528 kfree(vendor_class
);
1529 vendor
->vendor_class
[mgmt_class
] = NULL
;
1530 /* Any other vendor classes left ? */
1531 if (!check_vendor_table(vendor
)) {
1534 agent_priv
->reg_req
->
1535 mgmt_class_version
].
1547 static struct ib_mad_agent_private
*
1548 find_mad_agent(struct ib_mad_port_private
*port_priv
,
1551 struct ib_mad_agent_private
*mad_agent
= NULL
;
1552 unsigned long flags
;
1554 spin_lock_irqsave(&port_priv
->reg_lock
, flags
);
1555 if (ib_response_mad(mad
)) {
1557 struct ib_mad_agent_private
*entry
;
1560 * Routing is based on high 32 bits of transaction ID
1563 hi_tid
= be64_to_cpu(mad
->mad_hdr
.tid
) >> 32;
1564 list_for_each_entry(entry
, &port_priv
->agent_list
, agent_list
) {
1565 if (entry
->agent
.hi_tid
== hi_tid
) {
1571 struct ib_mad_mgmt_class_table
*class;
1572 struct ib_mad_mgmt_method_table
*method
;
1573 struct ib_mad_mgmt_vendor_class_table
*vendor
;
1574 struct ib_mad_mgmt_vendor_class
*vendor_class
;
1575 struct ib_vendor_mad
*vendor_mad
;
1579 * Routing is based on version, class, and method
1580 * For "newer" vendor MADs, also based on OUI
1582 if (mad
->mad_hdr
.class_version
>= MAX_MGMT_VERSION
)
1584 if (!is_vendor_class(mad
->mad_hdr
.mgmt_class
)) {
1585 class = port_priv
->version
[
1586 mad
->mad_hdr
.class_version
].class;
1589 method
= class->method_table
[convert_mgmt_class(
1590 mad
->mad_hdr
.mgmt_class
)];
1592 mad_agent
= method
->agent
[mad
->mad_hdr
.method
&
1593 ~IB_MGMT_METHOD_RESP
];
1595 vendor
= port_priv
->version
[
1596 mad
->mad_hdr
.class_version
].vendor
;
1599 vendor_class
= vendor
->vendor_class
[vendor_class_index(
1600 mad
->mad_hdr
.mgmt_class
)];
1603 /* Find matching OUI */
1604 vendor_mad
= (struct ib_vendor_mad
*)mad
;
1605 index
= find_vendor_oui(vendor_class
, vendor_mad
->oui
);
1608 method
= vendor_class
->method_table
[index
];
1610 mad_agent
= method
->agent
[mad
->mad_hdr
.method
&
1611 ~IB_MGMT_METHOD_RESP
];
1617 if (mad_agent
->agent
.recv_handler
)
1618 atomic_inc(&mad_agent
->refcount
);
1620 printk(KERN_NOTICE PFX
"No receive handler for client "
1622 &mad_agent
->agent
, port_priv
->port_num
);
1627 spin_unlock_irqrestore(&port_priv
->reg_lock
, flags
);
1632 static int validate_mad(struct ib_mad
*mad
, u32 qp_num
)
1636 /* Make sure MAD base version is understood */
1637 if (mad
->mad_hdr
.base_version
!= IB_MGMT_BASE_VERSION
) {
1638 printk(KERN_ERR PFX
"MAD received with unsupported base "
1639 "version %d\n", mad
->mad_hdr
.base_version
);
1643 /* Filter SMI packets sent to other than QP0 */
1644 if ((mad
->mad_hdr
.mgmt_class
== IB_MGMT_CLASS_SUBN_LID_ROUTED
) ||
1645 (mad
->mad_hdr
.mgmt_class
== IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
)) {
1649 /* Filter GSI packets sent to QP0 */
1658 static int is_data_mad(struct ib_mad_agent_private
*mad_agent_priv
,
1659 struct ib_mad_hdr
*mad_hdr
)
1661 struct ib_rmpp_mad
*rmpp_mad
;
1663 rmpp_mad
= (struct ib_rmpp_mad
*)mad_hdr
;
1664 return !mad_agent_priv
->agent
.rmpp_version
||
1665 !(ib_get_rmpp_flags(&rmpp_mad
->rmpp_hdr
) &
1666 IB_MGMT_RMPP_FLAG_ACTIVE
) ||
1667 (rmpp_mad
->rmpp_hdr
.rmpp_type
== IB_MGMT_RMPP_TYPE_DATA
);
1670 static inline int rcv_has_same_class(struct ib_mad_send_wr_private
*wr
,
1671 struct ib_mad_recv_wc
*rwc
)
1673 return ((struct ib_mad
*)(wr
->send_buf
.mad
))->mad_hdr
.mgmt_class
==
1674 rwc
->recv_buf
.mad
->mad_hdr
.mgmt_class
;
1677 static inline int rcv_has_same_gid(struct ib_mad_agent_private
*mad_agent_priv
,
1678 struct ib_mad_send_wr_private
*wr
,
1679 struct ib_mad_recv_wc
*rwc
)
1681 struct ib_ah_attr attr
;
1682 u8 send_resp
, rcv_resp
;
1684 struct ib_device
*device
= mad_agent_priv
->agent
.device
;
1685 u8 port_num
= mad_agent_priv
->agent
.port_num
;
1688 send_resp
= ((struct ib_mad
*)(wr
->send_buf
.mad
))->
1689 mad_hdr
.method
& IB_MGMT_METHOD_RESP
;
1690 rcv_resp
= rwc
->recv_buf
.mad
->mad_hdr
.method
& IB_MGMT_METHOD_RESP
;
1692 if (send_resp
== rcv_resp
)
1693 /* both requests, or both responses. GIDs different */
1696 if (ib_query_ah(wr
->send_buf
.ah
, &attr
))
1697 /* Assume not equal, to avoid false positives. */
1700 if (!!(attr
.ah_flags
& IB_AH_GRH
) !=
1701 !!(rwc
->wc
->wc_flags
& IB_WC_GRH
))
1702 /* one has GID, other does not. Assume different */
1705 if (!send_resp
&& rcv_resp
) {
1706 /* is request/response. */
1707 if (!(attr
.ah_flags
& IB_AH_GRH
)) {
1708 if (ib_get_cached_lmc(device
, port_num
, &lmc
))
1710 return (!lmc
|| !((attr
.src_path_bits
^
1711 rwc
->wc
->dlid_path_bits
) &
1714 if (ib_get_cached_gid(device
, port_num
,
1715 attr
.grh
.sgid_index
, &sgid
))
1717 return !memcmp(sgid
.raw
, rwc
->recv_buf
.grh
->dgid
.raw
,
1722 if (!(attr
.ah_flags
& IB_AH_GRH
))
1723 return attr
.dlid
== rwc
->wc
->slid
;
1725 return !memcmp(attr
.grh
.dgid
.raw
, rwc
->recv_buf
.grh
->sgid
.raw
,
1729 static inline int is_direct(u8
class)
1731 return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
);
1734 struct ib_mad_send_wr_private
*
1735 ib_find_send_mad(struct ib_mad_agent_private
*mad_agent_priv
,
1736 struct ib_mad_recv_wc
*wc
)
1738 struct ib_mad_send_wr_private
*wr
;
1741 mad
= (struct ib_mad
*)wc
->recv_buf
.mad
;
1743 list_for_each_entry(wr
, &mad_agent_priv
->wait_list
, agent_list
) {
1744 if ((wr
->tid
== mad
->mad_hdr
.tid
) &&
1745 rcv_has_same_class(wr
, wc
) &&
1747 * Don't check GID for direct routed MADs.
1748 * These might have permissive LIDs.
1750 (is_direct(wc
->recv_buf
.mad
->mad_hdr
.mgmt_class
) ||
1751 rcv_has_same_gid(mad_agent_priv
, wr
, wc
)))
1752 return (wr
->status
== IB_WC_SUCCESS
) ? wr
: NULL
;
1756 * It's possible to receive the response before we've
1757 * been notified that the send has completed
1759 list_for_each_entry(wr
, &mad_agent_priv
->send_list
, agent_list
) {
1760 if (is_data_mad(mad_agent_priv
, wr
->send_buf
.mad
) &&
1761 wr
->tid
== mad
->mad_hdr
.tid
&&
1763 rcv_has_same_class(wr
, wc
) &&
1765 * Don't check GID for direct routed MADs.
1766 * These might have permissive LIDs.
1768 (is_direct(wc
->recv_buf
.mad
->mad_hdr
.mgmt_class
) ||
1769 rcv_has_same_gid(mad_agent_priv
, wr
, wc
)))
1770 /* Verify request has not been canceled */
1771 return (wr
->status
== IB_WC_SUCCESS
) ? wr
: NULL
;
1776 void ib_mark_mad_done(struct ib_mad_send_wr_private
*mad_send_wr
)
1778 mad_send_wr
->timeout
= 0;
1779 if (mad_send_wr
->refcount
== 1)
1780 list_move_tail(&mad_send_wr
->agent_list
,
1781 &mad_send_wr
->mad_agent_priv
->done_list
);
1784 static void ib_mad_complete_recv(struct ib_mad_agent_private
*mad_agent_priv
,
1785 struct ib_mad_recv_wc
*mad_recv_wc
)
1787 struct ib_mad_send_wr_private
*mad_send_wr
;
1788 struct ib_mad_send_wc mad_send_wc
;
1789 unsigned long flags
;
1791 INIT_LIST_HEAD(&mad_recv_wc
->rmpp_list
);
1792 list_add(&mad_recv_wc
->recv_buf
.list
, &mad_recv_wc
->rmpp_list
);
1793 if (mad_agent_priv
->agent
.rmpp_version
) {
1794 mad_recv_wc
= ib_process_rmpp_recv_wc(mad_agent_priv
,
1797 deref_mad_agent(mad_agent_priv
);
1802 /* Complete corresponding request */
1803 if (ib_response_mad(mad_recv_wc
->recv_buf
.mad
)) {
1804 spin_lock_irqsave(&mad_agent_priv
->lock
, flags
);
1805 mad_send_wr
= ib_find_send_mad(mad_agent_priv
, mad_recv_wc
);
1807 spin_unlock_irqrestore(&mad_agent_priv
->lock
, flags
);
1808 ib_free_recv_mad(mad_recv_wc
);
1809 deref_mad_agent(mad_agent_priv
);
1812 ib_mark_mad_done(mad_send_wr
);
1813 spin_unlock_irqrestore(&mad_agent_priv
->lock
, flags
);
1815 /* Defined behavior is to complete response before request */
1816 mad_recv_wc
->wc
->wr_id
= (unsigned long) &mad_send_wr
->send_buf
;
1817 mad_agent_priv
->agent
.recv_handler(&mad_agent_priv
->agent
,
1819 atomic_dec(&mad_agent_priv
->refcount
);
1821 mad_send_wc
.status
= IB_WC_SUCCESS
;
1822 mad_send_wc
.vendor_err
= 0;
1823 mad_send_wc
.send_buf
= &mad_send_wr
->send_buf
;
1824 ib_mad_complete_send_wr(mad_send_wr
, &mad_send_wc
);
1826 mad_agent_priv
->agent
.recv_handler(&mad_agent_priv
->agent
,
1828 deref_mad_agent(mad_agent_priv
);
1832 static void ib_mad_recv_done_handler(struct ib_mad_port_private
*port_priv
,
1835 struct ib_mad_qp_info
*qp_info
;
1836 struct ib_mad_private_header
*mad_priv_hdr
;
1837 struct ib_mad_private
*recv
, *response
;
1838 struct ib_mad_list_head
*mad_list
;
1839 struct ib_mad_agent_private
*mad_agent
;
1841 response
= kmem_cache_alloc(ib_mad_cache
, GFP_KERNEL
);
1843 printk(KERN_ERR PFX
"ib_mad_recv_done_handler no memory "
1844 "for response buffer\n");
1846 mad_list
= (struct ib_mad_list_head
*)(unsigned long)wc
->wr_id
;
1847 qp_info
= mad_list
->mad_queue
->qp_info
;
1848 dequeue_mad(mad_list
);
1850 mad_priv_hdr
= container_of(mad_list
, struct ib_mad_private_header
,
1852 recv
= container_of(mad_priv_hdr
, struct ib_mad_private
, header
);
1853 dma_unmap_single(port_priv
->device
->dma_device
,
1854 pci_unmap_addr(&recv
->header
, mapping
),
1855 sizeof(struct ib_mad_private
) -
1856 sizeof(struct ib_mad_private_header
),
1859 /* Setup MAD receive work completion from "normal" work completion */
1860 recv
->header
.wc
= *wc
;
1861 recv
->header
.recv_wc
.wc
= &recv
->header
.wc
;
1862 recv
->header
.recv_wc
.mad_len
= sizeof(struct ib_mad
);
1863 recv
->header
.recv_wc
.recv_buf
.mad
= &recv
->mad
.mad
;
1864 recv
->header
.recv_wc
.recv_buf
.grh
= &recv
->grh
;
1866 if (atomic_read(&qp_info
->snoop_count
))
1867 snoop_recv(qp_info
, &recv
->header
.recv_wc
, IB_MAD_SNOOP_RECVS
);
1870 if (!validate_mad(&recv
->mad
.mad
, qp_info
->qp
->qp_num
))
1873 if (recv
->mad
.mad
.mad_hdr
.mgmt_class
==
1874 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
) {
1875 if (!smi_handle_dr_smp_recv(&recv
->mad
.smp
,
1876 port_priv
->device
->node_type
,
1877 port_priv
->port_num
,
1878 port_priv
->device
->phys_port_cnt
))
1880 if (!smi_check_forward_dr_smp(&recv
->mad
.smp
))
1882 if (!smi_handle_dr_smp_send(&recv
->mad
.smp
,
1883 port_priv
->device
->node_type
,
1884 port_priv
->port_num
))
1886 if (!smi_check_local_smp(&recv
->mad
.smp
, port_priv
->device
))
1891 /* Give driver "right of first refusal" on incoming MAD */
1892 if (port_priv
->device
->process_mad
) {
1896 printk(KERN_ERR PFX
"No memory for response MAD\n");
1898 * Is it better to assume that
1899 * it wouldn't be processed ?
1904 ret
= port_priv
->device
->process_mad(port_priv
->device
, 0,
1905 port_priv
->port_num
,
1908 &response
->mad
.mad
);
1909 if (ret
& IB_MAD_RESULT_SUCCESS
) {
1910 if (ret
& IB_MAD_RESULT_CONSUMED
)
1912 if (ret
& IB_MAD_RESULT_REPLY
) {
1913 agent_send_response(&response
->mad
.mad
,
1916 port_priv
->port_num
,
1917 qp_info
->qp
->qp_num
);
1923 mad_agent
= find_mad_agent(port_priv
, &recv
->mad
.mad
);
1925 ib_mad_complete_recv(mad_agent
, &recv
->header
.recv_wc
);
1927 * recv is freed up in error cases in ib_mad_complete_recv
1928 * or via recv_handler in ib_mad_complete_recv()
1934 /* Post another receive request for this QP */
1936 ib_mad_post_receive_mads(qp_info
, response
);
1938 kmem_cache_free(ib_mad_cache
, recv
);
1940 ib_mad_post_receive_mads(qp_info
, recv
);
1943 static void adjust_timeout(struct ib_mad_agent_private
*mad_agent_priv
)
1945 struct ib_mad_send_wr_private
*mad_send_wr
;
1946 unsigned long delay
;
1948 if (list_empty(&mad_agent_priv
->wait_list
)) {
1949 cancel_delayed_work(&mad_agent_priv
->timed_work
);
1951 mad_send_wr
= list_entry(mad_agent_priv
->wait_list
.next
,
1952 struct ib_mad_send_wr_private
,
1955 if (time_after(mad_agent_priv
->timeout
,
1956 mad_send_wr
->timeout
)) {
1957 mad_agent_priv
->timeout
= mad_send_wr
->timeout
;
1958 cancel_delayed_work(&mad_agent_priv
->timed_work
);
1959 delay
= mad_send_wr
->timeout
- jiffies
;
1960 if ((long)delay
<= 0)
1962 queue_delayed_work(mad_agent_priv
->qp_info
->
1964 &mad_agent_priv
->timed_work
, delay
);
1969 static void wait_for_response(struct ib_mad_send_wr_private
*mad_send_wr
)
1971 struct ib_mad_agent_private
*mad_agent_priv
;
1972 struct ib_mad_send_wr_private
*temp_mad_send_wr
;
1973 struct list_head
*list_item
;
1974 unsigned long delay
;
1976 mad_agent_priv
= mad_send_wr
->mad_agent_priv
;
1977 list_del(&mad_send_wr
->agent_list
);
1979 delay
= mad_send_wr
->timeout
;
1980 mad_send_wr
->timeout
+= jiffies
;
1983 list_for_each_prev(list_item
, &mad_agent_priv
->wait_list
) {
1984 temp_mad_send_wr
= list_entry(list_item
,
1985 struct ib_mad_send_wr_private
,
1987 if (time_after(mad_send_wr
->timeout
,
1988 temp_mad_send_wr
->timeout
))
1993 list_item
= &mad_agent_priv
->wait_list
;
1994 list_add(&mad_send_wr
->agent_list
, list_item
);
1996 /* Reschedule a work item if we have a shorter timeout */
1997 if (mad_agent_priv
->wait_list
.next
== &mad_send_wr
->agent_list
) {
1998 cancel_delayed_work(&mad_agent_priv
->timed_work
);
1999 queue_delayed_work(mad_agent_priv
->qp_info
->port_priv
->wq
,
2000 &mad_agent_priv
->timed_work
, delay
);
2004 void ib_reset_mad_timeout(struct ib_mad_send_wr_private
*mad_send_wr
,
2007 mad_send_wr
->timeout
= msecs_to_jiffies(timeout_ms
);
2008 wait_for_response(mad_send_wr
);
2012 * Process a send work completion
2014 void ib_mad_complete_send_wr(struct ib_mad_send_wr_private
*mad_send_wr
,
2015 struct ib_mad_send_wc
*mad_send_wc
)
2017 struct ib_mad_agent_private
*mad_agent_priv
;
2018 unsigned long flags
;
2021 mad_agent_priv
= mad_send_wr
->mad_agent_priv
;
2022 spin_lock_irqsave(&mad_agent_priv
->lock
, flags
);
2023 if (mad_agent_priv
->agent
.rmpp_version
) {
2024 ret
= ib_process_rmpp_send_wc(mad_send_wr
, mad_send_wc
);
2025 if (ret
== IB_RMPP_RESULT_CONSUMED
)
2028 ret
= IB_RMPP_RESULT_UNHANDLED
;
2030 if (mad_send_wc
->status
!= IB_WC_SUCCESS
&&
2031 mad_send_wr
->status
== IB_WC_SUCCESS
) {
2032 mad_send_wr
->status
= mad_send_wc
->status
;
2033 mad_send_wr
->refcount
-= (mad_send_wr
->timeout
> 0);
2036 if (--mad_send_wr
->refcount
> 0) {
2037 if (mad_send_wr
->refcount
== 1 && mad_send_wr
->timeout
&&
2038 mad_send_wr
->status
== IB_WC_SUCCESS
) {
2039 wait_for_response(mad_send_wr
);
2044 /* Remove send from MAD agent and notify client of completion */
2045 list_del(&mad_send_wr
->agent_list
);
2046 adjust_timeout(mad_agent_priv
);
2047 spin_unlock_irqrestore(&mad_agent_priv
->lock
, flags
);
2049 if (mad_send_wr
->status
!= IB_WC_SUCCESS
)
2050 mad_send_wc
->status
= mad_send_wr
->status
;
2051 if (ret
== IB_RMPP_RESULT_INTERNAL
)
2052 ib_rmpp_send_handler(mad_send_wc
);
2054 mad_agent_priv
->agent
.send_handler(&mad_agent_priv
->agent
,
2057 /* Release reference on agent taken when sending */
2058 deref_mad_agent(mad_agent_priv
);
2061 spin_unlock_irqrestore(&mad_agent_priv
->lock
, flags
);
static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
				     struct ib_wc *wc)
{
	struct ib_mad_send_wr_private	*mad_send_wr, *queued_send_wr;
	struct ib_mad_list_head		*mad_list;
	struct ib_mad_qp_info		*qp_info;
	struct ib_mad_queue		*send_queue;
	struct ib_send_wr		*bad_send_wr;
	struct ib_mad_send_wc		mad_send_wc;
	unsigned long flags;
	int ret;

	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	send_queue = mad_list->mad_queue;
	qp_info = send_queue->qp_info;

retry:
	dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
			 pci_unmap_addr(mad_send_wr, header_mapping),
			 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
	dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
			 pci_unmap_addr(mad_send_wr, payload_mapping),
			 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
	queued_send_wr = NULL;
	spin_lock_irqsave(&send_queue->lock, flags);
	list_del(&mad_list->list);

	/* Move queued send to the send queue */
	if (send_queue->count-- > send_queue->max_active) {
		mad_list = container_of(qp_info->overflow_list.next,
					struct ib_mad_list_head, list);
		queued_send_wr = container_of(mad_list,
					struct ib_mad_send_wr_private,
					mad_list);
		list_move_tail(&mad_list->list, &send_queue->list);
	}
	spin_unlock_irqrestore(&send_queue->lock, flags);

	mad_send_wc.send_buf = &mad_send_wr->send_buf;
	mad_send_wc.status = wc->status;
	mad_send_wc.vendor_err = wc->vendor_err;
	if (atomic_read(&qp_info->snoop_count))
		snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
			   IB_MAD_SNOOP_SEND_COMPLETIONS);
	ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);

	if (queued_send_wr) {
		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
				   &bad_send_wr);
		if (ret) {
			printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
			mad_send_wr = queued_send_wr;
			wc->status = IB_WC_LOC_QP_OP_ERR;
			goto retry;
		}
	}
}
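/*
 * Flag every send currently on the send queue so that it is reposted
 * (rather than completed in error) when its flush completion arrives.
 */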
static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_list_head *mad_list;
	unsigned long flags;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
		mad_send_wr = container_of(mad_list,
					   struct ib_mad_send_wr_private,
					   mad_list);
		mad_send_wr->retry = 1;
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
}
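/*
 * Handle a completion with error status.  Receive errors are left to the
 * QP error handling/shutdown path; flushed sends marked for retry are
 * reposted, and other send errors move the QP from SQE back to RTS before
 * failing the offending send.
 */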
static void mad_error_handler(struct ib_mad_port_private *port_priv,
			      struct ib_wc *wc)
{
	struct ib_mad_list_head *mad_list;
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_send_wr_private *mad_send_wr;
	int ret;

	/* Determine if failure was a send or receive */
	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	qp_info = mad_list->mad_queue->qp_info;
	if (mad_list->mad_queue == &qp_info->recv_queue)
		/*
		 * Receive errors indicate that the QP has entered the error
		 * state - error handling/shutdown code will cleanup
		 */
		return;

	/*
	 * Send errors will transition the QP to SQE - move
	 * QP to RTS and repost flushed work requests
	 */
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	if (wc->status == IB_WC_WR_FLUSH_ERR) {
		if (mad_send_wr->retry) {
			/* Repost send */
			struct ib_send_wr *bad_send_wr;

			mad_send_wr->retry = 0;
			ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
					   &bad_send_wr);
			if (ret)
				ib_mad_send_done_handler(port_priv, wc);
		} else
			ib_mad_send_done_handler(port_priv, wc);
	} else {
		struct ib_qp_attr *attr;

		/* Transition QP to RTS and fail offending send */
		attr = kmalloc(sizeof *attr, GFP_KERNEL);
		if (attr) {
			attr->qp_state = IB_QPS_RTS;
			attr->cur_qp_state = IB_QPS_SQE;
			ret = ib_modify_qp(qp_info->qp, attr,
					   IB_QP_STATE | IB_QP_CUR_STATE);
			kfree(attr);
			if (ret)
				printk(KERN_ERR PFX "mad_error_handler - "
				       "ib_modify_qp to RTS : %d\n", ret);
			else
				mark_sends_for_retry(qp_info);
		}
		ib_mad_send_done_handler(port_priv, wc);
	}
}
/*
 * IB MAD completion callback
 */
static void ib_mad_completion_handler(struct work_struct *work)
{
	struct ib_mad_port_private *port_priv;
	struct ib_wc wc;

	port_priv = container_of(work, struct ib_mad_port_private, work);
	ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);

	while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
		if (wc.status == IB_WC_SUCCESS) {
			switch (wc.opcode) {
			case IB_WC_SEND:
				ib_mad_send_done_handler(port_priv, &wc);
				break;
			case IB_WC_RECV:
				ib_mad_recv_done_handler(port_priv, &wc);
				break;
			default:
				BUG_ON(1);
				break;
			}
		} else
			mad_error_handler(port_priv, &wc);
	}
}
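/*
 * Flush all outstanding sends for an agent that is being torn down and
 * report each one back to the client with IB_WC_WR_FLUSH_ERR status.
 */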
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
{
	unsigned long flags;
	struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	struct list_head cancel_list;

	INIT_LIST_HEAD(&cancel_list);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &mad_agent_priv->send_list, agent_list) {
		if (mad_send_wr->status == IB_WC_SUCCESS) {
			mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
			mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
		}
	}

	/* Empty wait list to prevent receives from finding a request */
	list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
	/* Empty local completion list as well */
	list_splice_init(&mad_agent_priv->local_list, &cancel_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	/* Report all cancelled requests */
	mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
	mad_send_wc.vendor_err = 0;

	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &cancel_list, agent_list) {
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		list_del(&mad_send_wr->agent_list);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);
		atomic_dec(&mad_agent_priv->refcount);
	}
}
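/*
 * Look up the send work request associated with a client's send buffer,
 * checking the wait list first and then the active send list.
 */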
static struct ib_mad_send_wr_private *
find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
	     struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_send_wr_private *mad_send_wr;

	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
			    agent_list) {
		if (&mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}

	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
			    agent_list) {
		if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
		    &mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}
	return NULL;
}
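/*
 * Update the timeout of an outstanding send.  A timeout of zero cancels
 * the request: it is flushed and completed back to the client.
 */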
int ib_modify_mad(struct ib_mad_agent *mad_agent,
		  struct ib_mad_send_buf *send_buf, u32 timeout_ms)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int active;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
	if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		return -EINVAL;
	}

	active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
	if (!timeout_ms) {
		mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	mad_send_wr->send_buf.timeout_ms = timeout_ms;
	if (active)
		mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	else
		ib_reset_mad_timeout(mad_send_wr, timeout_ms);

	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	return 0;
}
EXPORT_SYMBOL(ib_modify_mad);
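/*
 * Cancel an outstanding MAD by forcing its timeout to zero.  For example,
 * a client tearing down might call ib_cancel_mad(agent, send_buf) for each
 * send it still owns and then wait for the flush completions (illustrative
 * usage only; the exact teardown ordering is up to the client).
 */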
void ib_cancel_mad(struct ib_mad_agent *mad_agent,
		   struct ib_mad_send_buf *send_buf)
{
	ib_modify_mad(mad_agent, send_buf, 0);
}
EXPORT_SYMBOL(ib_cancel_mad);
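/*
 * Work handler for MADs that were completed locally (looped back on the
 * same port): deliver the receive side to the receiving agent, if any,
 * and then generate the send completion for the sender.
 */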
static void local_completions(struct work_struct *work)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_local_private *local;
	struct ib_mad_agent_private *recv_mad_agent;
	unsigned long flags;
	int recv = 0;
	struct ib_wc wc;
	struct ib_mad_send_wc mad_send_wc;

	mad_agent_priv =
		container_of(work, struct ib_mad_agent_private, local_work);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->local_list)) {
		local = list_entry(mad_agent_priv->local_list.next,
				   struct ib_mad_local_private,
				   completion_list);
		list_del(&local->completion_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		if (local->mad_priv) {
			recv_mad_agent = local->recv_mad_agent;
			if (!recv_mad_agent) {
				printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
				goto local_send_completion;
			}

			recv = 1;
			/*
			 * Defined behavior is to complete response
			 * before request
			 */
			build_smp_wc((unsigned long) local->mad_send_wr,
				     be16_to_cpu(IB_LID_PERMISSIVE),
				     0, recv_mad_agent->agent.port_num, &wc);

			local->mad_priv->header.recv_wc.wc = &wc;
			local->mad_priv->header.recv_wc.mad_len =
						sizeof(struct ib_mad);
			INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
			list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
				 &local->mad_priv->header.recv_wc.rmpp_list);
			local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
			local->mad_priv->header.recv_wc.recv_buf.mad =
						&local->mad_priv->mad.mad;
			if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
				snoop_recv(recv_mad_agent->qp_info,
					   &local->mad_priv->header.recv_wc,
					   IB_MAD_SNOOP_RECVS);
			recv_mad_agent->agent.recv_handler(
						&recv_mad_agent->agent,
						&local->mad_priv->header.recv_wc);
			spin_lock_irqsave(&recv_mad_agent->lock, flags);
			atomic_dec(&recv_mad_agent->refcount);
			spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
		}

local_send_completion:
		/* Complete send */
		mad_send_wc.status = IB_WC_SUCCESS;
		mad_send_wc.vendor_err = 0;
		mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
		if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
			snoop_send(mad_agent_priv->qp_info,
				   &local->mad_send_wr->send_buf,
				   &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		atomic_dec(&mad_agent_priv->refcount);
		if (!recv)
			kmem_cache_free(ib_mad_cache, local->mad_priv);
		kfree(local);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
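/*
 * Resend a timed-out MAD if it has retries remaining.  Returns zero when
 * the send was reposted (or consumed by RMPP); a nonzero return means the
 * request should be completed as timed out.
 */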
static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret;

	if (!mad_send_wr->retries--)
		return -ETIMEDOUT;

	mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);

	if (mad_send_wr->mad_agent_priv->agent.rmpp_version) {
		ret = ib_retry_rmpp(mad_send_wr);
		switch (ret) {
		case IB_RMPP_RESULT_UNHANDLED:
			ret = ib_send_mad(mad_send_wr);
			break;
		case IB_RMPP_RESULT_CONSUMED:
			ret = 0;
			break;
		default:
			ret = -ECOMM;
			break;
		}
	} else
		ret = ib_send_mad(mad_send_wr);

	if (!ret) {
		mad_send_wr->refcount++;
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->send_list);
	}
	return ret;
}
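/*
 * Delayed work handler that expires requests on the wait list, retrying
 * those that still have retries left and completing the rest with
 * IB_WC_RESP_TIMEOUT_ERR.
 */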
static void timeout_sends(struct work_struct *work)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags, delay;

	mad_agent_priv = container_of(work, struct ib_mad_agent_private,
				      timed_work.work);
	mad_send_wc.vendor_err = 0;

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->wait_list)) {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_send_wr->timeout, jiffies)) {
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(mad_agent_priv->qp_info->
					   port_priv->wq,
					   &mad_agent_priv->timed_work, delay);
			break;
		}

		list_del(&mad_send_wr->agent_list);
		if (mad_send_wr->status == IB_WC_SUCCESS &&
		    !retry_send(mad_send_wr))
			continue;

		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (mad_send_wr->status == IB_WC_SUCCESS)
			mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
		else
			mad_send_wc.status = mad_send_wr->status;
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		atomic_dec(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
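/*
 * CQ event callback: defer completion processing to the port's
 * single-threaded workqueue.
 */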
static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
{
	struct ib_mad_port_private *port_priv = cq->cq_context;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	if (!list_empty(&port_priv->port_list))
		queue_work(port_priv->wq, &port_priv->work);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
}
/*
 * Allocate receive MADs and post receive WRs for them
 */
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad)
{
	unsigned long flags;
	int post, ret;
	struct ib_mad_private *mad_priv;
	struct ib_sge sg_list;
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct ib_mad_queue *recv_queue = &qp_info->recv_queue;

	/* Initialize common scatter list fields */
	sg_list.length = sizeof *mad_priv - sizeof mad_priv->header;
	sg_list.lkey = (*qp_info->port_priv->mr).lkey;

	/* Initialize common receive WR fields */
	recv_wr.next = NULL;
	recv_wr.sg_list = &sg_list;
	recv_wr.num_sge = 1;

	do {
		/* Allocate and map receive buffer */
		if (mad) {
			mad_priv = mad;
			mad = NULL;
		} else {
			mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
			if (!mad_priv) {
				printk(KERN_ERR PFX "No memory for receive buffer\n");
				ret = -ENOMEM;
				break;
			}
		}
		sg_list.addr = dma_map_single(qp_info->port_priv->
						device->dma_device,
					      &mad_priv->grh,
					      sizeof *mad_priv -
						sizeof mad_priv->header,
					      DMA_FROM_DEVICE);
		pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr);
		recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
		mad_priv->header.mad_list.mad_queue = recv_queue;

		/* Post receive WR */
		spin_lock_irqsave(&recv_queue->lock, flags);
		post = (++recv_queue->count < recv_queue->max_active);
		list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
		spin_unlock_irqrestore(&recv_queue->lock, flags);
		ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
		if (ret) {
			spin_lock_irqsave(&recv_queue->lock, flags);
			list_del(&mad_priv->header.mad_list.list);
			recv_queue->count--;
			spin_unlock_irqrestore(&recv_queue->lock, flags);
			dma_unmap_single(qp_info->port_priv->device->dma_device,
					 pci_unmap_addr(&mad_priv->header,
							mapping),
					 sizeof *mad_priv -
					   sizeof mad_priv->header,
					 DMA_FROM_DEVICE);
			kmem_cache_free(ib_mad_cache, mad_priv);
			printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
			break;
		}
	} while (post);

	return ret;
}
/*
 * Return all the posted receive MADs
 */
static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv;
	struct ib_mad_list_head *mad_list;

	while (!list_empty(&qp_info->recv_queue.list)) {

		mad_list = list_entry(qp_info->recv_queue.list.next,
				      struct ib_mad_list_head, list);
		mad_priv_hdr = container_of(mad_list,
					    struct ib_mad_private_header,
					    mad_list);
		recv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);

		/* Remove from posted receive MAD list */
		list_del(&mad_list->list);

		dma_unmap_single(qp_info->port_priv->device->dma_device,
				 pci_unmap_addr(&recv->header, mapping),
				 sizeof(struct ib_mad_private) -
				 sizeof(struct ib_mad_private_header),
				 DMA_FROM_DEVICE);
		kmem_cache_free(ib_mad_cache, recv);
	}

	qp_info->recv_queue.count = 0;
}
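/*
 * Bring the special QPs through INIT/RTR/RTS, request CQ notification,
 * and post the initial receive buffers for the port.
 */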
static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
{
	int ret, i;
	struct ib_qp_attr *attr;
	struct ib_qp *qp;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr) {
		printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n");
		return -ENOMEM;
	}

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		qp = port_priv->qp_info[i].qp;
		/*
		 * PKey index for QP1 is irrelevant but
		 * one is needed for the Reset to Init transition
		 */
		attr->qp_state = IB_QPS_INIT;
		attr->pkey_index = 0;
		attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE |
					     IB_QP_PKEY_INDEX | IB_QP_QKEY);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't change QP%d state to "
			       "INIT: %d\n", i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTR;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't change QP%d state to "
			       "RTR: %d\n", i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTS;
		attr->sq_psn = IB_MAD_SEND_Q_PSN;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't change QP%d state to "
			       "RTS: %d\n", i, ret);
			goto out;
		}
	}

	ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
	if (ret) {
		printk(KERN_ERR PFX "Failed to request completion "
		       "notification: %d\n", ret);
		goto out;
	}

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't post receive WRs\n");
			goto out;
		}
	}
out:
	kfree(attr);
	return ret;
}
static void qp_event_handler(struct ib_event *event, void *qp_context)
{
	struct ib_mad_qp_info	*qp_info = qp_context;

	/* It's worse than that! He's dead, Jim! */
	printk(KERN_ERR PFX "Fatal error (%d) on MAD QP (%d)\n",
	       event->event, qp_info->qp->qp_num);
}
static void init_mad_queue(struct ib_mad_qp_info *qp_info,
			   struct ib_mad_queue *mad_queue)
{
	mad_queue->qp_info = qp_info;
	mad_queue->count = 0;
	spin_lock_init(&mad_queue->lock);
	INIT_LIST_HEAD(&mad_queue->list);
}
static void init_mad_qp(struct ib_mad_port_private *port_priv,
			struct ib_mad_qp_info *qp_info)
{
	qp_info->port_priv = port_priv;
	init_mad_queue(qp_info, &qp_info->send_queue);
	init_mad_queue(qp_info, &qp_info->recv_queue);
	INIT_LIST_HEAD(&qp_info->overflow_list);
	spin_lock_init(&qp_info->snoop_lock);
	qp_info->snoop_table = NULL;
	qp_info->snoop_table_size = 0;
	atomic_set(&qp_info->snoop_count, 0);
}
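/*
 * Create one of the special QPs (SMI or GSI) for the port, sharing the
 * port's CQ for both send and receive completions.
 */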
static int create_mad_qp(struct ib_mad_qp_info *qp_info,
			 enum ib_qp_type qp_type)
{
	struct ib_qp_init_attr	qp_init_attr;
	int ret;

	memset(&qp_init_attr, 0, sizeof qp_init_attr);
	qp_init_attr.send_cq = qp_info->port_priv->cq;
	qp_init_attr.recv_cq = qp_info->port_priv->cq;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.cap.max_send_wr = IB_MAD_QP_SEND_SIZE;
	qp_init_attr.cap.max_recv_wr = IB_MAD_QP_RECV_SIZE;
	qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
	qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
	qp_init_attr.qp_type = qp_type;
	qp_init_attr.port_num = qp_info->port_priv->port_num;
	qp_init_attr.qp_context = qp_info;
	qp_init_attr.event_handler = qp_event_handler;
	qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
	if (IS_ERR(qp_info->qp)) {
		printk(KERN_ERR PFX "Couldn't create ib_mad QP%d\n",
		       get_spl_qp_index(qp_type));
		ret = PTR_ERR(qp_info->qp);
		goto error;
	}
	/* Use minimum queue sizes unless the CQ is resized */
	qp_info->send_queue.max_active = IB_MAD_QP_SEND_SIZE;
	qp_info->recv_queue.max_active = IB_MAD_QP_RECV_SIZE;
	return 0;

error:
	return ret;
}

static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
{
	ib_destroy_qp(qp_info->qp);
	kfree(qp_info->snoop_table);
}
/*
 * Create the QP, PD, MR, and CQ if needed
 */
static int ib_mad_port_open(struct ib_device *device,
			    int port_num)
{
	int ret, cq_size;
	struct ib_mad_port_private *port_priv;
	unsigned long flags;
	char name[sizeof "ib_mad123"];

	/* Create new device info */
	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
	if (!port_priv) {
		printk(KERN_ERR PFX "No memory for ib_mad_port_private\n");
		return -ENOMEM;
	}

	port_priv->device = device;
	port_priv->port_num = port_num;
	spin_lock_init(&port_priv->reg_lock);
	INIT_LIST_HEAD(&port_priv->agent_list);
	init_mad_qp(port_priv, &port_priv->qp_info[0]);
	init_mad_qp(port_priv, &port_priv->qp_info[1]);

	cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2;
	port_priv->cq = ib_create_cq(port_priv->device,
				     ib_mad_thread_completion_handler,
				     NULL, port_priv, cq_size);
	if (IS_ERR(port_priv->cq)) {
		printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
		ret = PTR_ERR(port_priv->cq);
		goto error3;
	}

	port_priv->pd = ib_alloc_pd(device);
	if (IS_ERR(port_priv->pd)) {
		printk(KERN_ERR PFX "Couldn't create ib_mad PD\n");
		ret = PTR_ERR(port_priv->pd);
		goto error4;
	}

	port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(port_priv->mr)) {
		printk(KERN_ERR PFX "Couldn't get ib_mad DMA MR\n");
		ret = PTR_ERR(port_priv->mr);
		goto error5;
	}

	ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
	if (ret)
		goto error6;
	ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
	if (ret)
		goto error7;

	snprintf(name, sizeof name, "ib_mad%d", port_num);
	port_priv->wq = create_singlethread_workqueue(name);
	if (!port_priv->wq) {
		ret = -ENOMEM;
		goto error8;
	}
	INIT_WORK(&port_priv->work, ib_mad_completion_handler);

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_add_tail(&port_priv->port_list, &ib_mad_port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	ret = ib_mad_port_start(port_priv);
	if (ret) {
		printk(KERN_ERR PFX "Couldn't start port\n");
		goto error9;
	}

	return 0;

error9:
	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
error8:
	destroy_mad_qp(&port_priv->qp_info[1]);
error7:
	destroy_mad_qp(&port_priv->qp_info[0]);
error6:
	ib_dereg_mr(port_priv->mr);
error5:
	ib_dealloc_pd(port_priv->pd);
error4:
	ib_destroy_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
error3:
	kfree(port_priv);

	return ret;
}
/*
 * If there are no classes using the port, free the port
 * resources (CQ, MR, PD, QP) and remove the port's info structure
 */
static int ib_mad_port_close(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	port_priv = __ib_get_mad_port(device, port_num);
	if (port_priv == NULL) {
		spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
		printk(KERN_ERR PFX "Port %d not found\n", port_num);
		return -ENODEV;
	}
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
	destroy_mad_qp(&port_priv->qp_info[1]);
	destroy_mad_qp(&port_priv->qp_info[0]);
	ib_dereg_mr(port_priv->mr);
	ib_dealloc_pd(port_priv->pd);
	ib_destroy_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
	/* XXX: Handle deallocation of MAD registration tables */

	kfree(port_priv);

	return 0;
}
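/*
 * Device add callback: open MAD services on every port of an IB device
 * (port 0 only for a switch), tearing down already-opened ports on failure.
 */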
static void ib_mad_init_device(struct ib_device *device)
{
	int start, end, i;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		start = 0;
		end   = 0;
	} else {
		start = 1;
		end   = device->phys_port_cnt;
	}

	for (i = start; i <= end; i++) {
		if (ib_mad_port_open(device, i)) {
			printk(KERN_ERR PFX "Couldn't open %s port %d\n",
			       device->name, i);
			goto error;
		}
		if (ib_agent_port_open(device, i)) {
			printk(KERN_ERR PFX "Couldn't open %s port %d "
			       "for agents\n",
			       device->name, i);
			goto error_agent;
		}
	}
	return;

error_agent:
	if (ib_mad_port_close(device, i))
		printk(KERN_ERR PFX "Couldn't close %s port %d\n",
		       device->name, i);

error:
	i--;

	while (i >= start) {
		if (ib_agent_port_close(device, i))
			printk(KERN_ERR PFX "Couldn't close %s port %d "
			       "for agents\n",
			       device->name, i);
		if (ib_mad_port_close(device, i))
			printk(KERN_ERR PFX "Couldn't close %s port %d\n",
			       device->name, i);
		i--;
	}
}
static void ib_mad_remove_device(struct ib_device *device)
{
	int i, num_ports, cur_port;

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		num_ports = 1;
		cur_port = 0;
	} else {
		num_ports = device->phys_port_cnt;
		cur_port = 1;
	}
	for (i = 0; i < num_ports; i++, cur_port++) {
		if (ib_agent_port_close(device, cur_port))
			printk(KERN_ERR PFX "Couldn't close %s port %d "
			       "for agents\n",
			       device->name, cur_port);
		if (ib_mad_port_close(device, cur_port))
			printk(KERN_ERR PFX "Couldn't close %s port %d\n",
			       device->name, cur_port);
	}
}
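/* Client registered with the IB core for device add/removal notifications */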
static struct ib_client mad_client = {
	.name   = "mad",
	.add    = ib_mad_init_device,
	.remove = ib_mad_remove_device
};
static int __init ib_mad_init_module(void)
{
	int ret;

	spin_lock_init(&ib_mad_port_list_lock);

	ib_mad_cache = kmem_cache_create("ib_mad",
					 sizeof(struct ib_mad_private),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL,
					 NULL);
	if (!ib_mad_cache) {
		printk(KERN_ERR PFX "Couldn't create ib_mad cache\n");
		ret = -ENOMEM;
		goto error1;
	}

	INIT_LIST_HEAD(&ib_mad_port_list);

	if (ib_register_client(&mad_client)) {
		printk(KERN_ERR PFX "Couldn't register ib_mad client\n");
		ret = -EINVAL;
		goto error2;
	}

	return 0;

error2:
	kmem_cache_destroy(ib_mad_cache);
error1:
	return ret;
}
static void __exit ib_mad_cleanup_module(void)
{
	ib_unregister_client(&mad_client);
	kmem_cache_destroy(ib_mad_cache);
}

module_init(ib_mad_init_module);
module_exit(ib_mad_cleanup_module);