/*
 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mad.c 5596 2006-03-03 01:00:07Z sean.hefty $
 */
#include <linux/dma-mapping.h>

#include "mad_priv.h"
#include "mad_rmpp.h"
#include "smi.h"
#include "agent.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("kernel IB MAD API");
MODULE_AUTHOR("Hal Rosenstock");
MODULE_AUTHOR("Sean Hefty");
static kmem_cache_t *ib_mad_cache;

static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;

/* Port list lock */
static spinlock_t ib_mad_port_list_lock;
/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
					struct ib_mad *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(void *data);
static void local_completions(void *data);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);
/*
 * Returns a ib_mad_port_private structure or NULL for a device/port
 * Assumes ib_mad_port_list_lock is being held
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;

	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
		if (entry->device == device && entry->port_num == port_num)
			return entry;
	}
	return NULL;
}
/*
 * Wrapper function to return a ib_mad_port_private structure or NULL
 * for a device/port
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	entry = __ib_get_mad_port(device, port_num);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	return entry;
}
static inline u8 convert_mgmt_class(u8 mgmt_class)
{
	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
		0 : mgmt_class;
}
static int get_spl_qp_index(enum ib_qp_type qp_type)
{
	switch (qp_type) {
	case IB_QPT_SMI:
		return 0;
	case IB_QPT_GSI:
		return 1;
	default:
		return -1;
	}
}
static int vendor_class_index(u8 mgmt_class)
{
	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}
static int is_vendor_class(u8 mgmt_class)
{
	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return 0;
	return 1;
}
static int is_vendor_oui(char *oui)
{
	if (oui[0] || oui[1] || oui[2])
		return 1;
	return 0;
}
static int is_vendor_method_in_use(
		struct ib_mad_mgmt_vendor_class *vendor_class,
		struct ib_mad_reg_req *mad_reg_req)
{
	struct ib_mad_mgmt_method_table *method;
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++) {
		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
			method = vendor_class->method_table[i];
			if (method) {
				if (method_in_use(&method, mad_reg_req))
					return 1;
				else
					break;
			}
		}
	}
	return 0;
}
/*
 * ib_register_mad_agent - Register to send/receive MADs
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   u8 rmpp_version,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_reg_req *reg_req = NULL;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	struct ib_mad_mgmt_method_table *method;
	int ret2, qpn;
	unsigned long flags;
	u8 mgmt_class, vclass;

	/* Validate parameters */
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1)
		goto error1;

	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION)
		goto error1;

	/* Validate MAD registration request if supplied */
	if (mad_reg_req) {
		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION)
			goto error1;
		if (!recv_handler)
			goto error1;
		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
			/*
			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
			 * one in this range currently allowed
			 */
			if (mad_reg_req->mgmt_class !=
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
				goto error1;
		} else if (mad_reg_req->mgmt_class == 0) {
			/*
			 * Class 0 is reserved in IBA and is used for
			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
			 */
			goto error1;
		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
			/*
			 * If class is in "new" vendor range,
			 * ensure supplied OUI is not zero
			 */
			if (!is_vendor_oui(mad_reg_req->oui))
				goto error1;
		}
		/* Make sure class supplied is consistent with RMPP */
		if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
			if (rmpp_version)
				goto error1;
		}
		/* Make sure class supplied is consistent with QP type */
		if (qp_type == IB_QPT_SMI) {
			if ((mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
			    (mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
				goto error1;
		} else {
			if ((mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
			    (mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
				goto error1;
		}
	} else {
		/* No registration request supplied */
		if (!send_handler)
			goto error1;
	}

	/* Validate device and port */
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}

	/* Allocate structures */
	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
	if (!mad_agent_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
						 IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(mad_agent_priv->agent.mr)) {
		ret = ERR_PTR(-ENOMEM);
		goto error2;
	}

	if (mad_reg_req) {
		reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL);
		if (!reg_req) {
			ret = ERR_PTR(-ENOMEM);
			goto error3;
		}
		/* Make a copy of the MAD registration request */
		memcpy(reg_req, mad_reg_req, sizeof *reg_req);
	}

	/* Now, fill in the various structures */
	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
	mad_agent_priv->reg_req = reg_req;
	mad_agent_priv->agent.rmpp_version = rmpp_version;
	mad_agent_priv->agent.device = device;
	mad_agent_priv->agent.recv_handler = recv_handler;
	mad_agent_priv->agent.send_handler = send_handler;
	mad_agent_priv->agent.context = context;
	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_agent_priv->agent.port_num = port_num;

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;

	/*
	 * Make sure MAD registration (if supplied)
	 * is non overlapping with any existing ones
	 */
	if (mad_reg_req) {
		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
		if (!is_vendor_class(mgmt_class)) {
			class = port_priv->version[mad_reg_req->
						   mgmt_class_version].class;
			if (class) {
				method = class->method_table[mgmt_class];
				if (method) {
					if (method_in_use(&method,
							  mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
						  mgmt_class);
		} else {
			/* "New" vendor class range */
			vendor = port_priv->version[mad_reg_req->
						    mgmt_class_version].vendor;
			if (vendor) {
				vclass = vendor_class_index(mgmt_class);
				vendor_class = vendor->vendor_class[vclass];
				if (vendor_class) {
					if (is_vendor_method_in_use(
							vendor_class,
							mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
		}
		if (ret2) {
			ret = ERR_PTR(ret2);
			goto error4;
		}
	}

	/* Add mad agent into port's agent list */
	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	spin_lock_init(&mad_agent_priv->lock);
	INIT_LIST_HEAD(&mad_agent_priv->send_list);
	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
	INIT_LIST_HEAD(&mad_agent_priv->done_list);
	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
	INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv);
	INIT_LIST_HEAD(&mad_agent_priv->local_list);
	INIT_WORK(&mad_agent_priv->local_work, local_completions,
		  mad_agent_priv);
	atomic_set(&mad_agent_priv->refcount, 1);
	init_completion(&mad_agent_priv->comp);

	return &mad_agent_priv->agent;

error4:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	kfree(reg_req);
error3:
	ib_dereg_mr(mad_agent_priv->agent.mr);
error2:
	kfree(mad_agent_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_agent);
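/*
 * Illustrative sketch (not part of this file): a GSI client that
 * exchanges SA MADs might register roughly as follows; the handler and
 * context names are hypothetical.
 *
 *	struct ib_mad_reg_req reg_req = {
 *		.mgmt_class	    = IB_MGMT_CLASS_SUBN_ADM,
 *		.mgmt_class_version = 2,
 *	};
 *	set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI,
 *				      &reg_req, 0, my_send_handler,
 *				      my_recv_handler, my_context);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 */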
static inline int is_snooping_sends(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(/*IB_MAD_SNOOP_POSTED_SENDS |
		 IB_MAD_SNOOP_RMPP_SENDS |*/
		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(IB_MAD_SNOOP_RECVS /*|
		 IB_MAD_SNOOP_RMPP_RECVS*/));
}
static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
				struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_snoop_private **new_snoop_table;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	/* Check for empty slot in array. */
	for (i = 0; i < qp_info->snoop_table_size; i++)
		if (!qp_info->snoop_table[i])
			break;

	if (i == qp_info->snoop_table_size) {
		/* Grow table. */
		new_snoop_table = kmalloc(sizeof mad_snoop_priv *
					  (qp_info->snoop_table_size + 1),
					  GFP_ATOMIC);
		if (!new_snoop_table) {
			i = -ENOMEM;
			goto out;
		}
		if (qp_info->snoop_table) {
			memcpy(new_snoop_table, qp_info->snoop_table,
			       sizeof mad_snoop_priv *
			       qp_info->snoop_table_size);
			kfree(qp_info->snoop_table);
		}
		qp_info->snoop_table = new_snoop_table;
		qp_info->snoop_table_size++;
	}
	qp_info->snoop_table[i] = mad_snoop_priv;
	atomic_inc(&qp_info->snoop_count);
out:
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
	return i;
}
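/*
 * The snoop table grows one slot at a time under snoop_lock; snoop
 * registration is rare, so the copy on each growth is not a
 * performance concern.  Note the kmalloc size is parenthesized as
 * sizeof(ptr) * (size + 1), i.e. one extra table entry, not one extra
 * byte.
 */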
struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   int mad_snoop_flags,
					   ib_mad_snoop_handler snoop_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret;
	struct ib_mad_snoop_private *mad_snoop_priv;
	int qpn;

	/* Validate parameters */
	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}
	/* Allocate structures */
	mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
	if (!mad_snoop_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	/* Now, fill in the various structures */
	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
	mad_snoop_priv->agent.device = device;
	mad_snoop_priv->agent.recv_handler = recv_handler;
	mad_snoop_priv->agent.snoop_handler = snoop_handler;
	mad_snoop_priv->agent.context = context;
	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_snoop_priv->agent.port_num = port_num;
	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
	init_completion(&mad_snoop_priv->comp);
	mad_snoop_priv->snoop_index = register_snoop_agent(
						&port_priv->qp_info[qpn],
						mad_snoop_priv);
	if (mad_snoop_priv->snoop_index < 0) {
		ret = ERR_PTR(mad_snoop_priv->snoop_index);
		goto error2;
	}

	atomic_set(&mad_snoop_priv->refcount, 1);
	return &mad_snoop_priv->agent;

error2:
	kfree(mad_snoop_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_snoop);
static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		complete(&mad_agent_priv->comp);
}

static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
{
	if (atomic_dec_and_test(&mad_snoop_priv->refcount))
		complete(&mad_snoop_priv->comp);
}
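/*
 * Agent teardown protocol: the refcount starts at 1 from registration.
 * Unregistering drops that initial reference and then sleeps on ->comp,
 * which the final deref_*_agent() call completes, so no callbacks can
 * still be running when the structure is freed.
 */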
static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	/* Note that we could still be handling received MADs */

	/*
	 * Canceling all sends results in dropping received response
	 * MADs, preventing us from queuing additional work
	 */
	cancel_mads(mad_agent_priv);
	port_priv = mad_agent_priv->qp_info->port_priv;
	cancel_delayed_work(&mad_agent_priv->timed_work);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	remove_mad_reg_req(mad_agent_priv);
	list_del(&mad_agent_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	flush_workqueue(port_priv->wq);
	ib_cancel_rmpp_recvs(mad_agent_priv);

	deref_mad_agent(mad_agent_priv);
	wait_for_completion(&mad_agent_priv->comp);

	kfree(mad_agent_priv->reg_req);
	ib_dereg_mr(mad_agent_priv->agent.mr);
	kfree(mad_agent_priv);
}
static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_qp_info *qp_info;
	unsigned long flags;

	qp_info = mad_snoop_priv->qp_info;
	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
	atomic_dec(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

	deref_snoop_agent(mad_snoop_priv);
	wait_for_completion(&mad_snoop_priv->comp);

	kfree(mad_snoop_priv);
}
/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* If the TID is zero, the agent can only snoop. */
	if (mad_agent->hi_tid) {
		mad_agent_priv = container_of(mad_agent,
					      struct ib_mad_agent_private,
					      agent);
		unregister_mad_agent(mad_agent_priv);
	} else {
		mad_snoop_priv = container_of(mad_agent,
					      struct ib_mad_snoop_private,
					      agent);
		unregister_mad_snoop(mad_snoop_priv);
	}
	return 0;
}
EXPORT_SYMBOL(ib_unregister_mad_agent);
static inline int response_mad(struct ib_mad *mad)
{
	/* Trap represses are responses although response bit is reset */
	return ((mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
		(mad->mad_hdr.method & IB_MGMT_METHOD_RESP));
}
static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
	struct ib_mad_queue *mad_queue;
	unsigned long flags;

	BUG_ON(!mad_list->mad_queue);
	mad_queue = mad_list->mad_queue;
	spin_lock_irqsave(&mad_queue->lock, flags);
	list_del(&mad_list->list);
	mad_queue->count--;
	spin_unlock_irqrestore(&mad_queue->lock, flags);
}
static void snoop_send(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_send_buf *send_buf,
		       struct ib_mad_send_wc *mad_send_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
						    send_buf, mad_send_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}
static void snoop_recv(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_recv_wc *mad_recv_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
						   mad_recv_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}
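/*
 * Both snoop loops drop snoop_lock around the client callback, so the
 * handler may sleep or re-enter the MAD layer; the per-agent reference
 * taken beforehand keeps the snoop agent alive across the unlocked
 * window.
 */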
static void build_smp_wc(u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
			 struct ib_wc *wc)
{
	memset(wc, 0, sizeof *wc);
	wc->wr_id = wr_id;
	wc->status = IB_WC_SUCCESS;
	wc->opcode = IB_WC_RECV;
	wc->pkey_index = pkey_index;
	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
	wc->src_qp = IB_QP0;
	wc->qp_num = IB_QP0;
	wc->slid = slid;
	wc->sl = 0;
	wc->dlid_path_bits = 0;
	wc->port_num = port_num;
}
/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
				  struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret;
	struct ib_smp *smp = mad_send_wr->send_buf.mad;
	unsigned long flags;
	struct ib_mad_local_private *local;
	struct ib_mad_private *mad_priv;
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent_private *recv_mad_agent = NULL;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num = mad_agent_priv->agent.port_num;
	struct ib_wc mad_wc;
	struct ib_send_wr *send_wr = &mad_send_wr->send_wr;

	/*
	 * Directed route handling starts if the initial LID routed part of
	 * a request or the ending LID routed part of a response is empty.
	 * If we are at the start of the LID routed part, don't update the
	 * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
	 */
	if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
	     IB_LID_PERMISSIVE &&
	    !smi_handle_dr_smp_send(smp, device->node_type, port_num)) {
		ret = -EINVAL;
		printk(KERN_ERR PFX "Invalid directed route\n");
		goto out;
	}
	/* Check to post send on QP or process locally */
	ret = smi_check_local_smp(smp, device);
	if (!ret)
		goto out;

	local = kmalloc(sizeof *local, GFP_ATOMIC);
	if (!local) {
		ret = -ENOMEM;
		printk(KERN_ERR PFX "No memory for ib_mad_local_private\n");
		goto out;
	}
	local->mad_priv = NULL;
	local->recv_mad_agent = NULL;
	mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
	if (!mad_priv) {
		ret = -ENOMEM;
		printk(KERN_ERR PFX "No memory for local response MAD\n");
		kfree(local);
		goto out;
	}

	build_smp_wc(send_wr->wr_id, be16_to_cpu(smp->dr_slid),
		     send_wr->wr.ud.pkey_index,
		     send_wr->wr.ud.port_num, &mad_wc);

	/* No GRH for DR SMP */
	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
				  (struct ib_mad *)smp,
				  (struct ib_mad *)&mad_priv->mad);
	switch (ret) {
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
		if (response_mad(&mad_priv->mad.mad) &&
		    mad_agent_priv->agent.recv_handler) {
			local->mad_priv = mad_priv;
			local->recv_mad_agent = mad_agent_priv;
			/*
			 * Reference MAD agent until receive
			 * side of local completion handled
			 */
			atomic_inc(&mad_agent_priv->refcount);
		} else
			kmem_cache_free(ib_mad_cache, mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
		kmem_cache_free(ib_mad_cache, mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS:
		/* Treat like an incoming receive MAD */
		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
					    mad_agent_priv->agent.port_num);
		if (port_priv) {
			mad_priv->mad.mad.mad_hdr.tid =
				((struct ib_mad *)smp)->mad_hdr.tid;
			recv_mad_agent = find_mad_agent(port_priv,
							&mad_priv->mad.mad);
		}
		if (!port_priv || !recv_mad_agent) {
			kmem_cache_free(ib_mad_cache, mad_priv);
			kfree(local);
			ret = 0;
			goto out;
		}
		local->mad_priv = mad_priv;
		local->recv_mad_agent = recv_mad_agent;
		break;
	default:
		kmem_cache_free(ib_mad_cache, mad_priv);
		kfree(local);
		ret = -EINVAL;
		goto out;
	}

	local->mad_send_wr = mad_send_wr;
	/* Reference MAD agent until send side of local completion handled */
	atomic_inc(&mad_agent_priv->refcount);
	/* Queue local completion to local list */
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->local_work);
	ret = 1;
out:
	return ret;
}
static int get_pad_size(int hdr_len, int data_len)
{
	int seg_size, pad;

	seg_size = sizeof(struct ib_mad) - hdr_len;
	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		return pad == seg_size ? 0 : pad;
	} else
		return seg_size;
}
static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_segment *s, *t;

	list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
		list_del(&s->list);
		kfree(s);
	}
}
static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
				gfp_t gfp_mask)
{
	struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
	struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
	struct ib_rmpp_segment *seg = NULL;
	int left, seg_size, pad;

	send_buf->seg_size = sizeof (struct ib_mad) - send_buf->hdr_len;
	seg_size = send_buf->seg_size;
	pad = send_wr->pad;

	/* Allocate data segments. */
	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
		if (!seg) {
			printk(KERN_ERR "alloc_send_rmpp_segs: RMPP mem "
			       "alloc failed for len %zd, gfp %#x\n",
			       sizeof (*seg) + seg_size, gfp_mask);
			free_send_rmpp_list(send_wr);
			return -ENOMEM;
		}
		seg->num = ++send_buf->seg_count;
		list_add_tail(&seg->list, &send_wr->rmpp_list);
	}

	/* Zero any padding */
	if (pad)
		memset(seg->data + seg_size - pad, 0, pad);

	rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
					  agent.rmpp_version;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
					struct ib_rmpp_segment, list);
	send_wr->last_ack_seg = send_wr->cur_seg;
	return 0;
}
struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
					    u32 remote_qpn, u16 pkey_index,
					    int rmpp_active,
					    int hdr_len, int data_len,
					    gfp_t gfp_mask)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	int pad, message_size, ret, size;
	void *buf;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);
	pad = get_pad_size(hdr_len, data_len);
	message_size = hdr_len + data_len + pad;

	if ((!mad_agent->rmpp_version &&
	     (rmpp_active || message_size > sizeof(struct ib_mad))) ||
	    (!rmpp_active && message_size > sizeof(struct ib_mad)))
		return ERR_PTR(-EINVAL);

	size = rmpp_active ? hdr_len : sizeof(struct ib_mad);
	buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	mad_send_wr = buf + size;
	INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
	mad_send_wr->send_buf.mad = buf;
	mad_send_wr->send_buf.hdr_len = hdr_len;
	mad_send_wr->send_buf.data_len = data_len;
	mad_send_wr->pad = pad;

	mad_send_wr->mad_agent_priv = mad_agent_priv;
	mad_send_wr->sg_list[0].length = hdr_len;
	mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;
	mad_send_wr->sg_list[1].length = sizeof(struct ib_mad) - hdr_len;
	mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey;

	mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
	mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
	mad_send_wr->send_wr.num_sge = 2;
	mad_send_wr->send_wr.opcode = IB_WR_SEND;
	mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
	mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
	mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
	mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;

	if (rmpp_active) {
		ret = alloc_send_rmpp_list(mad_send_wr, gfp_mask);
		if (ret) {
			kfree(buf);
			return ERR_PTR(ret);
		}
	}

	mad_send_wr->send_buf.mad_agent = mad_agent;
	atomic_inc(&mad_agent_priv->refcount);
	return &mad_send_wr->send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);
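/*
 * Illustrative sketch (hypothetical values): sending a 500-byte SA
 * payload with RMPP active would look roughly like
 *
 *	msg = ib_create_send_mad(agent, remote_qpn, pkey_index, 1,
 *				 IB_MGMT_SA_HDR, 500, GFP_KERNEL);
 *	if (!IS_ERR(msg)) {
 *		... fill in msg->mad and msg->ah ...
 *		if (ib_post_send_mad(msg, NULL))
 *			ib_free_send_mad(msg);
 *	}
 */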
int ib_get_mad_data_offset(u8 mgmt_class)
{
	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
		return IB_MGMT_SA_HDR;
	else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
		 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
		 (mgmt_class == IB_MGMT_CLASS_BIS))
		return IB_MGMT_DEVICE_HDR;
	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return IB_MGMT_VENDOR_HDR;
	else
		return IB_MGMT_MAD_HDR;
}
EXPORT_SYMBOL(ib_get_mad_data_offset);
int ib_is_mad_class_rmpp(u8 mgmt_class)
{
	if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_BIS) ||
	    ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
	     (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
		return 1;
	return 0;
}
EXPORT_SYMBOL(ib_is_mad_class_rmpp);
void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct list_head *list;

	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);
	list = &mad_send_wr->cur_seg->list;

	if (mad_send_wr->cur_seg->num < seg_num) {
		list_for_each_entry(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	} else if (mad_send_wr->cur_seg->num > seg_num) {
		list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	}
	return mad_send_wr->cur_seg->data;
}
EXPORT_SYMBOL(ib_get_rmpp_segment);
static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
{
	if (mad_send_wr->send_buf.seg_count)
		return ib_get_rmpp_segment(&mad_send_wr->send_buf,
					   mad_send_wr->seg_num);
	else
		return mad_send_wr->send_buf.mad +
		       mad_send_wr->send_buf.hdr_len;
}
void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;

	mad_agent_priv = container_of(send_buf->mad_agent,
				      struct ib_mad_agent_private, agent);
	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);

	free_send_rmpp_list(mad_send_wr);
	kfree(send_buf->mad);
	deref_mad_agent(mad_agent_priv);
}
EXPORT_SYMBOL(ib_free_send_mad);
int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_qp_info *qp_info;
	struct list_head *list;
	struct ib_send_wr *bad_send_wr;
	struct ib_mad_agent *mad_agent;
	struct ib_sge *sge;
	unsigned long flags;
	int ret;

	/* Set WR ID to find mad_send_wr upon completion */
	qp_info = mad_send_wr->mad_agent_priv->qp_info;
	mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;

	mad_agent = mad_send_wr->send_buf.mad_agent;
	sge = mad_send_wr->sg_list;
	sge[0].addr = dma_map_single(mad_agent->device->dma_device,
				     mad_send_wr->send_buf.mad,
				     sge[0].length,
				     DMA_TO_DEVICE);
	pci_unmap_addr_set(mad_send_wr, header_mapping, sge[0].addr);

	sge[1].addr = dma_map_single(mad_agent->device->dma_device,
				     ib_get_payload(mad_send_wr),
				     sge[1].length,
				     DMA_TO_DEVICE);
	pci_unmap_addr_set(mad_send_wr, payload_mapping, sge[1].addr);

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr,
				   &bad_send_wr);
		list = &qp_info->send_queue.list;
	} else {
		ret = 0;
		list = &qp_info->overflow_list;
	}

	if (!ret) {
		qp_info->send_queue.count++;
		list_add_tail(&mad_send_wr->mad_list.list, list);
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
	if (ret) {
		dma_unmap_single(mad_agent->device->dma_device,
				 pci_unmap_addr(mad_send_wr, header_mapping),
				 sge[0].length, DMA_TO_DEVICE);
		dma_unmap_single(mad_agent->device->dma_device,
				 pci_unmap_addr(mad_send_wr, payload_mapping),
				 sge[1].length, DMA_TO_DEVICE);
	}
	return ret;
}
/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 *  with the registered client
 */
int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
		     struct ib_mad_send_buf **bad_send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_buf *next_send_buf;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int ret = -EINVAL;

	/* Walk list of send WRs and post each on send list */
	for (; send_buf; send_buf = next_send_buf) {

		mad_send_wr = container_of(send_buf,
					   struct ib_mad_send_wr_private,
					   send_buf);
		mad_agent_priv = mad_send_wr->mad_agent_priv;

		if (!send_buf->mad_agent->send_handler ||
		    (send_buf->timeout_ms &&
		     !send_buf->mad_agent->recv_handler)) {
			ret = -EINVAL;
			goto error;
		}

		if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
			if (mad_agent_priv->agent.rmpp_version) {
				ret = -EINVAL;
				goto error;
			}
		}

		/*
		 * Save pointer to next work request to post in case the
		 * current one completes, and the user modifies the work
		 * request associated with the completion
		 */
		next_send_buf = send_buf->next;
		mad_send_wr->send_wr.wr.ud.ah = send_buf->ah;

		if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
		    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
			ret = handle_outgoing_dr_smp(mad_agent_priv,
						     mad_send_wr);
			if (ret < 0)		/* error */
				goto error;
			else if (ret == 1)	/* locally consumed */
				continue;
		}

		mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
		/* Timeout will be updated after send completes */
		mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
		mad_send_wr->retries = send_buf->retries;
		/* Reference for work request to QP + response */
		mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
		mad_send_wr->status = IB_WC_SUCCESS;

		/* Reference MAD agent until send completes */
		atomic_inc(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_agent_priv->send_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (mad_agent_priv->agent.rmpp_version) {
			ret = ib_send_rmpp_mad(mad_send_wr);
			if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
				ret = ib_send_mad(mad_send_wr);
		} else
			ret = ib_send_mad(mad_send_wr);
		if (ret < 0) {
			/* Fail send request */
			spin_lock_irqsave(&mad_agent_priv->lock, flags);
			list_del(&mad_send_wr->agent_list);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			atomic_dec(&mad_agent_priv->refcount);
			goto error;
		}
	}
	return 0;
error:
	if (bad_send_buf)
		*bad_send_buf = send_buf;
	return ret;
}
EXPORT_SYMBOL(ib_post_send_mad);
/*
 * ib_free_recv_mad - Returns data buffers used to receive
 *  a MAD to the access layer
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *priv;
	struct list_head free_list;

	INIT_LIST_HEAD(&free_list);
	list_splice_init(&mad_recv_wc->rmpp_list, &free_list);

	list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
				 &free_list, list) {
		mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
					   recv_buf);
		mad_priv_hdr = container_of(mad_recv_wc,
					    struct ib_mad_private_header,
					    recv_wc);
		priv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);
		kmem_cache_free(ib_mad_cache, priv);
	}
}
EXPORT_SYMBOL(ib_free_recv_mad);
struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
					u8 rmpp_version,
					ib_mad_send_handler send_handler,
					ib_mad_recv_handler recv_handler,
					void *context)
{
	return ERR_PTR(-EINVAL);	/* XXX: for now */
}
EXPORT_SYMBOL(ib_redirect_mad_qp);

int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
		      struct ib_wc *wc)
{
	printk(KERN_ERR PFX "ib_process_mad_wc() not implemented yet\n");
	return 0;
}
EXPORT_SYMBOL(ib_process_mad_wc);
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req)
{
	int i;

	for (i = find_first_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS);
	     i < IB_MGMT_MAX_METHODS;
	     i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
			       1+i)) {
		if ((*method)->agent[i]) {
			printk(KERN_ERR PFX "Method %d already in use\n", i);
			return -EINVAL;
		}
	}
	return 0;
}
static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
{
	/* Allocate management method table */
	*method = kzalloc(sizeof **method, GFP_ATOMIC);
	if (!*method) {
		printk(KERN_ERR PFX "No memory for "
		       "ib_mad_mgmt_method_table\n");
		return -ENOMEM;
	}

	return 0;
}
/*
 * Check to see if there are any methods still in use
 */
static int check_method_table(struct ib_mad_mgmt_method_table *method)
{
	int i;

	for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
		if (method->agent[i])
			return 1;
	return 0;
}
/*
 * Check to see if there are any method tables for this class still in use
 */
static int check_class_table(struct ib_mad_mgmt_class_table *class)
{
	int i;

	for (i = 0; i < MAX_MGMT_CLASS; i++)
		if (class->method_table[i])
			return 1;
	return 0;
}
static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		if (vendor_class->method_table[i])
			return 1;
	return 0;
}
static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
			   char *oui)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp(vendor_class->oui[i], oui, 3))
			return i;
	return -1;
}
static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
{
	int i;

	for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
		if (vendor->vendor_class[i])
			return 1;
	return 0;
}
static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
				     struct ib_mad_agent_private *agent)
{
	int i;

	/* Remove any methods for this mad agent */
	for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
		if (method->agent[i] == agent) {
			method->agent[i] = NULL;
		}
	}
}
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table **class;
	struct ib_mad_mgmt_method_table **method;
	int i, ret;

	port_priv = agent_priv->qp_info->port_priv;
	class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
	if (!*class) {
		/* Allocate management class table for "new" class version */
		*class = kzalloc(sizeof **class, GFP_ATOMIC);
		if (!*class) {
			printk(KERN_ERR PFX "No memory for "
			       "ib_mad_mgmt_class_table\n");
			ret = -ENOMEM;
			goto error1;
		}

		/* Allocate method table for this management class */
		method = &(*class)->method_table[mgmt_class];
		if ((ret = allocate_method_table(method)))
			goto error2;
	} else {
		method = &(*class)->method_table[mgmt_class];
		if (!*method) {
			/* Allocate method table for this management class */
			if ((ret = allocate_method_table(method)))
				goto error1;
		}
	}

	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error3;

	/* Finally, add in methods being registered */
	for (i = find_first_bit(mad_reg_req->method_mask,
				IB_MGMT_MAX_METHODS);
	     i < IB_MGMT_MAX_METHODS;
	     i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
			       1+i)) {
		(*method)->agent[i] = agent_priv;
	}
	return 0;

error3:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
	goto error1;
error2:
	kfree(*class);
	*class = NULL;
error1:
	return ret;
}
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_vendor_class_table **vendor_table;
	struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
	struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
	struct ib_mad_mgmt_method_table **method;
	int i, ret = -ENOMEM;
	u8 vclass;

	/* "New" vendor (with OUI) class */
	vclass = vendor_class_index(mad_reg_req->mgmt_class);
	port_priv = agent_priv->qp_info->port_priv;
	vendor_table = &port_priv->version[
				mad_reg_req->mgmt_class_version].vendor;
	if (!*vendor_table) {
		/* Allocate mgmt vendor class table for "new" class version */
		vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
		if (!vendor) {
			printk(KERN_ERR PFX "No memory for "
			       "ib_mad_mgmt_vendor_class_table\n");
			goto error1;
		}

		*vendor_table = vendor;
	}
	if (!(*vendor_table)->vendor_class[vclass]) {
		/* Allocate table for this management vendor class */
		vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
		if (!vendor_class) {
			printk(KERN_ERR PFX "No memory for "
			       "ib_mad_mgmt_vendor_class\n");
			goto error2;
		}

		(*vendor_table)->vendor_class[vclass] = vendor_class;
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
			    mad_reg_req->oui, 3)) {
			method = &(*vendor_table)->vendor_class[
						vclass]->method_table[i];
			BUG_ON(!*method);
			goto check_in_use;
		}
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* OUI slot available ? */
		if (!is_vendor_oui((*vendor_table)->vendor_class[
				vclass]->oui[i])) {
			method = &(*vendor_table)->vendor_class[
				vclass]->method_table[i];
			BUG_ON(*method);
			/* Allocate method table for this OUI */
			if ((ret = allocate_method_table(method)))
				goto error3;
			memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
			       mad_reg_req->oui, 3);
			goto check_in_use;
		}
	}
	printk(KERN_ERR PFX "All OUI slots in use\n");
	goto error3;

check_in_use:
	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error4;

	/* Finally, add in methods being registered */
	for (i = find_first_bit(mad_reg_req->method_mask,
				IB_MGMT_MAX_METHODS);
	     i < IB_MGMT_MAX_METHODS;
	     i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
			       1+i)) {
		(*method)->agent[i] = agent_priv;
	}
	return 0;

error4:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
error3:
	if (vendor_class) {
		(*vendor_table)->vendor_class[vclass] = NULL;
		kfree(vendor_class);
	}
error2:
	if (vendor) {
		*vendor_table = NULL;
		kfree(vendor);
	}
error1:
	return ret;
}
static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_method_table *method;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	int index;
	u8 mgmt_class;

	/*
	 * Was MAD registration request supplied
	 * with original registration ?
	 */
	if (!agent_priv->reg_req)
		goto out;

	port_priv = agent_priv->qp_info->port_priv;
	mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
	class = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].class;
	if (!class)
		goto vendor_check;

	method = class->method_table[mgmt_class];
	if (method) {
		/* Remove any methods for this mad agent */
		remove_methods_mad_agent(method, agent_priv);
		/* Now, check to see if there are any methods still in use */
		if (!check_method_table(method)) {
			/* If not, release management method table */
			kfree(method);
			class->method_table[mgmt_class] = NULL;
			/* Any management classes left ? */
			if (!check_class_table(class)) {
				/* If not, release management class table */
				kfree(class);
				port_priv->version[
					agent_priv->reg_req->
					mgmt_class_version].class = NULL;
			}
		}
	}

vendor_check:
	if (!is_vendor_class(mgmt_class))
		goto out;

	/* normalize mgmt_class to vendor range 2 */
	mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
	vendor = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].vendor;
	if (!vendor)
		goto out;

	vendor_class = vendor->vendor_class[mgmt_class];
	if (vendor_class) {
		index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
		if (index < 0)
			goto out;
		method = vendor_class->method_table[index];
		if (method) {
			/* Remove any methods for this mad agent */
			remove_methods_mad_agent(method, agent_priv);
			/*
			 * Now, check to see if there are
			 * any methods still in use
			 */
			if (!check_method_table(method)) {
				/* If not, release management method table */
				kfree(method);
				vendor_class->method_table[index] = NULL;
				memset(vendor_class->oui[index], 0, 3);
				/* Any OUIs left ? */
				if (!check_vendor_class(vendor_class)) {
					/* If not, release vendor class table */
					kfree(vendor_class);
					vendor->vendor_class[mgmt_class] = NULL;
					/* Any other vendor classes left ? */
					if (!check_vendor_table(vendor)) {
						kfree(vendor);
						port_priv->version[
							agent_priv->reg_req->
							mgmt_class_version].
							vendor = NULL;
					}
				}
			}
		}
	}

out:
	return;
}
static struct ib_mad_agent_private *
find_mad_agent(struct ib_mad_port_private *port_priv,
	       struct ib_mad *mad)
{
	struct ib_mad_agent_private *mad_agent = NULL;
	unsigned long flags;

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	if (response_mad(mad)) {
		u32 hi_tid;
		struct ib_mad_agent_private *entry;

		/*
		 * Routing is based on high 32 bits of transaction ID
		 * of MAD.
		 */
		hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32;
		list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
			if (entry->agent.hi_tid == hi_tid) {
				mad_agent = entry;
				break;
			}
		}
	} else {
		struct ib_mad_mgmt_class_table *class;
		struct ib_mad_mgmt_method_table *method;
		struct ib_mad_mgmt_vendor_class_table *vendor;
		struct ib_mad_mgmt_vendor_class *vendor_class;
		struct ib_vendor_mad *vendor_mad;
		int index;

		/*
		 * Routing is based on version, class, and method
		 * For "newer" vendor MADs, also based on OUI
		 */
		if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION)
			goto out;
		if (!is_vendor_class(mad->mad_hdr.mgmt_class)) {
			class = port_priv->version[
					mad->mad_hdr.class_version].class;
			if (!class)
				goto out;
			method = class->method_table[convert_mgmt_class(
							mad->mad_hdr.mgmt_class)];
			if (method)
				mad_agent = method->agent[mad->mad_hdr.method &
							  ~IB_MGMT_METHOD_RESP];
		} else {
			vendor = port_priv->version[
					mad->mad_hdr.class_version].vendor;
			if (!vendor)
				goto out;
			vendor_class = vendor->vendor_class[vendor_class_index(
						mad->mad_hdr.mgmt_class)];
			if (!vendor_class)
				goto out;
			/* Find matching OUI */
			vendor_mad = (struct ib_vendor_mad *)mad;
			index = find_vendor_oui(vendor_class, vendor_mad->oui);
			if (index == -1)
				goto out;
			method = vendor_class->method_table[index];
			if (method)
				mad_agent = method->agent[mad->mad_hdr.method &
							  ~IB_MGMT_METHOD_RESP];
		}
	}

	if (mad_agent) {
		if (mad_agent->agent.recv_handler)
			atomic_inc(&mad_agent->refcount);
		else {
			printk(KERN_NOTICE PFX "No receive handler for client "
			       "%p on port %d\n",
			       &mad_agent->agent, port_priv->port_num);
			mad_agent = NULL;
		}
	}
out:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return mad_agent;
}
static int validate_mad(struct ib_mad *mad, u32 qp_num)
{
	int valid = 0;

	/* Make sure MAD base version is understood */
	if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
		printk(KERN_ERR PFX "MAD received with unsupported base "
		       "version %d\n", mad->mad_hdr.base_version);
		goto out;
	}

	/* Filter SMI packets sent to other than QP0 */
	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
	    (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
		if (qp_num == 0)
			valid = 1;
	} else {
		/* Filter GSI packets sent to QP0 */
		if (qp_num != 0)
			valid = 1;
	}

out:
	return valid;
}
static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
		       struct ib_mad_hdr *mad_hdr)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
	return !mad_agent_priv->agent.rmpp_version ||
		!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
				    IB_MGMT_RMPP_FLAG_ACTIVE) ||
		(rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
}
static inline int rcv_has_same_class(struct ib_mad_send_wr_private *wr,
				     struct ib_mad_recv_wc *rwc)
{
	return ((struct ib_mad *)(wr->send_buf.mad))->mad_hdr.mgmt_class ==
		rwc->recv_buf.mad->mad_hdr.mgmt_class;
}
static inline int rcv_has_same_gid(struct ib_mad_send_wr_private *wr,
				   struct ib_mad_recv_wc *rwc)
{
	struct ib_ah_attr attr;
	u8 send_resp, rcv_resp;

	send_resp = ((struct ib_mad *)(wr->send_buf.mad))->
		     mad_hdr.method & IB_MGMT_METHOD_RESP;
	rcv_resp = rwc->recv_buf.mad->mad_hdr.method & IB_MGMT_METHOD_RESP;

	if (!send_resp && rcv_resp)
		/* is request/response. GID/LIDs are both local (same). */
		return 1;

	if (send_resp == rcv_resp)
		/* both requests, or both responses. GIDs different */
		return 0;

	if (ib_query_ah(wr->send_buf.ah, &attr))
		/* Assume not equal, to avoid false positives. */
		return 0;

	if (!(attr.ah_flags & IB_AH_GRH) && !(rwc->wc->wc_flags & IB_WC_GRH))
		return attr.dlid == rwc->wc->slid;
	else if ((attr.ah_flags & IB_AH_GRH) &&
		 (rwc->wc->wc_flags & IB_WC_GRH))
		return memcmp(attr.grh.dgid.raw,
			      rwc->recv_buf.grh->sgid.raw, 16) == 0;
	else
		/* one has GID, other does not.  Assume different */
		return 0;
}
struct ib_mad_send_wr_private *
ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
		 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad *mad;

	mad = (struct ib_mad *)mad_recv_wc->recv_buf.mad;

	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
			    agent_list) {
		if ((mad_send_wr->tid == mad->mad_hdr.tid) &&
		    rcv_has_same_class(mad_send_wr, mad_recv_wc) &&
		    rcv_has_same_gid(mad_send_wr, mad_recv_wc))
			return mad_send_wr;
	}

	/*
	 * It's possible to receive the response before we've
	 * been notified that the send has completed
	 */
	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
			    agent_list) {
		if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
		    mad_send_wr->tid == mad->mad_hdr.tid &&
		    mad_send_wr->timeout &&
		    rcv_has_same_class(mad_send_wr, mad_recv_wc) &&
		    rcv_has_same_gid(mad_send_wr, mad_recv_wc)) {
			/* Verify request has not been canceled */
			return (mad_send_wr->status == IB_WC_SUCCESS) ?
				mad_send_wr : NULL;
		}
	}
	return NULL;
}
void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
{
	mad_send_wr->timeout = 0;
	if (mad_send_wr->refcount == 1) {
		list_del(&mad_send_wr->agent_list);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->done_list);
	}
}
static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
				 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;

	INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
	list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
	if (mad_agent_priv->agent.rmpp_version) {
		mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
						      mad_recv_wc);
		if (!mad_recv_wc) {
			deref_mad_agent(mad_agent_priv);
			return;
		}
	}

	/* Complete corresponding request */
	if (response_mad(mad_recv_wc->recv_buf.mad)) {
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
		if (!mad_send_wr) {
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			ib_free_recv_mad(mad_recv_wc);
			deref_mad_agent(mad_agent_priv);
			return;
		}
		ib_mark_mad_done(mad_send_wr);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		/* Defined behavior is to complete response before request */
		mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
						   mad_recv_wc);
		atomic_dec(&mad_agent_priv->refcount);

		mad_send_wc.status = IB_WC_SUCCESS;
		mad_send_wc.vendor_err = 0;
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
	} else {
		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
						   mad_recv_wc);
		deref_mad_agent(mad_agent_priv);
	}
}
static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
				     struct ib_wc *wc)
{
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv, *response;
	struct ib_mad_list_head *mad_list;
	struct ib_mad_agent_private *mad_agent;

	response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
	if (!response)
		printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory "
		       "for response buffer\n");

	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	qp_info = mad_list->mad_queue->qp_info;
	dequeue_mad(mad_list);

	mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
				    mad_list);
	recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
	dma_unmap_single(port_priv->device->dma_device,
			 pci_unmap_addr(&recv->header, mapping),
			 sizeof(struct ib_mad_private) -
			 sizeof(struct ib_mad_private_header),
			 DMA_FROM_DEVICE);

	/* Setup MAD receive work completion from "normal" work completion */
	recv->header.wc = *wc;
	recv->header.recv_wc.wc = &recv->header.wc;
	recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
	recv->header.recv_wc.recv_buf.mad = &recv->mad.mad;
	recv->header.recv_wc.recv_buf.grh = &recv->grh;

	if (atomic_read(&qp_info->snoop_count))
		snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);

	/* Validate MAD */
	if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))
		goto out;

	if (recv->mad.mad.mad_hdr.mgmt_class ==
	    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (!smi_handle_dr_smp_recv(&recv->mad.smp,
					    port_priv->device->node_type,
					    port_priv->port_num,
					    port_priv->device->phys_port_cnt))
			goto out;
		if (!smi_check_forward_dr_smp(&recv->mad.smp))
			goto local;
		if (!smi_handle_dr_smp_send(&recv->mad.smp,
					    port_priv->device->node_type,
					    port_priv->port_num))
			goto out;
		if (!smi_check_local_smp(&recv->mad.smp, port_priv->device))
			goto out;
	}

local:
	/* Give driver "right of first refusal" on incoming MAD */
	if (port_priv->device->process_mad) {
		int ret;

		if (!response) {
			printk(KERN_ERR PFX "No memory for response MAD\n");
			/*
			 * Is it better to assume that
			 * it wouldn't be processed ?
			 */
			goto out;
		}

		ret = port_priv->device->process_mad(port_priv->device, 0,
						     port_priv->port_num,
						     wc, &recv->grh,
						     &recv->mad.mad,
						     &response->mad.mad);
		if (ret & IB_MAD_RESULT_SUCCESS) {
			if (ret & IB_MAD_RESULT_CONSUMED)
				goto out;
			if (ret & IB_MAD_RESULT_REPLY) {
				agent_send_response(&response->mad.mad,
						    &recv->grh, wc,
						    port_priv->device,
						    port_priv->port_num,
						    qp_info->qp->qp_num);
				goto out;
			}
		}
	}

	mad_agent = find_mad_agent(port_priv, &recv->mad.mad);
	if (mad_agent) {
		ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
		/*
		 * recv is freed up in error cases in ib_mad_complete_recv
		 * or via recv_handler in ib_mad_complete_recv()
		 */
		recv = NULL;
	}

out:
	/* Post another receive request for this QP */
	if (response) {
		ib_mad_post_receive_mads(qp_info, response);
		if (recv)
			kmem_cache_free(ib_mad_cache, recv);
	} else
		ib_mad_post_receive_mads(qp_info, recv);
}
static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long delay;

	if (list_empty(&mad_agent_priv->wait_list)) {
		cancel_delayed_work(&mad_agent_priv->timed_work);
	} else {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_agent_priv->timeout,
			       mad_send_wr->timeout)) {
			mad_agent_priv->timeout = mad_send_wr->timeout;
			cancel_delayed_work(&mad_agent_priv->timed_work);
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(mad_agent_priv->qp_info->
					   port_priv->wq,
					   &mad_agent_priv->timed_work, delay);
		}
	}
}
static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *temp_mad_send_wr;
	struct list_head *list_item;
	unsigned long delay;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	list_del(&mad_send_wr->agent_list);

	delay = mad_send_wr->timeout;
	mad_send_wr->timeout += jiffies;

	if (delay) {
		list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
			temp_mad_send_wr = list_entry(list_item,
						struct ib_mad_send_wr_private,
						agent_list);
			if (time_after(mad_send_wr->timeout,
				       temp_mad_send_wr->timeout))
				break;
		}
	} else
		list_item = &mad_agent_priv->wait_list;
	list_add(&mad_send_wr->agent_list, list_item);

	/* Reschedule a work item if we have a shorter timeout */
	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
		cancel_delayed_work(&mad_agent_priv->timed_work);
		queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
				   &mad_agent_priv->timed_work, delay);
	}
}
void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
			  int timeout_ms)
{
	mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	wait_for_response(mad_send_wr);
}
/*
 * Process a send work completion
 */
void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
			     struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_agent_private *mad_agent_priv;
	unsigned long flags;
	int ret;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	if (mad_agent_priv->agent.rmpp_version) {
		ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
		if (ret == IB_RMPP_RESULT_CONSUMED)
			goto done;
	} else
		ret = IB_RMPP_RESULT_UNHANDLED;

	if (mad_send_wc->status != IB_WC_SUCCESS &&
	    mad_send_wr->status == IB_WC_SUCCESS) {
		mad_send_wr->status = mad_send_wc->status;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	if (--mad_send_wr->refcount > 0) {
		if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
		    mad_send_wr->status == IB_WC_SUCCESS) {
			wait_for_response(mad_send_wr);
		}
		goto done;
	}

	/* Remove send from MAD agent and notify client of completion */
	list_del(&mad_send_wr->agent_list);
	adjust_timeout(mad_agent_priv);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	if (mad_send_wr->status != IB_WC_SUCCESS)
		mad_send_wc->status = mad_send_wr->status;
	if (ret == IB_RMPP_RESULT_INTERNAL)
		ib_rmpp_send_handler(mad_send_wc);
	else
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   mad_send_wc);

	/* Release reference on agent taken when sending */
	deref_mad_agent(mad_agent_priv);
	return;
done:
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
				     struct ib_wc *wc)
{
	struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
	struct ib_mad_list_head *mad_list;
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_queue *send_queue;
	struct ib_send_wr *bad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;
	int ret;

	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	send_queue = mad_list->mad_queue;
	qp_info = send_queue->qp_info;

retry:
	dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
			 pci_unmap_addr(mad_send_wr, header_mapping),
			 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
	dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
			 pci_unmap_addr(mad_send_wr, payload_mapping),
			 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
	queued_send_wr = NULL;
	spin_lock_irqsave(&send_queue->lock, flags);
	list_del(&mad_list->list);

	/* Move queued send to the send queue */
	if (send_queue->count-- > send_queue->max_active) {
		mad_list = container_of(qp_info->overflow_list.next,
					struct ib_mad_list_head, list);
		queued_send_wr = container_of(mad_list,
					struct ib_mad_send_wr_private,
					mad_list);
		list_del(&mad_list->list);
		list_add_tail(&mad_list->list, &send_queue->list);
	}
	spin_unlock_irqrestore(&send_queue->lock, flags);

	mad_send_wc.send_buf = &mad_send_wr->send_buf;
	mad_send_wc.status = wc->status;
	mad_send_wc.vendor_err = wc->vendor_err;
	if (atomic_read(&qp_info->snoop_count))
		snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
			   IB_MAD_SNOOP_SEND_COMPLETIONS);
	ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);

	if (queued_send_wr) {
		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
				   &bad_send_wr);
		if (ret) {
			printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
			mad_send_wr = queued_send_wr;
			wc->status = IB_WC_LOC_QP_OP_ERR;
			goto retry;
		}
	}
}
static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_list_head *mad_list;
	unsigned long flags;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
		mad_send_wr = container_of(mad_list,
					   struct ib_mad_send_wr_private,
					   mad_list);
		mad_send_wr->retry = 1;
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
}
static void mad_error_handler(struct ib_mad_port_private *port_priv,
			      struct ib_wc *wc)
{
	struct ib_mad_list_head *mad_list;
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_send_wr_private *mad_send_wr;
	int ret;

	/* Determine if failure was a send or receive */
	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	qp_info = mad_list->mad_queue->qp_info;
	if (mad_list->mad_queue == &qp_info->recv_queue)
		/*
		 * Receive errors indicate that the QP has entered the error
		 * state - error handling/shutdown code will cleanup
		 */
		return;

	/*
	 * Send errors will transition the QP to SQE - move
	 * QP to RTS and repost flushed work requests
	 */
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	if (wc->status == IB_WC_WR_FLUSH_ERR) {
		if (mad_send_wr->retry) {
			/* Repost send */
			struct ib_send_wr *bad_send_wr;

			mad_send_wr->retry = 0;
			ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
					&bad_send_wr);
			if (ret)
				ib_mad_send_done_handler(port_priv, wc);
		} else
			ib_mad_send_done_handler(port_priv, wc);
	} else {
		struct ib_qp_attr *attr;

		/* Transition QP to RTS and fail offending send */
		attr = kmalloc(sizeof *attr, GFP_KERNEL);
		if (attr) {
			attr->qp_state = IB_QPS_RTS;
			attr->cur_qp_state = IB_QPS_SQE;
			ret = ib_modify_qp(qp_info->qp, attr,
					   IB_QP_STATE | IB_QP_CUR_STATE);
			kfree(attr);
			if (ret)
				printk(KERN_ERR PFX "mad_error_handler - "
				       "ib_modify_qp to RTS : %d\n", ret);
			else
				mark_sends_for_retry(qp_info);
		}
		ib_mad_send_done_handler(port_priv, wc);
	}
}
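/*
 * Background for the recovery above: per the IB spec, a completion error
 * on a UD QP's send queue moves the QP to the SQE state, and remaining
 * posted sends then complete with IB_WC_WR_FLUSH_ERR.  Marking those
 * sends for retry before moving the QP back to RTS lets the flushed
 * completions drive one repost attempt each through the
 * IB_WC_WR_FLUSH_ERR branch.
 */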
/*
 * IB MAD completion callback
 */
static void ib_mad_completion_handler(void *data)
{
	struct ib_mad_port_private *port_priv;
	struct ib_wc wc;

	port_priv = (struct ib_mad_port_private *)data;
	ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);

	while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
		if (wc.status == IB_WC_SUCCESS) {
			switch (wc.opcode) {
			case IB_WC_SEND:
				ib_mad_send_done_handler(port_priv, &wc);
				break;
			case IB_WC_RECV:
				ib_mad_recv_done_handler(port_priv, &wc);
				break;
			default:
				BUG_ON(1);
				break;
			}
		} else
			mad_error_handler(port_priv, &wc);
	}
}
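/*
 * Note the order above: the CQ is re-armed with ib_req_notify_cq()
 * before polling, so a completion that arrives between the final poll
 * and the return still triggers a fresh callback.  Re-arming only after
 * the poll loop would leave a window where completions go unnoticed.
 */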
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
{
	unsigned long flags;
	struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	struct list_head cancel_list;

	INIT_LIST_HEAD(&cancel_list);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &mad_agent_priv->send_list, agent_list) {
		if (mad_send_wr->status == IB_WC_SUCCESS) {
			mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
			mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
		}
	}

	/* Empty wait list to prevent receives from finding a request */
	list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
	/* Empty local completion list as well */
	list_splice_init(&mad_agent_priv->local_list, &cancel_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	/* Report all cancelled requests */
	mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
	mad_send_wc.vendor_err = 0;

	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &cancel_list, agent_list) {
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		list_del(&mad_send_wr->agent_list);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);
		atomic_dec(&mad_agent_priv->refcount);
	}
}
static struct ib_mad_send_wr_private*
find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
	     struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_send_wr_private *mad_send_wr;

	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
			    agent_list) {
		if (&mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}

	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
			    agent_list) {
		if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
		    &mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}
	return NULL;
}
int ib_modify_mad(struct ib_mad_agent *mad_agent,
		  struct ib_mad_send_buf *send_buf, u32 timeout_ms)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int active;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
	if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		return -EINVAL;
	}

	active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
	if (!timeout_ms) {
		mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	mad_send_wr->send_buf.timeout_ms = timeout_ms;
	if (active)
		mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	else
		ib_reset_mad_timeout(mad_send_wr, timeout_ms);

	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	return 0;
}
EXPORT_SYMBOL(ib_modify_mad);
void ib_cancel_mad(struct ib_mad_agent *mad_agent,
		   struct ib_mad_send_buf *send_buf)
{
	ib_modify_mad(mad_agent, send_buf, 0);
}
EXPORT_SYMBOL(ib_cancel_mad);
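/*
 * Typical client usage (a sketch; "agent" and "send_buf" stand for
 * handles returned by ib_register_mad_agent() and ib_create_send_mad()):
 *
 *	ib_modify_mad(agent, send_buf, 2000);	extend the timeout to 2s
 *	ib_cancel_mad(agent, send_buf);		or abort: the request then
 *						completes through the agent's
 *						send_handler with an
 *						IB_WC_WR_FLUSH_ERR status
 */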
static void local_completions(void *data)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_local_private *local;
	struct ib_mad_agent_private *recv_mad_agent;
	unsigned long flags;
	int recv = 0;
	struct ib_wc wc;
	struct ib_mad_send_wc mad_send_wc;

	mad_agent_priv = (struct ib_mad_agent_private *)data;

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->local_list)) {
		local = list_entry(mad_agent_priv->local_list.next,
				   struct ib_mad_local_private,
				   completion_list);
		list_del(&local->completion_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		if (local->mad_priv) {
			recv_mad_agent = local->recv_mad_agent;
			if (!recv_mad_agent) {
				printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
				goto local_send_completion;
			}

			recv = 1;
			/*
			 * Defined behavior is to complete response
			 * before request
			 */
			build_smp_wc((unsigned long) local->mad_send_wr,
				     be16_to_cpu(IB_LID_PERMISSIVE),
				     0, recv_mad_agent->agent.port_num, &wc);

			local->mad_priv->header.recv_wc.wc = &wc;
			local->mad_priv->header.recv_wc.mad_len =
						sizeof(struct ib_mad);
			INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
			list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
				 &local->mad_priv->header.recv_wc.rmpp_list);
			local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
			local->mad_priv->header.recv_wc.recv_buf.mad =
						&local->mad_priv->mad.mad;
			if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
				snoop_recv(recv_mad_agent->qp_info,
					   &local->mad_priv->header.recv_wc,
					   IB_MAD_SNOOP_RECVS);
			recv_mad_agent->agent.recv_handler(
						&recv_mad_agent->agent,
						&local->mad_priv->header.recv_wc);
			spin_lock_irqsave(&recv_mad_agent->lock, flags);
			atomic_dec(&recv_mad_agent->refcount);
			spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
		}

local_send_completion:
		/* Complete send */
		mad_send_wc.status = IB_WC_SUCCESS;
		mad_send_wc.vendor_err = 0;
		mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
		if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
			snoop_send(mad_agent_priv->qp_info,
				   &local->mad_send_wr->send_buf,
				   &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		atomic_dec(&mad_agent_priv->refcount);
		if (!recv)
			kmem_cache_free(ib_mad_cache, local->mad_priv);
		kfree(local);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret;

	if (!mad_send_wr->retries--)
		return -ETIMEDOUT;

	mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);

	if (mad_send_wr->mad_agent_priv->agent.rmpp_version) {
		ret = ib_retry_rmpp(mad_send_wr);
		switch (ret) {
		case IB_RMPP_RESULT_UNHANDLED:
			ret = ib_send_mad(mad_send_wr);
			break;
		case IB_RMPP_RESULT_CONSUMED:
			ret = 0;
			break;
		default:
			ret = -ECOMM;
			break;
		}
	} else
		ret = ib_send_mad(mad_send_wr);

	if (!ret) {
		mad_send_wr->refcount++;
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->send_list);
	}
	return ret;
}
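/*
 * retry_send() consumes one entry from mad_send_wr->retries per call.
 * On success the request is re-queued on the agent's send_list with an
 * extra reference, mirroring the bookkeeping of the original send; a
 * zero return means "retried", non-zero means the caller must complete
 * the MAD with a timeout status.
 */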
static void timeout_sends(void *data)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags, delay;

	mad_agent_priv = (struct ib_mad_agent_private *)data;
	mad_send_wc.vendor_err = 0;

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->wait_list)) {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_send_wr->timeout, jiffies)) {
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(mad_agent_priv->qp_info->
					   port_priv->wq,
					   &mad_agent_priv->timed_work, delay);
			break;
		}

		list_del(&mad_send_wr->agent_list);
		if (mad_send_wr->status == IB_WC_SUCCESS &&
		    !retry_send(mad_send_wr))
			continue;

		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (mad_send_wr->status == IB_WC_SUCCESS)
			mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
		else
			mad_send_wc.status = mad_send_wr->status;
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		atomic_dec(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
{
	struct ib_mad_port_private *port_priv = cq->cq_context;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	if (!list_empty(&port_priv->port_list))
		queue_work(port_priv->wq, &port_priv->work);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
}
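/*
 * This runs in the CQ's completion (interrupt) context, so it only
 * schedules port_priv->work; the actual polling happens later in
 * ib_mad_completion_handler() on the port's single-threaded workqueue.
 * The port_list check keeps a completion racing with port shutdown from
 * queueing work on a workqueue that is about to be destroyed.
 */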
/*
 * Allocate receive MADs and post receive WRs for them
 */
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad)
{
	unsigned long flags;
	int post, ret;
	struct ib_mad_private *mad_priv;
	struct ib_sge sg_list;
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct ib_mad_queue *recv_queue = &qp_info->recv_queue;

	/* Initialize common scatter list fields */
	sg_list.length = sizeof *mad_priv - sizeof mad_priv->header;
	sg_list.lkey = (*qp_info->port_priv->mr).lkey;

	/* Initialize common receive WR fields */
	recv_wr.next = NULL;
	recv_wr.sg_list = &sg_list;
	recv_wr.num_sge = 1;

	do {
		/* Allocate and map receive buffer */
		if (mad) {
			mad_priv = mad;
			mad = NULL;
		} else {
			mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
			if (!mad_priv) {
				printk(KERN_ERR PFX "No memory for receive buffer\n");
				ret = -ENOMEM;
				break;
			}
		}
		sg_list.addr = dma_map_single(qp_info->port_priv->
						device->dma_device,
					      &mad_priv->grh,
					      sizeof *mad_priv -
						sizeof mad_priv->header,
					      DMA_FROM_DEVICE);
		pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr);
		recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
		mad_priv->header.mad_list.mad_queue = recv_queue;

		/* Post receive WR */
		spin_lock_irqsave(&recv_queue->lock, flags);
		post = (++recv_queue->count < recv_queue->max_active);
		list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
		spin_unlock_irqrestore(&recv_queue->lock, flags);
		ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
		if (ret) {
			spin_lock_irqsave(&recv_queue->lock, flags);
			list_del(&mad_priv->header.mad_list.list);
			recv_queue->count--;
			spin_unlock_irqrestore(&recv_queue->lock, flags);
			dma_unmap_single(qp_info->port_priv->device->dma_device,
					 pci_unmap_addr(&mad_priv->header,
							mapping),
					 sizeof *mad_priv -
						sizeof mad_priv->header,
					 DMA_FROM_DEVICE);
			kmem_cache_free(ib_mad_cache, mad_priv);
			printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
			break;
		}
	} while (post);

	return ret;
}
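/*
 * The do/while above keeps posting until the receive queue is refilled
 * to max_active.  When called from the receive completion path, "mad"
 * lets the just-consumed buffer be reposted directly instead of going
 * back through the slab allocator.
 */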
/*
 * Return all the posted receive MADs
 */
static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv;
	struct ib_mad_list_head *mad_list;

	while (!list_empty(&qp_info->recv_queue.list)) {

		mad_list = list_entry(qp_info->recv_queue.list.next,
				      struct ib_mad_list_head, list);
		mad_priv_hdr = container_of(mad_list,
					    struct ib_mad_private_header,
					    mad_list);
		recv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);

		/* Remove from posted receive MAD list */
		list_del(&mad_list->list);

		dma_unmap_single(qp_info->port_priv->device->dma_device,
				 pci_unmap_addr(&recv->header, mapping),
				 sizeof(struct ib_mad_private) -
				 sizeof(struct ib_mad_private_header),
				 DMA_FROM_DEVICE);
		kmem_cache_free(ib_mad_cache, recv);
	}

	qp_info->recv_queue.count = 0;
}
/*
 * Start the port
 */
static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
{
	int ret, i;
	struct ib_qp_attr *attr;
	struct ib_qp *qp;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr) {
		printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n");
		return -ENOMEM;
	}

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		qp = port_priv->qp_info[i].qp;
		/*
		 * PKey index for QP1 is irrelevant but
		 * one is needed for the Reset to Init transition
		 */
		attr->qp_state = IB_QPS_INIT;
		attr->pkey_index = 0;
		attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE |
					     IB_QP_PKEY_INDEX | IB_QP_QKEY);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't change QP%d state to "
			       "INIT: %d\n", i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTR;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't change QP%d state to "
			       "RTR: %d\n", i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTS;
		attr->sq_psn = IB_MAD_SEND_Q_PSN;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't change QP%d state to "
			       "RTS: %d\n", i, ret);
			goto out;
		}
	}

	ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
	if (ret) {
		printk(KERN_ERR PFX "Failed to request completion "
		       "notification: %d\n", ret);
		goto out;
	}

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't post receive WRs\n");
			goto out;
		}
	}
out:
	kfree(attr);
	return ret;
}
static void qp_event_handler(struct ib_event *event, void *qp_context)
{
	struct ib_mad_qp_info *qp_info = qp_context;

	/* It's worse than that! He's dead, Jim! */
	printk(KERN_ERR PFX "Fatal error (%d) on MAD QP (%d)\n",
	       event->event, qp_info->qp->qp_num);
}
static void init_mad_queue(struct ib_mad_qp_info *qp_info,
			   struct ib_mad_queue *mad_queue)
{
	mad_queue->qp_info = qp_info;
	mad_queue->count = 0;
	spin_lock_init(&mad_queue->lock);
	INIT_LIST_HEAD(&mad_queue->list);
}
static void init_mad_qp(struct ib_mad_port_private *port_priv,
			struct ib_mad_qp_info *qp_info)
{
	qp_info->port_priv = port_priv;
	init_mad_queue(qp_info, &qp_info->send_queue);
	init_mad_queue(qp_info, &qp_info->recv_queue);
	INIT_LIST_HEAD(&qp_info->overflow_list);
	spin_lock_init(&qp_info->snoop_lock);
	qp_info->snoop_table = NULL;
	qp_info->snoop_table_size = 0;
	atomic_set(&qp_info->snoop_count, 0);
}
static int create_mad_qp(struct ib_mad_qp_info *qp_info,
			 enum ib_qp_type qp_type)
{
	struct ib_qp_init_attr	qp_init_attr;
	int ret;

	memset(&qp_init_attr, 0, sizeof qp_init_attr);
	qp_init_attr.send_cq = qp_info->port_priv->cq;
	qp_init_attr.recv_cq = qp_info->port_priv->cq;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.cap.max_send_wr = IB_MAD_QP_SEND_SIZE;
	qp_init_attr.cap.max_recv_wr = IB_MAD_QP_RECV_SIZE;
	qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
	qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
	qp_init_attr.qp_type = qp_type;
	qp_init_attr.port_num = qp_info->port_priv->port_num;
	qp_init_attr.qp_context = qp_info;
	qp_init_attr.event_handler = qp_event_handler;
	qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
	if (IS_ERR(qp_info->qp)) {
		printk(KERN_ERR PFX "Couldn't create ib_mad QP%d\n",
		       get_spl_qp_index(qp_type));
		ret = PTR_ERR(qp_info->qp);
		goto error;
	}
	/* Use minimum queue sizes unless the CQ is resized */
	qp_info->send_queue.max_active = IB_MAD_QP_SEND_SIZE;
	qp_info->recv_queue.max_active = IB_MAD_QP_RECV_SIZE;
	return 0;

error:
	return ret;
}
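/*
 * Both the SMI (QP0) and GSI (QP1) QPs created here share the port's
 * single CQ, which is why ib_mad_port_open() below sizes it at
 * (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2.
 */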
static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
{
	ib_destroy_qp(qp_info->qp);
	kfree(qp_info->snoop_table);
}
/*
 * Open the port
 * Create the QP, PD, MR, and CQ if needed
 */
static int ib_mad_port_open(struct ib_device *device,
			    int port_num)
{
	int ret, cq_size;
	struct ib_mad_port_private *port_priv;
	unsigned long flags;
	char name[sizeof "ib_mad123"];

	/* Create new device info */
	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
	if (!port_priv) {
		printk(KERN_ERR PFX "No memory for ib_mad_port_private\n");
		return -ENOMEM;
	}

	port_priv->device = device;
	port_priv->port_num = port_num;
	spin_lock_init(&port_priv->reg_lock);
	INIT_LIST_HEAD(&port_priv->agent_list);
	init_mad_qp(port_priv, &port_priv->qp_info[0]);
	init_mad_qp(port_priv, &port_priv->qp_info[1]);

	cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2;
	port_priv->cq = ib_create_cq(port_priv->device,
				     ib_mad_thread_completion_handler,
				     NULL, port_priv, cq_size);
	if (IS_ERR(port_priv->cq)) {
		printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
		ret = PTR_ERR(port_priv->cq);
		goto error3;
	}

	port_priv->pd = ib_alloc_pd(device);
	if (IS_ERR(port_priv->pd)) {
		printk(KERN_ERR PFX "Couldn't create ib_mad PD\n");
		ret = PTR_ERR(port_priv->pd);
		goto error4;
	}

	port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(port_priv->mr)) {
		printk(KERN_ERR PFX "Couldn't get ib_mad DMA MR\n");
		ret = PTR_ERR(port_priv->mr);
		goto error5;
	}

	ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
	if (ret)
		goto error6;
	ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
	if (ret)
		goto error7;

	snprintf(name, sizeof name, "ib_mad%d", port_num);
	port_priv->wq = create_singlethread_workqueue(name);
	if (!port_priv->wq) {
		ret = -ENOMEM;
		goto error8;
	}
	INIT_WORK(&port_priv->work, ib_mad_completion_handler, port_priv);

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_add_tail(&port_priv->port_list, &ib_mad_port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	ret = ib_mad_port_start(port_priv);
	if (ret) {
		printk(KERN_ERR PFX "Couldn't start port\n");
		goto error9;
	}

	return 0;

error9:
	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
error8:
	destroy_mad_qp(&port_priv->qp_info[1]);
error7:
	destroy_mad_qp(&port_priv->qp_info[0]);
error6:
	ib_dereg_mr(port_priv->mr);
error5:
	ib_dealloc_pd(port_priv->pd);
error4:
	ib_destroy_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
error3:
	kfree(port_priv);

	return ret;
}
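/*
 * The unwind path above releases resources in strict reverse order of
 * acquisition, the usual kernel idiom: each errorN label undoes exactly
 * the steps that succeeded before the failure, so an early exit leaves
 * no QP, MR, PD, CQ, or workqueue behind.
 */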
/*
 * Close the port
 * If there are no classes using the port, free the port
 * resources (CQ, MR, PD, QP) and remove the port's info structure
 */
static int ib_mad_port_close(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	port_priv = __ib_get_mad_port(device, port_num);
	if (port_priv == NULL) {
		spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
		printk(KERN_ERR PFX "Port %d not found\n", port_num);
		return -ENODEV;
	}
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
	destroy_mad_qp(&port_priv->qp_info[1]);
	destroy_mad_qp(&port_priv->qp_info[0]);
	ib_dereg_mr(port_priv->mr);
	ib_dealloc_pd(port_priv->pd);
	ib_destroy_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
	/* XXX: Handle deallocation of MAD registration tables */

	kfree(port_priv);

	return 0;
}
2837 /* XXX: Handle deallocation of MAD registration tables */
2844 static void ib_mad_init_device(struct ib_device
*device
)
2848 if (device
->node_type
== IB_NODE_SWITCH
) {
2853 end
= device
->phys_port_cnt
;
2856 for (i
= start
; i
<= end
; i
++) {
2857 if (ib_mad_port_open(device
, i
)) {
2858 printk(KERN_ERR PFX
"Couldn't open %s port %d\n",
2862 if (ib_agent_port_open(device
, i
)) {
2863 printk(KERN_ERR PFX
"Couldn't open %s port %d "
2872 if (ib_mad_port_close(device
, i
))
2873 printk(KERN_ERR PFX
"Couldn't close %s port %d\n",
2879 while (i
>= start
) {
2880 if (ib_agent_port_close(device
, i
))
2881 printk(KERN_ERR PFX
"Couldn't close %s port %d "
2884 if (ib_mad_port_close(device
, i
))
2885 printk(KERN_ERR PFX
"Couldn't close %s port %d\n",
static void ib_mad_remove_device(struct ib_device *device)
{
	int i, num_ports, cur_port;

	if (device->node_type == IB_NODE_SWITCH) {
		num_ports = 1;
		cur_port = 0;
	} else {
		num_ports = device->phys_port_cnt;
		cur_port = 1;
	}
	for (i = 0; i < num_ports; i++, cur_port++) {
		if (ib_agent_port_close(device, cur_port))
			printk(KERN_ERR PFX "Couldn't close %s port %d "
			       "for agents\n",
			       device->name, cur_port);
		if (ib_mad_port_close(device, cur_port))
			printk(KERN_ERR PFX "Couldn't close %s port %d\n",
			       device->name, cur_port);
	}
}
static struct ib_client mad_client = {
	.name   = "mad",
	.add = ib_mad_init_device,
	.remove = ib_mad_remove_device
};
static int __init ib_mad_init_module(void)
{
	int ret;

	spin_lock_init(&ib_mad_port_list_lock);

	ib_mad_cache = kmem_cache_create("ib_mad",
					 sizeof(struct ib_mad_private),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL,
					 NULL);
	if (!ib_mad_cache) {
		printk(KERN_ERR PFX "Couldn't create ib_mad cache\n");
		ret = -ENOMEM;
		goto error1;
	}

	INIT_LIST_HEAD(&ib_mad_port_list);

	if (ib_register_client(&mad_client)) {
		printk(KERN_ERR PFX "Couldn't register ib_mad client\n");
		ret = -EINVAL;
		goto error2;
	}

	return 0;

error2:
	kmem_cache_destroy(ib_mad_cache);
error1:
	return ret;
}
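/*
 * ib_register_client() invokes mad_client.add (ib_mad_init_device) for
 * every IB device already known to the core, and again for each device
 * hot-added later, so MAD services come up on all ports without further
 * action here.
 */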
static void __exit ib_mad_cleanup_module(void)
{
	ib_unregister_client(&mad_client);

	if (kmem_cache_destroy(ib_mad_cache)) {
		printk(KERN_DEBUG PFX "Failed to destroy ib_mad cache\n");
	}
}

module_init(ib_mad_init_module);
module_exit(ib_mad_cleanup_module);