/*
 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mad.c 1389 2004-12-27 22:56:47Z roland $
 */
#include <linux/dma-mapping.h>

#include "mad_priv.h"
#include "smi.h"
#include "agent.h"
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("kernel IB MAD API");
MODULE_AUTHOR("Hal Rosenstock");
MODULE_AUTHOR("Sean Hefty");
kmem_cache_t *ib_mad_cache;

static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;

/* Port list lock */
static spinlock_t ib_mad_port_list_lock;
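/*
 * Each registered agent is handed a unique hi_tid (taken from
 * ib_mad_client_id under the port's reg_lock).  Clients place it in the
 * upper 32 bits of their transaction IDs; find_mad_agent() uses those
 * bits to route response MADs back to the issuing agent.
 */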
/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
					struct ib_mad *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
				    struct ib_mad_send_wc *mad_send_wc);
static void timeout_sends(void *data);
static void local_completions(void *data);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);
/*
 * Returns a ib_mad_port_private structure or NULL for a device/port.
 * Assumes ib_mad_port_list_lock is being held.
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;

	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
		if (entry->device == device && entry->port_num == port_num)
			return entry;
	}
	return NULL;
}
/*
 * Wrapper function to return a ib_mad_port_private structure or NULL
 * for a device/port.
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	entry = __ib_get_mad_port(device, port_num);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	return entry;
}
static inline u8 convert_mgmt_class(u8 mgmt_class)
{
	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
		0 : mgmt_class;
}
static int get_spl_qp_index(enum ib_qp_type qp_type)
{
	switch (qp_type)
	{
	case IB_QPT_SMI:
		return 0;
	case IB_QPT_GSI:
		return 1;
	default:
		return -1;
	}
}
static int vendor_class_index(u8 mgmt_class)
{
	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}
static int is_vendor_class(u8 mgmt_class)
{
	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return 0;
	return 1;
}
static int is_vendor_oui(char *oui)
{
	if (oui[0] || oui[1] || oui[2])
		return 1;
	return 0;
}
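/*
 * Vendor classes in "range 2" (IB_MGMT_CLASS_VENDOR_RANGE2_START through
 * IB_MGMT_CLASS_VENDOR_RANGE2_END) are further qualified by a 3-byte OUI,
 * so two agents may register the same class as long as their OUIs differ.
 * A zero OUI is rejected at registration time (see ib_register_mad_agent).
 */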
static int is_vendor_method_in_use(
		struct ib_mad_mgmt_vendor_class *vendor_class,
		struct ib_mad_reg_req *mad_reg_req)
{
	struct ib_mad_mgmt_method_table *method;
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++) {
		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
			method = vendor_class->method_table[i];
			if (method) {
				if (method_in_use(&method, mad_reg_req))
					return 1;
				break;
			}
		}
	}
	return 0;
}
/*
 * ib_register_mad_agent - Register to send/receive MADs
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   u8 rmpp_version,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_reg_req *reg_req = NULL;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	struct ib_mad_mgmt_method_table *method;
	int ret2, qpn;
	unsigned long flags;
	u8 mgmt_class, vclass;

	/* Validate parameters */
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1)
		goto error1;

	if (rmpp_version)
		goto error1;	/* XXX: until RMPP implemented */

	/* Validate MAD registration request if supplied */
	if (mad_reg_req) {
		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION)
			goto error1;
		if (!recv_handler)
			goto error1;
		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
			/*
			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
			 * one in this range currently allowed
			 */
			if (mad_reg_req->mgmt_class !=
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
				goto error1;
		} else if (mad_reg_req->mgmt_class == 0) {
			/*
			 * Class 0 is reserved in IBA and is used for
			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
			 */
			goto error1;
		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
			/*
			 * If class is in "new" vendor range,
			 * ensure supplied OUI is not zero
			 */
			if (!is_vendor_oui(mad_reg_req->oui))
				goto error1;
		}
		/* Make sure class supplied is consistent with QP type */
		if (qp_type == IB_QPT_SMI) {
			if ((mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
			    (mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
				goto error1;
		} else {
			if ((mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
			    (mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
				goto error1;
		}
	} else {
		/* No registration request supplied */
		if (!send_handler)
			goto error1;
	}

	/* Validate device and port */
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}

	/* Allocate structures */
	mad_agent_priv = kmalloc(sizeof *mad_agent_priv, GFP_KERNEL);
	if (!mad_agent_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}
	memset(mad_agent_priv, 0, sizeof *mad_agent_priv);

	mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
						 IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(mad_agent_priv->agent.mr)) {
		ret = ERR_PTR(-ENOMEM);
		goto error2;
	}

	if (mad_reg_req) {
		reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL);
		if (!reg_req) {
			ret = ERR_PTR(-ENOMEM);
			goto error3;
		}
		/* Make a copy of the MAD registration request */
		memcpy(reg_req, mad_reg_req, sizeof *reg_req);
	}

	/* Now, fill in the various structures */
	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
	mad_agent_priv->reg_req = reg_req;
	mad_agent_priv->rmpp_version = rmpp_version;
	mad_agent_priv->agent.device = device;
	mad_agent_priv->agent.recv_handler = recv_handler;
	mad_agent_priv->agent.send_handler = send_handler;
	mad_agent_priv->agent.context = context;
	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_agent_priv->agent.port_num = port_num;

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;

	/*
	 * Make sure MAD registration (if supplied)
	 * is non overlapping with any existing ones
	 */
	if (mad_reg_req) {
		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
		if (!is_vendor_class(mgmt_class)) {
			class = port_priv->version[mad_reg_req->
						   mgmt_class_version].class;
			if (class) {
				method = class->method_table[mgmt_class];
				if (method) {
					if (method_in_use(&method,
							  mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
						  mgmt_class);
		} else {
			/* "New" vendor class range */
			vendor = port_priv->version[mad_reg_req->
						    mgmt_class_version].vendor;
			if (vendor) {
				vclass = vendor_class_index(mgmt_class);
				vendor_class = vendor->vendor_class[vclass];
				if (vendor_class) {
					if (is_vendor_method_in_use(
							vendor_class,
							mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
		}
		if (ret2) {
			ret = ERR_PTR(ret2);
			goto error4;
		}
	}

	/* Add mad agent into port's agent list */
	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	spin_lock_init(&mad_agent_priv->lock);
	INIT_LIST_HEAD(&mad_agent_priv->send_list);
	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
	INIT_LIST_HEAD(&mad_agent_priv->done_list);
	INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv);
	INIT_LIST_HEAD(&mad_agent_priv->local_list);
	INIT_WORK(&mad_agent_priv->local_work, local_completions,
		  mad_agent_priv);
	atomic_set(&mad_agent_priv->refcount, 1);
	init_waitqueue_head(&mad_agent_priv->wait);

	return &mad_agent_priv->agent;

error4:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	kfree(reg_req);
error3:
	/* Deregister the MR before freeing the structure that owns it */
	ib_dereg_mr(mad_agent_priv->agent.mr);
error2:
	kfree(mad_agent_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_agent);
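/*
 * A minimal registration sketch, for illustration only (handler bodies,
 * device/port values, and context are hypothetical):
 *
 *	struct ib_mad_reg_req req = {
 *		.mgmt_class	    = IB_MGMT_CLASS_SUBN_ADM,
 *		.mgmt_class_version = 2,
 *	};
 *	struct ib_mad_agent *agent;
 *
 *	set_bit(IB_MGMT_METHOD_GET, req.method_mask);
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI, &req,
 *				      0, my_send_handler, my_recv_handler,
 *				      my_context);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 */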
static inline int is_snooping_sends(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(/*IB_MAD_SNOOP_POSTED_SENDS |
		 IB_MAD_SNOOP_RMPP_SENDS |*/
		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(IB_MAD_SNOOP_RECVS /*|
		 IB_MAD_SNOOP_RMPP_RECVS*/));
}
static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
				struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_snoop_private **new_snoop_table;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	/* Check for empty slot in array. */
	for (i = 0; i < qp_info->snoop_table_size; i++)
		if (!qp_info->snoop_table[i])
			break;

	if (i == qp_info->snoop_table_size) {
		/* Grow table by one slot. */
		new_snoop_table = kmalloc(sizeof mad_snoop_priv *
					  (qp_info->snoop_table_size + 1),
					  GFP_ATOMIC);
		if (!new_snoop_table) {
			i = -ENOMEM;
			goto out;
		}
		if (qp_info->snoop_table) {
			memcpy(new_snoop_table, qp_info->snoop_table,
			       sizeof mad_snoop_priv *
			       qp_info->snoop_table_size);
			kfree(qp_info->snoop_table);
		}
		qp_info->snoop_table = new_snoop_table;
		qp_info->snoop_table_size++;
	}
	qp_info->snoop_table[i] = mad_snoop_priv;
	atomic_inc(&qp_info->snoop_count);
out:
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
	return i;
}
struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   int mad_snoop_flags,
					   ib_mad_snoop_handler snoop_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret;
	struct ib_mad_snoop_private *mad_snoop_priv;
	int qpn;

	/* Validate parameters */
	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}
	/* Allocate structures */
	mad_snoop_priv = kmalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
	if (!mad_snoop_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	/* Now, fill in the various structures */
	memset(mad_snoop_priv, 0, sizeof *mad_snoop_priv);
	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
	mad_snoop_priv->agent.device = device;
	mad_snoop_priv->agent.recv_handler = recv_handler;
	mad_snoop_priv->agent.snoop_handler = snoop_handler;
	mad_snoop_priv->agent.context = context;
	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_snoop_priv->agent.port_num = port_num;
	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
	init_waitqueue_head(&mad_snoop_priv->wait);
	mad_snoop_priv->snoop_index = register_snoop_agent(
						&port_priv->qp_info[qpn],
						mad_snoop_priv);
	if (mad_snoop_priv->snoop_index < 0) {
		ret = ERR_PTR(mad_snoop_priv->snoop_index);
		goto error2;
	}

	atomic_set(&mad_snoop_priv->refcount, 1);
	return &mad_snoop_priv->agent;

error2:
	kfree(mad_snoop_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_snoop);
static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	/* Note that we could still be handling received MADs */

	/*
	 * Canceling all sends results in dropping received response
	 * MADs, preventing us from queuing additional work
	 */
	cancel_mads(mad_agent_priv);
	port_priv = mad_agent_priv->qp_info->port_priv;
	cancel_delayed_work(&mad_agent_priv->timed_work);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	remove_mad_reg_req(mad_agent_priv);
	list_del(&mad_agent_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	flush_workqueue(port_priv->wq);

	atomic_dec(&mad_agent_priv->refcount);
	wait_event(mad_agent_priv->wait,
		   !atomic_read(&mad_agent_priv->refcount));

	if (mad_agent_priv->reg_req)
		kfree(mad_agent_priv->reg_req);
	ib_dereg_mr(mad_agent_priv->agent.mr);
	kfree(mad_agent_priv);
}
static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_qp_info *qp_info;
	unsigned long flags;

	qp_info = mad_snoop_priv->qp_info;
	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
	atomic_dec(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

	atomic_dec(&mad_snoop_priv->refcount);
	wait_event(mad_snoop_priv->wait,
		   !atomic_read(&mad_snoop_priv->refcount));

	kfree(mad_snoop_priv);
}
/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* If the TID is zero, the agent can only snoop. */
	if (mad_agent->hi_tid) {
		mad_agent_priv = container_of(mad_agent,
					      struct ib_mad_agent_private,
					      agent);
		unregister_mad_agent(mad_agent_priv);
	} else {
		mad_snoop_priv = container_of(mad_agent,
					      struct ib_mad_snoop_private,
					      agent);
		unregister_mad_snoop(mad_snoop_priv);
	}
	return 0;
}
EXPORT_SYMBOL(ib_unregister_mad_agent);
static inline int response_mad(struct ib_mad *mad)
{
	/* Trap represses are responses although response bit is reset */
	return ((mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
		(mad->mad_hdr.method & IB_MGMT_METHOD_RESP));
}
static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
	struct ib_mad_queue *mad_queue;
	unsigned long flags;

	BUG_ON(!mad_list->mad_queue);
	mad_queue = mad_list->mad_queue;
	spin_lock_irqsave(&mad_queue->lock, flags);
	list_del(&mad_list->list);
	mad_queue->count--;
	spin_unlock_irqrestore(&mad_queue->lock, flags);
}
static void snoop_send(struct ib_mad_qp_info *qp_info,
		       struct ib_send_wr *send_wr,
		       struct ib_mad_send_wc *mad_send_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
						    send_wr, mad_send_wc);
		if (atomic_dec_and_test(&mad_snoop_priv->refcount))
			wake_up(&mad_snoop_priv->wait);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}
static void snoop_recv(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_recv_wc *mad_recv_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
						   mad_recv_wc);
		if (atomic_dec_and_test(&mad_snoop_priv->refcount))
			wake_up(&mad_snoop_priv->wait);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}
static void build_smp_wc(u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
			 struct ib_wc *wc)
{
	memset(wc, 0, sizeof *wc);
	wc->wr_id = wr_id;
	wc->status = IB_WC_SUCCESS;
	wc->opcode = IB_WC_RECV;
	wc->pkey_index = pkey_index;
	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
	wc->src_qp = IB_QP0;
	wc->qp_num = IB_QP0;
	wc->slid = slid;
	wc->sl = 0;
	wc->dlid_path_bits = 0;
	wc->port_num = port_num;
}
/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
				  struct ib_smp *smp,
				  struct ib_send_wr *send_wr)
{
	int ret;
	unsigned long flags;
	struct ib_mad_local_private *local;
	struct ib_mad_private *mad_priv;
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent_private *recv_mad_agent = NULL;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num = mad_agent_priv->agent.port_num;
	struct ib_wc mad_wc;

	if (!smi_handle_dr_smp_send(smp, device->node_type, port_num)) {
		ret = -EINVAL;
		printk(KERN_ERR PFX "Invalid directed route\n");
		goto out;
	}
	/* Check to post send on QP or process locally */
	ret = smi_check_local_dr_smp(smp, device, port_num);
	if (!ret || !device->process_mad)
		goto out;

	local = kmalloc(sizeof *local, GFP_ATOMIC);
	if (!local) {
		ret = -ENOMEM;
		printk(KERN_ERR PFX "No memory for ib_mad_local_private\n");
		goto out;
	}
	local->mad_priv = NULL;
	local->recv_mad_agent = NULL;
	mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
	if (!mad_priv) {
		ret = -ENOMEM;
		printk(KERN_ERR PFX "No memory for local response MAD\n");
		kfree(local);
		goto out;
	}

	build_smp_wc(send_wr->wr_id, smp->dr_slid, send_wr->wr.ud.pkey_index,
		     send_wr->wr.ud.port_num, &mad_wc);

	/* No GRH for DR SMP */
	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
				  (struct ib_mad *)smp,
				  (struct ib_mad *)&mad_priv->mad);
	switch (ret)
	{
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
		if (response_mad(&mad_priv->mad.mad) &&
		    mad_agent_priv->agent.recv_handler) {
			local->mad_priv = mad_priv;
			local->recv_mad_agent = mad_agent_priv;
			/*
			 * Reference MAD agent until receive
			 * side of local completion handled
			 */
			atomic_inc(&mad_agent_priv->refcount);
		} else
			kmem_cache_free(ib_mad_cache, mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
		kmem_cache_free(ib_mad_cache, mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS:
		/* Treat like an incoming receive MAD */
		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
					    mad_agent_priv->agent.port_num);
		if (port_priv) {
			mad_priv->mad.mad.mad_hdr.tid =
				((struct ib_mad *)smp)->mad_hdr.tid;
			recv_mad_agent = find_mad_agent(port_priv,
							&mad_priv->mad.mad);
		}
		if (!port_priv || !recv_mad_agent) {
			kmem_cache_free(ib_mad_cache, mad_priv);
			kfree(local);
			ret = 0;
			goto out;
		}
		local->mad_priv = mad_priv;
		local->recv_mad_agent = recv_mad_agent;
		break;
	default:
		kmem_cache_free(ib_mad_cache, mad_priv);
		kfree(local);
		ret = -EINVAL;
		goto out;
	}

	local->send_wr = *send_wr;
	local->send_wr.sg_list = local->sg_list;
	memcpy(local->sg_list, send_wr->sg_list,
	       sizeof *send_wr->sg_list * send_wr->num_sge);
	local->send_wr.next = NULL;
	local->tid = send_wr->wr.ud.mad_hdr->tid;
	local->wr_id = send_wr->wr_id;
	/* Reference MAD agent until send side of local completion handled */
	atomic_inc(&mad_agent_priv->refcount);
	/* Queue local completion to local list */
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->local_work);
	ret = 1;
out:
	return ret;
}
static int get_buf_length(int hdr_len, int data_len)
{
	int seg_size, pad;

	seg_size = sizeof(struct ib_mad) - hdr_len;
	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		if (pad == seg_size)
			pad = 0;
	} else
		pad = seg_size;
	return hdr_len + data_len + pad;
}
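/*
 * Example: with sizeof(struct ib_mad) == 256, hdr_len == 24 and
 * data_len == 200 give seg_size = 232 and pad = 32, so the buffer is
 * rounded up to 24 + 200 + 32 = 256 bytes, i.e. one full MAD segment.
 */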
struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
					    u32 remote_qpn, u16 pkey_index,
					    struct ib_ah *ah, int rmpp_active,
					    int hdr_len, int data_len,
					    unsigned int __nocast gfp_mask)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_buf *send_buf;
	int buf_size;
	void *buf;

	if (rmpp_active)
		return ERR_PTR(-EINVAL);	/* until RMPP implemented */
	mad_agent_priv = container_of(mad_agent,
				      struct ib_mad_agent_private, agent);
	buf_size = get_buf_length(hdr_len, data_len);

	buf = kmalloc(sizeof *send_buf + buf_size, gfp_mask);
	if (!buf)
		return ERR_PTR(-ENOMEM);
	memset(buf, 0, sizeof *send_buf + buf_size);

	send_buf = buf + buf_size;
	send_buf->mad = buf;

	send_buf->sge.addr = dma_map_single(mad_agent->device->dma_device,
					    buf, buf_size, DMA_TO_DEVICE);
	pci_unmap_addr_set(send_buf, mapping, send_buf->sge.addr);
	send_buf->sge.length = buf_size;
	send_buf->sge.lkey = mad_agent->mr->lkey;

	send_buf->send_wr.wr_id = (unsigned long) send_buf;
	send_buf->send_wr.sg_list = &send_buf->sge;
	send_buf->send_wr.num_sge = 1;
	send_buf->send_wr.opcode = IB_WR_SEND;
	send_buf->send_wr.send_flags = IB_SEND_SIGNALED;
	send_buf->send_wr.wr.ud.ah = ah;
	send_buf->send_wr.wr.ud.mad_hdr = &send_buf->mad->mad_hdr;
	send_buf->send_wr.wr.ud.remote_qpn = remote_qpn;
	send_buf->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
	send_buf->send_wr.wr.ud.pkey_index = pkey_index;
	send_buf->mad_agent = mad_agent;
	atomic_inc(&mad_agent_priv->refcount);
	return send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);
void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;

	mad_agent_priv = container_of(send_buf->mad_agent,
				      struct ib_mad_agent_private, agent);

	dma_unmap_single(send_buf->mad_agent->device->dma_device,
			 pci_unmap_addr(send_buf, mapping),
			 send_buf->sge.length, DMA_TO_DEVICE);
	kfree(send_buf->mad);

	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		wake_up(&mad_agent_priv->wait);
}
EXPORT_SYMBOL(ib_free_send_mad);
static int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_qp_info *qp_info;
	struct ib_send_wr *bad_send_wr;
	struct list_head *list;
	unsigned long flags;
	int ret;

	/* Set WR ID to find mad_send_wr upon completion */
	qp_info = mad_send_wr->mad_agent_priv->qp_info;
	mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
		ret = ib_post_send(mad_send_wr->mad_agent_priv->agent.qp,
				   &mad_send_wr->send_wr, &bad_send_wr);
		list = &qp_info->send_queue.list;
	} else {
		ret = 0;
		list = &qp_info->overflow_list;
	}

	if (!ret) {
		qp_info->send_queue.count++;
		list_add_tail(&mad_send_wr->mad_list.list, list);
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
	return ret;
}
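/*
 * Sends beyond send_queue.max_active are parked on qp_info->overflow_list
 * rather than posted; ib_mad_send_done_handler() promotes the oldest
 * overflow entry to the hardware send queue as completions drain it.
 */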
/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 *  with the registered client
 */
int ib_post_send_mad(struct ib_mad_agent *mad_agent,
		     struct ib_send_wr *send_wr,
		     struct ib_send_wr **bad_send_wr)
{
	int ret = -EINVAL;
	struct ib_mad_agent_private *mad_agent_priv;

	/* Validate supplied parameters */
	if (!bad_send_wr)
		goto error1;

	if (!mad_agent || !send_wr)
		goto error2;

	if (!mad_agent->send_handler)
		goto error2;

	mad_agent_priv = container_of(mad_agent,
				      struct ib_mad_agent_private,
				      agent);

	/* Walk list of send WRs and post each on send list */
	while (send_wr) {
		unsigned long			flags;
		struct ib_send_wr		*next_send_wr;
		struct ib_mad_send_wr_private	*mad_send_wr;
		struct ib_smp			*smp;

		/* Validate more parameters */
		if (send_wr->num_sge > IB_MAD_SEND_REQ_MAX_SG)
			goto error2;

		if (send_wr->wr.ud.timeout_ms && !mad_agent->recv_handler)
			goto error2;

		if (!send_wr->wr.ud.mad_hdr) {
			printk(KERN_ERR PFX "MAD header must be supplied "
			       "in WR %p\n", send_wr);
			goto error2;
		}

		/*
		 * Save pointer to next work request to post in case the
		 * current one completes, and the user modifies the work
		 * request associated with the completion
		 */
		next_send_wr = (struct ib_send_wr *)send_wr->next;

		smp = (struct ib_smp *)send_wr->wr.ud.mad_hdr;
		if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
			ret = handle_outgoing_dr_smp(mad_agent_priv, smp,
						     send_wr);
			if (ret < 0)		/* error */
				goto error2;
			else if (ret == 1)	/* locally consumed */
				goto next;
		}

		/* Allocate MAD send WR tracking structure */
		mad_send_wr = kmalloc(sizeof *mad_send_wr, GFP_ATOMIC);
		if (!mad_send_wr) {
			printk(KERN_ERR PFX "No memory for "
			       "ib_mad_send_wr_private\n");
			ret = -ENOMEM;
			goto error2;
		}

		mad_send_wr->send_wr = *send_wr;
		mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
		memcpy(mad_send_wr->sg_list, send_wr->sg_list,
		       sizeof *send_wr->sg_list * send_wr->num_sge);
		mad_send_wr->wr_id = mad_send_wr->send_wr.wr_id;
		mad_send_wr->send_wr.next = NULL;
		mad_send_wr->tid = send_wr->wr.ud.mad_hdr->tid;
		mad_send_wr->mad_agent_priv = mad_agent_priv;
		/* Timeout will be updated after send completes */
		mad_send_wr->timeout = msecs_to_jiffies(send_wr->wr.
							ud.timeout_ms);
		mad_send_wr->retries = mad_send_wr->send_wr.wr.ud.retries;
		/* One reference for each work request to QP + response */
		mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
		mad_send_wr->status = IB_WC_SUCCESS;

		/* Reference MAD agent until send completes */
		atomic_inc(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_agent_priv->send_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		ret = ib_send_mad(mad_send_wr);
		if (ret) {
			/* Fail send request */
			spin_lock_irqsave(&mad_agent_priv->lock, flags);
			list_del(&mad_send_wr->agent_list);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			atomic_dec(&mad_agent_priv->refcount);
			goto error2;
		}
next:
		send_wr = next_send_wr;
	}
	return 0;

error2:
	*bad_send_wr = send_wr;
error1:
	return ret;
}
EXPORT_SYMBOL(ib_post_send_mad);
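/*
 * A minimal send sketch, for illustration only (agent, ah, addressing
 * values, and MAD contents are assumed to have been set up already):
 *
 *	struct ib_mad_send_buf *msg;
 *	struct ib_send_wr *bad_wr;
 *
 *	msg = ib_create_send_mad(agent, remote_qpn, pkey_index, ah, 0,
 *				 24, 232, GFP_KERNEL);
 *	if (IS_ERR(msg))
 *		return PTR_ERR(msg);
 *	... fill in msg->mad ...
 *	msg->send_wr.wr.ud.timeout_ms = 1000;
 *	if (ib_post_send_mad(agent, &msg->send_wr, &bad_wr))
 *		ib_free_send_mad(msg);
 */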
/*
 * ib_free_recv_mad - Returns data buffers used to receive
 *  a MAD to the access layer
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_recv_buf *entry;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *priv;

	mad_priv_hdr = container_of(mad_recv_wc,
				    struct ib_mad_private_header,
				    recv_wc);
	priv = container_of(mad_priv_hdr, struct ib_mad_private, header);

	/*
	 * Walk receive buffer list associated with this WC
	 * No need to remove them from list of receive buffers
	 */
	list_for_each_entry(entry, &mad_recv_wc->recv_buf.list, list) {
		/* Free previous receive buffer */
		kmem_cache_free(ib_mad_cache, priv);
		mad_priv_hdr = container_of(mad_recv_wc,
					    struct ib_mad_private_header,
					    recv_wc);
		priv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);
	}

	/* Free last buffer */
	kmem_cache_free(ib_mad_cache, priv);
}
EXPORT_SYMBOL(ib_free_recv_mad);
struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
					u8 rmpp_version,
					ib_mad_send_handler send_handler,
					ib_mad_recv_handler recv_handler,
					void *context)
{
	return ERR_PTR(-EINVAL);	/* XXX: for now */
}
EXPORT_SYMBOL(ib_redirect_mad_qp);
int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
		      struct ib_wc *wc)
{
	printk(KERN_ERR PFX "ib_process_mad_wc() not implemented yet\n");
	return 0;
}
EXPORT_SYMBOL(ib_process_mad_wc);
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req)
{
	int i;

	for (i = find_first_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS);
	     i < IB_MGMT_MAX_METHODS;
	     i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
			       1+i)) {
		if ((*method)->agent[i]) {
			printk(KERN_ERR PFX "Method %d already in use\n", i);
			return -EINVAL;
		}
	}
	return 0;
}
static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
{
	/* Allocate management method table */
	*method = kmalloc(sizeof **method, GFP_ATOMIC);
	if (!*method) {
		printk(KERN_ERR PFX "No memory for "
		       "ib_mad_mgmt_method_table\n");
		return -ENOMEM;
	}
	/* Clear management method table */
	memset(*method, 0, sizeof **method);

	return 0;
}
/*
 * Check to see if there are any methods still in use
 */
static int check_method_table(struct ib_mad_mgmt_method_table *method)
{
	int i;

	for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
		if (method->agent[i])
			return 1;
	return 0;
}
/*
 * Check to see if there are any method tables for this class still in use
 */
static int check_class_table(struct ib_mad_mgmt_class_table *class)
{
	int i;

	for (i = 0; i < MAX_MGMT_CLASS; i++)
		if (class->method_table[i])
			return 1;
	return 0;
}
static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		if (vendor_class->method_table[i])
			return 1;
	return 0;
}
static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
			   char *oui)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp(vendor_class->oui[i], oui, 3))
			return i;

	return -1;
}
static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
{
	int i;

	for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
		if (vendor->vendor_class[i])
			return 1;
	return 0;
}
static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
				     struct ib_mad_agent_private *agent)
{
	int i;

	/* Remove any methods for this mad agent */
	for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
		if (method->agent[i] == agent) {
			method->agent[i] = NULL;
		}
	}
}
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table **class;
	struct ib_mad_mgmt_method_table **method;
	int i, ret;

	port_priv = agent_priv->qp_info->port_priv;
	class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
	if (!*class) {
		/* Allocate management class table for "new" class version */
		*class = kmalloc(sizeof **class, GFP_ATOMIC);
		if (!*class) {
			printk(KERN_ERR PFX "No memory for "
			       "ib_mad_mgmt_class_table\n");
			ret = -ENOMEM;
			goto error1;
		}
		/* Clear management class table */
		memset(*class, 0, sizeof(**class));
		/* Allocate method table for this management class */
		method = &(*class)->method_table[mgmt_class];
		if ((ret = allocate_method_table(method)))
			goto error2;
	} else {
		method = &(*class)->method_table[mgmt_class];
		if (!*method) {
			/* Allocate method table for this management class */
			if ((ret = allocate_method_table(method)))
				goto error1;
		}
	}

	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error3;

	/* Finally, add in methods being registered */
	for (i = find_first_bit(mad_reg_req->method_mask,
				IB_MGMT_MAX_METHODS);
	     i < IB_MGMT_MAX_METHODS;
	     i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
			       1+i)) {
		(*method)->agent[i] = agent_priv;
	}
	return 0;

error3:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
	goto error1;
error2:
	kfree(*class);
	*class = NULL;
error1:
	return ret;
}
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_vendor_class_table **vendor_table;
	struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
	struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
	struct ib_mad_mgmt_method_table **method;
	int i, ret = -ENOMEM;
	u8 vclass;

	/* "New" vendor (with OUI) class */
	vclass = vendor_class_index(mad_reg_req->mgmt_class);
	port_priv = agent_priv->qp_info->port_priv;
	vendor_table = &port_priv->version[
				mad_reg_req->mgmt_class_version].vendor;
	if (!*vendor_table) {
		/* Allocate mgmt vendor class table for "new" class version */
		vendor = kmalloc(sizeof *vendor, GFP_ATOMIC);
		if (!vendor) {
			printk(KERN_ERR PFX "No memory for "
			       "ib_mad_mgmt_vendor_class_table\n");
			goto error1;
		}
		/* Clear management vendor class table */
		memset(vendor, 0, sizeof(*vendor));
		*vendor_table = vendor;
	}
	if (!(*vendor_table)->vendor_class[vclass]) {
		/* Allocate table for this management vendor class */
		vendor_class = kmalloc(sizeof *vendor_class, GFP_ATOMIC);
		if (!vendor_class) {
			printk(KERN_ERR PFX "No memory for "
			       "ib_mad_mgmt_vendor_class\n");
			goto error2;
		}
		memset(vendor_class, 0, sizeof(*vendor_class));
		(*vendor_table)->vendor_class[vclass] = vendor_class;
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
			    mad_reg_req->oui, 3)) {
			method = &(*vendor_table)->vendor_class[
						vclass]->method_table[i];
			BUG_ON(!*method);
			goto check_in_use;
		}
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* OUI slot available ? */
		if (!is_vendor_oui((*vendor_table)->vendor_class[
				vclass]->oui[i])) {
			method = &(*vendor_table)->vendor_class[
				vclass]->method_table[i];
			BUG_ON(*method);
			/* Allocate method table for this OUI */
			if ((ret = allocate_method_table(method)))
				goto error3;
			memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
			       mad_reg_req->oui, 3);
			goto check_in_use;
		}
	}
	printk(KERN_ERR PFX "All OUI slots in use\n");
	goto error3;

check_in_use:
	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error4;

	/* Finally, add in methods being registered */
	for (i = find_first_bit(mad_reg_req->method_mask,
				IB_MGMT_MAX_METHODS);
	     i < IB_MGMT_MAX_METHODS;
	     i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
			       1+i)) {
		(*method)->agent[i] = agent_priv;
	}
	return 0;

error4:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
error3:
	if (vendor_class) {
		(*vendor_table)->vendor_class[vclass] = NULL;
		kfree(vendor_class);
	}
error2:
	if (vendor) {
		*vendor_table = NULL;
		kfree(vendor);
	}
error1:
	return ret;
}
static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_method_table *method;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	int index;
	u8 mgmt_class;

	/*
	 * Was MAD registration request supplied
	 * with original registration ?
	 */
	if (!agent_priv->reg_req) {
		goto out;
	}

	port_priv = agent_priv->qp_info->port_priv;
	mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
	class = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].class;
	if (!class)
		goto vendor_check;

	method = class->method_table[mgmt_class];
	if (method) {
		/* Remove any methods for this mad agent */
		remove_methods_mad_agent(method, agent_priv);
		/* Now, check to see if there are any methods still in use */
		if (!check_method_table(method)) {
			/* If not, release management method table */
			kfree(method);
			class->method_table[mgmt_class] = NULL;
			/* Any management classes left ? */
			if (!check_class_table(class)) {
				/* If not, release management class table */
				kfree(class);
				port_priv->version[
					agent_priv->reg_req->
					mgmt_class_version].class = NULL;
			}
		}
	}

vendor_check:
	if (!is_vendor_class(mgmt_class))
		goto out;

	/* normalize mgmt_class to vendor range 2 */
	mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
	vendor = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].vendor;

	if (!vendor)
		goto out;

	vendor_class = vendor->vendor_class[mgmt_class];
	if (vendor_class) {
		index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
		if (index != -1) {
			method = vendor_class->method_table[index];
			if (method) {
				/* Remove any methods for this mad agent */
				remove_methods_mad_agent(method, agent_priv);
				/*
				 * Now, check to see if there are
				 * any methods still in use
				 */
				if (!check_method_table(method)) {
					/* If not, release management method table */
					kfree(method);
					vendor_class->method_table[index] = NULL;
					memset(vendor_class->oui[index], 0, 3);
					/* Any OUIs left ? */
					if (!check_vendor_class(vendor_class)) {
						/* If not, release vendor class table */
						kfree(vendor_class);
						vendor->vendor_class[mgmt_class] = NULL;
						/* Any other vendor classes left ? */
						if (!check_vendor_table(vendor)) {
							kfree(vendor);
							port_priv->version[
								agent_priv->reg_req->
								mgmt_class_version].
								vendor = NULL;
						}
					}
				}
			}
		}
	}

out:
	return;
}
static struct ib_mad_agent_private *
find_mad_agent(struct ib_mad_port_private *port_priv,
	       struct ib_mad *mad)
{
	struct ib_mad_agent_private *mad_agent = NULL;
	unsigned long flags;

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	if (response_mad(mad)) {
		u32 hi_tid;
		struct ib_mad_agent_private *entry;

		/*
		 * Routing is based on high 32 bits of transaction ID
		 * of MAD
		 */
		hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32;
		list_for_each_entry(entry, &port_priv->agent_list,
				    agent_list) {
			if (entry->agent.hi_tid == hi_tid) {
				mad_agent = entry;
				break;
			}
		}
	} else {
		struct ib_mad_mgmt_class_table *class;
		struct ib_mad_mgmt_method_table *method;
		struct ib_mad_mgmt_vendor_class_table *vendor;
		struct ib_mad_mgmt_vendor_class *vendor_class;
		struct ib_vendor_mad *vendor_mad;
		int index;

		/*
		 * Routing is based on version, class, and method
		 * For "newer" vendor MADs, also based on OUI
		 */
		if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION)
			goto out;
		if (!is_vendor_class(mad->mad_hdr.mgmt_class)) {
			class = port_priv->version[
					mad->mad_hdr.class_version].class;
			if (!class)
				goto out;
			method = class->method_table[convert_mgmt_class(
							mad->mad_hdr.mgmt_class)];
			if (method)
				mad_agent = method->agent[mad->mad_hdr.method &
							  ~IB_MGMT_METHOD_RESP];
		} else {
			vendor = port_priv->version[
					mad->mad_hdr.class_version].vendor;
			if (!vendor)
				goto out;
			vendor_class = vendor->vendor_class[vendor_class_index(
						mad->mad_hdr.mgmt_class)];
			if (!vendor_class)
				goto out;
			/* Find matching OUI */
			vendor_mad = (struct ib_vendor_mad *)mad;
			index = find_vendor_oui(vendor_class, vendor_mad->oui);
			if (index == -1)
				goto out;
			method = vendor_class->method_table[index];
			if (method)
				mad_agent = method->agent[mad->mad_hdr.method &
							  ~IB_MGMT_METHOD_RESP];
		}
	}

	if (mad_agent) {
		if (mad_agent->agent.recv_handler)
			atomic_inc(&mad_agent->refcount);
		else {
			printk(KERN_NOTICE PFX "No receive handler for client "
			       "%p on port %d\n",
			       &mad_agent->agent, port_priv->port_num);
			mad_agent = NULL;
		}
	}
out:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return mad_agent;
}
static int validate_mad(struct ib_mad *mad, u32 qp_num)
{
	int valid = 0;

	/* Make sure MAD base version is understood */
	if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
		printk(KERN_ERR PFX "MAD received with unsupported base "
		       "version %d\n", mad->mad_hdr.base_version);
		goto out;
	}

	/* Filter SMI packets sent to other than QP0 */
	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
	    (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
		if (qp_num == 0)
			valid = 1;
	} else {
		/* Filter GSI packets sent to QP0 */
		if (qp_num != 0)
			valid = 1;
	}

out:
	return valid;
}
static struct ib_mad_send_wr_private *
find_send_req(struct ib_mad_agent_private *mad_agent_priv,
	      u64 tid)
{
	struct ib_mad_send_wr_private *mad_send_wr;

	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
			    agent_list) {
		if (mad_send_wr->tid == tid)
			return mad_send_wr;
	}

	/*
	 * It's possible to receive the response before we've
	 * been notified that the send has completed
	 */
	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
			    agent_list) {
		if (mad_send_wr->tid == tid && mad_send_wr->timeout) {
			/* Verify request has not been canceled */
			return (mad_send_wr->status == IB_WC_SUCCESS) ?
				mad_send_wr : NULL;
		}
	}
	return NULL;
}
static void ib_mark_req_done(struct ib_mad_send_wr_private *mad_send_wr)
{
	mad_send_wr->timeout = 0;
	if (mad_send_wr->refcount == 1) {
		list_del(&mad_send_wr->agent_list);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->done_list);
	}
}
static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
				 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;
	u64 tid;

	INIT_LIST_HEAD(&mad_recv_wc->recv_buf.list);
	/* Complete corresponding request */
	if (response_mad(mad_recv_wc->recv_buf.mad)) {
		tid = mad_recv_wc->recv_buf.mad->mad_hdr.tid;
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		mad_send_wr = find_send_req(mad_agent_priv, tid);
		if (!mad_send_wr) {
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			ib_free_recv_mad(mad_recv_wc);
			if (atomic_dec_and_test(&mad_agent_priv->refcount))
				wake_up(&mad_agent_priv->wait);
			return;
		}
		ib_mark_req_done(mad_send_wr);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		/* Defined behavior is to complete response before request */
		mad_recv_wc->wc->wr_id = mad_send_wr->wr_id;
		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
						   mad_recv_wc);
		atomic_dec(&mad_agent_priv->refcount);

		mad_send_wc.status = IB_WC_SUCCESS;
		mad_send_wc.vendor_err = 0;
		mad_send_wc.wr_id = mad_send_wr->wr_id;
		ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
	} else {
		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
						   mad_recv_wc);
		if (atomic_dec_and_test(&mad_agent_priv->refcount))
			wake_up(&mad_agent_priv->wait);
	}
}
static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
				     struct ib_wc *wc)
{
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv, *response;
	struct ib_mad_list_head *mad_list;
	struct ib_mad_agent_private *mad_agent;

	response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
	if (!response)
		printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory "
		       "for response buffer\n");

	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	qp_info = mad_list->mad_queue->qp_info;
	dequeue_mad(mad_list);

	mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
				    mad_list);
	recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
	dma_unmap_single(port_priv->device->dma_device,
			 pci_unmap_addr(&recv->header, mapping),
			 sizeof(struct ib_mad_private) -
			 sizeof(struct ib_mad_private_header),
			 DMA_FROM_DEVICE);

	/* Setup MAD receive work completion from "normal" work completion */
	recv->header.wc = *wc;
	recv->header.recv_wc.wc = &recv->header.wc;
	recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
	recv->header.recv_wc.recv_buf.mad = &recv->mad.mad;
	recv->header.recv_wc.recv_buf.grh = &recv->grh;

	if (atomic_read(&qp_info->snoop_count))
		snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);

	/* Validate MAD */
	if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))
		goto out;

	if (recv->mad.mad.mad_hdr.mgmt_class ==
	    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (!smi_handle_dr_smp_recv(&recv->mad.smp,
					    port_priv->device->node_type,
					    port_priv->port_num,
					    port_priv->device->phys_port_cnt))
			goto out;
		if (!smi_check_forward_dr_smp(&recv->mad.smp))
			goto local;
		if (!smi_handle_dr_smp_send(&recv->mad.smp,
					    port_priv->device->node_type,
					    port_priv->port_num))
			goto out;
		if (!smi_check_local_dr_smp(&recv->mad.smp,
					    port_priv->device,
					    port_priv->port_num))
			goto out;
	}

local:
	/* Give driver "right of first refusal" on incoming MAD */
	if (port_priv->device->process_mad) {
		int ret;

		if (!response) {
			printk(KERN_ERR PFX "No memory for response MAD\n");
			/*
			 * Is it better to assume that
			 * it wouldn't be processed ?
			 */
			goto out;
		}

		ret = port_priv->device->process_mad(port_priv->device, 0,
						     port_priv->port_num,
						     wc, &recv->grh,
						     &recv->mad.mad,
						     &response->mad.mad);
		if (ret & IB_MAD_RESULT_SUCCESS) {
			if (ret & IB_MAD_RESULT_CONSUMED)
				goto out;
			if (ret & IB_MAD_RESULT_REPLY) {
				/* Send response */
				if (!agent_send(response, &recv->grh, wc,
						port_priv->device,
						port_priv->port_num))
					response = NULL;
				goto out;
			}
		}
	}

	mad_agent = find_mad_agent(port_priv, &recv->mad.mad);
	if (mad_agent) {
		ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
		/*
		 * recv is freed up in error cases in ib_mad_complete_recv
		 * or via recv_handler in ib_mad_complete_recv()
		 */
		recv = NULL;
	}

out:
	/* Post another receive request for this QP */
	if (response) {
		ib_mad_post_receive_mads(qp_info, response);
		if (recv)
			kmem_cache_free(ib_mad_cache, recv);
	} else
		ib_mad_post_receive_mads(qp_info, recv);
}
static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long delay;

	if (list_empty(&mad_agent_priv->wait_list)) {
		cancel_delayed_work(&mad_agent_priv->timed_work);
	} else {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_agent_priv->timeout,
			       mad_send_wr->timeout)) {
			mad_agent_priv->timeout = mad_send_wr->timeout;
			cancel_delayed_work(&mad_agent_priv->timed_work);
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(mad_agent_priv->qp_info->
					   port_priv->wq,
					   &mad_agent_priv->timed_work, delay);
		}
	}
}
static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *temp_mad_send_wr;
	struct list_head *list_item;
	unsigned long delay;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	list_del(&mad_send_wr->agent_list);

	delay = mad_send_wr->timeout;
	mad_send_wr->timeout += jiffies;

	if (delay) {
		list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
			temp_mad_send_wr = list_entry(list_item,
						struct ib_mad_send_wr_private,
						agent_list);
			if (time_after(mad_send_wr->timeout,
				       temp_mad_send_wr->timeout))
				break;
		}
	} else
		list_item = &mad_agent_priv->wait_list;
	list_add(&mad_send_wr->agent_list, list_item);

	/* Reschedule a work item if we have a shorter timeout */
	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
		cancel_delayed_work(&mad_agent_priv->timed_work);
		queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
				   &mad_agent_priv->timed_work, delay);
	}
}
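/*
 * The wait list is kept sorted by absolute timeout (wait_for_response()
 * inserts scanning from the tail), so adjust_timeout() and timeout_sends()
 * only ever need to examine the head of the list.
 */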
void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
			  int timeout_ms)
{
	mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	wait_for_response(mad_send_wr);
}
/*
 * Process a send work completion
 */
static void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
				    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_agent_private *mad_agent_priv;
	unsigned long flags;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	if (mad_send_wc->status != IB_WC_SUCCESS &&
	    mad_send_wr->status == IB_WC_SUCCESS) {
		mad_send_wr->status = mad_send_wc->status;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	if (--mad_send_wr->refcount > 0) {
		if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
		    mad_send_wr->status == IB_WC_SUCCESS) {
			wait_for_response(mad_send_wr);
		}
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		return;
	}

	/* Remove send from MAD agent and notify client of completion */
	list_del(&mad_send_wr->agent_list);
	adjust_timeout(mad_agent_priv);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	if (mad_send_wr->status != IB_WC_SUCCESS)
		mad_send_wc->status = mad_send_wr->status;
	mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
					   mad_send_wc);

	/* Release reference on agent taken when sending */
	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		wake_up(&mad_agent_priv->wait);

	kfree(mad_send_wr);
}
static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
				     struct ib_wc *wc)
{
	struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
	struct ib_mad_list_head *mad_list;
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_queue *send_queue;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	int ret;

	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	send_queue = mad_list->mad_queue;
	qp_info = send_queue->qp_info;

retry:
	queued_send_wr = NULL;
	spin_lock_irqsave(&send_queue->lock, flags);
	list_del(&mad_list->list);

	/* Move queued send to the send queue */
	if (send_queue->count-- > send_queue->max_active) {
		mad_list = container_of(qp_info->overflow_list.next,
					struct ib_mad_list_head, list);
		queued_send_wr = container_of(mad_list,
					struct ib_mad_send_wr_private,
					mad_list);
		list_del(&mad_list->list);
		list_add_tail(&mad_list->list, &send_queue->list);
	}
	spin_unlock_irqrestore(&send_queue->lock, flags);

	/* Restore client wr_id in WC and complete send */
	wc->wr_id = mad_send_wr->wr_id;
	if (atomic_read(&qp_info->snoop_count))
		snoop_send(qp_info, &mad_send_wr->send_wr,
			   (struct ib_mad_send_wc *)wc,
			   IB_MAD_SNOOP_SEND_COMPLETIONS);
	ib_mad_complete_send_wr(mad_send_wr, (struct ib_mad_send_wc *)wc);

	if (queued_send_wr) {
		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
				   &bad_send_wr);
		if (ret) {
			printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
			mad_send_wr = queued_send_wr;
			wc->status = IB_WC_LOC_QP_OP_ERR;
			goto retry;
		}
	}
}
static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_list_head *mad_list;
	unsigned long flags;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
		mad_send_wr = container_of(mad_list,
					   struct ib_mad_send_wr_private,
					   mad_list);
		mad_send_wr->retry = 1;
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
}
static void mad_error_handler(struct ib_mad_port_private *port_priv,
			      struct ib_wc *wc)
{
	struct ib_mad_list_head *mad_list;
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_send_wr_private *mad_send_wr;
	int ret;

	/* Determine if failure was a send or receive */
	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	qp_info = mad_list->mad_queue->qp_info;
	if (mad_list->mad_queue == &qp_info->recv_queue)
		/*
		 * Receive errors indicate that the QP has entered the error
		 * state - error handling/shutdown code will cleanup
		 */
		return;

	/*
	 * Send errors will transition the QP to SQE - move
	 * QP to RTS and repost flushed work requests
	 */
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	if (wc->status == IB_WC_WR_FLUSH_ERR) {
		if (mad_send_wr->retry) {
			/* Repost send */
			struct ib_send_wr *bad_send_wr;

			mad_send_wr->retry = 0;
			ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
					   &bad_send_wr);
			if (ret)
				ib_mad_send_done_handler(port_priv, wc);
		} else
			ib_mad_send_done_handler(port_priv, wc);
	} else {
		struct ib_qp_attr *attr;

		/* Transition QP to RTS and fail offending send */
		attr = kmalloc(sizeof *attr, GFP_KERNEL);
		if (attr) {
			attr->qp_state = IB_QPS_RTS;
			attr->cur_qp_state = IB_QPS_SQE;
			ret = ib_modify_qp(qp_info->qp, attr,
					   IB_QP_STATE | IB_QP_CUR_STATE);
			kfree(attr);
			if (ret)
				printk(KERN_ERR PFX "mad_error_handler - "
				       "ib_modify_qp to RTS : %d\n", ret);
			else
				mark_sends_for_retry(qp_info);
		}
		ib_mad_send_done_handler(port_priv, wc);
	}
}
/*
 * IB MAD completion callback
 */
static void ib_mad_completion_handler(void *data)
{
	struct ib_mad_port_private *port_priv;
	struct ib_wc wc;

	port_priv = (struct ib_mad_port_private *)data;
	ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);

	while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
		if (wc.status == IB_WC_SUCCESS) {
			switch (wc.opcode) {
			case IB_WC_SEND:
				ib_mad_send_done_handler(port_priv, &wc);
				break;
			case IB_WC_RECV:
				ib_mad_recv_done_handler(port_priv, &wc);
				break;
			default:
				BUG_ON(1);
				break;
			}
		} else
			mad_error_handler(port_priv, &wc);
	}
}
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
{
	unsigned long flags;
	struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	struct list_head cancel_list;

	INIT_LIST_HEAD(&cancel_list);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &mad_agent_priv->send_list, agent_list) {
		if (mad_send_wr->status == IB_WC_SUCCESS) {
			mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
			mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
		}
	}

	/* Empty wait list to prevent receives from finding a request */
	list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
	/* Empty local completion list as well */
	list_splice_init(&mad_agent_priv->local_list, &cancel_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	/* Report all cancelled requests */
	mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
	mad_send_wc.vendor_err = 0;

	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &cancel_list, agent_list) {
		mad_send_wc.wr_id = mad_send_wr->wr_id;
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		list_del(&mad_send_wr->agent_list);
		kfree(mad_send_wr);
		atomic_dec(&mad_agent_priv->refcount);
	}
}
static struct ib_mad_send_wr_private *
find_send_by_wr_id(struct ib_mad_agent_private *mad_agent_priv, u64 wr_id)
{
	struct ib_mad_send_wr_private *mad_send_wr;

	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
			    agent_list) {
		if (mad_send_wr->wr_id == wr_id)
			return mad_send_wr;
	}

	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
			    agent_list) {
		if (mad_send_wr->wr_id == wr_id)
			return mad_send_wr;
	}
	return NULL;
}
int ib_modify_mad(struct ib_mad_agent *mad_agent, u64 wr_id, u32 timeout_ms)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int active;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	mad_send_wr = find_send_by_wr_id(mad_agent_priv, wr_id);
	if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		return -EINVAL;
	}

	active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
	if (!timeout_ms) {
		mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	mad_send_wr->send_wr.wr.ud.timeout_ms = timeout_ms;
	if (active)
		mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	else
		ib_reset_mad_timeout(mad_send_wr, timeout_ms);

	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	return 0;
}
EXPORT_SYMBOL(ib_modify_mad);
void ib_cancel_mad(struct ib_mad_agent *mad_agent, u64 wr_id)
{
	ib_modify_mad(mad_agent, wr_id, 0);
}
EXPORT_SYMBOL(ib_cancel_mad);
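/*
 * Canceling is just a timeout modification to zero: the request is
 * flagged IB_WC_WR_FLUSH_ERR and completes through the normal send
 * completion (or timeout) path rather than being torn down here.
 */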
static void local_completions(void *data)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_local_private *local;
	struct ib_mad_agent_private *recv_mad_agent;
	unsigned long flags;
	int recv;
	struct ib_wc wc;
	struct ib_mad_send_wc mad_send_wc;

	mad_agent_priv = (struct ib_mad_agent_private *)data;

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->local_list)) {
		recv = 0;
		local = list_entry(mad_agent_priv->local_list.next,
				   struct ib_mad_local_private,
				   completion_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		if (local->mad_priv) {
			recv_mad_agent = local->recv_mad_agent;
			if (!recv_mad_agent) {
				printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
				goto local_send_completion;
			}

			recv = 1;
			/*
			 * Defined behavior is to complete response
			 * before request
			 */
			build_smp_wc(local->wr_id, IB_LID_PERMISSIVE,
				     0 /* pkey index */,
				     recv_mad_agent->agent.port_num, &wc);

			local->mad_priv->header.recv_wc.wc = &wc;
			local->mad_priv->header.recv_wc.mad_len =
						sizeof(struct ib_mad);
			INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.recv_buf.list);
			local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
			local->mad_priv->header.recv_wc.recv_buf.mad =
						&local->mad_priv->mad.mad;
			if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
				snoop_recv(recv_mad_agent->qp_info,
					   &local->mad_priv->header.recv_wc,
					   IB_MAD_SNOOP_RECVS);
			recv_mad_agent->agent.recv_handler(
						&recv_mad_agent->agent,
						&local->mad_priv->header.recv_wc);
			spin_lock_irqsave(&recv_mad_agent->lock, flags);
			atomic_dec(&recv_mad_agent->refcount);
			spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
		}

local_send_completion:
		/* Complete send */
		mad_send_wc.status = IB_WC_SUCCESS;
		mad_send_wc.vendor_err = 0;
		mad_send_wc.wr_id = local->wr_id;
		if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
			snoop_send(mad_agent_priv->qp_info, &local->send_wr,
				   &mad_send_wc,
				   IB_MAD_SNOOP_SEND_COMPLETIONS);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		list_del(&local->completion_list);
		atomic_dec(&mad_agent_priv->refcount);
		if (!recv && local->mad_priv)
			kmem_cache_free(ib_mad_cache, local->mad_priv);
		kfree(local);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}

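/*
 * Repost a timed-out send if it has retries left.  Returns 0 when the
 * request was reposted and moved back onto the agent's send list;
 * non-zero when retries are exhausted or the repost failed.
 */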
static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret;

	if (!mad_send_wr->retries--)
		return -ETIMEDOUT;

	mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_wr.
						wr.ud.timeout_ms);

	ret = ib_send_mad(mad_send_wr);

	if (!ret) {
		mad_send_wr->refcount++;
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->send_list);
	}
	return ret;
}

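/*
 * Workqueue handler that expires requests on the agent's wait list.  The
 * list is kept sorted by timeout, so scanning stops at the first entry
 * that is still in the future; the work is then requeued with the
 * remaining delay.
 */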
static void timeout_sends(void *data)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags, delay;

	mad_agent_priv = (struct ib_mad_agent_private *)data;
	mad_send_wc.vendor_err = 0;

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->wait_list)) {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_send_wr->timeout, jiffies)) {
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(mad_agent_priv->qp_info->
					   port_priv->wq,
					   &mad_agent_priv->timed_work, delay);
			break;
		}

		list_del(&mad_send_wr->agent_list);
		if (mad_send_wr->status == IB_WC_SUCCESS &&
		    !retry_send(mad_send_wr))
			continue;

		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (mad_send_wr->status == IB_WC_SUCCESS)
			mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
		else
			mad_send_wc.status = mad_send_wr->status;
		mad_send_wc.wr_id = mad_send_wr->wr_id;
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		kfree(mad_send_wr);
		atomic_dec(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}

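/*
 * CQ completion callback: defer all completion processing to the port's
 * single-threaded workqueue so handlers run in process context.
 */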
static void ib_mad_thread_completion_handler(struct ib_cq *cq)
{
	struct ib_mad_port_private *port_priv = cq->cq_context;

	queue_work(port_priv->wq, &port_priv->work);
}

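/*
 * Note on wr_id usage: the receive wr_id is the kernel address of the
 * buffer's embedded mad_list entry, so completion and cleanup paths can
 * recover the ib_mad_private with container_of() walks rather than a
 * lookup table (see cleanup_recv_queue() below).
 */
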
/*
 * Allocate receive MADs and post receive WRs for them
 */
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad)
{
	unsigned long flags;
	int post, ret;
	struct ib_mad_private *mad_priv;
	struct ib_sge sg_list;
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct ib_mad_queue *recv_queue = &qp_info->recv_queue;

	/* Initialize common scatter list fields */
	sg_list.length = sizeof *mad_priv - sizeof mad_priv->header;
	sg_list.lkey = (*qp_info->port_priv->mr).lkey;

	/* Initialize common receive WR fields */
	recv_wr.next = NULL;
	recv_wr.sg_list = &sg_list;
	recv_wr.num_sge = 1;

	do {
		/* Allocate and map receive buffer */
		if (mad) {
			mad_priv = mad;
			mad = NULL;
		} else {
			mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
			if (!mad_priv) {
				printk(KERN_ERR PFX "No memory for receive buffer\n");
				ret = -ENOMEM;
				break;
			}
		}
		sg_list.addr = dma_map_single(qp_info->port_priv->
						device->dma_device,
					      &mad_priv->grh,
					      sizeof *mad_priv -
						sizeof mad_priv->header,
					      DMA_FROM_DEVICE);
		pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr);
		recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
		mad_priv->header.mad_list.mad_queue = recv_queue;

		/* Post receive WR */
		spin_lock_irqsave(&recv_queue->lock, flags);
		post = (++recv_queue->count < recv_queue->max_active);
		list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
		spin_unlock_irqrestore(&recv_queue->lock, flags);
		ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
		if (ret) {
			spin_lock_irqsave(&recv_queue->lock, flags);
			list_del(&mad_priv->header.mad_list.list);
			recv_queue->count--;
			spin_unlock_irqrestore(&recv_queue->lock, flags);
			dma_unmap_single(qp_info->port_priv->device->dma_device,
					 pci_unmap_addr(&mad_priv->header,
							mapping),
					 sizeof *mad_priv -
						sizeof mad_priv->header,
					 DMA_FROM_DEVICE);
			kmem_cache_free(ib_mad_cache, mad_priv);
			printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
			break;
		}
	} while (post);

	return ret;
}

/*
 * Return all the posted receive MADs
 */
static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv;
	struct ib_mad_list_head *mad_list;

	while (!list_empty(&qp_info->recv_queue.list)) {

		mad_list = list_entry(qp_info->recv_queue.list.next,
				      struct ib_mad_list_head, list);
		mad_priv_hdr = container_of(mad_list,
					    struct ib_mad_private_header,
					    mad_list);
		recv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);

		/* Remove from posted receive MAD list */
		list_del(&mad_list->list);

		dma_unmap_single(qp_info->port_priv->device->dma_device,
				 pci_unmap_addr(&recv->header, mapping),
				 sizeof(struct ib_mad_private) -
				 sizeof(struct ib_mad_private_header),
				 DMA_FROM_DEVICE);
		kmem_cache_free(ib_mad_cache, recv);
	}

	qp_info->recv_queue.count = 0;
}

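/*
 * Start the port: walk both MAD QPs (QP0 and QP1) through the
 * Init -> RTR -> RTS transitions, arm the CQ, and prime the receive
 * queues.
 */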
static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
{
	int ret, i;
	struct ib_qp_attr *attr;
	struct ib_qp *qp;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr) {
		printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n");
		return -ENOMEM;
	}

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		qp = port_priv->qp_info[i].qp;
		/*
		 * PKey index for QP1 is irrelevant but
		 * one is needed for the Reset to Init transition
		 */
		attr->qp_state = IB_QPS_INIT;
		attr->pkey_index = 0;
		attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE |
					     IB_QP_PKEY_INDEX | IB_QP_QKEY);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't change QP%d state to "
			       "INIT: %d\n", i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTR;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't change QP%d state to "
			       "RTR: %d\n", i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTS;
		attr->sq_psn = IB_MAD_SEND_Q_PSN;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't change QP%d state to "
			       "RTS: %d\n", i, ret);
			goto out;
		}
	}

	ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
	if (ret) {
		printk(KERN_ERR PFX "Failed to request completion "
		       "notification: %d\n", ret);
		goto out;
	}

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't post receive WRs\n");
			goto out;
		}
	}
out:
	kfree(attr);
	return ret;
}

static void qp_event_handler(struct ib_event *event, void *qp_context)
{
	struct ib_mad_qp_info *qp_info = qp_context;

	/* It's worse than that! He's dead, Jim! */
	printk(KERN_ERR PFX "Fatal error (%d) on MAD QP (%d)\n",
		event->event, qp_info->qp->qp_num);
}

static void init_mad_queue(struct ib_mad_qp_info *qp_info,
			   struct ib_mad_queue *mad_queue)
{
	mad_queue->qp_info = qp_info;
	mad_queue->count = 0;
	spin_lock_init(&mad_queue->lock);
	INIT_LIST_HEAD(&mad_queue->list);
}

static void init_mad_qp(struct ib_mad_port_private *port_priv,
			struct ib_mad_qp_info *qp_info)
{
	qp_info->port_priv = port_priv;
	init_mad_queue(qp_info, &qp_info->send_queue);
	init_mad_queue(qp_info, &qp_info->recv_queue);
	INIT_LIST_HEAD(&qp_info->overflow_list);
	spin_lock_init(&qp_info->snoop_lock);
	qp_info->snoop_table = NULL;
	qp_info->snoop_table_size = 0;
	atomic_set(&qp_info->snoop_count, 0);
}

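/*
 * Create one special QP (SMI for QP0, GSI for QP1).  Both QPs share the
 * port's CQ, and all send WRs are signaled so every send generates a
 * completion.
 */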
static int create_mad_qp(struct ib_mad_qp_info *qp_info,
			 enum ib_qp_type qp_type)
{
	struct ib_qp_init_attr qp_init_attr;
	int ret;

	memset(&qp_init_attr, 0, sizeof qp_init_attr);
	qp_init_attr.send_cq = qp_info->port_priv->cq;
	qp_init_attr.recv_cq = qp_info->port_priv->cq;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.cap.max_send_wr = IB_MAD_QP_SEND_SIZE;
	qp_init_attr.cap.max_recv_wr = IB_MAD_QP_RECV_SIZE;
	qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
	qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
	qp_init_attr.qp_type = qp_type;
	qp_init_attr.port_num = qp_info->port_priv->port_num;
	qp_init_attr.qp_context = qp_info;
	qp_init_attr.event_handler = qp_event_handler;
	qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
	if (IS_ERR(qp_info->qp)) {
		printk(KERN_ERR PFX "Couldn't create ib_mad QP%d\n",
		       get_spl_qp_index(qp_type));
		ret = PTR_ERR(qp_info->qp);
		goto error;
	}
	/* Use minimum queue sizes unless the CQ is resized */
	qp_info->send_queue.max_active = IB_MAD_QP_SEND_SIZE;
	qp_info->recv_queue.max_active = IB_MAD_QP_RECV_SIZE;
	return 0;

error:
	return ret;
}

static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
{
	ib_destroy_qp(qp_info->qp);
	if (qp_info->snoop_table)
		kfree(qp_info->snoop_table);
}

/*
 * Create the QP, PD, MR, and CQ if needed
 */
static int ib_mad_port_open(struct ib_device *device,
			    int port_num)
{
	int ret, cq_size;
	struct ib_mad_port_private *port_priv;
	unsigned long flags;
	char name[sizeof "ib_mad123"];

	/* Create new device info */
	port_priv = kmalloc(sizeof *port_priv, GFP_KERNEL);
	if (!port_priv) {
		printk(KERN_ERR PFX "No memory for ib_mad_port_private\n");
		return -ENOMEM;
	}
	memset(port_priv, 0, sizeof *port_priv);
	port_priv->device = device;
	port_priv->port_num = port_num;
	spin_lock_init(&port_priv->reg_lock);
	INIT_LIST_HEAD(&port_priv->agent_list);
	init_mad_qp(port_priv, &port_priv->qp_info[0]);
	init_mad_qp(port_priv, &port_priv->qp_info[1]);

	cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2;
	port_priv->cq = ib_create_cq(port_priv->device,
				     (ib_comp_handler)
					ib_mad_thread_completion_handler,
				     NULL, port_priv, cq_size);
	if (IS_ERR(port_priv->cq)) {
		printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
		ret = PTR_ERR(port_priv->cq);
		goto error3;
	}

	port_priv->pd = ib_alloc_pd(device);
	if (IS_ERR(port_priv->pd)) {
		printk(KERN_ERR PFX "Couldn't create ib_mad PD\n");
		ret = PTR_ERR(port_priv->pd);
		goto error4;
	}

	port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(port_priv->mr)) {
		printk(KERN_ERR PFX "Couldn't get ib_mad DMA MR\n");
		ret = PTR_ERR(port_priv->mr);
		goto error5;
	}

	ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
	if (ret)
		goto error6;
	ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
	if (ret)
		goto error7;

	snprintf(name, sizeof name, "ib_mad%d", port_num);
	port_priv->wq = create_singlethread_workqueue(name);
	if (!port_priv->wq) {
		ret = -ENOMEM;
		goto error8;
	}
	INIT_WORK(&port_priv->work, ib_mad_completion_handler, port_priv);

	ret = ib_mad_port_start(port_priv);
	if (ret) {
		printk(KERN_ERR PFX "Couldn't start port\n");
		goto error9;
	}

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_add_tail(&port_priv->port_list, &ib_mad_port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
	return 0;

error9:
	destroy_workqueue(port_priv->wq);
error8:
	destroy_mad_qp(&port_priv->qp_info[1]);
error7:
	destroy_mad_qp(&port_priv->qp_info[0]);
error6:
	ib_dereg_mr(port_priv->mr);
error5:
	ib_dealloc_pd(port_priv->pd);
error4:
	ib_destroy_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
error3:
	kfree(port_priv);
	return ret;
}

/*
 * If there are no classes using the port, free the port
 * resources (CQ, MR, PD, QP) and remove the port's info structure
 */
static int ib_mad_port_close(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	port_priv = __ib_get_mad_port(device, port_num);
	if (port_priv == NULL) {
		spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
		printk(KERN_ERR PFX "Port %d not found\n", port_num);
		return -ENODEV;
	}
	list_del(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	/* Stop processing completions. */
	flush_workqueue(port_priv->wq);
	destroy_workqueue(port_priv->wq);
	destroy_mad_qp(&port_priv->qp_info[1]);
	destroy_mad_qp(&port_priv->qp_info[0]);
	ib_dereg_mr(port_priv->mr);
	ib_dealloc_pd(port_priv->pd);
	ib_destroy_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
	/* XXX: Handle deallocation of MAD registration tables */

	kfree(port_priv);

	return 0;
}

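/*
 * Per-device setup: a switch exposes MAD services only on port 0, while
 * a CA gets one MAD port structure per physical port, starting at
 * port 1.
 */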
static void ib_mad_init_device(struct ib_device *device)
{
	int num_ports, cur_port, i;

	if (device->node_type == IB_NODE_SWITCH) {
		num_ports = 1;
		cur_port = 0;
	} else {
		num_ports = device->phys_port_cnt;
		cur_port = 1;
	}
	for (i = 0; i < num_ports; i++, cur_port++) {
		if (ib_mad_port_open(device, cur_port)) {
			printk(KERN_ERR PFX "Couldn't open %s port %d\n",
			       device->name, cur_port);
			goto error_device_open;
		}
		if (ib_agent_port_open(device, cur_port)) {
			printk(KERN_ERR PFX "Couldn't open %s port %d "
			       "for agents\n",
			       device->name, cur_port);
			goto error_device_open;
		}
	}
	return;

error_device_open:
	while (i > 0) {
		cur_port--;
		if (ib_agent_port_close(device, cur_port))
			printk(KERN_ERR PFX "Couldn't close %s port %d "
			       "for agents\n",
			       device->name, cur_port);
		if (ib_mad_port_close(device, cur_port))
			printk(KERN_ERR PFX "Couldn't close %s port %d\n",
			       device->name, cur_port);
		i--;
	}
}

static void ib_mad_remove_device(struct ib_device *device)
{
	int i, num_ports, cur_port;

	if (device->node_type == IB_NODE_SWITCH) {
		num_ports = 1;
		cur_port = 0;
	} else {
		num_ports = device->phys_port_cnt;
		cur_port = 1;
	}
	for (i = 0; i < num_ports; i++, cur_port++) {
		if (ib_agent_port_close(device, cur_port))
			printk(KERN_ERR PFX "Couldn't close %s port %d "
			       "for agents\n",
			       device->name, cur_port);
		if (ib_mad_port_close(device, cur_port))
			printk(KERN_ERR PFX "Couldn't close %s port %d\n",
			       device->name, cur_port);
	}
}

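/*
 * Client hooks: the IB core invokes .add for each existing device at
 * registration time and for devices added later, and .remove when a
 * device goes away or this module unregisters.
 */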
static struct ib_client mad_client = {
	.name   = "mad",
	.add = ib_mad_init_device,
	.remove = ib_mad_remove_device
};

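/*
 * Module init: set up the global port-list locks and the ib_mad slab
 * cache, then register as an IB client so ports are opened for every
 * device.
 */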
static int __init ib_mad_init_module(void)
{
	int ret;

	spin_lock_init(&ib_mad_port_list_lock);
	spin_lock_init(&ib_agent_port_list_lock);

	ib_mad_cache = kmem_cache_create("ib_mad",
					 sizeof(struct ib_mad_private),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL,
					 NULL);
	if (!ib_mad_cache) {
		printk(KERN_ERR PFX "Couldn't create ib_mad cache\n");
		ret = -ENOMEM;
		goto error1;
	}

	INIT_LIST_HEAD(&ib_mad_port_list);

	if (ib_register_client(&mad_client)) {
		printk(KERN_ERR PFX "Couldn't register ib_mad client\n");
		ret = -EINVAL;
		goto error2;
	}

	return 0;

error2:
	kmem_cache_destroy(ib_mad_cache);
error1:
	return ret;
}

static void __exit ib_mad_cleanup_module(void)
{
	ib_unregister_client(&mad_client);

	if (kmem_cache_destroy(ib_mad_cache)) {
		printk(KERN_DEBUG PFX "Failed to destroy ib_mad cache\n");
	}
}

module_init(ib_mad_init_module);
module_exit(ib_mad_cleanup_module);