/*
 * Copyright (c) 2004-2007 Voltaire, Inc.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2009 HNR Consulting. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <rdma/ib_cache.h>

#include "mad_priv.h"
#include "mad_rmpp.h"
#include "smi.h"
#include "agent.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("kernel IB MAD API");
MODULE_AUTHOR("Hal Rosenstock");
MODULE_AUTHOR("Sean Hefty");
static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;

module_param_named(send_queue_size, mad_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");

static struct kmem_cache *ib_mad_cache;

static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;
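
/*
 * Each send/receive agent registration takes the next value of
 * ib_mad_client_id as the high 32 bits of its transaction IDs (hi_tid);
 * find_mad_agent() uses that value to route response MADs back to the
 * agent that sent the matching request.
 */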

/* Port list lock */
static DEFINE_SPINLOCK(ib_mad_port_list_lock);

/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
					const struct ib_mad_hdr *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(struct work_struct *work);
static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);

/*
 * Returns an ib_mad_port_private structure or NULL for a device/port
 * Assumes ib_mad_port_list_lock is being held
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;

	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
		if (entry->device == device && entry->port_num == port_num)
			return entry;
	}
	return NULL;
}

/*
 * Wrapper function to return an ib_mad_port_private structure or NULL
 * for a device/port
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	entry = __ib_get_mad_port(device, port_num);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	return entry;
}

static inline u8 convert_mgmt_class(u8 mgmt_class)
{
	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
		0 : mgmt_class;
}

static int get_spl_qp_index(enum ib_qp_type qp_type)
{
	switch (qp_type) {
	case IB_QPT_SMI:
		return 0;
	case IB_QPT_GSI:
		return 1;
	default:
		return -1;
	}
}

static int vendor_class_index(u8 mgmt_class)
{
	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}

static int is_vendor_class(u8 mgmt_class)
{
	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return 0;
	return 1;
}

static int is_vendor_oui(char *oui)
{
	if (oui[0] || oui[1] || oui[2])
		return 1;
	return 0;
}

static int is_vendor_method_in_use(
		struct ib_mad_mgmt_vendor_class *vendor_class,
		struct ib_mad_reg_req *mad_reg_req)
{
	struct ib_mad_mgmt_method_table *method;
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++) {
		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
			method = vendor_class->method_table[i];
			if (method) {
				if (method_in_use(&method, mad_reg_req))
					return 1;
				else
					break;
			}
		}
	}
	return 0;
}

int ib_response_mad(const struct ib_mad_hdr *hdr)
{
	return ((hdr->method & IB_MGMT_METHOD_RESP) ||
		(hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
		((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
		 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
}
EXPORT_SYMBOL(ib_response_mad);
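
/*
 * Registration proceeds in three steps: validate the caller's parameters
 * against the requested QP type and management class, allocate and fill the
 * private agent structure, and then, under port_priv->reg_lock, verify that
 * the requested methods do not overlap an existing registration before
 * adding the agent to the port's agent list.
 */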

/*
 * ib_register_mad_agent - Register to send/receive MADs
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   u8 rmpp_version,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context,
					   u32 registration_flags)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_reg_req *reg_req = NULL;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	struct ib_mad_mgmt_method_table *method;
	int ret2, qpn;
	unsigned long flags;
	u8 mgmt_class, vclass;

	/* Validate parameters */
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: invalid QP Type %d\n",
			   qp_type);
		goto error1;
	}

	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: invalid RMPP Version %u\n",
			   rmpp_version);
		goto error1;
	}

	/* Validate MAD registration request if supplied */
	if (mad_reg_req) {
		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
			dev_notice(&device->dev,
				   "ib_register_mad_agent: invalid Class Version %u\n",
				   mad_reg_req->mgmt_class_version);
			goto error1;
		}
		if (!recv_handler) {
			dev_notice(&device->dev,
				   "ib_register_mad_agent: no recv_handler\n");
			goto error1;
		}
		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
			/*
			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
			 * one in this range currently allowed
			 */
			if (mad_reg_req->mgmt_class !=
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		} else if (mad_reg_req->mgmt_class == 0) {
			/*
			 * Class 0 is reserved in IBA and is used for
			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
			 */
			dev_notice(&device->dev,
				   "ib_register_mad_agent: Invalid Mgmt Class 0\n");
			goto error1;
		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
			/*
			 * If class is in "new" vendor range,
			 * ensure supplied OUI is not zero
			 */
			if (!is_vendor_oui(mad_reg_req->oui)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: No OUI specified for class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}
		/* Make sure class supplied is consistent with RMPP */
		if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
			if (rmpp_version) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}

		/* Make sure class supplied is consistent with QP type */
		if (qp_type == IB_QPT_SMI) {
			if ((mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
			    (mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		} else {
			if ((mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
			    (mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}
	} else {
		/* No registration request supplied */
		if (!send_handler)
			goto error1;
		if (registration_flags & IB_MAD_USER_RMPP)
			goto error1;
	}

	/* Validate device and port */
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		dev_notice(&device->dev, "ib_register_mad_agent: Invalid port\n");
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}

	/* Verify the QP requested is supported.  For example, Ethernet devices
	 * will not have QP0 */
	if (!port_priv->qp_info[qpn].qp) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: QP %d not supported\n", qpn);
		ret = ERR_PTR(-EPROTONOSUPPORT);
		goto error1;
	}

	/* Allocate structures */
	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
	if (!mad_agent_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
						 IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(mad_agent_priv->agent.mr)) {
		ret = ERR_PTR(-ENOMEM);
		goto error2;
	}

	if (mad_reg_req) {
		reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
		if (!reg_req) {
			ret = ERR_PTR(-ENOMEM);
			goto error3;
		}
	}

	/* Now, fill in the various structures */
	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
	mad_agent_priv->reg_req = reg_req;
	mad_agent_priv->agent.rmpp_version = rmpp_version;
	mad_agent_priv->agent.device = device;
	mad_agent_priv->agent.recv_handler = recv_handler;
	mad_agent_priv->agent.send_handler = send_handler;
	mad_agent_priv->agent.context = context;
	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_agent_priv->agent.port_num = port_num;
	mad_agent_priv->agent.flags = registration_flags;
	spin_lock_init(&mad_agent_priv->lock);
	INIT_LIST_HEAD(&mad_agent_priv->send_list);
	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
	INIT_LIST_HEAD(&mad_agent_priv->done_list);
	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
	INIT_LIST_HEAD(&mad_agent_priv->local_list);
	INIT_WORK(&mad_agent_priv->local_work, local_completions);
	atomic_set(&mad_agent_priv->refcount, 1);
	init_completion(&mad_agent_priv->comp);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;

	/*
	 * Make sure MAD registration (if supplied)
	 * is non overlapping with any existing ones
	 */
	if (mad_reg_req) {
		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
		if (!is_vendor_class(mgmt_class)) {
			class = port_priv->version[mad_reg_req->
						   mgmt_class_version].class;
			if (class) {
				method = class->method_table[mgmt_class];
				if (method) {
					if (method_in_use(&method,
							  mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
						  mgmt_class);
		} else {
			/* "New" vendor class range */
			vendor = port_priv->version[mad_reg_req->
						    mgmt_class_version].vendor;
			if (vendor) {
				vclass = vendor_class_index(mgmt_class);
				vendor_class = vendor->vendor_class[vclass];
				if (vendor_class) {
					if (is_vendor_method_in_use(
							vendor_class,
							mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
		}
		if (ret2) {
			ret = ERR_PTR(ret2);
			goto error4;
		}
	}

	/* Add mad agent into port's agent list */
	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return &mad_agent_priv->agent;

error4:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	kfree(reg_req);
error3:
	ib_dereg_mr(mad_agent_priv->agent.mr);
error2:
	kfree(mad_agent_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_agent);
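
/*
 * Snoop agents observe MAD traffic on a QP without consuming it.  The
 * helpers below decide whether a snoop client asked to see send
 * completions or receives (the RMPP snoop variants are currently
 * compiled out).
 */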

static inline int is_snooping_sends(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(/*IB_MAD_SNOOP_POSTED_SENDS |
		 IB_MAD_SNOOP_RMPP_SENDS |*/
		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(IB_MAD_SNOOP_RECVS /*|
		 IB_MAD_SNOOP_RMPP_RECVS*/));
}
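
/*
 * The snoop table is an array of pointers that grows one slot at a time
 * via krealloc() under snoop_lock; register_snoop_agent() returns the
 * index of the claimed slot, and snoop_count tracks how many snoop
 * agents are active on the QP.
 */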

static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
				struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_snoop_private **new_snoop_table;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	/* Check for empty slot in array. */
	for (i = 0; i < qp_info->snoop_table_size; i++)
		if (!qp_info->snoop_table[i])
			break;

	if (i == qp_info->snoop_table_size) {
		/* Grow table. */
		new_snoop_table = krealloc(qp_info->snoop_table,
					   sizeof mad_snoop_priv *
					   (qp_info->snoop_table_size + 1),
					   GFP_ATOMIC);
		if (!new_snoop_table) {
			i = -ENOMEM;
			goto out;
		}

		qp_info->snoop_table = new_snoop_table;
		qp_info->snoop_table_size++;
	}
	qp_info->snoop_table[i] = mad_snoop_priv;
	atomic_inc(&qp_info->snoop_count);
out:
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
	return i;
}

struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   int mad_snoop_flags,
					   ib_mad_snoop_handler snoop_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret;
	struct ib_mad_snoop_private *mad_snoop_priv;
	int qpn;

	/* Validate parameters */
	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}
	/* Allocate structures */
	mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
	if (!mad_snoop_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	/* Now, fill in the various structures */
	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
	mad_snoop_priv->agent.device = device;
	mad_snoop_priv->agent.recv_handler = recv_handler;
	mad_snoop_priv->agent.snoop_handler = snoop_handler;
	mad_snoop_priv->agent.context = context;
	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_snoop_priv->agent.port_num = port_num;
	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
	init_completion(&mad_snoop_priv->comp);
	mad_snoop_priv->snoop_index = register_snoop_agent(
						&port_priv->qp_info[qpn],
						mad_snoop_priv);
	if (mad_snoop_priv->snoop_index < 0) {
		ret = ERR_PTR(mad_snoop_priv->snoop_index);
		goto error2;
	}

	atomic_set(&mad_snoop_priv->refcount, 1);
	return &mad_snoop_priv->agent;

error2:
	kfree(mad_snoop_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_snoop);
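
/*
 * Agent teardown relies on reference counting: deref_mad_agent() and
 * deref_snoop_agent() signal the completion that the unregister paths
 * block on, so the structures are freed only after the last outstanding
 * reference has been dropped.
 */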

static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		complete(&mad_agent_priv->comp);
}

static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
{
	if (atomic_dec_and_test(&mad_snoop_priv->refcount))
		complete(&mad_snoop_priv->comp);
}

static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	/* Note that we could still be handling received MADs */

	/*
	 * Canceling all sends results in dropping received response
	 * MADs, preventing us from queuing additional work
	 */
	cancel_mads(mad_agent_priv);
	port_priv = mad_agent_priv->qp_info->port_priv;
	cancel_delayed_work(&mad_agent_priv->timed_work);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	remove_mad_reg_req(mad_agent_priv);
	list_del(&mad_agent_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	flush_workqueue(port_priv->wq);
	ib_cancel_rmpp_recvs(mad_agent_priv);

	deref_mad_agent(mad_agent_priv);
	wait_for_completion(&mad_agent_priv->comp);

	kfree(mad_agent_priv->reg_req);
	ib_dereg_mr(mad_agent_priv->agent.mr);
	kfree(mad_agent_priv);
}

static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_qp_info *qp_info;
	unsigned long flags;

	qp_info = mad_snoop_priv->qp_info;
	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
	atomic_dec(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

	deref_snoop_agent(mad_snoop_priv);
	wait_for_completion(&mad_snoop_priv->comp);

	kfree(mad_snoop_priv);
}

/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* If the TID is zero, the agent can only snoop. */
	if (mad_agent->hi_tid) {
		mad_agent_priv = container_of(mad_agent,
					      struct ib_mad_agent_private,
					      agent);
		unregister_mad_agent(mad_agent_priv);
	} else {
		mad_snoop_priv = container_of(mad_agent,
					      struct ib_mad_snoop_private,
					      agent);
		unregister_mad_snoop(mad_snoop_priv);
	}
	return 0;
}
EXPORT_SYMBOL(ib_unregister_mad_agent);

static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
	struct ib_mad_queue *mad_queue;
	unsigned long flags;

	BUG_ON(!mad_list->mad_queue);
	mad_queue = mad_list->mad_queue;
	spin_lock_irqsave(&mad_queue->lock, flags);
	list_del(&mad_list->list);
	mad_queue->count--;
	spin_unlock_irqrestore(&mad_queue->lock, flags);
}
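
/*
 * snoop_send() and snoop_recv() drop snoop_lock around each handler
 * callback; the per-agent reference taken first keeps the snoop agent
 * alive while the lock is not held, and the loop re-takes the lock
 * before advancing to the next slot.
 */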

static void snoop_send(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_send_buf *send_buf,
		       struct ib_mad_send_wc *mad_send_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
						    send_buf, mad_send_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void snoop_recv(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_recv_wc *mad_recv_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
						   mad_recv_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void build_smp_wc(struct ib_qp *qp,
			 u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
			 struct ib_wc *wc)
{
	memset(wc, 0, sizeof *wc);
	wc->wr_id = wr_id;
	wc->status = IB_WC_SUCCESS;
	wc->opcode = IB_WC_RECV;
	wc->pkey_index = pkey_index;
	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
	wc->src_qp = IB_QP0;
	wc->qp = qp;
	wc->slid = slid;
	wc->sl = 0;
	wc->dlid_path_bits = 0;
	wc->port_num = port_num;
}
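
/*
 * A directed route SMP addressed to the local port is not posted to the
 * QP.  It is handed to the driver's process_mad() with a synthesized work
 * completion (build_smp_wc() above), and both the send completion and any
 * generated response are queued on the agent's local_list for delivery
 * from the local_completions() work item.
 */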

/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
				  struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret = 0;
	struct ib_smp *smp = mad_send_wr->send_buf.mad;
	unsigned long flags;
	struct ib_mad_local_private *local;
	struct ib_mad_private *mad_priv;
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent_private *recv_mad_agent = NULL;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num;
	struct ib_wc mad_wc;
	struct ib_send_wr *send_wr = &mad_send_wr->send_wr;

	if (device->node_type == RDMA_NODE_IB_SWITCH &&
	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		port_num = send_wr->wr.ud.port_num;
	else
		port_num = mad_agent_priv->agent.port_num;

	/*
	 * Directed route handling starts if the initial LID routed part of
	 * a request or the ending LID routed part of a response is empty.
	 * If we are at the start of the LID routed part, don't update the
	 * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
	 */
	if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
	     IB_LID_PERMISSIVE &&
	     smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
	     IB_SMI_DISCARD) {
		ret = -EINVAL;
		dev_err(&device->dev, "Invalid directed route\n");
		goto out;
	}

	/* Check to post send on QP or process locally */
	if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
	    smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
		goto out;

	local = kmalloc(sizeof *local, GFP_ATOMIC);
	if (!local) {
		ret = -ENOMEM;
		dev_err(&device->dev, "No memory for ib_mad_local_private\n");
		goto out;
	}
	local->mad_priv = NULL;
	local->recv_mad_agent = NULL;
	mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
	if (!mad_priv) {
		ret = -ENOMEM;
		dev_err(&device->dev, "No memory for local response MAD\n");
		kfree(local);
		goto out;
	}

	build_smp_wc(mad_agent_priv->agent.qp,
		     send_wr->wr_id, be16_to_cpu(smp->dr_slid),
		     send_wr->wr.ud.pkey_index,
		     send_wr->wr.ud.port_num, &mad_wc);

	/* No GRH for DR SMP */
	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
				  (struct ib_mad *)smp,
				  (struct ib_mad *)&mad_priv->mad);
	switch (ret) {
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
		if (ib_response_mad(&mad_priv->mad.mad.mad_hdr) &&
		    mad_agent_priv->agent.recv_handler) {
			local->mad_priv = mad_priv;
			local->recv_mad_agent = mad_agent_priv;
			/*
			 * Reference MAD agent until receive
			 * side of local completion handled
			 */
			atomic_inc(&mad_agent_priv->refcount);
		} else
			kmem_cache_free(ib_mad_cache, mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
		kmem_cache_free(ib_mad_cache, mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS:
		/* Treat like an incoming receive MAD */
		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
					    mad_agent_priv->agent.port_num);
		if (port_priv) {
			memcpy(&mad_priv->mad.mad, smp, sizeof(struct ib_mad));
			recv_mad_agent = find_mad_agent(port_priv,
							&mad_priv->mad.mad.mad_hdr);
		}
		if (!port_priv || !recv_mad_agent) {
			/*
			 * No receiving agent so drop packet and
			 * generate send completion.
			 */
			kmem_cache_free(ib_mad_cache, mad_priv);
			break;
		}
		local->mad_priv = mad_priv;
		local->recv_mad_agent = recv_mad_agent;
		break;
	default:
		kmem_cache_free(ib_mad_cache, mad_priv);
		kfree(local);
		ret = -EINVAL;
		goto out;
	}

	local->mad_send_wr = mad_send_wr;
	/* Reference MAD agent until send side of local completion handled */
	atomic_inc(&mad_agent_priv->refcount);
	/* Queue local completion to local list */
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->local_work);
	ret = 1;
out:
	return ret;
}

static int get_pad_size(int hdr_len, int data_len)
{
	int seg_size, pad;

	seg_size = sizeof(struct ib_mad) - hdr_len;
	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		return pad == seg_size ? 0 : pad;
	} else
		return seg_size;
}

static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_segment *s, *t;

	list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
		list_del(&s->list);
		kfree(s);
	}
}
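
/*
 * RMPP segmentation: each segment carries seg_size = sizeof(struct ib_mad)
 * - hdr_len bytes of payload, and get_pad_size() computes how much the
 * final segment must be padded so the total payload is a whole number of
 * segments.
 */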

static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
				gfp_t gfp_mask)
{
	struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
	struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
	struct ib_rmpp_segment *seg = NULL;
	int left, seg_size, pad;

	send_buf->seg_size = sizeof (struct ib_mad) - send_buf->hdr_len;
	seg_size = send_buf->seg_size;
	pad = send_wr->pad;

	/* Allocate data segments. */
	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
		if (!seg) {
			dev_err(&send_buf->mad_agent->device->dev,
				"alloc_send_rmpp_segs: RMPP mem alloc failed for len %zd, gfp %#x\n",
				sizeof (*seg) + seg_size, gfp_mask);
			free_send_rmpp_list(send_wr);
			return -ENOMEM;
		}
		seg->num = ++send_buf->seg_count;
		list_add_tail(&seg->list, &send_wr->rmpp_list);
	}

	/* Zero any padding */
	if (pad)
		memset(seg->data + seg_size - pad, 0, pad);

	rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
					  agent.rmpp_version;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
					struct ib_rmpp_segment, list);
	send_wr->last_ack_seg = send_wr->cur_seg;
	return 0;
}

int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
{
	return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
}
EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);
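
/*
 * ib_create_send_mad() makes a single allocation holding the visible MAD
 * buffer (hdr_len bytes for an RMPP send, a full struct ib_mad otherwise)
 * followed by the private struct ib_mad_send_wr_private bookkeeping:
 * send_buf.mad points at the start and mad_send_wr at buf + size.
 */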

struct ib_mad_send_buf *ib_create_send_mad(struct ib_mad_agent *mad_agent,
					   u32 remote_qpn, u16 pkey_index,
					   int rmpp_active,
					   int hdr_len, int data_len,
					   gfp_t gfp_mask)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	int pad, message_size, ret, size;
	void *buf;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);
	pad = get_pad_size(hdr_len, data_len);
	message_size = hdr_len + data_len + pad;

	if (ib_mad_kernel_rmpp_agent(mad_agent)) {
		if (!rmpp_active && message_size > sizeof(struct ib_mad))
			return ERR_PTR(-EINVAL);
	} else
		if (rmpp_active || message_size > sizeof(struct ib_mad))
			return ERR_PTR(-EINVAL);

	size = rmpp_active ? hdr_len : sizeof(struct ib_mad);
	buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	mad_send_wr = buf + size;
	INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
	mad_send_wr->send_buf.mad = buf;
	mad_send_wr->send_buf.hdr_len = hdr_len;
	mad_send_wr->send_buf.data_len = data_len;
	mad_send_wr->pad = pad;

	mad_send_wr->mad_agent_priv = mad_agent_priv;
	mad_send_wr->sg_list[0].length = hdr_len;
	mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;
	mad_send_wr->sg_list[1].length = sizeof(struct ib_mad) - hdr_len;
	mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey;

	mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
	mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
	mad_send_wr->send_wr.num_sge = 2;
	mad_send_wr->send_wr.opcode = IB_WR_SEND;
	mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
	mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
	mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
	mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;

	if (rmpp_active) {
		ret = alloc_send_rmpp_list(mad_send_wr, gfp_mask);
		if (ret) {
			kfree(buf);
			return ERR_PTR(ret);
		}
	}

	mad_send_wr->send_buf.mad_agent = mad_agent;
	atomic_inc(&mad_agent_priv->refcount);
	return &mad_send_wr->send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);

int ib_get_mad_data_offset(u8 mgmt_class)
{
	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
		return IB_MGMT_SA_HDR;
	else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
		 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
		 (mgmt_class == IB_MGMT_CLASS_BIS))
		return IB_MGMT_DEVICE_HDR;
	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return IB_MGMT_VENDOR_HDR;
	else
		return IB_MGMT_MAD_HDR;
}
EXPORT_SYMBOL(ib_get_mad_data_offset);

int ib_is_mad_class_rmpp(u8 mgmt_class)
{
	if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_BIS) ||
	    ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
	     (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
		return 1;
	return 0;
}
EXPORT_SYMBOL(ib_is_mad_class_rmpp);
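
/*
 * ib_get_rmpp_segment() keeps cur_seg as a cursor into the segment list
 * and walks forward or backward from the previous position, so the common
 * sequential access pattern costs O(1) per segment.
 */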

void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct list_head *list;

	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);
	list = &mad_send_wr->cur_seg->list;

	if (mad_send_wr->cur_seg->num < seg_num) {
		list_for_each_entry(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	} else if (mad_send_wr->cur_seg->num > seg_num) {
		list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	}
	return mad_send_wr->cur_seg->data;
}
EXPORT_SYMBOL(ib_get_rmpp_segment);

static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
{
	if (mad_send_wr->send_buf.seg_count)
		return ib_get_rmpp_segment(&mad_send_wr->send_buf,
					   mad_send_wr->seg_num);
	else
		return mad_send_wr->send_buf.mad +
		       mad_send_wr->send_buf.hdr_len;
}

void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;

	mad_agent_priv = container_of(send_buf->mad_agent,
				      struct ib_mad_agent_private, agent);
	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);

	free_send_rmpp_list(mad_send_wr);
	kfree(send_buf->mad);
	deref_mad_agent(mad_agent_priv);
}
EXPORT_SYMBOL(ib_free_send_mad);
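
/*
 * ib_send_mad() DMA-maps the two scatter/gather entries (header and
 * payload) and posts the work request, or parks it on
 * qp_info->overflow_list once send_queue.count reaches max_active;
 * overflowed requests are posted later as completions free up slots.
 */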

int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_qp_info *qp_info;
	struct list_head *list;
	struct ib_send_wr *bad_send_wr;
	struct ib_mad_agent *mad_agent;
	struct ib_sge *sge;
	unsigned long flags;
	int ret;

	/* Set WR ID to find mad_send_wr upon completion */
	qp_info = mad_send_wr->mad_agent_priv->qp_info;
	mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;

	mad_agent = mad_send_wr->send_buf.mad_agent;
	sge = mad_send_wr->sg_list;
	sge[0].addr = ib_dma_map_single(mad_agent->device,
					mad_send_wr->send_buf.mad,
					sge[0].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
		return -ENOMEM;

	mad_send_wr->header_mapping = sge[0].addr;

	sge[1].addr = ib_dma_map_single(mad_agent->device,
					ib_get_payload(mad_send_wr),
					sge[1].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		return -ENOMEM;
	}
	mad_send_wr->payload_mapping = sge[1].addr;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr,
				   &bad_send_wr);
		list = &qp_info->send_queue.list;
	} else {
		ret = 0;
		list = &qp_info->overflow_list;
	}

	if (!ret) {
		qp_info->send_queue.count++;
		list_add_tail(&mad_send_wr->mad_list.list, list);
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
	if (ret) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->payload_mapping,
				    sge[1].length, DMA_TO_DEVICE);
	}
	return ret;
}
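
/*
 * Each posted send holds one reference for the QP send completion and,
 * when a timeout is set (a response is expected), a second reference
 * that is dropped when the response arrives or the request times out.
 */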

/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 *  with the registered client
 */
int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
		     struct ib_mad_send_buf **bad_send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_buf *next_send_buf;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int ret = -EINVAL;

	/* Walk list of send WRs and post each on send list */
	for (; send_buf; send_buf = next_send_buf) {

		mad_send_wr = container_of(send_buf,
					   struct ib_mad_send_wr_private,
					   send_buf);
		mad_agent_priv = mad_send_wr->mad_agent_priv;

		if (!send_buf->mad_agent->send_handler ||
		    (send_buf->timeout_ms &&
		     !send_buf->mad_agent->recv_handler)) {
			ret = -EINVAL;
			goto error;
		}

		if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
			if (mad_agent_priv->agent.rmpp_version) {
				ret = -EINVAL;
				goto error;
			}
		}

		/*
		 * Save pointer to next work request to post in case the
		 * current one completes, and the user modifies the work
		 * request associated with the completion
		 */
		next_send_buf = send_buf->next;
		mad_send_wr->send_wr.wr.ud.ah = send_buf->ah;

		if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
		    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
			ret = handle_outgoing_dr_smp(mad_agent_priv,
						     mad_send_wr);
			if (ret < 0)		/* error */
				goto error;
			else if (ret == 1)	/* locally consumed */
				continue;
		}

		mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
		/* Timeout will be updated after send completes */
		mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
		mad_send_wr->max_retries = send_buf->retries;
		mad_send_wr->retries_left = send_buf->retries;
		send_buf->retries = 0;
		/* Reference for work request to QP + response */
		mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
		mad_send_wr->status = IB_WC_SUCCESS;

		/* Reference MAD agent until send completes */
		atomic_inc(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_agent_priv->send_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
			ret = ib_send_rmpp_mad(mad_send_wr);
			if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
				ret = ib_send_mad(mad_send_wr);
		} else
			ret = ib_send_mad(mad_send_wr);
		if (ret < 0) {
			/* Fail send request */
			spin_lock_irqsave(&mad_agent_priv->lock, flags);
			list_del(&mad_send_wr->agent_list);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			atomic_dec(&mad_agent_priv->refcount);
			goto error;
		}
	}
	return 0;
error:
	if (bad_send_buf)
		*bad_send_buf = send_buf;
	return ret;
}
EXPORT_SYMBOL(ib_post_send_mad);

/*
 * ib_free_recv_mad - Returns data buffers used to receive
 *  a MAD to the access layer
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *priv;
	struct list_head free_list;

	INIT_LIST_HEAD(&free_list);
	list_splice_init(&mad_recv_wc->rmpp_list, &free_list);

	list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
					&free_list, list) {
		mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
					   recv_buf);
		mad_priv_hdr = container_of(mad_recv_wc,
					    struct ib_mad_private_header,
					    recv_wc);
		priv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);
		kmem_cache_free(ib_mad_cache, priv);
	}
}
EXPORT_SYMBOL(ib_free_recv_mad);

struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
					u8 rmpp_version,
					ib_mad_send_handler send_handler,
					ib_mad_recv_handler recv_handler,
					void *context)
{
	return ERR_PTR(-EINVAL);	/* XXX: for now */
}
EXPORT_SYMBOL(ib_redirect_mad_qp);

int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
		      struct ib_wc *wc)
{
	dev_err(&mad_agent->device->dev,
		"ib_process_mad_wc() not implemented yet\n");
	return 0;
}
EXPORT_SYMBOL(ib_process_mad_wc);

static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req)
{
	int i;

	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
		if ((*method)->agent[i]) {
			pr_err("Method %d already in use\n", i);
			return -EINVAL;
		}
	}
	return 0;
}

static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
{
	/* Allocate management method table */
	*method = kzalloc(sizeof **method, GFP_ATOMIC);
	if (!*method) {
		pr_err("No memory for ib_mad_mgmt_method_table\n");
		return -ENOMEM;
	}

	return 0;
}

/*
 * Check to see if there are any methods still in use
 */
static int check_method_table(struct ib_mad_mgmt_method_table *method)
{
	int i;

	for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
		if (method->agent[i])
			return 1;
	return 0;
}

/*
 * Check to see if there are any method tables for this class still in use
 */
static int check_class_table(struct ib_mad_mgmt_class_table *class)
{
	int i;

	for (i = 0; i < MAX_MGMT_CLASS; i++)
		if (class->method_table[i])
			return 1;
	return 0;
}

static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		if (vendor_class->method_table[i])
			return 1;
	return 0;
}

static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
			   char *oui)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp(vendor_class->oui[i], oui, 3))
			return i;

	return -1;
}

static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
{
	int i;

	for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
		if (vendor->vendor_class[i])
			return 1;

	return 0;
}

static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
				     struct ib_mad_agent_private *agent)
{
	int i;

	/* Remove any methods for this mad agent */
	for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
		if (method->agent[i] == agent) {
			method->agent[i] = NULL;
		}
	}
}
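
/*
 * Class, vendor, and method tables are allocated on demand under
 * port_priv->reg_lock (hence GFP_ATOMIC) and freed again as soon as the
 * last method registered in them is removed.
 */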

static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table **class;
	struct ib_mad_mgmt_method_table **method;
	int i, ret;

	port_priv = agent_priv->qp_info->port_priv;
	class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
	if (!*class) {
		/* Allocate management class table for "new" class version */
		*class = kzalloc(sizeof **class, GFP_ATOMIC);
		if (!*class) {
			dev_err(&agent_priv->agent.device->dev,
				"No memory for ib_mad_mgmt_class_table\n");
			ret = -ENOMEM;
			goto error1;
		}

		/* Allocate method table for this management class */
		method = &(*class)->method_table[mgmt_class];
		if ((ret = allocate_method_table(method)))
			goto error2;
	} else {
		method = &(*class)->method_table[mgmt_class];
		if (!*method) {
			/* Allocate method table for this management class */
			if ((ret = allocate_method_table(method)))
				goto error1;
		}
	}

	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error3;

	/* Finally, add in methods being registered */
	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
		(*method)->agent[i] = agent_priv;

	return 0;

error3:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
	goto error1;
error2:
	kfree(*class);
	*class = NULL;
error1:
	return ret;
}

static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_vendor_class_table **vendor_table;
	struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
	struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
	struct ib_mad_mgmt_method_table **method;
	int i, ret = -ENOMEM;
	u8 vclass;

	/* "New" vendor (with OUI) class */
	vclass = vendor_class_index(mad_reg_req->mgmt_class);
	port_priv = agent_priv->qp_info->port_priv;
	vendor_table = &port_priv->version[
				mad_reg_req->mgmt_class_version].vendor;
	if (!*vendor_table) {
		/* Allocate mgmt vendor class table for "new" class version */
		vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
		if (!vendor) {
			dev_err(&agent_priv->agent.device->dev,
				"No memory for ib_mad_mgmt_vendor_class_table\n");
			goto error1;
		}

		*vendor_table = vendor;
	}
	if (!(*vendor_table)->vendor_class[vclass]) {
		/* Allocate table for this management vendor class */
		vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
		if (!vendor_class) {
			dev_err(&agent_priv->agent.device->dev,
				"No memory for ib_mad_mgmt_vendor_class\n");
			goto error2;
		}

		(*vendor_table)->vendor_class[vclass] = vendor_class;
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
			    mad_reg_req->oui, 3)) {
			method = &(*vendor_table)->vendor_class[
						vclass]->method_table[i];
			BUG_ON(!*method);
			goto check_in_use;
		}
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* OUI slot available ? */
		if (!is_vendor_oui((*vendor_table)->vendor_class[
				vclass]->oui[i])) {
			method = &(*vendor_table)->vendor_class[
				vclass]->method_table[i];
			BUG_ON(*method);
			/* Allocate method table for this OUI */
			if ((ret = allocate_method_table(method)))
				goto error3;
			memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
			       mad_reg_req->oui, 3);
			goto check_in_use;
		}
	}
	dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
	goto error3;

check_in_use:
	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error4;

	/* Finally, add in methods being registered */
	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
		(*method)->agent[i] = agent_priv;

	return 0;

error4:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
error3:
	if (vendor_class) {
		(*vendor_table)->vendor_class[vclass] = NULL;
		kfree(vendor_class);
	}
error2:
	if (vendor) {
		*vendor_table = NULL;
		kfree(vendor);
	}
error1:
	return ret;
}

static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_method_table *method;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	int index;
	u8 mgmt_class;

	/*
	 * Was MAD registration request supplied
	 * with original registration ?
	 */
	if (!agent_priv->reg_req) {
		goto out;
	}

	port_priv = agent_priv->qp_info->port_priv;
	mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
	class = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].class;
	if (!class)
		goto vendor_check;

	method = class->method_table[mgmt_class];
	if (method) {
		/* Remove any methods for this mad agent */
		remove_methods_mad_agent(method, agent_priv);
		/* Now, check to see if there are any methods still in use */
		if (!check_method_table(method)) {
			/* If not, release management method table */
			kfree(method);
			class->method_table[mgmt_class] = NULL;
			/* Any management classes left ? */
			if (!check_class_table(class)) {
				/* If not, release management class table */
				kfree(class);
				port_priv->version[
					agent_priv->reg_req->
					mgmt_class_version].class = NULL;
			}
		}
	}

vendor_check:
	if (!is_vendor_class(mgmt_class))
		goto out;

	/* normalize mgmt_class to vendor range 2 */
	mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
	vendor = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].vendor;

	if (!vendor)
		goto out;

	vendor_class = vendor->vendor_class[mgmt_class];
	if (vendor_class) {
		index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
		if (index < 0)
			goto out;
		method = vendor_class->method_table[index];
		if (method) {
			/* Remove any methods for this mad agent */
			remove_methods_mad_agent(method, agent_priv);
			/*
			 * Now, check to see if there are
			 * any methods still in use
			 */
			if (!check_method_table(method)) {
				/* If not, release management method table */
				kfree(method);
				vendor_class->method_table[index] = NULL;
				memset(vendor_class->oui[index], 0, 3);
				/* Any OUIs left ? */
				if (!check_vendor_class(vendor_class)) {
					/* If not, release vendor class table */
					kfree(vendor_class);
					vendor->vendor_class[mgmt_class] = NULL;
					/* Any other vendor classes left ? */
					if (!check_vendor_table(vendor)) {
						kfree(vendor);
						port_priv->version[
							agent_priv->reg_req->
							mgmt_class_version].
							vendor = NULL;
					}
				}
			}
		}
	}

out:
	return;
}
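
/*
 * Response MADs are routed by the hi_tid stashed in the transaction ID when
 * the agent registered; request MADs are routed through the class/vendor
 * method tables maintained by the add_*_reg_req() helpers above.
 */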

static struct ib_mad_agent_private *
find_mad_agent(struct ib_mad_port_private *port_priv,
	       const struct ib_mad_hdr *mad_hdr)
{
	struct ib_mad_agent_private *mad_agent = NULL;
	unsigned long flags;

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	if (ib_response_mad(mad_hdr)) {
		u32 hi_tid;
		struct ib_mad_agent_private *entry;

		/*
		 * Routing is based on high 32 bits of transaction ID
		 * of MAD.
		 */
		hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
		list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
			if (entry->agent.hi_tid == hi_tid) {
				mad_agent = entry;
				break;
			}
		}
	} else {
		struct ib_mad_mgmt_class_table *class;
		struct ib_mad_mgmt_method_table *method;
		struct ib_mad_mgmt_vendor_class_table *vendor;
		struct ib_mad_mgmt_vendor_class *vendor_class;
		const struct ib_vendor_mad *vendor_mad;
		int index;

		/*
		 * Routing is based on version, class, and method
		 * For "newer" vendor MADs, also based on OUI
		 */
		if (mad_hdr->class_version >= MAX_MGMT_VERSION)
			goto out;
		if (!is_vendor_class(mad_hdr->mgmt_class)) {
			class = port_priv->version[
					mad_hdr->class_version].class;
			if (!class)
				goto out;
			if (convert_mgmt_class(mad_hdr->mgmt_class) >=
			    IB_MGMT_MAX_METHODS)
				goto out;
			method = class->method_table[convert_mgmt_class(
							mad_hdr->mgmt_class)];
			if (method)
				mad_agent = method->agent[mad_hdr->method &
							  ~IB_MGMT_METHOD_RESP];
		} else {
			vendor = port_priv->version[
					mad_hdr->class_version].vendor;
			if (!vendor)
				goto out;
			vendor_class = vendor->vendor_class[vendor_class_index(
						mad_hdr->mgmt_class)];
			if (!vendor_class)
				goto out;
			/* Find matching OUI */
			vendor_mad = (const struct ib_vendor_mad *)mad_hdr;
			index = find_vendor_oui(vendor_class, vendor_mad->oui);
			if (index == -1)
				goto out;
			method = vendor_class->method_table[index];
			if (method)
				mad_agent = method->agent[mad_hdr->method &
							  ~IB_MGMT_METHOD_RESP];
		}
	}

	if (mad_agent) {
		if (mad_agent->agent.recv_handler)
			atomic_inc(&mad_agent->refcount);
		else {
			dev_notice(&port_priv->device->dev,
				   "No receive handler for client %p on port %d\n",
				   &mad_agent->agent, port_priv->port_num);
			mad_agent = NULL;
		}
	}
out:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return mad_agent;
}

static int validate_mad(const struct ib_mad_hdr *mad_hdr, u32 qp_num)
{
	int valid = 0;

	/* Make sure MAD base version is understood */
	if (mad_hdr->base_version != IB_MGMT_BASE_VERSION) {
		pr_err("MAD received with unsupported base version %d\n",
			mad_hdr->base_version);
		goto out;
	}

	/* Filter SMI packets sent to other than QP0 */
	if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
	    (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
		if (qp_num == 0)
			valid = 1;
	} else {
		/* Filter GSI packets sent to QP0 */
		if (qp_num != 0)
			valid = 1;
	}

out:
	return valid;
}

static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
			    const struct ib_mad_hdr *mad_hdr)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
	return !mad_agent_priv->agent.rmpp_version ||
		!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
		!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
				    IB_MGMT_RMPP_FLAG_ACTIVE) ||
		(rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
}

static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
				     const struct ib_mad_recv_wc *rwc)
{
	return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class ==
		rwc->recv_buf.mad->mad_hdr.mgmt_class;
}

static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
				   const struct ib_mad_send_wr_private *wr,
				   const struct ib_mad_recv_wc *rwc)
{
	struct ib_ah_attr attr;
	u8 send_resp, rcv_resp;
	union ib_gid sgid;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num = mad_agent_priv->agent.port_num;
	u8 lmc;

	send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad);
	rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr);

	if (send_resp == rcv_resp)
		/* both requests, or both responses. GIDs different */
		return 0;

	if (ib_query_ah(wr->send_buf.ah, &attr))
		/* Assume not equal, to avoid false positives. */
		return 0;

	if (!!(attr.ah_flags & IB_AH_GRH) !=
	    !!(rwc->wc->wc_flags & IB_WC_GRH))
		/* one has GID, other does not.  Assume different */
		return 0;

	if (!send_resp && rcv_resp) {
		/* is request/response. */
		if (!(attr.ah_flags & IB_AH_GRH)) {
			if (ib_get_cached_lmc(device, port_num, &lmc))
				return 0;
			return (!lmc || !((attr.src_path_bits ^
					   rwc->wc->dlid_path_bits) &
					  ((1 << lmc) - 1)));
		} else {
			if (ib_get_cached_gid(device, port_num,
					      attr.grh.sgid_index, &sgid))
				return 0;
			return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
				       16);
		}
	}

	if (!(attr.ah_flags & IB_AH_GRH))
		return attr.dlid == rwc->wc->slid;
	else
		return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
			       16);
}

static inline int is_direct(u8 class)
{
	return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
}
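
/*
 * ib_find_send_mad() matches a received response to an outstanding send
 * by TID and management class and, except for directed route MADs (which
 * may carry permissive LIDs), by source GID/LID; it checks the wait list
 * first and then sends that have not yet seen their send completion.
 */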

struct ib_mad_send_wr_private*
ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
		 const struct ib_mad_recv_wc *wc)
{
	struct ib_mad_send_wr_private *wr;
	const struct ib_mad_hdr *mad_hdr;

	mad_hdr = &wc->recv_buf.mad->mad_hdr;

	list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
		if ((wr->tid == mad_hdr->tid) &&
		    rcv_has_same_class(wr, wc) &&
		    /*
		     * Don't check GID for direct routed MADs.
		     * These might have permissive LIDs.
		     */
		    (is_direct(mad_hdr->mgmt_class) ||
		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
	}

	/*
	 * It's possible to receive the response before we've
	 * been notified that the send has completed
	 */
	list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
		if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
		    wr->tid == mad_hdr->tid &&
		    wr->timeout &&
		    rcv_has_same_class(wr, wc) &&
		    /*
		     * Don't check GID for direct routed MADs.
		     * These might have permissive LIDs.
		     */
		    (is_direct(mad_hdr->mgmt_class) ||
		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
			/* Verify request has not been canceled */
			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
	}
	return NULL;
}

void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
{
	mad_send_wr->timeout = 0;
	if (mad_send_wr->refcount == 1)
		list_move_tail(&mad_send_wr->agent_list,
			       &mad_send_wr->mad_agent_priv->done_list);
}

static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
				 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;

	INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
	list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
	if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
		mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
						      mad_recv_wc);
		if (!mad_recv_wc) {
			deref_mad_agent(mad_agent_priv);
			return;
		}
	}

	/* Complete corresponding request */
	if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) {
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
		if (!mad_send_wr) {
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
			   && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
			   && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr)
					& IB_MGMT_RMPP_FLAG_ACTIVE)) {
				/* user rmpp is in effect
				 * and this is an active RMPP MAD
				 */
				mad_recv_wc->wc->wr_id = 0;
				mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
								   mad_recv_wc);
				atomic_dec(&mad_agent_priv->refcount);
			} else {
				/* not user rmpp, revert to normal behavior and
				 * drop the mad */
				ib_free_recv_mad(mad_recv_wc);
				deref_mad_agent(mad_agent_priv);
				return;
			}
		} else {
			ib_mark_mad_done(mad_send_wr);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

			/* Defined behavior is to complete response before request */
			mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
			mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
							   mad_recv_wc);
			atomic_dec(&mad_agent_priv->refcount);

			mad_send_wc.status = IB_WC_SUCCESS;
			mad_send_wc.vendor_err = 0;
			mad_send_wc.send_buf = &mad_send_wr->send_buf;
			ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
		}
	} else {
		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
						   mad_recv_wc);
		deref_mad_agent(mad_agent_priv);
	}
}

static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
				     const struct ib_mad_qp_info *qp_info,
				     const struct ib_wc *wc,
				     int port_num,
				     struct ib_mad_private *recv,
				     struct ib_mad_private *response)
{
	enum smi_forward_action retsmi;

	if (smi_handle_dr_smp_recv(&recv->mad.smp,
				   port_priv->device->node_type,
				   port_num,
				   port_priv->device->phys_port_cnt) ==
				   IB_SMI_DISCARD)
		return IB_SMI_DISCARD;

	retsmi = smi_check_forward_dr_smp(&recv->mad.smp);
	if (retsmi == IB_SMI_LOCAL)
		return IB_SMI_HANDLE;

	if (retsmi == IB_SMI_SEND) { /* don't forward */
		if (smi_handle_dr_smp_send(&recv->mad.smp,
					   port_priv->device->node_type,
					   port_num) == IB_SMI_DISCARD)
			return IB_SMI_DISCARD;

		if (smi_check_local_smp(&recv->mad.smp, port_priv->device) == IB_SMI_DISCARD)
			return IB_SMI_DISCARD;
	} else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
		/* forward case for switches */
		memcpy(response, recv, sizeof(*response));
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.mad = &response->mad.mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;

		agent_send_response(&response->mad.mad,
				    &response->grh, wc,
				    port_priv->device,
				    smi_get_fwd_port(&recv->mad.smp),
				    qp_info->qp->qp_num);

		return IB_SMI_DISCARD;
	}
	return IB_SMI_HANDLE;
}

static bool generate_unmatched_resp(struct ib_mad_private *recv,
				    struct ib_mad_private *response)
{
	if (recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_GET ||
	    recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_SET) {
		memcpy(response, recv, sizeof *response);
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.mad = &response->mad.mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;
		response->mad.mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
		response->mad.mad.mad_hdr.status =
			cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
		if (recv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
			response->mad.mad.mad_hdr.status |= IB_SMP_DIRECTION;

		return true;
	} else {
		return false;
	}
}
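
/*
 * Receive completion path: unmap the buffer, let snoop agents and then the
 * device driver's process_mad() see the MAD, handle directed route SMPs,
 * and finally hand the MAD to the matching agent -- generating an
 * "unsupported method/attribute" response for unmatched Get/Set requests.
 */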
static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
                                     struct ib_wc *wc)
{
        struct ib_mad_qp_info *qp_info;
        struct ib_mad_private_header *mad_priv_hdr;
        struct ib_mad_private *recv, *response = NULL;
        struct ib_mad_list_head *mad_list;
        struct ib_mad_agent_private *mad_agent;
        int port_num;
        int ret = IB_MAD_RESULT_SUCCESS;

        mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
        qp_info = mad_list->mad_queue->qp_info;
        dequeue_mad(mad_list);

        mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
                                    mad_list);
        recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
        ib_dma_unmap_single(port_priv->device,
                            recv->header.mapping,
                            sizeof(struct ib_mad_private) -
                            sizeof(struct ib_mad_private_header),
                            DMA_FROM_DEVICE);

        /* Setup MAD receive work completion from "normal" work completion */
        recv->header.wc = *wc;
        recv->header.recv_wc.wc = &recv->header.wc;
        recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
        recv->header.recv_wc.recv_buf.mad = &recv->mad.mad;
        recv->header.recv_wc.recv_buf.grh = &recv->grh;

        if (atomic_read(&qp_info->snoop_count))
                snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);

        /* Validate MAD */
        if (!validate_mad(&recv->mad.mad.mad_hdr, qp_info->qp->qp_num))
                goto out;

        response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
        if (!response) {
                dev_err(&port_priv->device->dev,
                        "ib_mad_recv_done_handler no memory for response buffer\n");
                goto out;
        }

        if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH)
                port_num = wc->port_num;
        else
                port_num = port_priv->port_num;

        if (recv->mad.mad.mad_hdr.mgmt_class ==
            IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
                if (handle_ib_smi(port_priv, qp_info, wc, port_num, recv,
                                  response)
                    == IB_SMI_DISCARD)
                        goto out;
        }

        /* Give driver "right of first refusal" on incoming MAD */
        if (port_priv->device->process_mad) {
                ret = port_priv->device->process_mad(port_priv->device, 0,
                                                     port_priv->port_num,
                                                     wc, &recv->grh,
                                                     &recv->mad.mad,
                                                     &response->mad.mad);
                if (ret & IB_MAD_RESULT_SUCCESS) {
                        if (ret & IB_MAD_RESULT_CONSUMED)
                                goto out;
                        if (ret & IB_MAD_RESULT_REPLY) {
                                agent_send_response(&response->mad.mad,
                                                    &recv->grh, wc,
                                                    port_priv->device,
                                                    port_num,
                                                    qp_info->qp->qp_num);
                                goto out;
                        }
                }
        }

        mad_agent = find_mad_agent(port_priv, &recv->mad.mad.mad_hdr);
        if (mad_agent) {
                ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
                /*
                 * recv is freed up in error cases in ib_mad_complete_recv
                 * or via recv_handler in ib_mad_complete_recv()
                 */
                recv = NULL;
        } else if ((ret & IB_MAD_RESULT_SUCCESS) &&
                   generate_unmatched_resp(recv, response)) {
                agent_send_response(&response->mad.mad, &recv->grh, wc,
                                    port_priv->device, port_num,
                                    qp_info->qp->qp_num);
        }

out:
        /* Post another receive request for this QP */
        if (response) {
                ib_mad_post_receive_mads(qp_info, response);
                if (recv)
                        kmem_cache_free(ib_mad_cache, recv);
        } else
                ib_mad_post_receive_mads(qp_info, recv);
}
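/*
 * With the wait_list kept in ascending deadline order (see
 * wait_for_response() below), the head entry is always the next request
 * to expire, so adjust_timeout() only ever needs to retarget the delayed
 * work item at the new head's deadline, or cancel it outright when the
 * list goes empty.
 */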
static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
{
        struct ib_mad_send_wr_private *mad_send_wr;
        unsigned long delay;

        if (list_empty(&mad_agent_priv->wait_list)) {
                cancel_delayed_work(&mad_agent_priv->timed_work);
        } else {
                mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
                                         struct ib_mad_send_wr_private,
                                         agent_list);

                if (time_after(mad_agent_priv->timeout,
                               mad_send_wr->timeout)) {
                        mad_agent_priv->timeout = mad_send_wr->timeout;
                        delay = mad_send_wr->timeout - jiffies;
                        if ((long)delay <= 0)
                                delay = 1;
                        mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
                                         &mad_agent_priv->timed_work, delay);
                }
        }
}
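/*
 * Park a sent request on the wait_list until its response arrives or it
 * times out.  The relative timeout is converted to an absolute jiffies
 * value and the entry is inserted in deadline order, scanning from the
 * tail since new deadlines usually sort last; the timer is rescheduled
 * only when the new entry becomes the head of the list.
 */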
static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
{
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_send_wr_private *temp_mad_send_wr;
        struct list_head *list_item;
        unsigned long delay;

        mad_agent_priv = mad_send_wr->mad_agent_priv;
        list_del(&mad_send_wr->agent_list);

        delay = mad_send_wr->timeout;
        mad_send_wr->timeout += jiffies;

        if (delay) {
                list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
                        temp_mad_send_wr = list_entry(list_item,
                                                struct ib_mad_send_wr_private,
                                                agent_list);
                        if (time_after(mad_send_wr->timeout,
                                       temp_mad_send_wr->timeout))
                                break;
                }
        } else
                list_item = &mad_agent_priv->wait_list;
        list_add(&mad_send_wr->agent_list, list_item);

        /* Reschedule a work item if we have a shorter timeout */
        if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
                mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
                                 &mad_agent_priv->timed_work, delay);
}

void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
                          int timeout_ms)
{
        mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
        wait_for_response(mad_send_wr);
}
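/*
 * Reference counting note: a request MAD holds one reference for the
 * send completion and, when a response is expected (timeout != 0), a
 * second one for the receive side.  The send completion below drops its
 * reference; if the response reference is still outstanding, the request
 * is parked on the wait_list via wait_for_response() instead of being
 * completed to the client.
 */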
/*
 * Process a send work completion
 */
void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
                             struct ib_mad_send_wc *mad_send_wc)
{
        struct ib_mad_agent_private *mad_agent_priv;
        unsigned long flags;
        int ret;

        mad_agent_priv = mad_send_wr->mad_agent_priv;
        spin_lock_irqsave(&mad_agent_priv->lock, flags);
        if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
                ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
                if (ret == IB_RMPP_RESULT_CONSUMED)
                        goto done;
        } else
                ret = IB_RMPP_RESULT_UNHANDLED;

        if (mad_send_wc->status != IB_WC_SUCCESS &&
            mad_send_wr->status == IB_WC_SUCCESS) {
                mad_send_wr->status = mad_send_wc->status;
                mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
        }

        if (--mad_send_wr->refcount > 0) {
                if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
                    mad_send_wr->status == IB_WC_SUCCESS) {
                        wait_for_response(mad_send_wr);
                }
                goto done;
        }

        /* Remove send from MAD agent and notify client of completion */
        list_del(&mad_send_wr->agent_list);
        adjust_timeout(mad_agent_priv);
        spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

        if (mad_send_wr->status != IB_WC_SUCCESS)
                mad_send_wc->status = mad_send_wr->status;
        if (ret == IB_RMPP_RESULT_INTERNAL)
                ib_rmpp_send_handler(mad_send_wc);
        else
                mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
                                                   mad_send_wc);

        /* Release reference on agent taken when sending */
        deref_mad_agent(mad_agent_priv);
        return;
done:
        spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
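/*
 * Sends beyond the QP's configured depth wait on qp_info->overflow_list.
 * Each send completion retires one work request from the hardware queue,
 * and when the queue was over its max_active limit the oldest overflow
 * entry is promoted onto the send queue and posted to the QP; a failed
 * repost is converted into a synthetic IB_WC_LOC_QP_OP_ERR completion.
 */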
static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
                                     struct ib_wc *wc)
{
        struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
        struct ib_mad_list_head *mad_list;
        struct ib_mad_qp_info *qp_info;
        struct ib_mad_queue *send_queue;
        struct ib_send_wr *bad_send_wr;
        struct ib_mad_send_wc mad_send_wc;
        unsigned long flags;
        int ret;

        mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
        mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
                                   mad_list);
        send_queue = mad_list->mad_queue;
        qp_info = send_queue->qp_info;

retry:
        ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
                            mad_send_wr->header_mapping,
                            mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
        ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
                            mad_send_wr->payload_mapping,
                            mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
        queued_send_wr = NULL;
        spin_lock_irqsave(&send_queue->lock, flags);
        list_del(&mad_list->list);

        /* Move queued send to the send queue */
        if (send_queue->count-- > send_queue->max_active) {
                mad_list = container_of(qp_info->overflow_list.next,
                                        struct ib_mad_list_head, list);
                queued_send_wr = container_of(mad_list,
                                              struct ib_mad_send_wr_private,
                                              mad_list);
                list_move_tail(&mad_list->list, &send_queue->list);
        }
        spin_unlock_irqrestore(&send_queue->lock, flags);

        mad_send_wc.send_buf = &mad_send_wr->send_buf;
        mad_send_wc.status = wc->status;
        mad_send_wc.vendor_err = wc->vendor_err;
        if (atomic_read(&qp_info->snoop_count))
                snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
                           IB_MAD_SNOOP_SEND_COMPLETIONS);
        ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);

        if (queued_send_wr) {
                ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
                                   &bad_send_wr);
                if (ret) {
                        dev_err(&port_priv->device->dev,
                                "ib_post_send failed: %d\n", ret);
                        mad_send_wr = queued_send_wr;
                        wc->status = IB_WC_LOC_QP_OP_ERR;
                        goto retry;
                }
        }
}

static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
{
        struct ib_mad_send_wr_private *mad_send_wr;
        struct ib_mad_list_head *mad_list;
        unsigned long flags;

        spin_lock_irqsave(&qp_info->send_queue.lock, flags);
        list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
                mad_send_wr = container_of(mad_list,
                                           struct ib_mad_send_wr_private,
                                           mad_list);
                mad_send_wr->retry = 1;
        }
        spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
}
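/*
 * A send error moves the QP into the SQE state and flushes the posted
 * sends.  Flushed sends that are marked for retry are simply reposted;
 * for a real error the QP is modified directly from SQE back to RTS (a
 * transition the IB QP state machine permits) and the remaining queued
 * sends are flagged for retry via mark_sends_for_retry().
 */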
static void mad_error_handler(struct ib_mad_port_private *port_priv,
                              struct ib_wc *wc)
{
        struct ib_mad_list_head *mad_list;
        struct ib_mad_qp_info *qp_info;
        struct ib_mad_send_wr_private *mad_send_wr;
        int ret;

        /* Determine if failure was a send or receive */
        mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
        qp_info = mad_list->mad_queue->qp_info;
        if (mad_list->mad_queue == &qp_info->recv_queue)
                /*
                 * Receive errors indicate that the QP has entered the error
                 * state - error handling/shutdown code will cleanup
                 */
                return;

        /*
         * Send errors will transition the QP to SQE - move
         * QP to RTS and repost flushed work requests
         */
        mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
                                   mad_list);
        if (wc->status == IB_WC_WR_FLUSH_ERR) {
                if (mad_send_wr->retry) {
                        /* Repost send */
                        struct ib_send_wr *bad_send_wr;

                        mad_send_wr->retry = 0;
                        ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
                                           &bad_send_wr);
                        if (ret)
                                ib_mad_send_done_handler(port_priv, wc);
                } else
                        ib_mad_send_done_handler(port_priv, wc);
        } else {
                struct ib_qp_attr *attr;

                /* Transition QP to RTS and fail offending send */
                attr = kmalloc(sizeof *attr, GFP_KERNEL);
                if (attr) {
                        attr->qp_state = IB_QPS_RTS;
                        attr->cur_qp_state = IB_QPS_SQE;
                        ret = ib_modify_qp(qp_info->qp, attr,
                                           IB_QP_STATE | IB_QP_CUR_STATE);
                        kfree(attr);
                        if (ret)
                                dev_err(&port_priv->device->dev,
                                        "mad_error_handler - ib_modify_qp to RTS : %d\n",
                                        ret);
                        else
                                mark_sends_for_retry(qp_info);
                }
                ib_mad_send_done_handler(port_priv, wc);
        }
}

/*
 * IB MAD completion callback
 */
static void ib_mad_completion_handler(struct work_struct *work)
{
        struct ib_mad_port_private *port_priv;
        struct ib_wc wc;

        port_priv = container_of(work, struct ib_mad_port_private, work);
        ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);

        while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
                if (wc.status == IB_WC_SUCCESS) {
                        switch (wc.opcode) {
                        case IB_WC_SEND:
                                ib_mad_send_done_handler(port_priv, &wc);
                                break;
                        case IB_WC_RECV:
                                ib_mad_recv_done_handler(port_priv, &wc);
                                break;
                        default:
                                BUG_ON(1);
                                break;
                        }
                } else
                        mad_error_handler(port_priv, &wc);
        }
}
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
{
        unsigned long flags;
        struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
        struct ib_mad_send_wc mad_send_wc;
        struct list_head cancel_list;

        INIT_LIST_HEAD(&cancel_list);

        spin_lock_irqsave(&mad_agent_priv->lock, flags);
        list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
                                 &mad_agent_priv->send_list, agent_list) {
                if (mad_send_wr->status == IB_WC_SUCCESS) {
                        mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
                        mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
                }
        }

        /* Empty wait list to prevent receives from finding a request */
        list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
        spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

        /* Report all cancelled requests */
        mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
        mad_send_wc.vendor_err = 0;

        list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
                                 &cancel_list, agent_list) {
                mad_send_wc.send_buf = &mad_send_wr->send_buf;
                list_del(&mad_send_wr->agent_list);
                mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
                                                   &mad_send_wc);
                atomic_dec(&mad_agent_priv->refcount);
        }
}

static struct ib_mad_send_wr_private*
find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
             struct ib_mad_send_buf *send_buf)
{
        struct ib_mad_send_wr_private *mad_send_wr;

        list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
                            agent_list) {
                if (&mad_send_wr->send_buf == send_buf)
                        return mad_send_wr;
        }

        list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
                            agent_list) {
                if (is_rmpp_data_mad(mad_agent_priv,
                                     mad_send_wr->send_buf.mad) &&
                    &mad_send_wr->send_buf == send_buf)
                        return mad_send_wr;
        }
        return NULL;
}
int ib_modify_mad(struct ib_mad_agent *mad_agent,
                  struct ib_mad_send_buf *send_buf, u32 timeout_ms)
{
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_send_wr_private *mad_send_wr;
        unsigned long flags;
        int active;

        mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
                                      agent);
        spin_lock_irqsave(&mad_agent_priv->lock, flags);
        mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
        if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
                spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
                return -EINVAL;
        }

        active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
        if (!timeout_ms) {
                mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
                mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
        }

        mad_send_wr->send_buf.timeout_ms = timeout_ms;
        if (active)
                mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
        else
                ib_reset_mad_timeout(mad_send_wr, timeout_ms);

        spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
        return 0;
}
EXPORT_SYMBOL(ib_modify_mad);

void ib_cancel_mad(struct ib_mad_agent *mad_agent,
                   struct ib_mad_send_buf *send_buf)
{
        ib_modify_mad(mad_agent, send_buf, 0);
}
EXPORT_SYMBOL(ib_cancel_mad);
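/*
 * Usage sketch for the two exported calls above.  This is hypothetical
 * client code (only ib_create_send_mad(), ib_post_send_mad(),
 * ib_modify_mad() and ib_cancel_mad() are real; the elisions stand for
 * the client's own setup):
 *
 *      struct ib_mad_send_buf *msg;
 *
 *      msg = ib_create_send_mad(agent, ...);
 *      ib_post_send_mad(msg, NULL);
 *      ...
 *      ib_modify_mad(agent, msg, 2000);        stretch the timeout to 2s
 *      ib_cancel_mad(agent, msg);              give up; the send completes
 *                                              with IB_WC_WR_FLUSH_ERR
 */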
static void local_completions(struct work_struct *work)
{
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_local_private *local;
        struct ib_mad_agent_private *recv_mad_agent;
        unsigned long flags;
        int free_mad;
        struct ib_wc wc;
        struct ib_mad_send_wc mad_send_wc;

        mad_agent_priv =
                container_of(work, struct ib_mad_agent_private, local_work);

        spin_lock_irqsave(&mad_agent_priv->lock, flags);
        while (!list_empty(&mad_agent_priv->local_list)) {
                local = list_entry(mad_agent_priv->local_list.next,
                                   struct ib_mad_local_private,
                                   completion_list);
                list_del(&local->completion_list);
                spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
                free_mad = 0;
                if (local->mad_priv) {
                        recv_mad_agent = local->recv_mad_agent;
                        if (!recv_mad_agent) {
                                dev_err(&mad_agent_priv->agent.device->dev,
                                        "No receive MAD agent for local completion\n");
                                free_mad = 1;
                                goto local_send_completion;
                        }

                        /*
                         * Defined behavior is to complete response
                         * before request
                         */
                        build_smp_wc(recv_mad_agent->agent.qp,
                                     (unsigned long) local->mad_send_wr,
                                     be16_to_cpu(IB_LID_PERMISSIVE),
                                     0, recv_mad_agent->agent.port_num, &wc);

                        local->mad_priv->header.recv_wc.wc = &wc;
                        local->mad_priv->header.recv_wc.mad_len =
                                                sizeof(struct ib_mad);
                        INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
                        list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
                                 &local->mad_priv->header.recv_wc.rmpp_list);
                        local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
                        local->mad_priv->header.recv_wc.recv_buf.mad =
                                                &local->mad_priv->mad.mad;
                        if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
                                snoop_recv(recv_mad_agent->qp_info,
                                           &local->mad_priv->header.recv_wc,
                                           IB_MAD_SNOOP_RECVS);
                        recv_mad_agent->agent.recv_handler(
                                                &recv_mad_agent->agent,
                                                &local->mad_priv->header.recv_wc);
                        spin_lock_irqsave(&recv_mad_agent->lock, flags);
                        atomic_dec(&recv_mad_agent->refcount);
                        spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
                }

local_send_completion:
                /* Complete send */
                mad_send_wc.status = IB_WC_SUCCESS;
                mad_send_wc.vendor_err = 0;
                mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
                if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
                        snoop_send(mad_agent_priv->qp_info,
                                   &local->mad_send_wr->send_buf,
                                   &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
                mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
                                                   &mad_send_wc);

                spin_lock_irqsave(&mad_agent_priv->lock, flags);
                atomic_dec(&mad_agent_priv->refcount);
                if (free_mad)
                        kmem_cache_free(ib_mad_cache, local->mad_priv);
                kfree(local);
        }
        spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
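/*
 * retry_send() consumes one retry credit, rearms the relative timeout
 * and reposts the request.  A zero return means the request went back on
 * the send_list; any error tells the caller to complete the MAD to the
 * client instead.
 */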
static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
{
        int ret;

        if (!mad_send_wr->retries_left)
                return -ETIMEDOUT;

        mad_send_wr->retries_left--;
        mad_send_wr->send_buf.retries++;

        mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);

        if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
                ret = ib_retry_rmpp(mad_send_wr);
                switch (ret) {
                case IB_RMPP_RESULT_UNHANDLED:
                        ret = ib_send_mad(mad_send_wr);
                        break;
                case IB_RMPP_RESULT_CONSUMED:
                        ret = 0;
                        break;
                default:
                        ret = -ECOMM;
                        break;
                }
        } else
                ret = ib_send_mad(mad_send_wr);

        if (!ret) {
                mad_send_wr->refcount++;
                list_add_tail(&mad_send_wr->agent_list,
                              &mad_send_wr->mad_agent_priv->send_list);
        }
        return ret;
}
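/*
 * Delayed-work handler for request timeouts.  Because the wait_list is
 * deadline ordered, the scan stops at the first entry that has not yet
 * expired and re-queues itself for that entry's remaining time; expired
 * entries are retried when possible, otherwise completed with
 * IB_WC_RESP_TIMEOUT_ERR.
 */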
static void timeout_sends(struct work_struct *work)
{
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_send_wr_private *mad_send_wr;
        struct ib_mad_send_wc mad_send_wc;
        unsigned long flags, delay;

        mad_agent_priv = container_of(work, struct ib_mad_agent_private,
                                      timed_work.work);
        mad_send_wc.vendor_err = 0;

        spin_lock_irqsave(&mad_agent_priv->lock, flags);
        while (!list_empty(&mad_agent_priv->wait_list)) {
                mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
                                         struct ib_mad_send_wr_private,
                                         agent_list);

                if (time_after(mad_send_wr->timeout, jiffies)) {
                        delay = mad_send_wr->timeout - jiffies;
                        if ((long)delay <= 0)
                                delay = 1;
                        queue_delayed_work(mad_agent_priv->qp_info->
                                           port_priv->wq,
                                           &mad_agent_priv->timed_work, delay);
                        break;
                }

                list_del(&mad_send_wr->agent_list);
                if (mad_send_wr->status == IB_WC_SUCCESS &&
                    !retry_send(mad_send_wr))
                        continue;

                spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

                if (mad_send_wr->status == IB_WC_SUCCESS)
                        mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
                else
                        mad_send_wc.status = mad_send_wr->status;
                mad_send_wc.send_buf = &mad_send_wr->send_buf;
                mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
                                                   &mad_send_wc);

                atomic_dec(&mad_agent_priv->refcount);
                spin_lock_irqsave(&mad_agent_priv->lock, flags);
        }
        spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
{
        struct ib_mad_port_private *port_priv = cq->cq_context;
        unsigned long flags;

        spin_lock_irqsave(&ib_mad_port_list_lock, flags);
        if (!list_empty(&port_priv->port_list))
                queue_work(port_priv->wq, &port_priv->work);
        spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
}
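/*
 * Note the ordering below: each buffer is linked onto recv_queue->list
 * before ib_post_recv(), because the completion handler looks the buffer
 * up through wr_id and could run as soon as the WR is posted.  If the
 * post fails, the buffer is unlinked, unmapped and freed again.
 */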
/*
 * Allocate receive MADs and post receive WRs for them
 */
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
                                    struct ib_mad_private *mad)
{
        unsigned long flags;
        int post, ret;
        struct ib_mad_private *mad_priv;
        struct ib_sge sg_list;
        struct ib_recv_wr recv_wr, *bad_recv_wr;
        struct ib_mad_queue *recv_queue = &qp_info->recv_queue;

        /* Initialize common scatter list fields */
        sg_list.length = sizeof *mad_priv - sizeof mad_priv->header;
        sg_list.lkey = (*qp_info->port_priv->mr).lkey;

        /* Initialize common receive WR fields */
        recv_wr.next = NULL;
        recv_wr.sg_list = &sg_list;
        recv_wr.num_sge = 1;

        do {
                /* Allocate and map receive buffer */
                if (mad) {
                        mad_priv = mad;
                        mad = NULL;
                } else {
                        mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
                        if (!mad_priv) {
                                dev_err(&qp_info->port_priv->device->dev,
                                        "No memory for receive buffer\n");
                                ret = -ENOMEM;
                                break;
                        }
                }
                sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
                                                 &mad_priv->grh,
                                                 sizeof *mad_priv -
                                                   sizeof mad_priv->header,
                                                 DMA_FROM_DEVICE);
                if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
                                                  sg_list.addr))) {
                        ret = -ENOMEM;
                        break;
                }
                mad_priv->header.mapping = sg_list.addr;
                recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
                mad_priv->header.mad_list.mad_queue = recv_queue;

                /* Post receive WR */
                spin_lock_irqsave(&recv_queue->lock, flags);
                post = (++recv_queue->count < recv_queue->max_active);
                list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
                spin_unlock_irqrestore(&recv_queue->lock, flags);
                ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
                if (ret) {
                        spin_lock_irqsave(&recv_queue->lock, flags);
                        list_del(&mad_priv->header.mad_list.list);
                        recv_queue->count--;
                        spin_unlock_irqrestore(&recv_queue->lock, flags);
                        ib_dma_unmap_single(qp_info->port_priv->device,
                                            mad_priv->header.mapping,
                                            sizeof *mad_priv -
                                              sizeof mad_priv->header,
                                            DMA_FROM_DEVICE);
                        kmem_cache_free(ib_mad_cache, mad_priv);
                        dev_err(&qp_info->port_priv->device->dev,
                                "ib_post_recv failed: %d\n", ret);
                        break;
                }
        } while (post);

        return ret;
}
/*
 * Return all the posted receive MADs
 */
static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
{
        struct ib_mad_private_header *mad_priv_hdr;
        struct ib_mad_private *recv;
        struct ib_mad_list_head *mad_list;

        if (!qp_info->qp)
                return;

        while (!list_empty(&qp_info->recv_queue.list)) {

                mad_list = list_entry(qp_info->recv_queue.list.next,
                                      struct ib_mad_list_head, list);
                mad_priv_hdr = container_of(mad_list,
                                            struct ib_mad_private_header,
                                            mad_list);
                recv = container_of(mad_priv_hdr, struct ib_mad_private,
                                    header);

                /* Remove from posted receive MAD list */
                list_del(&mad_list->list);

                ib_dma_unmap_single(qp_info->port_priv->device,
                                    recv->header.mapping,
                                    sizeof(struct ib_mad_private) -
                                    sizeof(struct ib_mad_private_header),
                                    DMA_FROM_DEVICE);
                kmem_cache_free(ib_mad_cache, recv);
        }

        qp_info->recv_queue.count = 0;
}
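/*
 * Bringing up the special QPs walks the usual RESET -> INIT -> RTR ->
 * RTS ladder.  QP0/QP1 need no real addressing state: INIT takes a
 * pkey_index and a qkey (IB_QP1_QKEY for QP1), RTR needs no extra
 * attributes, and RTS only needs a starting send queue PSN.
 */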
static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
{
        int ret, i;
        struct ib_qp_attr *attr;
        struct ib_qp *qp;
        u16 pkey_index;

        attr = kmalloc(sizeof *attr, GFP_KERNEL);
        if (!attr) {
                dev_err(&port_priv->device->dev,
                        "Couldn't kmalloc ib_qp_attr\n");
                return -ENOMEM;
        }

        ret = ib_find_pkey(port_priv->device, port_priv->port_num,
                           IB_DEFAULT_PKEY_FULL, &pkey_index);
        if (ret)
                pkey_index = 0;

        for (i = 0; i < IB_MAD_QPS_CORE; i++) {
                qp = port_priv->qp_info[i].qp;
                if (!qp)
                        continue;

                /*
                 * PKey index for QP1 is irrelevant but
                 * one is needed for the Reset to Init transition
                 */
                attr->qp_state = IB_QPS_INIT;
                attr->pkey_index = pkey_index;
                attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
                ret = ib_modify_qp(qp, attr, IB_QP_STATE |
                                             IB_QP_PKEY_INDEX | IB_QP_QKEY);
                if (ret) {
                        dev_err(&port_priv->device->dev,
                                "Couldn't change QP%d state to INIT: %d\n",
                                i, ret);
                        goto out;
                }

                attr->qp_state = IB_QPS_RTR;
                ret = ib_modify_qp(qp, attr, IB_QP_STATE);
                if (ret) {
                        dev_err(&port_priv->device->dev,
                                "Couldn't change QP%d state to RTR: %d\n",
                                i, ret);
                        goto out;
                }

                attr->qp_state = IB_QPS_RTS;
                attr->sq_psn = IB_MAD_SEND_Q_PSN;
                ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
                if (ret) {
                        dev_err(&port_priv->device->dev,
                                "Couldn't change QP%d state to RTS: %d\n",
                                i, ret);
                        goto out;
                }
        }

        ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
        if (ret) {
                dev_err(&port_priv->device->dev,
                        "Failed to request completion notification: %d\n",
                        ret);
                goto out;
        }

        for (i = 0; i < IB_MAD_QPS_CORE; i++) {
                if (!port_priv->qp_info[i].qp)
                        continue;

                ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
                if (ret) {
                        dev_err(&port_priv->device->dev,
                                "Couldn't post receive WRs\n");
                        goto out;
                }
        }
out:
        kfree(attr);
        return ret;
}
static void qp_event_handler(struct ib_event *event, void *qp_context)
{
        struct ib_mad_qp_info *qp_info = qp_context;

        /* It's worse than that! He's dead, Jim! */
        dev_err(&qp_info->port_priv->device->dev,
                "Fatal error (%d) on MAD QP (%d)\n",
                event->event, qp_info->qp->qp_num);
}

static void init_mad_queue(struct ib_mad_qp_info *qp_info,
                           struct ib_mad_queue *mad_queue)
{
        mad_queue->qp_info = qp_info;
        mad_queue->count = 0;
        spin_lock_init(&mad_queue->lock);
        INIT_LIST_HEAD(&mad_queue->list);
}

static void init_mad_qp(struct ib_mad_port_private *port_priv,
                        struct ib_mad_qp_info *qp_info)
{
        qp_info->port_priv = port_priv;
        init_mad_queue(qp_info, &qp_info->send_queue);
        init_mad_queue(qp_info, &qp_info->recv_queue);
        INIT_LIST_HEAD(&qp_info->overflow_list);
        spin_lock_init(&qp_info->snoop_lock);
        qp_info->snoop_table = NULL;
        qp_info->snoop_table_size = 0;
        atomic_set(&qp_info->snoop_count, 0);
}
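/*
 * Both special QPs share the port's single CQ for send and receive
 * completions, which is why ib_mad_port_open() sizes the CQ at
 * mad_sendq_size + mad_recvq_size (doubled when an SMI QP is created
 * alongside the GSI QP).
 */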
static int create_mad_qp(struct ib_mad_qp_info *qp_info,
                         enum ib_qp_type qp_type)
{
        struct ib_qp_init_attr qp_init_attr;
        int ret;

        memset(&qp_init_attr, 0, sizeof qp_init_attr);
        qp_init_attr.send_cq = qp_info->port_priv->cq;
        qp_init_attr.recv_cq = qp_info->port_priv->cq;
        qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
        qp_init_attr.cap.max_send_wr = mad_sendq_size;
        qp_init_attr.cap.max_recv_wr = mad_recvq_size;
        qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
        qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
        qp_init_attr.qp_type = qp_type;
        qp_init_attr.port_num = qp_info->port_priv->port_num;
        qp_init_attr.qp_context = qp_info;
        qp_init_attr.event_handler = qp_event_handler;
        qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
        if (IS_ERR(qp_info->qp)) {
                dev_err(&qp_info->port_priv->device->dev,
                        "Couldn't create ib_mad QP%d\n",
                        get_spl_qp_index(qp_type));
                ret = PTR_ERR(qp_info->qp);
                goto error;
        }
        /* Use minimum queue sizes unless the CQ is resized */
        qp_info->send_queue.max_active = mad_sendq_size;
        qp_info->recv_queue.max_active = mad_recvq_size;
        return 0;

error:
        return ret;
}

static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
{
        if (!qp_info->qp)
                return;

        ib_destroy_qp(qp_info->qp);
        kfree(qp_info->snoop_table);
}
/*
 * Create the QP, PD, MR, and CQ if needed
 */
static int ib_mad_port_open(struct ib_device *device,
                            int port_num)
{
        int ret, cq_size;
        struct ib_mad_port_private *port_priv;
        unsigned long flags;
        char name[sizeof "ib_mad123"];
        int has_smi;
        struct ib_cq_init_attr cq_attr = {};

        if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE))
                return -EFAULT;

        /* Create new device info */
        port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
        if (!port_priv) {
                dev_err(&device->dev, "No memory for ib_mad_port_private\n");
                return -ENOMEM;
        }

        port_priv->device = device;
        port_priv->port_num = port_num;
        spin_lock_init(&port_priv->reg_lock);
        INIT_LIST_HEAD(&port_priv->agent_list);
        init_mad_qp(port_priv, &port_priv->qp_info[0]);
        init_mad_qp(port_priv, &port_priv->qp_info[1]);

        cq_size = mad_sendq_size + mad_recvq_size;
        has_smi = rdma_cap_ib_smi(device, port_num);
        if (has_smi)
                cq_size *= 2;

        cq_attr.cqe = cq_size;
        port_priv->cq = ib_create_cq(port_priv->device,
                                     ib_mad_thread_completion_handler,
                                     NULL, port_priv, &cq_attr);
        if (IS_ERR(port_priv->cq)) {
                dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
                ret = PTR_ERR(port_priv->cq);
                goto error3;
        }

        port_priv->pd = ib_alloc_pd(device);
        if (IS_ERR(port_priv->pd)) {
                dev_err(&device->dev, "Couldn't create ib_mad PD\n");
                ret = PTR_ERR(port_priv->pd);
                goto error4;
        }

        port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
        if (IS_ERR(port_priv->mr)) {
                dev_err(&device->dev, "Couldn't get ib_mad DMA MR\n");
                ret = PTR_ERR(port_priv->mr);
                goto error5;
        }

        if (has_smi) {
                ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
                if (ret)
                        goto error6;
        }
        ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
        if (ret)
                goto error7;

        snprintf(name, sizeof name, "ib_mad%d", port_num);
        port_priv->wq = create_singlethread_workqueue(name);
        if (!port_priv->wq) {
                ret = -ENOMEM;
                goto error8;
        }
        INIT_WORK(&port_priv->work, ib_mad_completion_handler);

        spin_lock_irqsave(&ib_mad_port_list_lock, flags);
        list_add_tail(&port_priv->port_list, &ib_mad_port_list);
        spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

        ret = ib_mad_port_start(port_priv);
        if (ret) {
                dev_err(&device->dev, "Couldn't start port\n");
                goto error9;
        }

        return 0;

error9:
        spin_lock_irqsave(&ib_mad_port_list_lock, flags);
        list_del_init(&port_priv->port_list);
        spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

        destroy_workqueue(port_priv->wq);
error8:
        destroy_mad_qp(&port_priv->qp_info[1]);
error7:
        destroy_mad_qp(&port_priv->qp_info[0]);
error6:
        ib_dereg_mr(port_priv->mr);
error5:
        ib_dealloc_pd(port_priv->pd);
error4:
        ib_destroy_cq(port_priv->cq);
        cleanup_recv_queue(&port_priv->qp_info[1]);
        cleanup_recv_queue(&port_priv->qp_info[0]);
error3:
        kfree(port_priv);

        return ret;
}
/*
 * If there are no classes using the port, free the port
 * resources (CQ, MR, PD, QP) and remove the port's info structure
 */
static int ib_mad_port_close(struct ib_device *device, int port_num)
{
        struct ib_mad_port_private *port_priv;
        unsigned long flags;

        spin_lock_irqsave(&ib_mad_port_list_lock, flags);
        port_priv = __ib_get_mad_port(device, port_num);
        if (port_priv == NULL) {
                spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
                dev_err(&device->dev, "Port %d not found\n", port_num);
                return -ENODEV;
        }
        list_del_init(&port_priv->port_list);
        spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

        destroy_workqueue(port_priv->wq);
        destroy_mad_qp(&port_priv->qp_info[1]);
        destroy_mad_qp(&port_priv->qp_info[0]);
        ib_dereg_mr(port_priv->mr);
        ib_dealloc_pd(port_priv->pd);
        ib_destroy_cq(port_priv->cq);
        cleanup_recv_queue(&port_priv->qp_info[1]);
        cleanup_recv_queue(&port_priv->qp_info[0]);
        /* XXX: Handle deallocation of MAD registration tables */

        kfree(port_priv);

        return 0;
}
static void ib_mad_init_device(struct ib_device *device)
{
        int start, end, i;

        if (device->node_type == RDMA_NODE_IB_SWITCH) {
                start = 0;
                end   = 0;
        } else {
                start = 1;
                end   = device->phys_port_cnt;
        }

        for (i = start; i <= end; i++) {
                if (!rdma_cap_ib_mad(device, i))
                        continue;

                if (ib_mad_port_open(device, i)) {
                        dev_err(&device->dev, "Couldn't open port %d\n", i);
                        goto error;
                }
                if (ib_agent_port_open(device, i)) {
                        dev_err(&device->dev,
                                "Couldn't open port %d for agents\n", i);
                        goto error_agent;
                }
        }
        return;

error_agent:
        if (ib_mad_port_close(device, i))
                dev_err(&device->dev, "Couldn't close port %d\n", i);

error:
        while (--i >= start) {
                if (!rdma_cap_ib_mad(device, i))
                        continue;

                if (ib_agent_port_close(device, i))
                        dev_err(&device->dev,
                                "Couldn't close port %d for agents\n", i);
                if (ib_mad_port_close(device, i))
                        dev_err(&device->dev, "Couldn't close port %d\n", i);
        }
}
static void ib_mad_remove_device(struct ib_device *device)
{
        int start, end, i;

        if (device->node_type == RDMA_NODE_IB_SWITCH) {
                start = 0;
                end   = 0;
        } else {
                start = 1;
                end   = device->phys_port_cnt;
        }

        for (i = start; i <= end; i++) {
                if (!rdma_cap_ib_mad(device, i))
                        continue;

                if (ib_agent_port_close(device, i))
                        dev_err(&device->dev,
                                "Couldn't close port %d for agents\n", i);
                if (ib_mad_port_close(device, i))
                        dev_err(&device->dev, "Couldn't close port %d\n", i);
        }
}
static struct ib_client mad_client = {
        .name   = "mad",
        .add = ib_mad_init_device,
        .remove = ib_mad_remove_device
};
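/*
 * Module init clamps the queue-size module parameters into
 * [IB_MAD_QP_MIN_SIZE, IB_MAD_QP_MAX_SIZE] with the min()/max() pairs
 * below, so out-of-range values from the command line are silently
 * corrected rather than rejected.
 */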
static int __init ib_mad_init_module(void)
{
        int ret;

        mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
        mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);

        mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
        mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);

        ib_mad_cache = kmem_cache_create("ib_mad",
                                         sizeof(struct ib_mad_private),
                                         0,
                                         SLAB_HWCACHE_ALIGN,
                                         NULL);
        if (!ib_mad_cache) {
                pr_err("Couldn't create ib_mad cache\n");
                ret = -ENOMEM;
                goto error1;
        }

        INIT_LIST_HEAD(&ib_mad_port_list);

        if (ib_register_client(&mad_client)) {
                pr_err("Couldn't register ib_mad client\n");
                ret = -EIO;
                goto error2;
        }

        return 0;

error2:
        kmem_cache_destroy(ib_mad_cache);
error1:
        return ret;
}

static void __exit ib_mad_cleanup_module(void)
{
        ib_unregister_client(&mad_client);
        kmem_cache_destroy(ib_mad_cache);
}

module_init(ib_mad_init_module);
module_exit(ib_mad_cleanup_module);