/*
 * Copyright (c) 2004, 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: cm.c 4311 2005-12-05 18:42:01Z sean.hefty $
 */
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include "cm_msgs.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");
static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device);

static struct ib_client cm_client = {
        .name   = "cm",
        .add    = cm_add_one,
        .remove = cm_remove_one
};

static struct ib_cm {
        spinlock_t lock;
        struct list_head device_list;
        rwlock_t device_lock;
        struct rb_root listen_service_table;
        u64 listen_service_id;
        /* struct rb_root peer_service_table; todo: fix peer to peer */
        struct rb_root remote_qp_table;
        struct rb_root remote_id_table;
        struct rb_root remote_sidr_table;
        struct idr local_id_table;
        struct workqueue_struct *wq;
} cm;
struct cm_port {
        struct cm_device *cm_dev;
        struct ib_mad_agent *mad_agent;
        u8 port_num;
};

struct cm_device {
        struct list_head list;
        struct ib_device *device;
        __be64 ca_guid;
        struct cm_port port[0];
};

struct cm_av {
        struct cm_port *port;
        union ib_gid dgid;
        struct ib_ah_attr ah_attr;
        u16 pkey_index;
        u8 packet_life_time;
};

struct cm_work {
        struct work_struct work;
        struct list_head list;
        struct cm_port *port;
        struct ib_mad_recv_wc *mad_recv_wc;     /* Received MADs */
        __be32 local_id;                        /* Established / timewait */
        __be32 remote_id;
        struct ib_cm_event cm_event;
        struct ib_sa_path_rec path[0];
};

struct cm_timewait_info {
        struct cm_work work;                    /* Must be first. */
        struct rb_node remote_qp_node;
        struct rb_node remote_id_node;
        __be64 remote_ca_guid;
        __be32 remote_qpn;
        u8 inserted_remote_qp;
        u8 inserted_remote_id;
};

struct cm_id_private {
        struct ib_cm_id id;

        struct rb_node service_node;
        struct rb_node sidr_id_node;
        spinlock_t lock;        /* Do not acquire inside cm.lock */
        struct completion comp;
        atomic_t refcount;

        struct ib_mad_send_buf *msg;
        struct cm_timewait_info *timewait_info;
        /* todo: use alternate port on send failure */
        struct cm_av av;
        struct cm_av alt_av;
        struct ib_cm_compare_data *compare_data;

        void *private_data;
        __be64 tid;
        __be32 local_qpn;
        __be32 remote_qpn;
        enum ib_qp_type qp_type;
        __be32 sq_psn;
        __be32 rq_psn;
        int timeout_ms;
        enum ib_mtu path_mtu;
        u8 private_data_len;
        u8 max_cm_retries;
        u8 peer_to_peer;
        u8 responder_resources;
        u8 initiator_depth;
        u8 local_ack_timeout;
        u8 retry_count;
        u8 rnr_retry_count;
        u8 service_timeout;

        struct list_head work_list;
        atomic_t work_count;
};
static void cm_work_handler(void *data);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
        if (atomic_dec_and_test(&cm_id_priv->refcount))
                complete(&cm_id_priv->comp);
}

static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
                        struct ib_mad_send_buf **msg)
{
        struct ib_mad_agent *mad_agent;
        struct ib_mad_send_buf *m;
        struct ib_ah *ah;

        mad_agent = cm_id_priv->av.port->mad_agent;
        ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
        if (IS_ERR(ah))
                return PTR_ERR(ah);

        m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
                               cm_id_priv->av.pkey_index,
                               0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
                               GFP_ATOMIC);
        if (IS_ERR(m)) {
                ib_destroy_ah(ah);
                return PTR_ERR(m);
        }

        /* Timeout set by caller if response is expected. */
        m->ah = ah;
        m->retries = cm_id_priv->max_cm_retries;

        atomic_inc(&cm_id_priv->refcount);
        m->context[0] = cm_id_priv;
        *msg = m;
        return 0;
}
static int cm_alloc_response_msg(struct cm_port *port,
                                 struct ib_mad_recv_wc *mad_recv_wc,
                                 struct ib_mad_send_buf **msg)
{
        struct ib_mad_send_buf *m;
        struct ib_ah *ah;

        ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
                                  mad_recv_wc->recv_buf.grh, port->port_num);
        if (IS_ERR(ah))
                return PTR_ERR(ah);

        m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
                               0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
                               GFP_ATOMIC);
        if (IS_ERR(m)) {
                ib_destroy_ah(ah);
                return PTR_ERR(m);
        }
        m->ah = ah;
        *msg = m;
        return 0;
}

static void cm_free_msg(struct ib_mad_send_buf *msg)
{
        ib_destroy_ah(msg->ah);
        if (msg->context[0])
                cm_deref_id(msg->context[0]);
        ib_free_send_mad(msg);
}
static void * cm_copy_private_data(const void *private_data,
                                   u8 private_data_len)
{
        void *data;

        if (!private_data || !private_data_len)
                return NULL;

        data = kmalloc(private_data_len, GFP_KERNEL);
        if (!data)
                return ERR_PTR(-ENOMEM);

        memcpy(data, private_data, private_data_len);
        return data;
}

static void cm_set_private_data(struct cm_id_private *cm_id_priv,
                                void *private_data, u8 private_data_len)
{
        if (cm_id_priv->private_data && cm_id_priv->private_data_len)
                kfree(cm_id_priv->private_data);

        cm_id_priv->private_data = private_data;
        cm_id_priv->private_data_len = private_data_len;
}

static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
                                    struct ib_grh *grh, struct cm_av *av)
{
        av->port = port;
        av->pkey_index = wc->pkey_index;
        ib_init_ah_from_wc(port->cm_dev->device, port->port_num, wc,
                           grh, &av->ah_attr);
}
static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
{
        struct cm_device *cm_dev;
        struct cm_port *port = NULL;
        unsigned long flags;
        int ret;
        u8 p;

        read_lock_irqsave(&cm.device_lock, flags);
        list_for_each_entry(cm_dev, &cm.device_list, list) {
                if (!ib_find_cached_gid(cm_dev->device, &path->sgid,
                                        &p, NULL)) {
                        port = &cm_dev->port[p-1];
                        break;
                }
        }
        read_unlock_irqrestore(&cm.device_lock, flags);

        if (!port)
                return -EINVAL;

        ret = ib_find_cached_pkey(cm_dev->device, port->port_num,
                                  be16_to_cpu(path->pkey), &av->pkey_index);
        if (ret)
                return ret;

        av->port = port;
        ib_init_ah_from_path(cm_dev->device, port->port_num, path,
                             &av->ah_attr);
        av->packet_life_time = path->packet_life_time;
        return 0;
}
static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
        unsigned long flags;
        int ret;
        static int next_id;

        do {
                spin_lock_irqsave(&cm.lock, flags);
                ret = idr_get_new_above(&cm.local_id_table, cm_id_priv,
                                        next_id++,
                                        (__force int *) &cm_id_priv->id.local_id);
                spin_unlock_irqrestore(&cm.lock, flags);
        } while ((ret == -EAGAIN) &&
                 idr_pre_get(&cm.local_id_table, GFP_KERNEL));
        return ret;
}

static void cm_free_id(__be32 local_id)
{
        unsigned long flags;

        spin_lock_irqsave(&cm.lock, flags);
        idr_remove(&cm.local_id_table, (__force int) local_id);
        spin_unlock_irqrestore(&cm.lock, flags);
}

static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
{
        struct cm_id_private *cm_id_priv;

        cm_id_priv = idr_find(&cm.local_id_table, (__force int) local_id);
        if (cm_id_priv) {
                if (cm_id_priv->id.remote_id == remote_id)
                        atomic_inc(&cm_id_priv->refcount);
                else
                        cm_id_priv = NULL;
        }

        return cm_id_priv;
}

static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
{
        struct cm_id_private *cm_id_priv;
        unsigned long flags;

        spin_lock_irqsave(&cm.lock, flags);
        cm_id_priv = cm_get_id(local_id, remote_id);
        spin_unlock_irqrestore(&cm.lock, flags);

        return cm_id_priv;
}
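
/*
 * Compare-data helpers: cm_mask_copy() ANDs the data with a mask one
 * unsigned long at a time, so the memcmp()-based comparisons below only
 * consider the bytes a listener marked as significant.
 */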
static void cm_mask_copy(u8 *dst, u8 *src, u8 *mask)
{
        int i;

        for (i = 0; i < IB_CM_COMPARE_SIZE / sizeof(unsigned long); i++)
                ((unsigned long *) dst)[i] = ((unsigned long *) src)[i] &
                                             ((unsigned long *) mask)[i];
}

static int cm_compare_data(struct ib_cm_compare_data *src_data,
                           struct ib_cm_compare_data *dst_data)
{
        u8 src[IB_CM_COMPARE_SIZE];
        u8 dst[IB_CM_COMPARE_SIZE];

        if (!src_data || !dst_data)
                return 0;

        cm_mask_copy(src, src_data->data, dst_data->mask);
        cm_mask_copy(dst, dst_data->data, src_data->mask);
        return memcmp(src, dst, IB_CM_COMPARE_SIZE);
}

static int cm_compare_private_data(u8 *private_data,
                                   struct ib_cm_compare_data *dst_data)
{
        u8 src[IB_CM_COMPARE_SIZE];

        if (!dst_data)
                return 0;

        cm_mask_copy(src, private_data, dst_data->mask);
        return memcmp(src, dst_data->data, IB_CM_COMPARE_SIZE);
}
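
/*
 * The listen service table is an rb-tree ordered by device pointer, then
 * service ID, then masked compare data.  An existing entry that matches on
 * all three is returned instead of being inserted a second time.
 */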
static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
{
        struct rb_node **link = &cm.listen_service_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_id_private *cur_cm_id_priv;
        __be64 service_id = cm_id_priv->id.service_id;
        __be64 service_mask = cm_id_priv->id.service_mask;
        int data_cmp;

        while (*link) {
                parent = *link;
                cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
                                          service_node);
                data_cmp = cm_compare_data(cm_id_priv->compare_data,
                                           cur_cm_id_priv->compare_data);
                if ((cur_cm_id_priv->id.service_mask & service_id) ==
                    (service_mask & cur_cm_id_priv->id.service_id) &&
                    (cm_id_priv->id.device == cur_cm_id_priv->id.device) &&
                    !data_cmp)
                        return cur_cm_id_priv;

                if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
                        link = &(*link)->rb_left;
                else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
                        link = &(*link)->rb_right;
                else if (service_id < cur_cm_id_priv->id.service_id)
                        link = &(*link)->rb_left;
                else if (service_id > cur_cm_id_priv->id.service_id)
                        link = &(*link)->rb_right;
                else if (data_cmp < 0)
                        link = &(*link)->rb_left;
                else
                        link = &(*link)->rb_right;
        }
        rb_link_node(&cm_id_priv->service_node, parent, link);
        rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
        return NULL;
}

static struct cm_id_private * cm_find_listen(struct ib_device *device,
                                             __be64 service_id,
                                             u8 *private_data)
{
        struct rb_node *node = cm.listen_service_table.rb_node;
        struct cm_id_private *cm_id_priv;
        int data_cmp;

        while (node) {
                cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
                data_cmp = cm_compare_private_data(private_data,
                                                   cm_id_priv->compare_data);
                if ((cm_id_priv->id.service_mask & service_id) ==
                     cm_id_priv->id.service_id &&
                    (cm_id_priv->id.device == device) && !data_cmp)
                        return cm_id_priv;

                if (device < cm_id_priv->id.device)
                        node = node->rb_left;
                else if (device > cm_id_priv->id.device)
                        node = node->rb_right;
                else if (service_id < cm_id_priv->id.service_id)
                        node = node->rb_left;
                else if (service_id > cm_id_priv->id.service_id)
                        node = node->rb_right;
                else if (data_cmp < 0)
                        node = node->rb_left;
                else
                        node = node->rb_right;
        }
        return NULL;
}
static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
                                                     *timewait_info)
{
        struct rb_node **link = &cm.remote_id_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_timewait_info *cur_timewait_info;
        __be64 remote_ca_guid = timewait_info->remote_ca_guid;
        __be32 remote_id = timewait_info->work.remote_id;

        while (*link) {
                parent = *link;
                cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
                                             remote_id_node);
                if (remote_id < cur_timewait_info->work.remote_id)
                        link = &(*link)->rb_left;
                else if (remote_id > cur_timewait_info->work.remote_id)
                        link = &(*link)->rb_right;
                else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
                        link = &(*link)->rb_left;
                else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
                        link = &(*link)->rb_right;
                else
                        return cur_timewait_info;
        }
        timewait_info->inserted_remote_id = 1;
        rb_link_node(&timewait_info->remote_id_node, parent, link);
        rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
        return NULL;
}

static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
                                                   __be32 remote_id)
{
        struct rb_node *node = cm.remote_id_table.rb_node;
        struct cm_timewait_info *timewait_info;

        while (node) {
                timewait_info = rb_entry(node, struct cm_timewait_info,
                                         remote_id_node);
                if (remote_id < timewait_info->work.remote_id)
                        node = node->rb_left;
                else if (remote_id > timewait_info->work.remote_id)
                        node = node->rb_right;
                else if (remote_ca_guid < timewait_info->remote_ca_guid)
                        node = node->rb_left;
                else if (remote_ca_guid > timewait_info->remote_ca_guid)
                        node = node->rb_right;
                else
                        return timewait_info;
        }
        return NULL;
}

static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
                                                      *timewait_info)
{
        struct rb_node **link = &cm.remote_qp_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_timewait_info *cur_timewait_info;
        __be64 remote_ca_guid = timewait_info->remote_ca_guid;
        __be32 remote_qpn = timewait_info->remote_qpn;

        while (*link) {
                parent = *link;
                cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
                                             remote_qp_node);
                if (remote_qpn < cur_timewait_info->remote_qpn)
                        link = &(*link)->rb_left;
                else if (remote_qpn > cur_timewait_info->remote_qpn)
                        link = &(*link)->rb_right;
                else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
                        link = &(*link)->rb_left;
                else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
                        link = &(*link)->rb_right;
                else
                        return cur_timewait_info;
        }
        timewait_info->inserted_remote_qp = 1;
        rb_link_node(&timewait_info->remote_qp_node, parent, link);
        rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
        return NULL;
}
static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
                                                    *cm_id_priv)
{
        struct rb_node **link = &cm.remote_sidr_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_id_private *cur_cm_id_priv;
        union ib_gid *port_gid = &cm_id_priv->av.dgid;
        __be32 remote_id = cm_id_priv->id.remote_id;

        while (*link) {
                parent = *link;
                cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
                                          sidr_id_node);
                if (remote_id < cur_cm_id_priv->id.remote_id)
                        link = &(*link)->rb_left;
                else if (remote_id > cur_cm_id_priv->id.remote_id)
                        link = &(*link)->rb_right;
                else {
                        int cmp;
                        cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
                                     sizeof *port_gid);
                        if (cmp < 0)
                                link = &(*link)->rb_left;
                        else if (cmp > 0)
                                link = &(*link)->rb_right;
                        else
                                return cur_cm_id_priv;
                }
        }
        rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
        rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
        return NULL;
}

static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
                               enum ib_cm_sidr_status status)
{
        struct ib_cm_sidr_rep_param param;

        memset(&param, 0, sizeof param);
        param.status = status;
        ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
}
struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
                                 ib_cm_handler cm_handler,
                                 void *context)
{
        struct cm_id_private *cm_id_priv;
        int ret;

        cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
        if (!cm_id_priv)
                return ERR_PTR(-ENOMEM);

        cm_id_priv->id.state = IB_CM_IDLE;
        cm_id_priv->id.device = device;
        cm_id_priv->id.cm_handler = cm_handler;
        cm_id_priv->id.context = context;
        cm_id_priv->id.remote_cm_qpn = 1;
        ret = cm_alloc_id(cm_id_priv);
        if (ret)
                goto error;

        spin_lock_init(&cm_id_priv->lock);
        init_completion(&cm_id_priv->comp);
        INIT_LIST_HEAD(&cm_id_priv->work_list);
        atomic_set(&cm_id_priv->work_count, -1);
        atomic_set(&cm_id_priv->refcount, 1);
        return &cm_id_priv->id;

error:
        kfree(cm_id_priv);
        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_cm_id);
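
/*
 * Minimal consumer sketch (illustrative only; my_cm_handler and my_ctx are
 * hypothetical names, not part of this file):
 *
 *      struct ib_cm_id *id;
 *
 *      id = ib_create_cm_id(device, my_cm_handler, my_ctx);
 *      if (IS_ERR(id))
 *              return PTR_ERR(id);
 *      ...
 *      ib_destroy_cm_id(id);
 *
 * A handler that returns non-zero causes the cm_id to be destroyed by
 * cm_process_work() below.
 */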
static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
        struct cm_work *work;

        if (list_empty(&cm_id_priv->work_list))
                return NULL;

        work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
        list_del(&work->list);
        return work;
}

static void cm_free_work(struct cm_work *work)
{
        if (work->mad_recv_wc)
                ib_free_recv_mad(work->mad_recv_wc);
        kfree(work);
}

static inline int cm_convert_to_ms(int iba_time)
{
        /* approximate conversion to ms from 4.096us x 2^iba_time */
        return 1 << max(iba_time - 8, 0);
}
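
/*
 * Worked example: iba_time = 14 gives 1 << (14 - 8) = 64 ms, close to the
 * exact 4.096us * 2^14 ~= 67 ms; values of 8 or less clamp to 1 ms.
 */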
static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
{
        unsigned long flags;

        if (!timewait_info->inserted_remote_id &&
            !timewait_info->inserted_remote_qp)
                return;

        spin_lock_irqsave(&cm.lock, flags);
        if (timewait_info->inserted_remote_id) {
                rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
                timewait_info->inserted_remote_id = 0;
        }

        if (timewait_info->inserted_remote_qp) {
                rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
                timewait_info->inserted_remote_qp = 0;
        }
        spin_unlock_irqrestore(&cm.lock, flags);
}

static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
{
        struct cm_timewait_info *timewait_info;

        timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
        if (!timewait_info)
                return ERR_PTR(-ENOMEM);

        timewait_info->work.local_id = local_id;
        INIT_WORK(&timewait_info->work.work, cm_work_handler,
                  &timewait_info->work);
        timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
        return timewait_info;
}

static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
        int wait_time;

        /*
         * The cm_id could be destroyed by the user before we exit timewait.
         * To protect against this, we search for the cm_id after exiting
         * timewait before notifying the user that we've exited timewait.
         */
        cm_id_priv->id.state = IB_CM_TIMEWAIT;
        wait_time = cm_convert_to_ms(cm_id_priv->local_ack_timeout);
        queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
                           msecs_to_jiffies(wait_time));
        cm_id_priv->timewait_info = NULL;
}

static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
        cm_id_priv->id.state = IB_CM_IDLE;
        if (cm_id_priv->timewait_info) {
                cm_cleanup_timewait(cm_id_priv->timewait_info);
                kfree(cm_id_priv->timewait_info);
                cm_id_priv->timewait_info = NULL;
        }
}
static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
        struct cm_id_private *cm_id_priv;
        struct cm_work *work;
        unsigned long flags;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
retest:
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id->state) {
        case IB_CM_LISTEN:
                cm_id->state = IB_CM_IDLE;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                spin_lock_irqsave(&cm.lock, flags);
                rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
                spin_unlock_irqrestore(&cm.lock, flags);
                break;
        case IB_CM_SIDR_REQ_SENT:
                cm_id->state = IB_CM_IDLE;
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                break;
        case IB_CM_SIDR_REQ_RCVD:
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
                break;
        case IB_CM_REQ_SENT:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
                               &cm_id_priv->av.port->cm_dev->ca_guid,
                               sizeof cm_id_priv->av.port->cm_dev->ca_guid,
                               NULL, 0);
                break;
        case IB_CM_REQ_RCVD:
                if (err == -ENOMEM) {
                        /* Do not reject to allow future retries. */
                        cm_reset_to_idle(cm_id_priv);
                        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                } else {
                        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                        ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
                                       NULL, 0, NULL, 0);
                }
                break;
        case IB_CM_MRA_REQ_RCVD:
        case IB_CM_REP_SENT:
        case IB_CM_MRA_REP_RCVD:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                /* Fall through */
        case IB_CM_MRA_REQ_SENT:
        case IB_CM_REP_RCVD:
        case IB_CM_MRA_REP_SENT:
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
                               NULL, 0, NULL, 0);
                break;
        case IB_CM_ESTABLISHED:
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ib_send_cm_dreq(cm_id, NULL, 0);
                goto retest;
        case IB_CM_DREQ_SENT:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                cm_enter_timewait(cm_id_priv);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                break;
        case IB_CM_DREQ_RCVD:
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ib_send_cm_drep(cm_id, NULL, 0);
                break;
        default:
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                break;
        }

        cm_free_id(cm_id->local_id);
        cm_deref_id(cm_id_priv);
        wait_for_completion(&cm_id_priv->comp);
        while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
                cm_free_work(work);
        kfree(cm_id_priv->compare_data);
        kfree(cm_id_priv->private_data);
        kfree(cm_id_priv);
}

void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
        cm_destroy_id(cm_id, 0);
}
EXPORT_SYMBOL(ib_destroy_cm_id);
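
/*
 * Note: destruction blocks in wait_for_completion() above until the last
 * reference is dropped, so no callbacks can still be running for this
 * cm_id once ib_destroy_cm_id() returns.
 */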
int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
                 struct ib_cm_compare_data *compare_data)
{
        struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
        unsigned long flags;
        int ret = 0;

        service_mask = service_mask ? service_mask :
                       __constant_cpu_to_be64(~0ULL);
        service_id &= service_mask;
        if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
            (service_id != IB_CM_ASSIGN_SERVICE_ID))
                return -EINVAL;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        if (cm_id->state != IB_CM_IDLE)
                return -EINVAL;

        if (compare_data) {
                cm_id_priv->compare_data = kzalloc(sizeof *compare_data,
                                                   GFP_KERNEL);
                if (!cm_id_priv->compare_data)
                        return -ENOMEM;
                cm_mask_copy(cm_id_priv->compare_data->data,
                             compare_data->data, compare_data->mask);
                memcpy(cm_id_priv->compare_data->mask, compare_data->mask,
                       IB_CM_COMPARE_SIZE);
        }

        cm_id->state = IB_CM_LISTEN;

        spin_lock_irqsave(&cm.lock, flags);
        if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
                cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
                cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
        } else {
                cm_id->service_id = service_id;
                cm_id->service_mask = service_mask;
        }
        cur_cm_id_priv = cm_insert_listen(cm_id_priv);
        spin_unlock_irqrestore(&cm.lock, flags);

        if (cur_cm_id_priv) {
                cm_id->state = IB_CM_IDLE;
                kfree(cm_id_priv->compare_data);
                cm_id_priv->compare_data = NULL;
                ret = -EBUSY;
        }
        return ret;
}
EXPORT_SYMBOL(ib_cm_listen);
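
/*
 * Transaction IDs place the MAD agent's hi_tid in the upper 32 bits and
 * the local communication ID (plus a 2-bit message sequence in bits 30-31)
 * in the lower 32 bits.
 */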
static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
                          enum cm_msg_sequence msg_seq)
{
        u64 hi_tid, low_tid;

        hi_tid   = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
        low_tid  = (u64) ((__force u32)cm_id_priv->id.local_id |
                          (msg_seq << 30));
        return cpu_to_be64(hi_tid | low_tid);
}

static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
                              __be16 attr_id, __be64 tid)
{
        hdr->base_version  = IB_MGMT_BASE_VERSION;
        hdr->mgmt_class    = IB_MGMT_CLASS_CM;
        hdr->class_version = IB_CM_CLASS_VERSION;
        hdr->method        = IB_MGMT_METHOD_SEND;
        hdr->attr_id       = attr_id;
        hdr->tid           = tid;
}
static void cm_format_req(struct cm_req_msg *req_msg,
                          struct cm_id_private *cm_id_priv,
                          struct ib_cm_req_param *param)
{
        cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
                          cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));

        req_msg->local_comm_id = cm_id_priv->id.local_id;
        req_msg->service_id = param->service_id;
        req_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
        cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
        cm_req_set_resp_res(req_msg, param->responder_resources);
        cm_req_set_init_depth(req_msg, param->initiator_depth);
        cm_req_set_remote_resp_timeout(req_msg,
                                       param->remote_cm_response_timeout);
        cm_req_set_qp_type(req_msg, param->qp_type);
        cm_req_set_flow_ctrl(req_msg, param->flow_control);
        cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
        cm_req_set_local_resp_timeout(req_msg,
                                      param->local_cm_response_timeout);
        cm_req_set_retry_count(req_msg, param->retry_count);
        req_msg->pkey = param->primary_path->pkey;
        cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
        cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
        cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
        cm_req_set_srq(req_msg, param->srq);

        req_msg->primary_local_lid = param->primary_path->slid;
        req_msg->primary_remote_lid = param->primary_path->dlid;
        req_msg->primary_local_gid = param->primary_path->sgid;
        req_msg->primary_remote_gid = param->primary_path->dgid;
        cm_req_set_primary_flow_label(req_msg, param->primary_path->flow_label);
        cm_req_set_primary_packet_rate(req_msg, param->primary_path->rate);
        req_msg->primary_traffic_class = param->primary_path->traffic_class;
        req_msg->primary_hop_limit = param->primary_path->hop_limit;
        cm_req_set_primary_sl(req_msg, param->primary_path->sl);
        cm_req_set_primary_subnet_local(req_msg, 1); /* local only... */
        cm_req_set_primary_local_ack_timeout(req_msg,
                min(31, param->primary_path->packet_life_time + 1));

        if (param->alternate_path) {
                req_msg->alt_local_lid = param->alternate_path->slid;
                req_msg->alt_remote_lid = param->alternate_path->dlid;
                req_msg->alt_local_gid = param->alternate_path->sgid;
                req_msg->alt_remote_gid = param->alternate_path->dgid;
                cm_req_set_alt_flow_label(req_msg,
                                          param->alternate_path->flow_label);
                cm_req_set_alt_packet_rate(req_msg, param->alternate_path->rate);
                req_msg->alt_traffic_class = param->alternate_path->traffic_class;
                req_msg->alt_hop_limit = param->alternate_path->hop_limit;
                cm_req_set_alt_sl(req_msg, param->alternate_path->sl);
                cm_req_set_alt_subnet_local(req_msg, 1); /* local only... */
                cm_req_set_alt_local_ack_timeout(req_msg,
                        min(31, param->alternate_path->packet_life_time + 1));
        }

        if (param->private_data && param->private_data_len)
                memcpy(req_msg->private_data, param->private_data,
                       param->private_data_len);
}
static int cm_validate_req_param(struct ib_cm_req_param *param)
{
        /* peer-to-peer not supported */
        if (param->peer_to_peer)
                return -EINVAL;

        if (!param->primary_path)
                return -EINVAL;

        if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC)
                return -EINVAL;

        if (param->private_data &&
            param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
                return -EINVAL;

        if (param->alternate_path &&
            (param->alternate_path->pkey != param->primary_path->pkey ||
             param->alternate_path->mtu != param->primary_path->mtu))
                return -EINVAL;

        return 0;
}
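
/*
 * The CM timeout below is derived from the path: twice the packet life
 * time (one round trip) plus the remote's CM response timeout, each
 * converted from IBA exponential encoding to milliseconds.
 */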
int ib_send_cm_req(struct ib_cm_id *cm_id,
                   struct ib_cm_req_param *param)
{
        struct cm_id_private *cm_id_priv;
        struct cm_req_msg *req_msg;
        unsigned long flags;
        int ret;

        ret = cm_validate_req_param(param);
        if (ret)
                return ret;

        /* Verify that we're not in timewait. */
        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_IDLE) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ret = -EINVAL;
                goto out;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
                                                            id.local_id);
        if (IS_ERR(cm_id_priv->timewait_info)) {
                ret = PTR_ERR(cm_id_priv->timewait_info);
                goto out;
        }

        ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
        if (ret)
                goto error1;
        if (param->alternate_path) {
                ret = cm_init_av_by_path(param->alternate_path,
                                         &cm_id_priv->alt_av);
                if (ret)
                        goto error1;
        }
        cm_id->service_id = param->service_id;
        cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
        cm_id_priv->timeout_ms = cm_convert_to_ms(
                                    param->primary_path->packet_life_time) * 2 +
                                 cm_convert_to_ms(
                                    param->remote_cm_response_timeout);
        cm_id_priv->max_cm_retries = param->max_cm_retries;
        cm_id_priv->initiator_depth = param->initiator_depth;
        cm_id_priv->responder_resources = param->responder_resources;
        cm_id_priv->retry_count = param->retry_count;
        cm_id_priv->path_mtu = param->primary_path->mtu;
        cm_id_priv->qp_type = param->qp_type;

        ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
        if (ret)
                goto error1;

        req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
        cm_format_req(req_msg, cm_id_priv, param);
        cm_id_priv->tid = req_msg->hdr.tid;
        cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
        cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

        cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
        cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
        cm_id_priv->local_ack_timeout =
                                cm_req_get_primary_local_ack_timeout(req_msg);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        ret = ib_post_send_mad(cm_id_priv->msg, NULL);
        if (ret) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                goto error2;
        }
        BUG_ON(cm_id->state != IB_CM_IDLE);
        cm_id->state = IB_CM_REQ_SENT;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return 0;

error2: cm_free_msg(cm_id_priv->msg);
error1: kfree(cm_id_priv->timewait_info);
out:    return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);
static int cm_issue_rej(struct cm_port *port,
                        struct ib_mad_recv_wc *mad_recv_wc,
                        enum ib_cm_rej_reason reason,
                        enum cm_msg_response msg_rejected,
                        void *ari, u8 ari_length)
{
        struct ib_mad_send_buf *msg = NULL;
        struct cm_rej_msg *rej_msg, *rcv_msg;
        int ret;

        ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
        if (ret)
                return ret;

        /* We just need common CM header information.  Cast to any message. */
        rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
        rej_msg = (struct cm_rej_msg *) msg->mad;

        cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
        rej_msg->remote_comm_id = rcv_msg->local_comm_id;
        rej_msg->local_comm_id = rcv_msg->remote_comm_id;
        cm_rej_set_msg_rejected(rej_msg, msg_rejected);
        rej_msg->reason = cpu_to_be16(reason);

        if (ari && ari_length) {
                cm_rej_set_reject_info_len(rej_msg, ari_length);
                memcpy(rej_msg->ari, ari, ari_length);
        }

        ret = ib_post_send_mad(msg, NULL);
        if (ret)
                cm_free_msg(msg);

        return ret;
}
static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
                                    __be32 local_qpn, __be32 remote_qpn)
{
        return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
                ((local_ca_guid == remote_ca_guid) &&
                 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
}
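
/*
 * Peer-to-peer tie-break: the side with the larger CA GUID (or, on a GUID
 * tie, the larger QPN) is treated as the active peer.
 */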
static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
                                     struct ib_sa_path_rec *primary_path,
                                     struct ib_sa_path_rec *alt_path)
{
        memset(primary_path, 0, sizeof *primary_path);
        primary_path->dgid = req_msg->primary_local_gid;
        primary_path->sgid = req_msg->primary_remote_gid;
        primary_path->dlid = req_msg->primary_local_lid;
        primary_path->slid = req_msg->primary_remote_lid;
        primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
        primary_path->hop_limit = req_msg->primary_hop_limit;
        primary_path->traffic_class = req_msg->primary_traffic_class;
        primary_path->reversible = 1;
        primary_path->pkey = req_msg->pkey;
        primary_path->sl = cm_req_get_primary_sl(req_msg);
        primary_path->mtu_selector = IB_SA_EQ;
        primary_path->mtu = cm_req_get_path_mtu(req_msg);
        primary_path->rate_selector = IB_SA_EQ;
        primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
        primary_path->packet_life_time_selector = IB_SA_EQ;
        primary_path->packet_life_time =
                cm_req_get_primary_local_ack_timeout(req_msg);
        primary_path->packet_life_time -= (primary_path->packet_life_time > 0);

        if (req_msg->alt_local_lid) {
                memset(alt_path, 0, sizeof *alt_path);
                alt_path->dgid = req_msg->alt_local_gid;
                alt_path->sgid = req_msg->alt_remote_gid;
                alt_path->dlid = req_msg->alt_local_lid;
                alt_path->slid = req_msg->alt_remote_lid;
                alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
                alt_path->hop_limit = req_msg->alt_hop_limit;
                alt_path->traffic_class = req_msg->alt_traffic_class;
                alt_path->reversible = 1;
                alt_path->pkey = req_msg->pkey;
                alt_path->sl = cm_req_get_alt_sl(req_msg);
                alt_path->mtu_selector = IB_SA_EQ;
                alt_path->mtu = cm_req_get_path_mtu(req_msg);
                alt_path->rate_selector = IB_SA_EQ;
                alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
                alt_path->packet_life_time_selector = IB_SA_EQ;
                alt_path->packet_life_time =
                        cm_req_get_alt_local_ack_timeout(req_msg);
                alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
        }
}
static void cm_format_req_event(struct cm_work *work,
                                struct cm_id_private *cm_id_priv,
                                struct ib_cm_id *listen_id)
{
        struct cm_req_msg *req_msg;
        struct ib_cm_req_event_param *param;

        req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
        param = &work->cm_event.param.req_rcvd;
        param->listen_id = listen_id;
        param->port = cm_id_priv->av.port->port_num;
        param->primary_path = &work->path[0];
        if (req_msg->alt_local_lid)
                param->alternate_path = &work->path[1];
        else
                param->alternate_path = NULL;
        param->remote_ca_guid = req_msg->local_ca_guid;
        param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
        param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
        param->qp_type = cm_req_get_qp_type(req_msg);
        param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
        param->responder_resources = cm_req_get_init_depth(req_msg);
        param->initiator_depth = cm_req_get_resp_res(req_msg);
        param->local_cm_response_timeout =
                                        cm_req_get_remote_resp_timeout(req_msg);
        param->flow_control = cm_req_get_flow_ctrl(req_msg);
        param->remote_cm_response_timeout =
                                        cm_req_get_local_resp_timeout(req_msg);
        param->retry_count = cm_req_get_retry_count(req_msg);
        param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
        param->srq = cm_req_get_srq(req_msg);
        work->cm_event.private_data = &req_msg->private_data;
}
static void cm_process_work(struct cm_id_private *cm_id_priv,
                            struct cm_work *work)
{
        unsigned long flags;
        int ret;

        /* We will typically only have the current event to report. */
        ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
        cm_free_work(work);

        while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                work = cm_dequeue_work(cm_id_priv);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                BUG_ON(!work);
                ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
                                                &work->cm_event);
                cm_free_work(work);
        }
        cm_deref_id(cm_id_priv);
        if (ret)
                cm_destroy_id(&cm_id_priv->id, ret);
}
static void cm_format_mra(struct cm_mra_msg *mra_msg,
                          struct cm_id_private *cm_id_priv,
                          enum cm_msg_response msg_mraed, u8 service_timeout,
                          const void *private_data, u8 private_data_len)
{
        cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
        cm_mra_set_msg_mraed(mra_msg, msg_mraed);
        mra_msg->local_comm_id = cm_id_priv->id.local_id;
        mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
        cm_mra_set_service_timeout(mra_msg, service_timeout);

        if (private_data && private_data_len)
                memcpy(mra_msg->private_data, private_data, private_data_len);
}
static void cm_format_rej(struct cm_rej_msg *rej_msg,
                          struct cm_id_private *cm_id_priv,
                          enum ib_cm_rej_reason reason,
                          void *ari,
                          u8 ari_length,
                          const void *private_data,
                          u8 private_data_len)
{
        cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
        rej_msg->remote_comm_id = cm_id_priv->id.remote_id;

        switch(cm_id_priv->id.state) {
        case IB_CM_REQ_RCVD:
                rej_msg->local_comm_id = 0;
                cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
                break;
        case IB_CM_MRA_REQ_SENT:
                rej_msg->local_comm_id = cm_id_priv->id.local_id;
                cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
                break;
        case IB_CM_REP_RCVD:
        case IB_CM_MRA_REP_SENT:
                rej_msg->local_comm_id = cm_id_priv->id.local_id;
                cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
                break;
        default:
                rej_msg->local_comm_id = cm_id_priv->id.local_id;
                cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
                break;
        }

        rej_msg->reason = cpu_to_be16(reason);
        if (ari && ari_length) {
                cm_rej_set_reject_info_len(rej_msg, ari_length);
                memcpy(rej_msg->ari, ari, ari_length);
        }

        if (private_data && private_data_len)
                memcpy(rej_msg->private_data, private_data, private_data_len);
}
static void cm_dup_req_handler(struct cm_work *work,
                               struct cm_id_private *cm_id_priv)
{
        struct ib_mad_send_buf *msg = NULL;
        unsigned long flags;
        int ret;

        /* Quick state check to discard duplicate REQs. */
        if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
                return;

        ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
        if (ret)
                return;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->id.state) {
        case IB_CM_MRA_REQ_SENT:
                cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
                              CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
                              cm_id_priv->private_data,
                              cm_id_priv->private_data_len);
                break;
        case IB_CM_TIMEWAIT:
                cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
                              IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
                break;
        default:
                goto unlock;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        ret = ib_post_send_mad(msg, NULL);
        if (ret)
                goto free;
        return;

unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:   cm_free_msg(msg);
}
static struct cm_id_private * cm_match_req(struct cm_work *work,
                                           struct cm_id_private *cm_id_priv)
{
        struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
        struct cm_timewait_info *timewait_info;
        struct cm_req_msg *req_msg;
        unsigned long flags;

        req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

        /* Check for duplicate REQ and stale connections. */
        spin_lock_irqsave(&cm.lock, flags);
        timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
        if (!timewait_info)
                timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);

        if (timewait_info) {
                cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
                                           timewait_info->work.remote_id);
                spin_unlock_irqrestore(&cm.lock, flags);
                if (cur_cm_id_priv) {
                        cm_dup_req_handler(work, cur_cm_id_priv);
                        cm_deref_id(cur_cm_id_priv);
                } else
                        cm_issue_rej(work->port, work->mad_recv_wc,
                                     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
                                     NULL, 0);
                goto error;
        }

        /* Find matching listen request. */
        listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
                                           req_msg->service_id,
                                           req_msg->private_data);
        if (!listen_cm_id_priv) {
                spin_unlock_irqrestore(&cm.lock, flags);
                cm_issue_rej(work->port, work->mad_recv_wc,
                             IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
                             NULL, 0);
                goto error;
        }
        atomic_inc(&listen_cm_id_priv->refcount);
        atomic_inc(&cm_id_priv->refcount);
        cm_id_priv->id.state = IB_CM_REQ_RCVD;
        atomic_inc(&cm_id_priv->work_count);
        spin_unlock_irqrestore(&cm.lock, flags);
        return listen_cm_id_priv;

error:  cm_cleanup_timewait(cm_id_priv->timewait_info);
        return NULL;
}
static int cm_req_handler(struct cm_work *work)
{
        struct ib_cm_id *cm_id;
        struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
        struct cm_req_msg *req_msg;
        int ret;

        req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

        cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
        if (IS_ERR(cm_id))
                return PTR_ERR(cm_id);

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        cm_id_priv->id.remote_id = req_msg->local_comm_id;
        cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
                                work->mad_recv_wc->recv_buf.grh,
                                &cm_id_priv->av);
        cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
                                                            id.local_id);
        if (IS_ERR(cm_id_priv->timewait_info)) {
                ret = PTR_ERR(cm_id_priv->timewait_info);
                goto destroy;
        }
        cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
        cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
        cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);

        listen_cm_id_priv = cm_match_req(work, cm_id_priv);
        if (!listen_cm_id_priv) {
                ret = -EINVAL;
                kfree(cm_id_priv->timewait_info);
                goto destroy;
        }

        cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
        cm_id_priv->id.context = listen_cm_id_priv->id.context;
        cm_id_priv->id.service_id = req_msg->service_id;
        cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);

        cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
        ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
        if (ret) {
                ib_get_cached_gid(work->port->cm_dev->device,
                                  work->port->port_num, 0, &work->path[0].sgid);
                ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
                               &work->path[0].sgid, sizeof work->path[0].sgid,
                               NULL, 0);
                goto rejected;
        }
        if (req_msg->alt_local_lid) {
                ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
                if (ret) {
                        ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
                                       &work->path[0].sgid,
                                       sizeof work->path[0].sgid, NULL, 0);
                        goto rejected;
                }
        }
        cm_id_priv->tid = req_msg->hdr.tid;
        cm_id_priv->timeout_ms = cm_convert_to_ms(
                                        cm_req_get_local_resp_timeout(req_msg));
        cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
        cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
        cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
        cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
        cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
        cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
        cm_id_priv->local_ack_timeout =
                                cm_req_get_primary_local_ack_timeout(req_msg);
        cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
        cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
        cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);

        cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
        cm_process_work(cm_id_priv, work);
        cm_deref_id(listen_cm_id_priv);
        return 0;

rejected:
        atomic_dec(&cm_id_priv->refcount);
        cm_deref_id(listen_cm_id_priv);
destroy:
        ib_destroy_cm_id(cm_id);
        return ret;
}
static void cm_format_rep(struct cm_rep_msg *rep_msg,
                          struct cm_id_private *cm_id_priv,
                          struct ib_cm_rep_param *param)
{
        cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
        rep_msg->local_comm_id = cm_id_priv->id.local_id;
        rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
        cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
        cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
        rep_msg->resp_resources = param->responder_resources;
        rep_msg->initiator_depth = param->initiator_depth;
        cm_rep_set_target_ack_delay(rep_msg, param->target_ack_delay);
        cm_rep_set_failover(rep_msg, param->failover_accepted);
        cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
        cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
        cm_rep_set_srq(rep_msg, param->srq);
        rep_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;

        if (param->private_data && param->private_data_len)
                memcpy(rep_msg->private_data, param->private_data,
                       param->private_data_len);
}
int ib_send_cm_rep(struct ib_cm_id *cm_id,
                   struct ib_cm_rep_param *param)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        struct cm_rep_msg *rep_msg;
        unsigned long flags;
        int ret;

        if (param->private_data &&
            param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
                return -EINVAL;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_REQ_RCVD &&
            cm_id->state != IB_CM_MRA_REQ_SENT) {
                ret = -EINVAL;
                goto out;
        }

        ret = cm_alloc_msg(cm_id_priv, &msg);
        if (ret)
                goto out;

        rep_msg = (struct cm_rep_msg *) msg->mad;
        cm_format_rep(rep_msg, cm_id_priv, param);
        msg->timeout_ms = cm_id_priv->timeout_ms;
        msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;

        ret = ib_post_send_mad(msg, NULL);
        if (ret) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_free_msg(msg);
                return ret;
        }

        cm_id->state = IB_CM_REP_SENT;
        cm_id_priv->msg = msg;
        cm_id_priv->initiator_depth = param->initiator_depth;
        cm_id_priv->responder_resources = param->responder_resources;
        cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
        cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg);

out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_rep);
static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
                          struct cm_id_private *cm_id_priv,
                          const void *private_data,
                          u8 private_data_len)
{
        cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
        rtu_msg->local_comm_id = cm_id_priv->id.local_id;
        rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;

        if (private_data && private_data_len)
                memcpy(rtu_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_rtu(struct ib_cm_id *cm_id,
                   const void *private_data,
                   u8 private_data_len)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        unsigned long flags;
        void *data;
        int ret;

        if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
                return -EINVAL;

        data = cm_copy_private_data(private_data, private_data_len);
        if (IS_ERR(data))
                return PTR_ERR(data);

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_REP_RCVD &&
            cm_id->state != IB_CM_MRA_REP_SENT) {
                ret = -EINVAL;
                goto error;
        }

        ret = cm_alloc_msg(cm_id_priv, &msg);
        if (ret)
                goto error;

        cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
                      private_data, private_data_len);

        ret = ib_post_send_mad(msg, NULL);
        if (ret) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_free_msg(msg);
                kfree(data);
                return ret;
        }

        cm_id->state = IB_CM_ESTABLISHED;
        cm_set_private_data(cm_id_priv, data, private_data_len);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return 0;

error:  spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        kfree(data);
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_rtu);
static void cm_format_rep_event(struct cm_work *work)
{
        struct cm_rep_msg *rep_msg;
        struct ib_cm_rep_event_param *param;

        rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
        param = &work->cm_event.param.rep_rcvd;
        param->remote_ca_guid = rep_msg->local_ca_guid;
        param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
        param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
        param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
        param->responder_resources = rep_msg->initiator_depth;
        param->initiator_depth = rep_msg->resp_resources;
        param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
        param->failover_accepted = cm_rep_get_failover(rep_msg);
        param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
        param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
        param->srq = cm_rep_get_srq(rep_msg);
        work->cm_event.private_data = &rep_msg->private_data;
}
static void cm_dup_rep_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        struct cm_rep_msg *rep_msg;
        struct ib_mad_send_buf *msg = NULL;
        unsigned long flags;
        int ret;

        rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
                                   rep_msg->local_comm_id);
        if (!cm_id_priv)
                return;

        ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
        if (ret)
                goto deref;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
                cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
                              cm_id_priv->private_data,
                              cm_id_priv->private_data_len);
        else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
                cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
                              CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
                              cm_id_priv->private_data,
                              cm_id_priv->private_data_len);
        else
                goto unlock;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        ret = ib_post_send_mad(msg, NULL);
        if (ret)
                goto free;
        goto deref;

unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:   cm_free_msg(msg);
deref:  cm_deref_id(cm_id_priv);
}
static int cm_rep_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        struct cm_rep_msg *rep_msg;
        unsigned long flags;
        int ret;

        rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
        if (!cm_id_priv) {
                cm_dup_rep_handler(work);
                return -EINVAL;
        }

        cm_format_rep_event(work);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->id.state) {
        case IB_CM_REQ_SENT:
        case IB_CM_MRA_REQ_RCVD:
                break;
        default:
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ret = -EINVAL;
                goto error;
        }

        cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
        cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
        cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);

        spin_lock(&cm.lock);
        /* Check for duplicate REP. */
        if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
                spin_unlock(&cm.lock);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ret = -EINVAL;
                goto error;
        }
        /* Check for a stale connection. */
        if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
                rb_erase(&cm_id_priv->timewait_info->remote_id_node,
                         &cm.remote_id_table);
                cm_id_priv->timewait_info->inserted_remote_id = 0;
                spin_unlock(&cm.lock);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_issue_rej(work->port, work->mad_recv_wc,
                             IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
                             NULL, 0);
                ret = -EINVAL;
                goto error;
        }
        spin_unlock(&cm.lock);

        cm_id_priv->id.state = IB_CM_REP_RCVD;
        cm_id_priv->id.remote_id = rep_msg->local_comm_id;
        cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
        cm_id_priv->initiator_depth = rep_msg->resp_resources;
        cm_id_priv->responder_resources = rep_msg->initiator_depth;
        cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
        cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);

        /* todo: handle peer_to_peer */

        ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;

error:
        cm_deref_id(cm_id_priv);
        return ret;
}
static int cm_establish_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        unsigned long flags;
        int ret;

        /* See comment in ib_cm_establish about lookup. */
        cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
        if (!cm_id_priv)
                return -EINVAL;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                goto out;
        }

        ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;
out:
        cm_deref_id(cm_id_priv);
        return -EINVAL;
}
static int cm_rtu_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        struct cm_rtu_msg *rtu_msg;
        unsigned long flags;
        int ret;

        rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
                                   rtu_msg->local_comm_id);
        if (!cm_id_priv)
                return -EINVAL;

        work->cm_event.private_data = &rtu_msg->private_data;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->id.state != IB_CM_REP_SENT &&
            cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                goto out;
        }
        cm_id_priv->id.state = IB_CM_ESTABLISHED;

        ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;
out:
        cm_deref_id(cm_id_priv);
        return -EINVAL;
}
static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
                           struct cm_id_private *cm_id_priv,
                           const void *private_data,
                           u8 private_data_len)
{
        cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
                          cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
        dreq_msg->local_comm_id = cm_id_priv->id.local_id;
        dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
        cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);

        if (private_data && private_data_len)
                memcpy(dreq_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_dreq(struct ib_cm_id *cm_id,
                    const void *private_data,
                    u8 private_data_len)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        unsigned long flags;
        int ret;

        if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
                return -EINVAL;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_ESTABLISHED) {
                ret = -EINVAL;
                goto out;
        }

        ret = cm_alloc_msg(cm_id_priv, &msg);
        if (ret) {
                cm_enter_timewait(cm_id_priv);
                goto out;
        }

        cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
                       private_data, private_data_len);
        msg->timeout_ms = cm_id_priv->timeout_ms;
        msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;

        ret = ib_post_send_mad(msg, NULL);
        if (ret) {
                cm_enter_timewait(cm_id_priv);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_free_msg(msg);
                return ret;
        }

        cm_id->state = IB_CM_DREQ_SENT;
        cm_id_priv->msg = msg;
out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_dreq);
static void cm_format_drep(struct cm_drep_msg *drep_msg,
                           struct cm_id_private *cm_id_priv,
                           const void *private_data,
                           u8 private_data_len)
{
        cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
        drep_msg->local_comm_id = cm_id_priv->id.local_id;
        drep_msg->remote_comm_id = cm_id_priv->id.remote_id;

        if (private_data && private_data_len)
                memcpy(drep_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_drep(struct ib_cm_id *cm_id,
                    const void *private_data,
                    u8 private_data_len)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        unsigned long flags;
        void *data;
        int ret;

        if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
                return -EINVAL;

        data = cm_copy_private_data(private_data, private_data_len);
        if (IS_ERR(data))
                return PTR_ERR(data);

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_DREQ_RCVD) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                kfree(data);
                return -EINVAL;
        }

        cm_set_private_data(cm_id_priv, data, private_data_len);
        cm_enter_timewait(cm_id_priv);

        ret = cm_alloc_msg(cm_id_priv, &msg);
        if (ret)
                goto out;

        cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
                       private_data, private_data_len);

        ret = ib_post_send_mad(msg, NULL);
        if (ret) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_free_msg(msg);
                return ret;
        }

out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_drep);
static int cm_dreq_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        struct cm_dreq_msg *dreq_msg;
        struct ib_mad_send_buf *msg = NULL;
        unsigned long flags;
        int ret;

        dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
                                   dreq_msg->local_comm_id);
        if (!cm_id_priv)
                return -EINVAL;

        work->cm_event.private_data = &dreq_msg->private_data;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
                goto unlock;

        switch (cm_id_priv->id.state) {
        case IB_CM_REP_SENT:
        case IB_CM_DREQ_SENT:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                break;
        case IB_CM_ESTABLISHED:
        case IB_CM_MRA_REP_RCVD:
                break;
        case IB_CM_TIMEWAIT:
                if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
                        goto unlock;

                cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
                               cm_id_priv->private_data,
                               cm_id_priv->private_data_len);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);

                if (ib_post_send_mad(msg, NULL))
                        cm_free_msg(msg);
                goto deref;
        default:
                goto unlock;
        }
        cm_id_priv->id.state = IB_CM_DREQ_RCVD;
        cm_id_priv->tid = dreq_msg->hdr.tid;
        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;

unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
deref:  cm_deref_id(cm_id_priv);
        return -EINVAL;
}
static int cm_drep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_drep_msg *drep_msg;
	unsigned long flags;
	int ret;

	drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
				   drep_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &drep_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
	    cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_enter_timewait(cm_id_priv);

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

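/*
 * A REJ may be sent at any point during connection establishment.
 * Rejecting from the REQ states resets the cm_id to idle, while
 * rejecting after our REP has gone out moves it to timewait, since the
 * peer may already have transitioned its QP.
 */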
int ib_send_cm_rej(struct ib_cm_id *cm_id,
		   enum ib_cm_rej_reason reason,
		   void *ari,
		   u8 ari_length,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (!ret)
			cm_format_rej((struct cm_rej_msg *) msg->mad,
				      cm_id_priv, reason, ari, ari_length,
				      private_data, private_data_len);

		cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (!ret)
			cm_format_rej((struct cm_rej_msg *) msg->mad,
				      cm_id_priv, reason, ari, ari_length,
				      private_data, private_data_len);

		cm_enter_timewait(cm_id_priv);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	if (ret)
		goto out;

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rej);

static void cm_format_rej_event(struct cm_work *work)
{
	struct cm_rej_msg *rej_msg;
	struct ib_cm_rej_event_param *param;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rej_rcvd;
	param->ari = rej_msg->ari;
	param->ari_length = cm_rej_get_reject_info_len(rej_msg);
	param->reason = __be16_to_cpu(rej_msg->reason);
	work->cm_event.private_data = &rej_msg->private_data;
}

static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	__be32 remote_id;

	remote_id = rej_msg->local_comm_id;

	if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
		spin_lock_irqsave(&cm.lock, flags);
		timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
						   remote_id);
		if (!timewait_info) {
			spin_unlock_irqrestore(&cm.lock, flags);
			return NULL;
		}
		cm_id_priv = idr_find(&cm.local_id_table,
				      (__force int) timewait_info->work.local_id);
		if (cm_id_priv) {
			if (cm_id_priv->id.remote_id == remote_id)
				atomic_inc(&cm_id_priv->refcount);
			else
				cm_id_priv = NULL;
		}
		spin_unlock_irqrestore(&cm.lock, flags);
	} else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
	else
		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);

	return cm_id_priv;
}

static int cm_rej_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rej_msg *rej_msg;
	unsigned long flags;
	int ret;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_rejected_id(rej_msg);
	if (!cm_id_priv)
		return -EINVAL;

	cm_format_rej_event(work);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* fall through */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
		if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
			cm_enter_timewait(cm_id_priv);
		else
			cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* fall through */
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_ESTABLISHED:
		cm_enter_timewait(cm_id_priv);
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}

	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

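/*
 * MRA (Message Receipt Acknowledgement) tells the peer that its REQ,
 * REP, or LAP arrived but needs more processing time, so the peer
 * stretches its retransmission timeout instead of resending.
 * service_timeout is the additional delay, encoded in the usual IB
 * 4.096us * 2^n timeout format.
 */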
int ib_send_cm_mra(struct ib_cm_id *cm_id,
		   u8 service_timeout,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	void *data;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch(cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
		cm_id->state = IB_CM_MRA_REQ_SENT;
		break;
	case IB_CM_REP_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
		cm_id->state = IB_CM_MRA_REP_SENT;
		break;
	case IB_CM_ESTABLISHED:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_OTHER, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
		cm_id->lap_state = IB_CM_MRA_LAP_SENT;
		break;
	default:
		ret = -EINVAL;
		goto error1;
	}
	cm_id_priv->service_timeout = service_timeout;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error1:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;

error2:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	cm_free_msg(msg);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_mra);

static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
{
	switch (cm_mra_get_msg_mraed(mra_msg)) {
	case CM_MSG_RESPONSE_REQ:
		return cm_acquire_id(mra_msg->remote_comm_id, 0);
	case CM_MSG_RESPONSE_REP:
	case CM_MSG_RESPONSE_OTHER:
		return cm_acquire_id(mra_msg->remote_comm_id,
				     mra_msg->local_comm_id);
	default:
		return NULL;
	}
}

static int cm_mra_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_mra_msg *mra_msg;
	unsigned long flags;
	int timeout, ret;

	mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_mraed_id(mra_msg);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &mra_msg->private_data;
	work->cm_event.param.mra_rcvd.service_timeout =
					cm_mra_get_service_timeout(mra_msg);
	timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
		  cm_convert_to_ms(cm_id_priv->av.packet_life_time);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
		break;
	case IB_CM_REP_SENT:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
		break;
	case IB_CM_ESTABLISHED:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
		    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
		break;
	default:
		goto out;
	}

	cm_id_priv->msg->context[1] = (void *) (unsigned long)
				      cm_id_priv->id.state;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

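/*
 * LAP/APR implement alternate path loading for automatic path
 * migration: the active side proposes an alternate path in a LAP, and
 * the passive side accepts or rejects it with an APR.  cm_format_lap()
 * copies the alternate ib_sa_path_rec into the LAP wire format.
 */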
static void cm_format_lap(struct cm_lap_msg *lap_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_sa_path_rec *alternate_path,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
	lap_msg->local_comm_id = cm_id_priv->id.local_id;
	lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
	/* todo: need remote CM response timeout */
	cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
	lap_msg->alt_local_lid = alternate_path->slid;
	lap_msg->alt_remote_lid = alternate_path->dlid;
	lap_msg->alt_local_gid = alternate_path->sgid;
	lap_msg->alt_remote_gid = alternate_path->dgid;
	cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
	cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
	lap_msg->alt_hop_limit = alternate_path->hop_limit;
	cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
	cm_lap_set_sl(lap_msg, alternate_path->sl);
	cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
	cm_lap_set_local_ack_timeout(lap_msg,
		min(31, alternate_path->packet_life_time + 1));

	if (private_data && private_data_len)
		memcpy(lap_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_lap(struct ib_cm_id *cm_id,
		   struct ib_sa_path_rec *alternate_path,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED ||
	    cm_id->lap_state != IB_CM_LAP_IDLE) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
		      alternate_path, private_data, private_data_len);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->lap_state = IB_CM_LAP_SENT;
	cm_id_priv->msg = msg;

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_lap);

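/*
 * Rebuild a path record from a received LAP.  Note the deliberate
 * swap: the LAP's "local" fields describe the sender, so they become
 * our destination (dgid/dlid) and vice versa.
 */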
static void cm_format_path_from_lap(struct ib_sa_path_rec *path,
				    struct cm_lap_msg *lap_msg)
{
	memset(path, 0, sizeof *path);
	path->dgid = lap_msg->alt_local_gid;
	path->sgid = lap_msg->alt_remote_gid;
	path->dlid = lap_msg->alt_local_lid;
	path->slid = lap_msg->alt_remote_lid;
	path->flow_label = cm_lap_get_flow_label(lap_msg);
	path->hop_limit = lap_msg->alt_hop_limit;
	path->traffic_class = cm_lap_get_traffic_class(lap_msg);
	path->reversible = 1;
	/* pkey is same as in REQ */
	path->sl = cm_lap_get_sl(lap_msg);
	path->mtu_selector = IB_SA_EQ;
	/* mtu is same as in REQ */
	path->rate_selector = IB_SA_EQ;
	path->rate = cm_lap_get_packet_rate(lap_msg);
	path->packet_life_time_selector = IB_SA_EQ;
	path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
	path->packet_life_time -= (path->packet_life_time > 0);
}

static int cm_lap_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_lap_msg *lap_msg;
	struct ib_cm_lap_event_param *param;
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	/* todo: verify LAP request and send reject APR if invalid. */
	lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
				   lap_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	param = &work->cm_event.param.lap_rcvd;
	param->alternate_path = &work->path[0];
	cm_format_path_from_lap(param->alternate_path, lap_msg);
	work->cm_event.private_data = &lap_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
		goto unlock;

	switch (cm_id_priv->id.lap_state) {
	case IB_CM_LAP_IDLE:
		break;
	case IB_CM_MRA_LAP_SENT:
		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
			goto unlock;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_OTHER,
			      cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

		if (ib_post_send_mad(msg, NULL))
			cm_free_msg(msg);
		goto deref;
	default:
		goto unlock;
	}

	cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
	cm_id_priv->tid = lap_msg->hdr.tid;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static void cm_format_apr(struct cm_apr_msg *apr_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_apr_status status,
			  void *info,
			  u8 info_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
	apr_msg->local_comm_id = cm_id_priv->id.local_id;
	apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
	apr_msg->ap_status = (u8) status;

	if (info && info_length) {
		apr_msg->info_length = info_length;
		memcpy(apr_msg->info, info, info_length);
	}

	if (private_data && private_data_len)
		memcpy(apr_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_apr(struct ib_cm_id *cm_id,
		   enum ib_cm_apr_status status,
		   void *info,
		   u8 info_length,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
	    (info && info_length > IB_CM_APR_INFO_LENGTH))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED ||
	    (cm_id->lap_state != IB_CM_LAP_RCVD &&
	     cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
		      info, info_length, private_data, private_data_len);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->lap_state = IB_CM_LAP_IDLE;
out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_apr);

static int cm_apr_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_apr_msg *apr_msg;
	unsigned long flags;
	int ret;

	apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
				   apr_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
	work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
	work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
	work->cm_event.private_data = &apr_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
	    (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
	     cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	cm_id_priv->msg = NULL;

	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

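/*
 * Runs when a connection's timewait period expires.  The cast below
 * relies on the work member being first in cm_timewait_info, and the
 * cm_id is re-looked-up by its IDs because it may already have been
 * destroyed or reused by the time this executes.
 */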
static int cm_timewait_handler(struct cm_work *work)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	timewait_info = (struct cm_timewait_info *)work;
	cm_cleanup_timewait(timewait_info);

	cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
				   timewait_info->work.remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
	    cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

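/*
 * SIDR (Service ID Resolution) is a lightweight request/reply exchange,
 * typically used for unreliable datagram services: the requester
 * resolves a service ID to the remote QPN and Q_Key without setting up
 * a full connection.
 */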
static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_req_param *param)
{
	cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
	sidr_req_msg->request_id = cm_id_priv->id.local_id;
	sidr_req_msg->pkey = cpu_to_be16(param->path->pkey);
	sidr_req_msg->service_id = param->service_id;

	if (param->private_data && param->private_data_len)
		memcpy(sidr_req_msg->private_data, param->private_data,
		       param->private_data_len);
}

int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (!param->path || (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
	if (ret)
		goto out;

	cm_id->service_id = param->service_id;
	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	cm_id_priv->timeout_ms = param->timeout_ms;
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
			   param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state == IB_CM_IDLE)
		ret = ib_post_send_mad(msg, NULL);
	else
		ret = -EINVAL;

	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		goto out;
	}
	cm_id->state = IB_CM_SIDR_REQ_SENT;
	cm_id_priv->msg = msg;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
out:
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_req);

static void cm_format_sidr_req_event(struct cm_work *work,
				     struct ib_cm_id *listen_id)
{
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_cm_sidr_req_event_param *param;

	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_req_rcvd;
	param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
	param->listen_id = listen_id;
	param->port = work->port->port_num;
	work->cm_event.private_data = &sidr_req_msg->private_data;
}

static int cm_sidr_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_wc *wc;
	unsigned long flags;

	cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	/* Record SGID/SLID and request ID for lookup. */
	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	wc = work->mad_recv_wc->wc;
	cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
	cm_id_priv->av.dgid.global.interface_id = 0;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_id_priv->id.remote_id = sidr_req_msg->request_id;
	cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
	cm_id_priv->tid = sidr_req_msg->hdr.tid;
	atomic_inc(&cm_id_priv->work_count);

	spin_lock_irqsave(&cm.lock, flags);
	cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
	if (cur_cm_id_priv) {
		spin_unlock_irqrestore(&cm.lock, flags);
		goto out; /* Duplicate message. */
	}
	cur_cm_id_priv = cm_find_listen(cm_id->device,
					sidr_req_msg->service_id,
					sidr_req_msg->private_data);
	if (!cur_cm_id_priv) {
		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
		spin_unlock_irqrestore(&cm.lock, flags);
		/* todo: reply with no match */
		goto out; /* No match. */
	}
	atomic_inc(&cur_cm_id_priv->refcount);
	spin_unlock_irqrestore(&cm.lock, flags);

	cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = cur_cm_id_priv->id.context;
	cm_id_priv->id.service_id = sidr_req_msg->service_id;
	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);

	cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(cur_cm_id_priv);
	return 0;
out:
	ib_destroy_cm_id(&cm_id_priv->id);
	return -EINVAL;
}

static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_rep_param *param)
{
	cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
			  cm_id_priv->tid);
	sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
	sidr_rep_msg->status = param->status;
	cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
	sidr_rep_msg->service_id = cm_id_priv->id.service_id;
	sidr_rep_msg->qkey = cpu_to_be32(param->qkey);

	if (param->info && param->info_length)
		memcpy(sidr_rep_msg->info, param->info, param->info_length);

	if (param->private_data && param->private_data_len)
		memcpy(sidr_rep_msg->private_data, param->private_data,
		       param->private_data_len);
}

int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
	    (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
			   param);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}
	cm_id->state = IB_CM_IDLE;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	spin_lock_irqsave(&cm.lock, flags);
	rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	spin_unlock_irqrestore(&cm.lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_rep);

static void cm_format_sidr_rep_event(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct ib_cm_sidr_rep_event_param *param;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_rep_rcvd;
	param->status = sidr_rep_msg->status;
	param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
	param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
	param->info = &sidr_rep_msg->info;
	param->info_len = sidr_rep_msg->info_length;
	work->cm_event.private_data = &sidr_rep_msg->private_data;
}

static int cm_sidr_rep_handler(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct cm_id_private *cm_id_priv;
	unsigned long flags;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_format_sidr_rep_event(work);
	cm_process_work(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

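/*
 * Called when a CM MAD send completes in error (typically after
 * exhausting its retries).  The failure is surfaced to the consumer as
 * the *_ERROR event matching the state that was stashed in
 * msg->context[1] when the send was posted.
 */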
static void cm_process_send_error(struct ib_mad_send_buf *msg,
				  enum ib_wc_status wc_status)
{
	struct cm_id_private *cm_id_priv;
	struct ib_cm_event cm_event;
	enum ib_cm_state state;
	unsigned long flags;
	int ret;

	memset(&cm_event, 0, sizeof cm_event);
	cm_id_priv = msg->context[0];

	/* Discard old sends or ones without a response. */
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	state = (enum ib_cm_state) (unsigned long) msg->context[1];
	if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
		goto discard;

	switch (state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REQ_ERROR;
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REP_ERROR;
		break;
	case IB_CM_DREQ_SENT:
		cm_enter_timewait(cm_id_priv);
		cm_event.event = IB_CM_DREQ_ERROR;
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id_priv->id.state = IB_CM_IDLE;
		cm_event.event = IB_CM_SIDR_REQ_ERROR;
		break;
	default:
		goto discard;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_event.param.send_status = wc_status;

	/* No other events can occur on the cm_id at this point. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
	cm_free_msg(msg);
	if (ret)
		ib_destroy_cm_id(&cm_id_priv->id);
	return;
discard:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_free_msg(msg);
}

static void cm_send_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_send_buf *msg = mad_send_wc->send_buf;

	switch (mad_send_wc->status) {
	case IB_WC_SUCCESS:
	case IB_WC_WR_FLUSH_ERR:
		cm_free_msg(msg);
		break;
	default:
		if (msg->context[0] && msg->context[1])
			cm_process_send_error(msg, mad_send_wc->status);
		else
			cm_free_msg(msg);
		break;
	}
}

static void cm_work_handler(void *data)
{
	struct cm_work *work = data;
	int ret;

	switch (work->cm_event.event) {
	case IB_CM_REQ_RECEIVED:
		ret = cm_req_handler(work);
		break;
	case IB_CM_MRA_RECEIVED:
		ret = cm_mra_handler(work);
		break;
	case IB_CM_REJ_RECEIVED:
		ret = cm_rej_handler(work);
		break;
	case IB_CM_REP_RECEIVED:
		ret = cm_rep_handler(work);
		break;
	case IB_CM_RTU_RECEIVED:
		ret = cm_rtu_handler(work);
		break;
	case IB_CM_USER_ESTABLISHED:
		ret = cm_establish_handler(work);
		break;
	case IB_CM_DREQ_RECEIVED:
		ret = cm_dreq_handler(work);
		break;
	case IB_CM_DREP_RECEIVED:
		ret = cm_drep_handler(work);
		break;
	case IB_CM_SIDR_REQ_RECEIVED:
		ret = cm_sidr_req_handler(work);
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		ret = cm_sidr_rep_handler(work);
		break;
	case IB_CM_LAP_RECEIVED:
		ret = cm_lap_handler(work);
		break;
	case IB_CM_APR_RECEIVED:
		ret = cm_apr_handler(work);
		break;
	case IB_CM_TIMEWAIT_EXIT:
		ret = cm_timewait_handler(work);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	if (ret)
		cm_free_work(work);
}

int ib_cm_establish(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;
	int ret = 0;

	work = kmalloc(sizeof *work, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state)
	{
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_id->state = IB_CM_ESTABLISHED;
		break;
	case IB_CM_ESTABLISHED:
		ret = -EISCONN;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret) {
		kfree(work);
		goto out;
	}

	/*
	 * The CM worker thread may try to destroy the cm_id before it
	 * can execute this work item.  To prevent potential deadlock,
	 * we need to find the cm_id once we're in the context of the
	 * worker thread, rather than holding a reference on it.
	 */
	INIT_WORK(&work->work, cm_work_handler, work);
	work->local_id = cm_id->local_id;
	work->remote_id = cm_id->remote_id;
	work->mad_recv_wc = NULL;
	work->cm_event.event = IB_CM_USER_ESTABLISHED;
	queue_work(cm.wq, &work->work);
out:
	return ret;
}
EXPORT_SYMBOL(ib_cm_establish);

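/*
 * All incoming CM MADs funnel through here from the MAD layer.  The
 * attribute ID is mapped to a cm_event and handling is deferred to the
 * ib_cm workqueue; REQ and LAP messages reserve extra room in the work
 * item for the path records decoded later by their handlers.
 */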
static void cm_recv_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_recv_wc *mad_recv_wc)
{
	struct cm_work *work;
	enum ib_cm_event_type event;
	int paths = 0;

	switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
	case CM_REQ_ATTR_ID:
		paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
						    alt_local_lid != 0);
		event = IB_CM_REQ_RECEIVED;
		break;
	case CM_MRA_ATTR_ID:
		event = IB_CM_MRA_RECEIVED;
		break;
	case CM_REJ_ATTR_ID:
		event = IB_CM_REJ_RECEIVED;
		break;
	case CM_REP_ATTR_ID:
		event = IB_CM_REP_RECEIVED;
		break;
	case CM_RTU_ATTR_ID:
		event = IB_CM_RTU_RECEIVED;
		break;
	case CM_DREQ_ATTR_ID:
		event = IB_CM_DREQ_RECEIVED;
		break;
	case CM_DREP_ATTR_ID:
		event = IB_CM_DREP_RECEIVED;
		break;
	case CM_SIDR_REQ_ATTR_ID:
		event = IB_CM_SIDR_REQ_RECEIVED;
		break;
	case CM_SIDR_REP_ATTR_ID:
		event = IB_CM_SIDR_REP_RECEIVED;
		break;
	case CM_LAP_ATTR_ID:
		paths = 1;
		event = IB_CM_LAP_RECEIVED;
		break;
	case CM_APR_ATTR_ID:
		event = IB_CM_APR_RECEIVED;
		break;
	default:
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
		       GFP_KERNEL);
	if (!work) {
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	INIT_WORK(&work->work, cm_work_handler, work);
	work->cm_event.event = event;
	work->mad_recv_wc = mad_recv_wc;
	work->port = (struct cm_port *)mad_agent->context;
	queue_work(cm.wq, &work->work);
}

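/*
 * The three helpers below fill in ib_qp_attr for the INIT, RTR, and
 * RTS transitions using state captured during connection setup, so a
 * consumer can step its QP through the required states by calling
 * ib_cm_init_qp_attr() followed by ib_modify_qp() at each stage.
 */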
static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
				struct ib_qp_attr *qp_attr,
				int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
				IB_QP_PKEY_INDEX | IB_QP_PORT;
		qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
					   IB_ACCESS_REMOTE_WRITE;
		if (cm_id_priv->responder_resources)
			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
						    IB_ACCESS_REMOTE_ATOMIC;
		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
		qp_attr->port_num = cm_id_priv->av.port->port_num;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
				IB_QP_DEST_QPN | IB_QP_RQ_PSN;
		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
		qp_attr->path_mtu = cm_id_priv->path_mtu;
		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
		if (cm_id_priv->qp_type == IB_QPT_RC) {
			*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
					 IB_QP_MIN_RNR_TIMER;
			qp_attr->max_dest_rd_atomic =
					cm_id_priv->responder_resources;
			qp_attr->min_rnr_timer = 0;
		}
		if (cm_id_priv->alt_av.ah_attr.dlid) {
			*qp_attr_mask |= IB_QP_ALT_PATH;
			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
		qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
		if (cm_id_priv->qp_type == IB_QPT_RC) {
			*qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
					 IB_QP_RNR_RETRY |
					 IB_QP_MAX_QP_RD_ATOMIC;
			qp_attr->timeout = cm_id_priv->local_ack_timeout;
			qp_attr->retry_cnt = cm_id_priv->retry_count;
			qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
			qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
		}
		if (cm_id_priv->alt_av.ah_attr.dlid) {
			*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
			qp_attr->path_mig_state = IB_MIG_REARM;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTR:
		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);

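/*
 * ib_client add/remove callbacks: cm_add_one() registers a GSI MAD
 * agent on each physical port and sets IB_PORT_CM_SUP in the port
 * capability mask; cm_remove_one() undoes both on device removal.
 */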
static void cm_add_one(struct ib_device *device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_mad_reg_req reg_req = {
		.mgmt_class = IB_MGMT_CLASS_CM,
		.mgmt_class_version = IB_CM_CLASS_VERSION
	};
	struct ib_port_modify port_modify = {
		.set_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int ret;
	u8 i;

	cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) *
			 device->phys_port_cnt, GFP_KERNEL);
	if (!cm_dev)
		return;

	cm_dev->device = device;
	cm_dev->ca_guid = device->node_guid;

	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
	for (i = 1; i <= device->phys_port_cnt; i++) {
		port = &cm_dev->port[i-1];
		port->cm_dev = cm_dev;
		port->port_num = i;
		port->mad_agent = ib_register_mad_agent(device, i,
							IB_QPT_GSI,
							&reg_req,
							0,
							cm_send_handler,
							cm_recv_handler,
							port);
		if (IS_ERR(port->mad_agent))
			goto error1;

		ret = ib_modify_port(device, i, 0, &port_modify);
		if (ret)
			goto error2;
	}
	ib_set_client_data(device, &cm_client, cm_dev);

	write_lock_irqsave(&cm.device_lock, flags);
	list_add_tail(&cm_dev->list, &cm.device_list);
	write_unlock_irqrestore(&cm.device_lock, flags);
	return;

error2:
	ib_unregister_mad_agent(port->mad_agent);
error1:
	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
	while (--i) {
		port = &cm_dev->port[i-1];
		ib_modify_port(device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
	}
	kfree(cm_dev);
}

static void cm_remove_one(struct ib_device *device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int i;

	cm_dev = ib_get_client_data(device, &cm_client);
	if (!cm_dev)
		return;

	write_lock_irqsave(&cm.device_lock, flags);
	list_del(&cm_dev->list);
	write_unlock_irqrestore(&cm.device_lock, flags);

	for (i = 1; i <= device->phys_port_cnt; i++) {
		port = &cm_dev->port[i-1];
		ib_modify_port(device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
	}
	kfree(cm_dev);
}

static int __init ib_cm_init(void)
{
	int ret;

	memset(&cm, 0, sizeof cm);
	INIT_LIST_HEAD(&cm.device_list);
	rwlock_init(&cm.device_lock);
	spin_lock_init(&cm.lock);
	cm.listen_service_table = RB_ROOT;
	cm.listen_service_id = __constant_be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
	cm.remote_id_table = RB_ROOT;
	cm.remote_qp_table = RB_ROOT;
	cm.remote_sidr_table = RB_ROOT;
	idr_init(&cm.local_id_table);
	idr_pre_get(&cm.local_id_table, GFP_KERNEL);

	cm.wq = create_workqueue("ib_cm");
	if (!cm.wq)
		return -ENOMEM;

	ret = ib_register_client(&cm_client);
	if (ret)
		goto error;

	return 0;
error:
	destroy_workqueue(cm.wq);
	return ret;
}

static void __exit ib_cm_cleanup(void)
{
	destroy_workqueue(cm.wq);
	ib_unregister_client(&cm_client);
	idr_destroy(&cm.local_id_table);
}

module_init(ib_cm_init);
module_exit(ib_cm_cleanup);