/*
 * Copyright (c) 2004-2006 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: cm.c 4311 2005-12-05 18:42:01Z sean.hefty $
 */
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include "cm_msgs.h"
MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");
static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device);

static struct ib_client cm_client = {
	.name   = "cm",
	.add    = cm_add_one,
	.remove = cm_remove_one
};
static struct ib_cm {
	spinlock_t lock;
	struct list_head device_list;
	rwlock_t device_lock;
	struct rb_root listen_service_table;
	u64 listen_service_id;
	/* struct rb_root peer_service_table; todo: fix peer to peer */
	struct rb_root remote_qp_table;
	struct rb_root remote_id_table;
	struct rb_root remote_sidr_table;
	struct idr local_id_table;
	__be32 random_id_operand;
	struct list_head timewait_list;
	struct workqueue_struct *wq;
} cm;
struct cm_port {
	struct cm_device *cm_dev;
	struct ib_mad_agent *mad_agent;
	u8 port_num;
};
struct cm_device {
	struct list_head list;
	struct ib_device *device;
	__be64 ca_guid;
	struct cm_port port[0];
};
struct cm_av {
	struct cm_port *port;
	union ib_gid dgid;
	struct ib_ah_attr ah_attr;
	u16 pkey_index;
	u8 packet_life_time;
};
struct cm_work {
	struct work_struct work;
	struct list_head list;
	struct cm_port *port;
	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
	__be32 local_id;			/* Established / timewait */
	__be32 remote_id;
	struct ib_cm_event cm_event;
	struct ib_sa_path_rec path[0];
};
struct cm_timewait_info {
	struct cm_work work;			/* Must be first. */
	struct list_head list;
	struct rb_node remote_qp_node;
	struct rb_node remote_id_node;
	__be64 remote_ca_guid;
	__be32 remote_qpn;
	u8 inserted_remote_qp;
	u8 inserted_remote_id;
};
struct cm_id_private {
	struct ib_cm_id	id;

	struct rb_node service_node;
	struct rb_node sidr_id_node;
	spinlock_t lock;	/* Do not acquire inside cm.lock */
	struct completion comp;
	atomic_t refcount;

	struct ib_mad_send_buf *msg;
	struct cm_timewait_info *timewait_info;
	/* todo: use alternate port on send failure */
	struct cm_av av;
	struct cm_av alt_av;
	struct ib_cm_compare_data *compare_data;

	void *private_data;
	__be64 tid;
	__be32 local_qpn;
	__be32 remote_qpn;
	enum ib_qp_type qp_type;
	__be32 sq_psn;
	__be32 rq_psn;
	int timeout_ms;
	enum ib_mtu path_mtu;
	u8 private_data_len;
	u8 max_cm_retries;
	u8 peer_to_peer;
	u8 responder_resources;
	u8 initiator_depth;
	u8 local_ack_timeout;
	u8 retry_count;
	u8 rnr_retry_count;
	u8 service_timeout;

	struct list_head work_list;
	atomic_t work_count;
};
static void cm_work_handler(void *data);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
	if (atomic_dec_and_test(&cm_id_priv->refcount))
		complete(&cm_id_priv->comp);
}
static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
			struct ib_mad_send_buf **msg)
{
	struct ib_mad_agent *mad_agent;
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	mad_agent = cm_id_priv->av.port->mad_agent;
	ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
			       cm_id_priv->av.pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}

	/* Timeout set by caller if response is expected. */
	m->ah = ah;
	m->retries = cm_id_priv->max_cm_retries;

	atomic_inc(&cm_id_priv->refcount);
	m->context[0] = cm_id_priv;
	*msg = m;
	return 0;
}
static int cm_alloc_response_msg(struct cm_port *port,
				 struct ib_mad_recv_wc *mad_recv_wc,
				 struct ib_mad_send_buf **msg)
{
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
				  mad_recv_wc->recv_buf.grh, port->port_num);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}
	m->ah = ah;
	*msg = m;
	return 0;
}
static void cm_free_msg(struct ib_mad_send_buf *msg)
{
	ib_destroy_ah(msg->ah);
	if (msg->context[0])
		cm_deref_id(msg->context[0]);
	ib_free_send_mad(msg);
}
static void * cm_copy_private_data(const void *private_data,
				   u8 private_data_len)
{
	void *data;

	if (!private_data || !private_data_len)
		return NULL;

	data = kmalloc(private_data_len, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	memcpy(data, private_data, private_data_len);
	return data;
}
static void cm_set_private_data(struct cm_id_private *cm_id_priv,
				void *private_data, u8 private_data_len)
{
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);

	cm_id_priv->private_data = private_data;
	cm_id_priv->private_data_len = private_data_len;
}
static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
				    struct ib_grh *grh, struct cm_av *av)
{
	av->port = port;
	av->pkey_index = wc->pkey_index;
	ib_init_ah_from_wc(port->cm_dev->device, port->port_num, wc,
			   grh, &av->ah_attr);
}
static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
{
	struct cm_device *cm_dev;
	struct cm_port *port = NULL;
	unsigned long flags;
	int ret;
	u8 p;

	read_lock_irqsave(&cm.device_lock, flags);
	list_for_each_entry(cm_dev, &cm.device_list, list) {
		if (!ib_find_cached_gid(cm_dev->device, &path->sgid,
					&p, NULL)) {
			port = &cm_dev->port[p-1];
			break;
		}
	}
	read_unlock_irqrestore(&cm.device_lock, flags);

	if (!port)
		return -EINVAL;

	ret = ib_find_cached_pkey(cm_dev->device, port->port_num,
				  be16_to_cpu(path->pkey), &av->pkey_index);
	if (ret)
		return ret;

	av->port = port;
	ib_init_ah_from_path(cm_dev->device, port->port_num, path,
			     &av->ah_attr);
	av->packet_life_time = path->packet_life_time;
	return 0;
}
static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;
	int ret, id;
	static int next_id;

	do {
		spin_lock_irqsave(&cm.lock, flags);
		ret = idr_get_new_above(&cm.local_id_table, cm_id_priv,
					next_id++, &id);
		spin_unlock_irqrestore(&cm.lock, flags);
	} while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) );

	cm_id_priv->id.local_id = (__force __be32) (id ^ cm.random_id_operand);
	return ret;
}
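/*
 * Note on the XOR above: the IDR hands out small, predictable integers, so
 * the local communication ID placed on the wire is the IDR key XORed with
 * random_id_operand (initialized from random bytes at module load, outside
 * this section).  Lookups XOR again to recover the key: for id = 5 and
 * operand = 0x1234abcd the wire ID is 0x1234abc8, and
 * 0x1234abc8 ^ 0x1234abcd == 5.  Stale or forged IDs thus rarely match an
 * active entry.
 */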
static void cm_free_id(__be32 local_id)
{
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	idr_remove(&cm.local_id_table,
		   (__force int) (local_id ^ cm.random_id_operand));
	spin_unlock_irqrestore(&cm.lock, flags);
}
static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	cm_id_priv = idr_find(&cm.local_id_table,
			      (__force int) (local_id ^ cm.random_id_operand));
	if (cm_id_priv) {
		if (cm_id_priv->id.remote_id == remote_id)
			atomic_inc(&cm_id_priv->refcount);
		else
			cm_id_priv = NULL;
	}

	return cm_id_priv;
}
static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	cm_id_priv = cm_get_id(local_id, remote_id);
	spin_unlock_irqrestore(&cm.lock, flags);

	return cm_id_priv;
}
static void cm_mask_copy(u8 *dst, u8 *src, u8 *mask)
{
	int i;

	for (i = 0; i < IB_CM_COMPARE_SIZE / sizeof(unsigned long); i++)
		((unsigned long *) dst)[i] = ((unsigned long *) src)[i] &
					     ((unsigned long *) mask)[i];
}
static int cm_compare_data(struct ib_cm_compare_data *src_data,
			   struct ib_cm_compare_data *dst_data)
{
	u8 src[IB_CM_COMPARE_SIZE];
	u8 dst[IB_CM_COMPARE_SIZE];

	if (!src_data || !dst_data)
		return 0;

	cm_mask_copy(src, src_data->data, dst_data->mask);
	cm_mask_copy(dst, dst_data->data, src_data->mask);
	return memcmp(src, dst, IB_CM_COMPARE_SIZE);
}
static int cm_compare_private_data(u8 *private_data,
				   struct ib_cm_compare_data *dst_data)
{
	u8 src[IB_CM_COMPARE_SIZE];

	if (!dst_data)
		return 0;

	cm_mask_copy(src, private_data, dst_data->mask);
	return memcmp(src, dst_data->data, IB_CM_COMPARE_SIZE);
}
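/*
 * The helpers above implement masked matching of listen compare data: a
 * listener supplies (data, mask) and an incoming REQ's private data matches
 * when (private_data & mask) == (data & mask).  For example, with mask 0x0f
 * and data 0x0a in a byte, private data 0x3a matches since 0x3a & 0x0f ==
 * 0x0a.  cm_compare_data() masks each side's data with the other's mask so
 * the memcmp() result can double as an ordering key in the listen rb-tree.
 */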
static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
{
	struct rb_node **link = &cm.listen_service_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	__be64 service_id = cm_id_priv->id.service_id;
	__be64 service_mask = cm_id_priv->id.service_mask;
	int data_cmp;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  service_node);
		data_cmp = cm_compare_data(cm_id_priv->compare_data,
					   cur_cm_id_priv->compare_data);
		if ((cur_cm_id_priv->id.service_mask & service_id) ==
		    (service_mask & cur_cm_id_priv->id.service_id) &&
		    (cm_id_priv->id.device == cur_cm_id_priv->id.device) &&
		    !data_cmp)
			return cur_cm_id_priv;

		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
			link = &(*link)->rb_left;
		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
			link = &(*link)->rb_right;
		else if (service_id < cur_cm_id_priv->id.service_id)
			link = &(*link)->rb_left;
		else if (service_id > cur_cm_id_priv->id.service_id)
			link = &(*link)->rb_right;
		else if (data_cmp < 0)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}
	rb_link_node(&cm_id_priv->service_node, parent, link);
	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
	return NULL;
}
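/*
 * Listens live in a single rb-tree keyed by (device, service_id,
 * compare_data), in that order.  If the walk above finds an existing entry
 * whose masked service ID range and compare data both overlap the new one,
 * that entry is returned instead of inserting, which is how ib_cm_listen()
 * reports a duplicate listen as -EBUSY.
 */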
static struct cm_id_private * cm_find_listen(struct ib_device *device,
					     __be64 service_id,
					     u8 *private_data)
{
	struct rb_node *node = cm.listen_service_table.rb_node;
	struct cm_id_private *cm_id_priv;
	int data_cmp;

	while (node) {
		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
		data_cmp = cm_compare_private_data(private_data,
						   cm_id_priv->compare_data);
		if ((cm_id_priv->id.service_mask & service_id) ==
		     cm_id_priv->id.service_id &&
		    (cm_id_priv->id.device == device) && !data_cmp)
			return cm_id_priv;

		if (device < cm_id_priv->id.device)
			node = node->rb_left;
		else if (device > cm_id_priv->id.device)
			node = node->rb_right;
		else if (service_id < cm_id_priv->id.service_id)
			node = node->rb_left;
		else if (service_id > cm_id_priv->id.service_id)
			node = node->rb_right;
		else if (data_cmp < 0)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return NULL;
}
static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
						     *timewait_info)
{
	struct rb_node **link = &cm.remote_id_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_id = timewait_info->work.remote_id;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_id_node);
		if (remote_id < cur_timewait_info->work.remote_id)
			link = &(*link)->rb_left;
		else if (remote_id > cur_timewait_info->work.remote_id)
			link = &(*link)->rb_right;
		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_left;
		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_id = 1;
	rb_link_node(&timewait_info->remote_id_node, parent, link);
	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
	return NULL;
}
static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
						   __be32 remote_id)
{
	struct rb_node *node = cm.remote_id_table.rb_node;
	struct cm_timewait_info *timewait_info;

	while (node) {
		timewait_info = rb_entry(node, struct cm_timewait_info,
					 remote_id_node);
		if (remote_id < timewait_info->work.remote_id)
			node = node->rb_left;
		else if (remote_id > timewait_info->work.remote_id)
			node = node->rb_right;
		else if (remote_ca_guid < timewait_info->remote_ca_guid)
			node = node->rb_left;
		else if (remote_ca_guid > timewait_info->remote_ca_guid)
			node = node->rb_right;
		else
			return timewait_info;
	}
	return NULL;
}
static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
						      *timewait_info)
{
	struct rb_node **link = &cm.remote_qp_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_qpn = timewait_info->remote_qpn;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_qp_node);
		if (remote_qpn < cur_timewait_info->remote_qpn)
			link = &(*link)->rb_left;
		else if (remote_qpn > cur_timewait_info->remote_qpn)
			link = &(*link)->rb_right;
		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_left;
		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_qp = 1;
	rb_link_node(&timewait_info->remote_qp_node, parent, link);
	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
	return NULL;
}
static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
						    *cm_id_priv)
{
	struct rb_node **link = &cm.remote_sidr_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	union ib_gid *port_gid = &cm_id_priv->av.dgid;
	__be32 remote_id = cm_id_priv->id.remote_id;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  sidr_id_node);
		if (remote_id < cur_cm_id_priv->id.remote_id)
			link = &(*link)->rb_left;
		else if (remote_id > cur_cm_id_priv->id.remote_id)
			link = &(*link)->rb_right;
		else {
			int cmp;
			cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
				     sizeof *port_gid);
			if (cmp < 0)
				link = &(*link)->rb_left;
			else if (cmp > 0)
				link = &(*link)->rb_right;
			else
				return cur_cm_id_priv;
		}
	}
	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	return NULL;
}
static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
			       enum ib_cm_sidr_status status)
{
	struct ib_cm_sidr_rep_param param;

	memset(&param, 0, sizeof param);
	param.status = status;
	ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
}
struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
				 ib_cm_handler cm_handler,
				 void *context)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->id.state = IB_CM_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.remote_cm_qpn = 1;
	ret = cm_alloc_id(cm_id_priv);
	if (ret)
		goto error;

	spin_lock_init(&cm_id_priv->lock);
	init_completion(&cm_id_priv->comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	atomic_set(&cm_id_priv->work_count, -1);
	atomic_set(&cm_id_priv->refcount, 1);
	return &cm_id_priv->id;

error:
	kfree(cm_id_priv);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_cm_id);
static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
	struct cm_work *work;

	if (list_empty(&cm_id_priv->work_list))
		return NULL;

	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
	list_del(&work->list);
	return work;
}
static void cm_free_work(struct cm_work *work)
{
	if (work->mad_recv_wc)
		ib_free_recv_mad(work->mad_recv_wc);
	kfree(work);
}
static inline int cm_convert_to_ms(int iba_time)
{
	/* approximate conversion to ms from 4.096us x 2^iba_time */
	return 1 << max(iba_time - 8, 0);
}
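/*
 * Worked example of the approximation above: IBA encodes time as
 * 4.096us * 2^iba_time.  For iba_time = 14 the exact value is
 * 4.096us * 16384 ~= 67ms, while 1 << (14 - 8) gives 64ms; the shortcut
 * stays within about 5% for iba_time >= 8 and clamps smaller encodings
 * to 1ms.
 */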
static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
{
	if (timewait_info->inserted_remote_id) {
		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
		timewait_info->inserted_remote_id = 0;
	}

	if (timewait_info->inserted_remote_qp) {
		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
		timewait_info->inserted_remote_qp = 0;
	}
}
static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
{
	struct cm_timewait_info *timewait_info;

	timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
	if (!timewait_info)
		return ERR_PTR(-ENOMEM);

	timewait_info->work.local_id = local_id;
	INIT_WORK(&timewait_info->work.work, cm_work_handler,
		  &timewait_info->work);
	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
	return timewait_info;
}
static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
	int wait_time;
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	cm_cleanup_timewait(cm_id_priv->timewait_info);
	list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
	spin_unlock_irqrestore(&cm.lock, flags);

	/*
	 * The cm_id could be destroyed by the user before we exit timewait.
	 * To protect against this, we search for the cm_id after exiting
	 * timewait before notifying the user that we've exited timewait.
	 */
	cm_id_priv->id.state = IB_CM_TIMEWAIT;
	wait_time = cm_convert_to_ms(cm_id_priv->local_ack_timeout);
	queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
			   msecs_to_jiffies(wait_time));
	cm_id_priv->timewait_info = NULL;
}
static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;

	cm_id_priv->id.state = IB_CM_IDLE;
	if (cm_id_priv->timewait_info) {
		spin_lock_irqsave(&cm.lock, flags);
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irqrestore(&cm.lock, flags);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
	}
}
static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
retest:
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_LISTEN:
		cm_id->state = IB_CM_IDLE;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		spin_lock_irqsave(&cm.lock, flags);
		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
		spin_unlock_irqrestore(&cm.lock, flags);
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id->state = IB_CM_IDLE;
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	case IB_CM_SIDR_REQ_RCVD:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
		break;
	case IB_CM_REQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
			       &cm_id_priv->av.port->cm_dev->ca_guid,
			       sizeof cm_id_priv->av.port->cm_dev->ca_guid,
			       NULL, 0);
		break;
	case IB_CM_REQ_RCVD:
		if (err == -ENOMEM) {
			/* Do not reject to allow future retries. */
			cm_reset_to_idle(cm_id_priv);
			spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		} else {
			spin_unlock_irqrestore(&cm_id_priv->lock, flags);
			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
				       NULL, 0, NULL, 0);
		}
		break;
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* Fall through */
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
			       NULL, 0, NULL, 0);
		break;
	case IB_CM_ESTABLISHED:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_dreq(cm_id, NULL, 0);
		goto retest;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	case IB_CM_DREQ_RCVD:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	}

	cm_free_id(cm_id->local_id);
	cm_deref_id(cm_id_priv);
	wait_for_completion(&cm_id_priv->comp);
	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
		cm_free_work(work);
	kfree(cm_id_priv->compare_data);
	kfree(cm_id_priv->private_data);
	kfree(cm_id_priv);
}
void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
	cm_destroy_id(cm_id, 0);
}
EXPORT_SYMBOL(ib_destroy_cm_id);
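/*
 * Note that destruction is synchronous: cm_destroy_id() drops the caller's
 * reference and then waits on the completion until all concurrent handlers
 * and MAD callbacks have released theirs, so ib_destroy_cm_id() must not be
 * called from the cm_id's own callback.  Returning a nonzero value from the
 * callback is the supported self-destruct path (see cm_process_work()).
 */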
int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
		 struct ib_cm_compare_data *compare_data)
{
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	unsigned long flags;
	int ret = 0;

	service_mask = service_mask ? service_mask :
		       __constant_cpu_to_be64(~0ULL);
	service_id &= service_mask;
	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	if (cm_id->state != IB_CM_IDLE)
		return -EINVAL;

	if (compare_data) {
		cm_id_priv->compare_data = kzalloc(sizeof *compare_data,
						   GFP_KERNEL);
		if (!cm_id_priv->compare_data)
			return -ENOMEM;
		cm_mask_copy(cm_id_priv->compare_data->data,
			     compare_data->data, compare_data->mask);
		memcpy(cm_id_priv->compare_data->mask, compare_data->mask,
		       IB_CM_COMPARE_SIZE);
	}

	cm_id->state = IB_CM_LISTEN;

	spin_lock_irqsave(&cm.lock, flags);
	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
		cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
		cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	} else {
		cm_id->service_id = service_id;
		cm_id->service_mask = service_mask;
	}
	cur_cm_id_priv = cm_insert_listen(cm_id_priv);
	spin_unlock_irqrestore(&cm.lock, flags);

	if (cur_cm_id_priv) {
		cm_id->state = IB_CM_IDLE;
		kfree(cm_id_priv->compare_data);
		cm_id_priv->compare_data = NULL;
		ret = -EBUSY;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_listen);
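/*
 * A minimal, illustrative usage sketch for the two exported calls above
 * (handler name, context, and service ID below are made-up examples, not
 * part of this file):
 *
 *	static int my_cm_handler(struct ib_cm_id *id, struct ib_cm_event *evt)
 *	{
 *		return 0;	-- returning nonzero destroys the cm_id
 *	}
 *	...
 *	cm_id = ib_create_cm_id(device, my_cm_handler, my_context);
 *	if (!IS_ERR(cm_id))
 *		ret = ib_cm_listen(cm_id, cpu_to_be64(0x1000), 0, NULL);
 *
 * A zero service_mask, as above, is promoted to ~0ULL and so requests an
 * exact match on service_id; a non-NULL compare_data additionally filters
 * incoming REQs on their private data (see cm_compare_private_data()).
 */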
static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
			  enum cm_msg_sequence msg_seq)
{
	u64 hi_tid, low_tid;

	hi_tid   = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
	low_tid  = (u64) ((__force u32)cm_id_priv->id.local_id |
			  (msg_seq << 30));
	return cpu_to_be64(hi_tid | low_tid);
}
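/*
 * TID layout produced above: the high 32 bits carry the MAD agent's hi_tid
 * (unique per agent), the low 32 bits carry the local communication ID with
 * the message sequence (REQ/LAP/DREQ/SIDR) in bits 30-31, letting responses
 * be demultiplexed back to the cm_id and exchange that sent the request.
 */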
static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
			      __be16 attr_id, __be64 tid)
{
	hdr->base_version  = IB_MGMT_BASE_VERSION;
	hdr->mgmt_class	   = IB_MGMT_CLASS_CM;
	hdr->class_version = IB_CM_CLASS_VERSION;
	hdr->method	   = IB_MGMT_METHOD_SEND;
	hdr->attr_id	   = attr_id;
	hdr->tid	   = tid;
}
static void cm_format_req(struct cm_req_msg *req_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_req_param *param)
{
	cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));

	req_msg->local_comm_id = cm_id_priv->id.local_id;
	req_msg->service_id = param->service_id;
	req_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
	cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
	cm_req_set_resp_res(req_msg, param->responder_resources);
	cm_req_set_init_depth(req_msg, param->initiator_depth);
	cm_req_set_remote_resp_timeout(req_msg,
				       param->remote_cm_response_timeout);
	cm_req_set_qp_type(req_msg, param->qp_type);
	cm_req_set_flow_ctrl(req_msg, param->flow_control);
	cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
	cm_req_set_local_resp_timeout(req_msg,
				      param->local_cm_response_timeout);
	cm_req_set_retry_count(req_msg, param->retry_count);
	req_msg->pkey = param->primary_path->pkey;
	cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
	cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
	cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
	cm_req_set_srq(req_msg, param->srq);

	req_msg->primary_local_lid = param->primary_path->slid;
	req_msg->primary_remote_lid = param->primary_path->dlid;
	req_msg->primary_local_gid = param->primary_path->sgid;
	req_msg->primary_remote_gid = param->primary_path->dgid;
	cm_req_set_primary_flow_label(req_msg, param->primary_path->flow_label);
	cm_req_set_primary_packet_rate(req_msg, param->primary_path->rate);
	req_msg->primary_traffic_class = param->primary_path->traffic_class;
	req_msg->primary_hop_limit = param->primary_path->hop_limit;
	cm_req_set_primary_sl(req_msg, param->primary_path->sl);
	cm_req_set_primary_subnet_local(req_msg, 1); /* local only... */
	cm_req_set_primary_local_ack_timeout(req_msg,
		min(31, param->primary_path->packet_life_time + 1));

	if (param->alternate_path) {
		req_msg->alt_local_lid = param->alternate_path->slid;
		req_msg->alt_remote_lid = param->alternate_path->dlid;
		req_msg->alt_local_gid = param->alternate_path->sgid;
		req_msg->alt_remote_gid = param->alternate_path->dgid;
		cm_req_set_alt_flow_label(req_msg,
					  param->alternate_path->flow_label);
		cm_req_set_alt_packet_rate(req_msg, param->alternate_path->rate);
		req_msg->alt_traffic_class = param->alternate_path->traffic_class;
		req_msg->alt_hop_limit = param->alternate_path->hop_limit;
		cm_req_set_alt_sl(req_msg, param->alternate_path->sl);
		cm_req_set_alt_subnet_local(req_msg, 1); /* local only... */
		cm_req_set_alt_local_ack_timeout(req_msg,
			min(31, param->alternate_path->packet_life_time + 1));
	}

	if (param->private_data && param->private_data_len)
		memcpy(req_msg->private_data, param->private_data,
		       param->private_data_len);
}
static int cm_validate_req_param(struct ib_cm_req_param *param)
{
	/* peer-to-peer not supported */
	if (param->peer_to_peer)
		return -EINVAL;

	if (!param->primary_path)
		return -EINVAL;

	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC)
		return -EINVAL;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	if (param->alternate_path &&
	    (param->alternate_path->pkey != param->primary_path->pkey ||
	     param->alternate_path->mtu != param->primary_path->mtu))
		return -EINVAL;

	return 0;
}
int ib_send_cm_req(struct ib_cm_id *cm_id,
		   struct ib_cm_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct cm_req_msg *req_msg;
	unsigned long flags;
	int ret;

	ret = cm_validate_req_param(param);
	if (ret)
		return ret;

	/* Verify that we're not in timewait. */
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_IDLE) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto out;
	}

	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
	if (ret)
		goto error1;
	if (param->alternate_path) {
		ret = cm_init_av_by_path(param->alternate_path,
					 &cm_id_priv->alt_av);
		if (ret)
			goto error1;
	}
	cm_id->service_id = param->service_id;
	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	cm_id_priv->timeout_ms = cm_convert_to_ms(
				    param->primary_path->packet_life_time) * 2 +
				 cm_convert_to_ms(
				    param->remote_cm_response_timeout);
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->retry_count = param->retry_count;
	cm_id_priv->path_mtu = param->primary_path->mtu;
	cm_id_priv->qp_type = param->qp_type;

	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
	if (ret)
		goto error1;

	req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
	cm_format_req(req_msg, cm_id_priv, param);
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
	cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

	cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->local_ack_timeout =
				cm_req_get_primary_local_ack_timeout(req_msg);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto error2;
	}
	BUG_ON(cm_id->state != IB_CM_IDLE);
	cm_id->state = IB_CM_REQ_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error2:	cm_free_msg(cm_id_priv->msg);
error1:	kfree(cm_id_priv->timewait_info);
out:	return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);
static int cm_issue_rej(struct cm_port *port,
			struct ib_mad_recv_wc *mad_recv_wc,
			enum ib_cm_rej_reason reason,
			enum cm_msg_response msg_rejected,
			void *ari, u8 ari_length)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_rej_msg *rej_msg, *rcv_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	/* We just need common CM header information. Cast to any message. */
	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
	rej_msg = (struct cm_rej_msg *) msg->mad;

	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
	rej_msg->remote_comm_id = rcv_msg->local_comm_id;
	rej_msg->local_comm_id = rcv_msg->remote_comm_id;
	cm_rej_set_msg_rejected(rej_msg, msg_rejected);
	rej_msg->reason = cpu_to_be16(reason);

	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

	return ret;
}
, __be64 remote_ca_guid
,
1084 __be32 local_qpn
, __be32 remote_qpn
)
1086 return (be64_to_cpu(local_ca_guid
) > be64_to_cpu(remote_ca_guid
) ||
1087 ((local_ca_guid
== remote_ca_guid
) &&
1088 (be32_to_cpu(local_qpn
) > be32_to_cpu(remote_qpn
))));
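/*
 * For peer-to-peer connections both sides may issue a REQ; the comparison
 * above provides a deterministic tie-break (higher CA GUID wins, then
 * higher QPN) so that exactly one side acts as the initiator.  Full
 * peer-to-peer support is still marked "todo" elsewhere in this file.
 */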
static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
				     struct ib_sa_path_rec *primary_path,
				     struct ib_sa_path_rec *alt_path)
{
	memset(primary_path, 0, sizeof *primary_path);
	primary_path->dgid = req_msg->primary_local_gid;
	primary_path->sgid = req_msg->primary_remote_gid;
	primary_path->dlid = req_msg->primary_local_lid;
	primary_path->slid = req_msg->primary_remote_lid;
	primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
	primary_path->hop_limit = req_msg->primary_hop_limit;
	primary_path->traffic_class = req_msg->primary_traffic_class;
	primary_path->reversible = 1;
	primary_path->pkey = req_msg->pkey;
	primary_path->sl = cm_req_get_primary_sl(req_msg);
	primary_path->mtu_selector = IB_SA_EQ;
	primary_path->mtu = cm_req_get_path_mtu(req_msg);
	primary_path->rate_selector = IB_SA_EQ;
	primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
	primary_path->packet_life_time_selector = IB_SA_EQ;
	primary_path->packet_life_time =
		cm_req_get_primary_local_ack_timeout(req_msg);
	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);

	if (req_msg->alt_local_lid) {
		memset(alt_path, 0, sizeof *alt_path);
		alt_path->dgid = req_msg->alt_local_gid;
		alt_path->sgid = req_msg->alt_remote_gid;
		alt_path->dlid = req_msg->alt_local_lid;
		alt_path->slid = req_msg->alt_remote_lid;
		alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
		alt_path->hop_limit = req_msg->alt_hop_limit;
		alt_path->traffic_class = req_msg->alt_traffic_class;
		alt_path->reversible = 1;
		alt_path->pkey = req_msg->pkey;
		alt_path->sl = cm_req_get_alt_sl(req_msg);
		alt_path->mtu_selector = IB_SA_EQ;
		alt_path->mtu = cm_req_get_path_mtu(req_msg);
		alt_path->rate_selector = IB_SA_EQ;
		alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
		alt_path->packet_life_time_selector = IB_SA_EQ;
		alt_path->packet_life_time =
			cm_req_get_alt_local_ack_timeout(req_msg);
		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
	}
}
static void cm_format_req_event(struct cm_work *work,
				struct cm_id_private *cm_id_priv,
				struct ib_cm_id *listen_id)
{
	struct cm_req_msg *req_msg;
	struct ib_cm_req_event_param *param;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.req_rcvd;
	param->listen_id = listen_id;
	param->port = cm_id_priv->av.port->port_num;
	param->primary_path = &work->path[0];
	if (req_msg->alt_local_lid)
		param->alternate_path = &work->path[1];
	else
		param->alternate_path = NULL;
	param->remote_ca_guid = req_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
	param->qp_type = cm_req_get_qp_type(req_msg);
	param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
	param->responder_resources = cm_req_get_init_depth(req_msg);
	param->initiator_depth = cm_req_get_resp_res(req_msg);
	param->local_cm_response_timeout =
					cm_req_get_remote_resp_timeout(req_msg);
	param->flow_control = cm_req_get_flow_ctrl(req_msg);
	param->remote_cm_response_timeout =
					cm_req_get_local_resp_timeout(req_msg);
	param->retry_count = cm_req_get_retry_count(req_msg);
	param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	param->srq = cm_req_get_srq(req_msg);
	work->cm_event.private_data = &req_msg->private_data;
}
static void cm_process_work(struct cm_id_private *cm_id_priv,
			    struct cm_work *work)
{
	unsigned long flags;
	int ret;

	/* We will typically only have the current event to report. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
	cm_free_work(work);

	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		work = cm_dequeue_work(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		BUG_ON(!work);
		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
						&work->cm_event);
		cm_free_work(work);
	}
	cm_deref_id(cm_id_priv);
	if (ret)
		cm_destroy_id(&cm_id_priv->id, ret);
}
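/*
 * Callbacks above are serialized per cm_id by work_count, which starts at
 * -1: a receive handler's atomic_inc_and_test() only succeeds for the first
 * pending event, making that thread the dispatcher, while later events are
 * queued on work_list.  atomic_add_negative(-1, ...) in the loop drains the
 * queue and returns the count to -1 when no work remains.
 */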
static void cm_format_mra(struct cm_mra_msg *mra_msg,
			  struct cm_id_private *cm_id_priv,
			  enum cm_msg_response msg_mraed, u8 service_timeout,
			  const void *private_data, u8 private_data_len)
{
	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
	cm_mra_set_msg_mraed(mra_msg, msg_mraed);
	mra_msg->local_comm_id = cm_id_priv->id.local_id;
	mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_mra_set_service_timeout(mra_msg, service_timeout);

	if (private_data && private_data_len)
		memcpy(mra_msg->private_data, private_data, private_data_len);
}
static void cm_format_rej(struct cm_rej_msg *rej_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_rej_reason reason,
			  void *ari,
			  u8 ari_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
	rej_msg->remote_comm_id = cm_id_priv->id.remote_id;

	switch(cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		rej_msg->local_comm_id = 0;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_MRA_REQ_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
		break;
	default:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
		break;
	}

	rej_msg->reason = cpu_to_be16(reason);
	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	if (private_data && private_data_len)
		memcpy(rej_msg->private_data, private_data, private_data_len);
}
static void cm_dup_req_handler(struct cm_work *work,
			       struct cm_id_private *cm_id_priv)
{
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	/* Quick state check to discard duplicate REQs. */
	if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		return;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_MRA_REQ_SENT:
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		break;
	case IB_CM_TIMEWAIT:
		cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
		break;
	default:
		goto unlock;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	return;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:	cm_free_msg(msg);
}
static struct cm_id_private * cm_match_req(struct cm_work *work,
					   struct cm_id_private *cm_id_priv)
{
	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
	struct cm_timewait_info *timewait_info;
	struct cm_req_msg *req_msg;
	unsigned long flags;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	/* Check for duplicate REQ and stale connections. */
	spin_lock_irqsave(&cm.lock, flags);
	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
	if (!timewait_info)
		timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);

	if (timewait_info) {
		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
					   timewait_info->work.remote_id);
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irqrestore(&cm.lock, flags);
		if (cur_cm_id_priv) {
			cm_dup_req_handler(work, cur_cm_id_priv);
			cm_deref_id(cur_cm_id_priv);
		} else
			cm_issue_rej(work->port, work->mad_recv_wc,
				     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
				     NULL, 0);
		listen_cm_id_priv = NULL;
		goto out;
	}

	/* Find matching listen request. */
	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
					   req_msg->service_id,
					   req_msg->private_data);
	if (!listen_cm_id_priv) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irqrestore(&cm.lock, flags);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		goto out;
	}
	atomic_inc(&listen_cm_id_priv->refcount);
	atomic_inc(&cm_id_priv->refcount);
	cm_id_priv->id.state = IB_CM_REQ_RCVD;
	atomic_inc(&cm_id_priv->work_count);
	spin_unlock_irqrestore(&cm.lock, flags);
out:
	return listen_cm_id_priv;
}
static int cm_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
	struct cm_req_msg *req_msg;
	int ret;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	cm_id_priv->id.remote_id = req_msg->local_comm_id;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto destroy;
	}
	cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);

	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
	if (!listen_cm_id_priv) {
		ret = -EINVAL;
		kfree(cm_id_priv->timewait_info);
		goto destroy;
	}

	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = listen_cm_id_priv->id.context;
	cm_id_priv->id.service_id = req_msg->service_id;
	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);

	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
	ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
	if (ret) {
		ib_get_cached_gid(work->port->cm_dev->device,
				  work->port->port_num, 0, &work->path[0].sgid);
		ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
			       &work->path[0].sgid, sizeof work->path[0].sgid,
			       NULL, 0);
		goto rejected;
	}
	if (req_msg->alt_local_lid) {
		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
		if (ret) {
			ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
				       &work->path[0].sgid,
				       sizeof work->path[0].sgid, NULL, 0);
			goto rejected;
		}
	}
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->timeout_ms = cm_convert_to_ms(
					cm_req_get_local_resp_timeout(req_msg));
	cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
	cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->local_ack_timeout =
				cm_req_get_primary_local_ack_timeout(req_msg);
	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);

	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(listen_cm_id_priv);
	return 0;

rejected:
	atomic_dec(&cm_id_priv->refcount);
	cm_deref_id(listen_cm_id_priv);
destroy:
	ib_destroy_cm_id(cm_id);
	return ret;
}
static void cm_format_rep(struct cm_rep_msg *rep_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_rep_param *param)
{
	cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
	rep_msg->local_comm_id = cm_id_priv->id.local_id;
	rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
	cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
	rep_msg->resp_resources = param->responder_resources;
	rep_msg->initiator_depth = param->initiator_depth;
	cm_rep_set_target_ack_delay(rep_msg, param->target_ack_delay);
	cm_rep_set_failover(rep_msg, param->failover_accepted);
	cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
	cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
	cm_rep_set_srq(rep_msg, param->srq);
	rep_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;

	if (param->private_data && param->private_data_len)
		memcpy(rep_msg->private_data, param->private_data,
		       param->private_data_len);
}
int ib_send_cm_rep(struct ib_cm_id *cm_id,
		   struct ib_cm_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct cm_rep_msg *rep_msg;
	unsigned long flags;
	int ret;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REQ_RCVD &&
	    cm_id->state != IB_CM_MRA_REQ_SENT) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	rep_msg = (struct cm_rep_msg *) msg->mad;
	cm_format_rep(rep_msg, cm_id_priv, param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_REP_SENT;
	cm_id_priv->msg = msg;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rep);
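/*
 * The three-way handshake implemented by ib_send_cm_req/rep/rtu moves the
 * active side through IDLE -> REQ_SENT -> REP_RCVD -> ESTABLISHED (on
 * sending the RTU) and the passive side through REQ_RCVD -> REP_SENT ->
 * ESTABLISHED (on receiving it), with the MRA_* states interposed when
 * either side asks for more time.
 */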
static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
			  struct cm_id_private *cm_id_priv,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
	rtu_msg->local_comm_id = cm_id_priv->id.local_id;
	rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(rtu_msg->private_data, private_data, private_data_len);
}
int ib_send_cm_rtu(struct ib_cm_id *cm_id,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REP_RCVD &&
	    cm_id->state != IB_CM_MRA_REP_SENT) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
		      private_data, private_data_len);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		kfree(data);
		return ret;
	}

	cm_id->state = IB_CM_ESTABLISHED;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rtu);
static void cm_format_rep_event(struct cm_work *work)
{
	struct cm_rep_msg *rep_msg;
	struct ib_cm_rep_event_param *param;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rep_rcvd;
	param->remote_ca_guid = rep_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
	param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
	param->responder_resources = rep_msg->initiator_depth;
	param->initiator_depth = rep_msg->resp_resources;
	param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
	param->failover_accepted = cm_rep_get_failover(rep_msg);
	param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
	param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
	param->srq = cm_rep_get_srq(rep_msg);
	work->cm_event.private_data = &rep_msg->private_data;
}
static void cm_dup_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
				   rep_msg->local_comm_id);
	if (!cm_id_priv)
		return; /* Unmatched reply. */

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		goto deref;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
		cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else
		goto unlock;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	goto deref;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:	cm_free_msg(msg);
deref:	cm_deref_id(cm_id_priv);
}
static int cm_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	unsigned long flags;
	int ret;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
	if (!cm_id_priv) {
		cm_dup_rep_handler(work);
		return -EINVAL;
	}

	cm_format_rep_event(work);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto error;
	}

	cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);

	spin_lock(&cm.lock);
	/* Check for duplicate REP. */
	if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
		spin_unlock(&cm.lock);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto error;
	}
	/* Check for a stale connection. */
	if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
		rb_erase(&cm_id_priv->timewait_info->remote_id_node,
			 &cm.remote_id_table);
		cm_id_priv->timewait_info->inserted_remote_id = 0;
		spin_unlock(&cm.lock);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
			     NULL, 0);
		ret = -EINVAL;
		goto error;
	}
	spin_unlock(&cm.lock);

	cm_id_priv->id.state = IB_CM_REP_RCVD;
	cm_id_priv->id.remote_id = rep_msg->local_comm_id;
	cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
	cm_id_priv->initiator_depth = rep_msg->resp_resources;
	cm_id_priv->responder_resources = rep_msg->initiator_depth;
	cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);

	/* todo: handle peer_to_peer */

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

error:
	cm_deref_id(cm_id_priv);
	return ret;
}
static int cm_establish_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	/* See comment in ib_cm_establish about lookup. */
	cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static int cm_rtu_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rtu_msg *rtu_msg;
	unsigned long flags;
	int ret;

	rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
				   rtu_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &rtu_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_REP_SENT &&
	    cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_ESTABLISHED;

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
			   struct cm_id_private *cm_id_priv,
			   const void *private_data,
			   u8 private_data_len)
{
	cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
	dreq_msg->local_comm_id = cm_id_priv->id.local_id;
	dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);

	if (private_data && private_data_len)
		memcpy(dreq_msg->private_data, private_data, private_data_len);
}
int ib_send_cm_dreq(struct ib_cm_id *cm_id,
		    const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		goto out;
	}

	cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_DREQ_SENT;
	cm_id_priv->msg = msg;
out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_dreq);
static void cm_format_drep(struct cm_drep_msg *drep_msg,
			   struct cm_id_private *cm_id_priv,
			   const void *private_data,
			   u8 private_data_len)
{
	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
	drep_msg->local_comm_id = cm_id_priv->id.local_id;
	drep_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(drep_msg->private_data, private_data, private_data_len);
}
int ib_send_cm_drep(struct ib_cm_id *cm_id,
		    const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_DREQ_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		kfree(data);
		return -EINVAL;
	}

	cm_set_private_data(cm_id_priv, data, private_data_len);
	cm_enter_timewait(cm_id_priv);

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_drep);
static int cm_issue_drep(struct cm_port *port,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_dreq_msg *dreq_msg;
	struct cm_drep_msg *drep_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
	drep_msg = (struct cm_drep_msg *) msg->mad;

	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
	drep_msg->remote_comm_id = dreq_msg->local_comm_id;
	drep_msg->local_comm_id = dreq_msg->remote_comm_id;

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

	return ret;
}
static int cm_dreq_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_dreq_msg *dreq_msg;
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
				   dreq_msg->local_comm_id);
	if (!cm_id_priv) {
		cm_issue_drep(work->port, work->mad_recv_wc);
		return -EINVAL;
	}

	work->cm_event.private_data = &dreq_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
		goto unlock;

	switch (cm_id_priv->id.state) {
	case IB_CM_REP_SENT:
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		break;
	case IB_CM_ESTABLISHED:
	case IB_CM_MRA_REP_RCVD:
		break;
	case IB_CM_TIMEWAIT:
		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
			goto unlock;

		cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
			       cm_id_priv->private_data,
			       cm_id_priv->private_data_len);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

		if (ib_post_send_mad(msg, NULL))
			cm_free_msg(msg);
		goto deref;
	default:
		goto unlock;
	}
	cm_id_priv->id.state = IB_CM_DREQ_RCVD;
	cm_id_priv->tid = dreq_msg->hdr.tid;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
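
/*
 * The atomic_inc_and_test(&cm_id_priv->work_count) idiom above is the
 * serialization point shared by all of the event handlers: work_count
 * rests at -1 while no event is being dispatched, so the increment that
 * reaches zero elects the current thread to call cm_process_work(),
 * while concurrent events queue on cm_id_priv->work_list and are
 * drained by the elected thread.  The shape of the pattern:
 *
 *	ret = atomic_inc_and_test(&cm_id_priv->work_count);
 *	if (!ret)
 *		list_add_tail(&work->list, &cm_id_priv->work_list);
 *	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 *	if (ret)
 *		cm_process_work(cm_id_priv, work);
 */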
static int cm_drep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_drep_msg *drep_msg;
	unsigned long flags;
	int ret;

	drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
				   drep_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &drep_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
	    cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_enter_timewait(cm_id_priv);

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
int ib_send_cm_rej(struct ib_cm_id *cm_id,
		   enum ib_cm_rej_reason reason,
		   void *ari,
		   u8 ari_length,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (!ret)
			cm_format_rej((struct cm_rej_msg *) msg->mad,
				      cm_id_priv, reason, ari, ari_length,
				      private_data, private_data_len);

		cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (!ret)
			cm_format_rej((struct cm_rej_msg *) msg->mad,
				      cm_id_priv, reason, ari, ari_length,
				      private_data, private_data_len);

		cm_enter_timewait(cm_id_priv);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	if (ret)
		goto out;

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rej);
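
/*
 * A listener typically rejects from its IB_CM_REQ_RCVD callback.  A
 * minimal sketch, assuming a consumer-defined reason applies (the ARI
 * buffer is optional and may be NULL):
 *
 *	ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
 *		       NULL, 0, NULL, 0);
 *
 * Depending on the current state, the id either resets to idle
 * (REQ-side rejects) or enters timewait (REP-side rejects), mirroring
 * the switch above.
 */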
static void cm_format_rej_event(struct cm_work *work)
{
	struct cm_rej_msg *rej_msg;
	struct ib_cm_rej_event_param *param;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rej_rcvd;
	param->ari = rej_msg->ari;
	param->ari_length = cm_rej_get_reject_info_len(rej_msg);
	param->reason = __be16_to_cpu(rej_msg->reason);
	work->cm_event.private_data = &rej_msg->private_data;
}
static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	__be32 remote_id;

	remote_id = rej_msg->local_comm_id;

	if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
		spin_lock_irqsave(&cm.lock, flags);
		timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
						   remote_id);
		if (!timewait_info) {
			spin_unlock_irqrestore(&cm.lock, flags);
			return NULL;
		}
		cm_id_priv = idr_find(&cm.local_id_table, (__force int)
				      (timewait_info->work.local_id ^
				       cm.random_id_operand));
		if (cm_id_priv) {
			if (cm_id_priv->id.remote_id == remote_id)
				atomic_inc(&cm_id_priv->refcount);
			else
				cm_id_priv = NULL;
		}
		spin_unlock_irqrestore(&cm.lock, flags);
	} else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
	else
		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);

	return cm_id_priv;
}
static int cm_rej_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rej_msg *rej_msg;
	unsigned long flags;
	int ret;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_rejected_id(rej_msg);
	if (!cm_id_priv)
		return -EINVAL;

	cm_format_rej_event(work);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* fall through */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
		if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
			cm_enter_timewait(cm_id_priv);
		else
			cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* fall through */
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_ESTABLISHED:
		cm_enter_timewait(cm_id_priv);
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}

	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return ret;
}
int ib_send_cm_mra(struct ib_cm_id *cm_id,
		   u8 service_timeout,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	void *data;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
		cm_id->state = IB_CM_MRA_REQ_SENT;
		break;
	case IB_CM_REP_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
		cm_id->state = IB_CM_MRA_REP_SENT;
		break;
	case IB_CM_ESTABLISHED:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_OTHER, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
		cm_id->lap_state = IB_CM_MRA_LAP_SENT;
		break;
	default:
		ret = -EINVAL;
		goto error1;
	}
	cm_id_priv->service_timeout = service_timeout;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error1:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;

error2:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	cm_free_msg(msg);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_mra);
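
/*
 * An MRA does not advance the protocol; it only asks the peer to extend
 * its retry timeout while the receiver finishes processing a REQ, REP,
 * or LAP.  service_timeout is a 5-bit IBA-encoded exponent
 * (4.096us * 2^t), so a consumer that needs a few seconds of grace
 * before replying might do (illustrative value):
 *
 *	ib_send_cm_mra(cm_id, 20, NULL, 0);
 *
 * from its IB_CM_REQ_RECEIVED handler and send the REP later.
 */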
static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
{
	switch (cm_mra_get_msg_mraed(mra_msg)) {
	case CM_MSG_RESPONSE_REQ:
		return cm_acquire_id(mra_msg->remote_comm_id, 0);
	case CM_MSG_RESPONSE_REP:
	case CM_MSG_RESPONSE_OTHER:
		return cm_acquire_id(mra_msg->remote_comm_id,
				     mra_msg->local_comm_id);
	default:
		return NULL;
	}
}
static int cm_mra_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_mra_msg *mra_msg;
	unsigned long flags;
	int timeout, ret;

	mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_mraed_id(mra_msg);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &mra_msg->private_data;
	work->cm_event.param.mra_rcvd.service_timeout =
					cm_mra_get_service_timeout(mra_msg);
	timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
		  cm_convert_to_ms(cm_id_priv->av.packet_life_time);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
		break;
	case IB_CM_REP_SENT:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
		break;
	case IB_CM_ESTABLISHED:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
		    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
		break;
	default:
		goto out;
	}

	cm_id_priv->msg->context[1] = (void *) (unsigned long)
				      cm_id_priv->id.state;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
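
/*
 * Note the extended timeout computed above: the remote's advertised
 * service timeout and the path's packet lifetime are both IBA exponents,
 * each converted to milliseconds and summed before ib_modify_mad()
 * stretches the retry timer of the outstanding MAD.  Assuming this
 * file's cm_convert_to_ms() helper approximates 4.096us * 2^t, an
 * advertised service timeout of 20 contributes roughly four seconds.
 */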
static void cm_format_lap(struct cm_lap_msg *lap_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_sa_path_rec *alternate_path,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
	lap_msg->local_comm_id = cm_id_priv->id.local_id;
	lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
	/* todo: need remote CM response timeout */
	cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
	lap_msg->alt_local_lid = alternate_path->slid;
	lap_msg->alt_remote_lid = alternate_path->dlid;
	lap_msg->alt_local_gid = alternate_path->sgid;
	lap_msg->alt_remote_gid = alternate_path->dgid;
	cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
	cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
	lap_msg->alt_hop_limit = alternate_path->hop_limit;
	cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
	cm_lap_set_sl(lap_msg, alternate_path->sl);
	cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
	cm_lap_set_local_ack_timeout(lap_msg,
		min(31, alternate_path->packet_life_time + 1));

	if (private_data && private_data_len)
		memcpy(lap_msg->private_data, private_data, private_data_len);
}
int ib_send_cm_lap(struct ib_cm_id *cm_id,
		   struct ib_sa_path_rec *alternate_path,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED ||
	    cm_id->lap_state != IB_CM_LAP_IDLE) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
		      alternate_path, private_data, private_data_len);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->lap_state = IB_CM_LAP_SENT;
	cm_id_priv->msg = msg;

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_lap);
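
/*
 * LAP/APR implement alternate path loading for automatic path
 * migration: the active side proposes an alternate path with
 * ib_send_cm_lap(), the passive side acknowledges with
 * ib_send_cm_apr(), and the consumer then rearms the QP
 * (IB_QP_PATH_MIG_STATE is set in cm_init_qp_rts_attr() below).  A
 * minimal sketch on the sender, assuming the consumer resolved
 * alt_path beforehand:
 *
 *	struct ib_sa_path_rec *alt_path;	resolved elsewhere
 *	ib_send_cm_lap(cm_id, alt_path, NULL, 0);
 */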
static void cm_format_path_from_lap(struct ib_sa_path_rec *path,
				    struct cm_lap_msg *lap_msg)
{
	memset(path, 0, sizeof *path);
	path->dgid = lap_msg->alt_local_gid;
	path->sgid = lap_msg->alt_remote_gid;
	path->dlid = lap_msg->alt_local_lid;
	path->slid = lap_msg->alt_remote_lid;
	path->flow_label = cm_lap_get_flow_label(lap_msg);
	path->hop_limit = lap_msg->alt_hop_limit;
	path->traffic_class = cm_lap_get_traffic_class(lap_msg);
	path->reversible = 1;
	/* pkey is same as in REQ */
	path->sl = cm_lap_get_sl(lap_msg);
	path->mtu_selector = IB_SA_EQ;
	/* mtu is same as in REQ */
	path->rate_selector = IB_SA_EQ;
	path->rate = cm_lap_get_packet_rate(lap_msg);
	path->packet_life_time_selector = IB_SA_EQ;
	path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
	path->packet_life_time -= (path->packet_life_time > 0);
}
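
/*
 * cm_format_path_from_lap() deliberately swaps local and remote fields:
 * the LAP describes the alternate path from the sender's point of view,
 * so what the sender calls its local LID/GID is this node's
 * destination.  The IB_SA_EQ selectors pin MTU, rate, and packet
 * lifetime so a later SA query cannot renegotiate them.
 */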
static int cm_lap_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_lap_msg *lap_msg;
	struct ib_cm_lap_event_param *param;
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	/* todo: verify LAP request and send reject APR if invalid. */
	lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
				   lap_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	param = &work->cm_event.param.lap_rcvd;
	param->alternate_path = &work->path[0];
	cm_format_path_from_lap(param->alternate_path, lap_msg);
	work->cm_event.private_data = &lap_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
		goto unlock;

	switch (cm_id_priv->id.lap_state) {
	case IB_CM_LAP_IDLE:
		break;
	case IB_CM_MRA_LAP_SENT:
		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
			goto unlock;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_OTHER,
			      cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

		if (ib_post_send_mad(msg, NULL))
			cm_free_msg(msg);
		goto deref;
	default:
		goto unlock;
	}

	cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
	cm_id_priv->tid = lap_msg->hdr.tid;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static void cm_format_apr(struct cm_apr_msg *apr_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_apr_status status,
			  void *info,
			  u8 info_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
	apr_msg->local_comm_id = cm_id_priv->id.local_id;
	apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
	apr_msg->ap_status = (u8) status;

	if (info && info_length) {
		apr_msg->info_length = info_length;
		memcpy(apr_msg->info, info, info_length);
	}

	if (private_data && private_data_len)
		memcpy(apr_msg->private_data, private_data, private_data_len);
}
int ib_send_cm_apr(struct ib_cm_id *cm_id,
		   enum ib_cm_apr_status status,
		   void *info,
		   u8 info_length,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
	    (info && info_length > IB_CM_APR_INFO_LENGTH))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED ||
	    (cm_id->lap_state != IB_CM_LAP_RCVD &&
	     cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
		      info, info_length, private_data, private_data_len);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->lap_state = IB_CM_LAP_IDLE;
out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_apr);
static int cm_apr_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_apr_msg *apr_msg;
	unsigned long flags;
	int ret;

	apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
				   apr_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
	work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
	work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
	work->cm_event.private_data = &apr_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
	    (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
	     cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	cm_id_priv->msg = NULL;

	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static int cm_timewait_handler(struct cm_work *work)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;
	int ret;

	timewait_info = (struct cm_timewait_info *)work;
	spin_lock_irq(&cm.lock);
	list_del(&timewait_info->list);
	spin_unlock_irq(&cm.lock);

	cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
				   timewait_info->work.remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
	    cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_req_param *param)
{
	cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
	sidr_req_msg->request_id = cm_id_priv->id.local_id;
	sidr_req_msg->pkey = cpu_to_be16(param->path->pkey);
	sidr_req_msg->service_id = param->service_id;

	if (param->private_data && param->private_data_len)
		memcpy(sidr_req_msg->private_data, param->private_data,
		       param->private_data_len);
}
int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (!param->path || (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
	if (ret)
		goto out;

	cm_id->service_id = param->service_id;
	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	cm_id_priv->timeout_ms = param->timeout_ms;
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
			   param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state == IB_CM_IDLE)
		ret = ib_post_send_mad(msg, NULL);
	else
		ret = -EINVAL;

	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		goto out;
	}
	cm_id->state = IB_CM_SIDR_REQ_SENT;
	cm_id_priv->msg = msg;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
out:
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_req);
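
/*
 * SIDR (service ID resolution) is a connectionless exchange used to
 * discover the QPN/Q_Key of a UD service.  A minimal request sketch,
 * assuming the consumer already obtained path and service_id:
 *
 *	struct ib_cm_sidr_req_param param = {
 *		.path		= path,
 *		.service_id	= service_id,
 *		.timeout_ms	= 100,
 *		.max_cm_retries	= 3,
 *	};
 *	ib_send_cm_sidr_req(cm_id, &param);
 *
 * The reply surfaces as an IB_CM_SIDR_REP_RECEIVED event carrying the
 * remote qpn and qkey.
 */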
static void cm_format_sidr_req_event(struct cm_work *work,
				     struct ib_cm_id *listen_id)
{
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_cm_sidr_req_event_param *param;

	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_req_rcvd;
	param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
	param->listen_id = listen_id;
	param->port = work->port->port_num;
	work->cm_event.private_data = &sidr_req_msg->private_data;
}
static int cm_sidr_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_wc *wc;
	unsigned long flags;

	cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	/* Record SGID/SLID and request ID for lookup. */
	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	wc = work->mad_recv_wc->wc;
	cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
	cm_id_priv->av.dgid.global.interface_id = 0;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_id_priv->id.remote_id = sidr_req_msg->request_id;
	cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
	cm_id_priv->tid = sidr_req_msg->hdr.tid;
	atomic_inc(&cm_id_priv->work_count);

	spin_lock_irqsave(&cm.lock, flags);
	cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
	if (cur_cm_id_priv) {
		spin_unlock_irqrestore(&cm.lock, flags);
		goto out; /* Duplicate message. */
	}
	cur_cm_id_priv = cm_find_listen(cm_id->device,
					sidr_req_msg->service_id,
					sidr_req_msg->private_data);
	if (!cur_cm_id_priv) {
		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
		spin_unlock_irqrestore(&cm.lock, flags);
		/* todo: reply with no match */
		goto out; /* No match. */
	}
	atomic_inc(&cur_cm_id_priv->refcount);
	spin_unlock_irqrestore(&cm.lock, flags);

	cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = cur_cm_id_priv->id.context;
	cm_id_priv->id.service_id = sidr_req_msg->service_id;
	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);

	cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(cur_cm_id_priv);
	return 0;
out:
	ib_destroy_cm_id(&cm_id_priv->id);
	return -EINVAL;
}
static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_rep_param *param)
{
	cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
			  cm_id_priv->tid);
	sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
	sidr_rep_msg->status = param->status;
	cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
	sidr_rep_msg->service_id = cm_id_priv->id.service_id;
	sidr_rep_msg->qkey = cpu_to_be32(param->qkey);

	if (param->info && param->info_length)
		memcpy(sidr_rep_msg->info, param->info, param->info_length);

	if (param->private_data && param->private_data_len)
		memcpy(sidr_rep_msg->private_data, param->private_data,
		       param->private_data_len);
}
int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
	    (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
			   param);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}
	cm_id->state = IB_CM_IDLE;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	spin_lock_irqsave(&cm.lock, flags);
	rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	spin_unlock_irqrestore(&cm.lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_rep);
static void cm_format_sidr_rep_event(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct ib_cm_sidr_rep_event_param *param;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_rep_rcvd;
	param->status = sidr_rep_msg->status;
	param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
	param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
	param->info = &sidr_rep_msg->info;
	param->info_len = sidr_rep_msg->info_length;
	work->cm_event.private_data = &sidr_rep_msg->private_data;
}
static int cm_sidr_rep_handler(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct cm_id_private *cm_id_priv;
	unsigned long flags;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_format_sidr_rep_event(work);
	cm_process_work(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
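
/*
 * On the requester, a successful SIDR REP gives the consumer everything
 * needed to address the service's UD QP: qpn and qkey from
 * cm_event.param.sidr_rep_rcvd, plus an address handle built from the
 * original path.  Since the cm_id returns to IB_CM_IDLE here, it can be
 * reused for another resolution.
 */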
static void cm_process_send_error(struct ib_mad_send_buf *msg,
				  enum ib_wc_status wc_status)
{
	struct cm_id_private *cm_id_priv;
	struct ib_cm_event cm_event;
	enum ib_cm_state state;
	unsigned long flags;
	int ret;

	memset(&cm_event, 0, sizeof cm_event);
	cm_id_priv = msg->context[0];

	/* Discard old sends or ones without a response. */
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	state = (enum ib_cm_state) (unsigned long) msg->context[1];
	if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
		goto discard;

	switch (state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REQ_ERROR;
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REP_ERROR;
		break;
	case IB_CM_DREQ_SENT:
		cm_enter_timewait(cm_id_priv);
		cm_event.event = IB_CM_DREQ_ERROR;
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id_priv->id.state = IB_CM_IDLE;
		cm_event.event = IB_CM_SIDR_REQ_ERROR;
		break;
	default:
		goto discard;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_event.param.send_status = wc_status;

	/* No other events can occur on the cm_id at this point. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
	cm_free_msg(msg);
	if (ret)
		ib_destroy_cm_id(&cm_id_priv->id);
	return;
discard:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_free_msg(msg);
}
static void cm_send_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_send_buf *msg = mad_send_wc->send_buf;

	switch (mad_send_wc->status) {
	case IB_WC_SUCCESS:
	case IB_WC_WR_FLUSH_ERR:
		cm_free_msg(msg);
		break;
	default:
		if (msg->context[0] && msg->context[1])
			cm_process_send_error(msg, mad_send_wc->status);
		else
			cm_free_msg(msg);
		break;
	}
}
static void cm_work_handler(void *data)
{
	struct cm_work *work = data;
	int ret;

	switch (work->cm_event.event) {
	case IB_CM_REQ_RECEIVED:
		ret = cm_req_handler(work);
		break;
	case IB_CM_MRA_RECEIVED:
		ret = cm_mra_handler(work);
		break;
	case IB_CM_REJ_RECEIVED:
		ret = cm_rej_handler(work);
		break;
	case IB_CM_REP_RECEIVED:
		ret = cm_rep_handler(work);
		break;
	case IB_CM_RTU_RECEIVED:
		ret = cm_rtu_handler(work);
		break;
	case IB_CM_USER_ESTABLISHED:
		ret = cm_establish_handler(work);
		break;
	case IB_CM_DREQ_RECEIVED:
		ret = cm_dreq_handler(work);
		break;
	case IB_CM_DREP_RECEIVED:
		ret = cm_drep_handler(work);
		break;
	case IB_CM_SIDR_REQ_RECEIVED:
		ret = cm_sidr_req_handler(work);
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		ret = cm_sidr_rep_handler(work);
		break;
	case IB_CM_LAP_RECEIVED:
		ret = cm_lap_handler(work);
		break;
	case IB_CM_APR_RECEIVED:
		ret = cm_apr_handler(work);
		break;
	case IB_CM_TIMEWAIT_EXIT:
		ret = cm_timewait_handler(work);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	if (ret)
		cm_free_work(work);
}
int ib_cm_establish(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;
	int ret = 0;

	work = kmalloc(sizeof *work, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_id->state = IB_CM_ESTABLISHED;
		break;
	case IB_CM_ESTABLISHED:
		ret = -EISCONN;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret) {
		kfree(work);
		goto out;
	}

	/*
	 * The CM worker thread may try to destroy the cm_id before it
	 * can execute this work item.  To prevent potential deadlock,
	 * we need to find the cm_id once we're in the context of the
	 * worker thread, rather than holding a reference on it.
	 */
	INIT_WORK(&work->work, cm_work_handler, work);
	work->local_id = cm_id->local_id;
	work->remote_id = cm_id->remote_id;
	work->mad_recv_wc = NULL;
	work->cm_event.event = IB_CM_USER_ESTABLISHED;
	queue_work(cm.wq, &work->work);
out:
	return ret;
}
EXPORT_SYMBOL(ib_cm_establish);
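
/*
 * ib_cm_establish() covers the race where the first data packet beats
 * the RTU: a passive-side consumer that sees a receive completion while
 * still in IB_CM_REP_SENT calls this to force the transition to
 * IB_CM_ESTABLISHED, and the handler then runs from the workqueue as if
 * an RTU had arrived.  The deferred id lookup noted in the comment
 * above is what makes a concurrent ib_destroy_cm_id() safe.
 */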
static void cm_recv_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_recv_wc *mad_recv_wc)
{
	struct cm_work *work;
	enum ib_cm_event_type event;
	int paths = 0;

	switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
	case CM_REQ_ATTR_ID:
		paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
						    alt_local_lid != 0);
		event = IB_CM_REQ_RECEIVED;
		break;
	case CM_MRA_ATTR_ID:
		event = IB_CM_MRA_RECEIVED;
		break;
	case CM_REJ_ATTR_ID:
		event = IB_CM_REJ_RECEIVED;
		break;
	case CM_REP_ATTR_ID:
		event = IB_CM_REP_RECEIVED;
		break;
	case CM_RTU_ATTR_ID:
		event = IB_CM_RTU_RECEIVED;
		break;
	case CM_DREQ_ATTR_ID:
		event = IB_CM_DREQ_RECEIVED;
		break;
	case CM_DREP_ATTR_ID:
		event = IB_CM_DREP_RECEIVED;
		break;
	case CM_SIDR_REQ_ATTR_ID:
		event = IB_CM_SIDR_REQ_RECEIVED;
		break;
	case CM_SIDR_REP_ATTR_ID:
		event = IB_CM_SIDR_REP_RECEIVED;
		break;
	case CM_LAP_ATTR_ID:
		paths = 1;
		event = IB_CM_LAP_RECEIVED;
		break;
	case CM_APR_ATTR_ID:
		event = IB_CM_APR_RECEIVED;
		break;
	default:
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
		       GFP_KERNEL);
	if (!work) {
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	INIT_WORK(&work->work, cm_work_handler, work);
	work->cm_event.event = event;
	work->mad_recv_wc = mad_recv_wc;
	work->port = (struct cm_port *)mad_agent->context;
	queue_work(cm.wq, &work->work);
}
static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
				struct ib_qp_attr *qp_attr,
				int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
				IB_QP_PKEY_INDEX | IB_QP_PORT;
		qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
					   IB_ACCESS_REMOTE_WRITE;
		if (cm_id_priv->responder_resources)
			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
						    IB_ACCESS_REMOTE_ATOMIC;
		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
		qp_attr->port_num = cm_id_priv->av.port->port_num;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
				IB_QP_DEST_QPN | IB_QP_RQ_PSN;
		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
		qp_attr->path_mtu = cm_id_priv->path_mtu;
		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
		if (cm_id_priv->qp_type == IB_QPT_RC) {
			*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
					 IB_QP_MIN_RNR_TIMER;
			qp_attr->max_dest_rd_atomic =
					cm_id_priv->responder_resources;
			qp_attr->min_rnr_timer = 0;
		}
		if (cm_id_priv->alt_av.ah_attr.dlid) {
			*qp_attr_mask |= IB_QP_ALT_PATH;
			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
		qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
		if (cm_id_priv->qp_type == IB_QPT_RC) {
			*qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
					 IB_QP_RNR_RETRY |
					 IB_QP_MAX_QP_RD_ATOMIC;
			qp_attr->timeout = cm_id_priv->local_ack_timeout;
			qp_attr->retry_cnt = cm_id_priv->retry_count;
			qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
			qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
		}
		if (cm_id_priv->alt_av.ah_attr.dlid) {
			*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
			qp_attr->path_mig_state = IB_MIG_REARM;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTR:
		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);
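
/*
 * Consumers drive their QP through INIT/RTR/RTS by asking the CM for
 * the attributes matching the connection state.  A minimal sketch
 * (error checks omitted; qp is the consumer's QP):
 *
 *	struct ib_qp_attr qp_attr;
 *	int qp_attr_mask;
 *
 *	qp_attr.qp_state = IB_QPS_INIT;
 *	ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
 *	ib_modify_qp(qp, &qp_attr, qp_attr_mask);
 *
 * and likewise for IB_QPS_RTR and IB_QPS_RTS before sending the RTU.
 */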
static void cm_add_one(struct ib_device *device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_mad_reg_req reg_req = {
		.mgmt_class = IB_MGMT_CLASS_CM,
		.mgmt_class_version = IB_CM_CLASS_VERSION
	};
	struct ib_port_modify port_modify = {
		.set_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int ret;
	u8 i;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) *
			 device->phys_port_cnt, GFP_KERNEL);
	if (!cm_dev)
		return;

	cm_dev->device = device;
	cm_dev->ca_guid = device->node_guid;

	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
	for (i = 1; i <= device->phys_port_cnt; i++) {
		port = &cm_dev->port[i-1];
		port->cm_dev = cm_dev;
		port->port_num = i;
		port->mad_agent = ib_register_mad_agent(device, i,
							IB_QPT_GSI,
							&reg_req,
							0,
							cm_send_handler,
							cm_recv_handler,
							port);
		if (IS_ERR(port->mad_agent))
			goto error1;

		ret = ib_modify_port(device, i, 0, &port_modify);
		if (ret)
			goto error2;
	}
	ib_set_client_data(device, &cm_client, cm_dev);

	write_lock_irqsave(&cm.device_lock, flags);
	list_add_tail(&cm_dev->list, &cm.device_list);
	write_unlock_irqrestore(&cm.device_lock, flags);
	return;

error2:
	ib_unregister_mad_agent(port->mad_agent);
error1:
	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
	while (--i) {
		port = &cm_dev->port[i-1];
		ib_modify_port(device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
	}
	kfree(cm_dev);
}
static void cm_remove_one(struct ib_device *device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int i;

	cm_dev = ib_get_client_data(device, &cm_client);
	if (!cm_dev)
		return;

	write_lock_irqsave(&cm.device_lock, flags);
	list_del(&cm_dev->list);
	write_unlock_irqrestore(&cm.device_lock, flags);

	for (i = 1; i <= device->phys_port_cnt; i++) {
		port = &cm_dev->port[i-1];
		ib_modify_port(device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
	}
	kfree(cm_dev);
}
static int __init ib_cm_init(void)
{
	int ret;

	memset(&cm, 0, sizeof cm);
	INIT_LIST_HEAD(&cm.device_list);
	rwlock_init(&cm.device_lock);
	spin_lock_init(&cm.lock);
	cm.listen_service_table = RB_ROOT;
	cm.listen_service_id = __constant_be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
	cm.remote_id_table = RB_ROOT;
	cm.remote_qp_table = RB_ROOT;
	cm.remote_sidr_table = RB_ROOT;
	idr_init(&cm.local_id_table);
	get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
	idr_pre_get(&cm.local_id_table, GFP_KERNEL);
	INIT_LIST_HEAD(&cm.timewait_list);

	cm.wq = create_workqueue("ib_cm");
	if (!cm.wq)
		return -ENOMEM;

	ret = ib_register_client(&cm_client);
	if (ret)
		goto error;

	return 0;
error:
	destroy_workqueue(cm.wq);
	return ret;
}
static void __exit ib_cm_cleanup(void)
{
	struct cm_timewait_info *timewait_info, *tmp;

	spin_lock_irq(&cm.lock);
	list_for_each_entry(timewait_info, &cm.timewait_list, list)
		cancel_delayed_work(&timewait_info->work.work);
	spin_unlock_irq(&cm.lock);

	destroy_workqueue(cm.wq);

	list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
		list_del(&timewait_info->list);
		kfree(timewait_info);
	}

	ib_unregister_client(&cm_client);
	idr_destroy(&cm.local_id_table);
}

module_init(ib_cm_init);
module_exit(ib_cm_cleanup);