/*
 * Copyright (c) 2004-2006 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: cm.c 4311 2005-12-05 18:42:01Z sean.hefty $
 */
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");
static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device);

static struct ib_client cm_client = {
	.remove = cm_remove_one
};
	struct list_head device_list;
	struct rb_root listen_service_table;
	u64 listen_service_id;
	/* struct rb_root peer_service_table; todo: fix peer to peer */
	struct rb_root remote_qp_table;
	struct rb_root remote_id_table;
	struct rb_root remote_sidr_table;
	struct idr local_id_table;
	__be32 random_id_operand;
	struct list_head timewait_list;
	struct workqueue_struct *wq;
	struct cm_device *cm_dev;
	struct ib_mad_agent *mad_agent;

	struct list_head list;
	struct ib_device *device;
	struct cm_port port[0];

	struct ib_ah_attr ah_attr;

	struct delayed_work work;
	struct list_head list;
	struct cm_port *port;
	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
	__be32 local_id;			/* Established / timewait */
	struct ib_cm_event cm_event;
	struct ib_sa_path_rec path[0];
struct cm_timewait_info {
	struct cm_work work;			/* Must be first. */
	struct list_head list;
	struct rb_node remote_qp_node;
	struct rb_node remote_id_node;
	__be64 remote_ca_guid;
	u8 inserted_remote_qp;
	u8 inserted_remote_id;
struct cm_id_private {
	struct rb_node service_node;
	struct rb_node sidr_id_node;
	spinlock_t lock;	/* Do not acquire inside cm.lock */
	struct completion comp;

	struct ib_mad_send_buf *msg;
	struct cm_timewait_info *timewait_info;
	/* todo: use alternate port on send failure */

	struct ib_cm_compare_data *compare_data;

	enum ib_qp_type qp_type;
	enum ib_mtu path_mtu;
	u8 responder_resources;

	struct list_head work_list;
static void cm_work_handler(struct work_struct *work);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
	if (atomic_dec_and_test(&cm_id_priv->refcount))
		complete(&cm_id_priv->comp);
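
/*
 * Note on cm_id lifetime: each cm_id_private starts with a refcount of 1
 * (see ib_create_cm_id below).  cm_deref_id() completes ->comp when the last
 * reference is dropped, and cm_destroy_id() waits on that completion before
 * tearing the id down, so callers holding a reference keep the id valid
 * until they call cm_deref_id().
 */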
static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
			struct ib_mad_send_buf **msg)
	struct ib_mad_agent *mad_agent;
	struct ib_mad_send_buf *m;

	mad_agent = cm_id_priv->av.port->mad_agent;
	ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);

	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
			       cm_id_priv->av.pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,

	/* Timeout set by caller if response is expected. */
	m->retries = cm_id_priv->max_cm_retries;

	atomic_inc(&cm_id_priv->refcount);
	m->context[0] = cm_id_priv;
static int cm_alloc_response_msg(struct cm_port *port,
				 struct ib_mad_recv_wc *mad_recv_wc,
				 struct ib_mad_send_buf **msg)
	struct ib_mad_send_buf *m;

	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
				  mad_recv_wc->recv_buf.grh, port->port_num);

	m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
static void cm_free_msg(struct ib_mad_send_buf *msg)
	ib_destroy_ah(msg->ah);
	cm_deref_id(msg->context[0]);
	ib_free_send_mad(msg);
static void * cm_copy_private_data(const void *private_data,
	if (!private_data || !private_data_len)

	data = kmemdup(private_data, private_data_len, GFP_KERNEL);
		return ERR_PTR(-ENOMEM);
static void cm_set_private_data(struct cm_id_private *cm_id_priv,
				void *private_data, u8 private_data_len)
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);

	cm_id_priv->private_data = private_data;
	cm_id_priv->private_data_len = private_data_len;
static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
				    struct ib_grh *grh, struct cm_av *av)
	av->pkey_index = wc->pkey_index;
	ib_init_ah_from_wc(port->cm_dev->device, port->port_num, wc,
static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
	struct cm_device *cm_dev;
	struct cm_port *port = NULL;

	read_lock_irqsave(&cm.device_lock, flags);
	list_for_each_entry(cm_dev, &cm.device_list, list) {
		if (!ib_find_cached_gid(cm_dev->device, &path->sgid,
			port = &cm_dev->port[p-1];
	read_unlock_irqrestore(&cm.device_lock, flags);

	ret = ib_find_cached_pkey(cm_dev->device, port->port_num,
				  be16_to_cpu(path->pkey), &av->pkey_index);

	ib_init_ah_from_path(cm_dev->device, port->port_num, path,
	av->packet_life_time = path->packet_life_time;
static int cm_alloc_id(struct cm_id_private *cm_id_priv)
		spin_lock_irqsave(&cm.lock, flags);
		ret = idr_get_new_above(&cm.local_id_table, cm_id_priv,
		spin_unlock_irqrestore(&cm.lock, flags);
	} while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) );

	cm_id_priv->id.local_id = (__force __be32) (id ^ cm.random_id_operand);
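
/*
 * Note: id.local_id is the idr-assigned id XORed with cm.random_id_operand.
 * cm_free_id() and cm_get_id() below undo the same XOR before touching
 * local_id_table, presumably so that locally assigned communication IDs are
 * not trivially predictable on the wire.
 */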
static void cm_free_id(__be32 local_id)
	spin_lock_irqsave(&cm.lock, flags);
	idr_remove(&cm.local_id_table,
		   (__force int) (local_id ^ cm.random_id_operand));
	spin_unlock_irqrestore(&cm.lock, flags);
static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
	struct cm_id_private *cm_id_priv;

	cm_id_priv = idr_find(&cm.local_id_table,
			      (__force int) (local_id ^ cm.random_id_operand));
		if (cm_id_priv->id.remote_id == remote_id)
			atomic_inc(&cm_id_priv->refcount);
static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
	struct cm_id_private *cm_id_priv;

	spin_lock_irqsave(&cm.lock, flags);
	cm_id_priv = cm_get_id(local_id, remote_id);
	spin_unlock_irqrestore(&cm.lock, flags);
static void cm_mask_copy(u8 *dst, u8 *src, u8 *mask)
	for (i = 0; i < IB_CM_COMPARE_SIZE / sizeof(unsigned long); i++)
		((unsigned long *) dst)[i] = ((unsigned long *) src)[i] &
					     ((unsigned long *) mask)[i];
static int cm_compare_data(struct ib_cm_compare_data *src_data,
			   struct ib_cm_compare_data *dst_data)
	u8 src[IB_CM_COMPARE_SIZE];
	u8 dst[IB_CM_COMPARE_SIZE];

	if (!src_data || !dst_data)

	cm_mask_copy(src, src_data->data, dst_data->mask);
	cm_mask_copy(dst, dst_data->data, src_data->mask);
	return memcmp(src, dst, IB_CM_COMPARE_SIZE);
static int cm_compare_private_data(u8 *private_data,
				   struct ib_cm_compare_data *dst_data)
	u8 src[IB_CM_COMPARE_SIZE];

	cm_mask_copy(src, private_data, dst_data->mask);
	return memcmp(src, dst_data->data, IB_CM_COMPARE_SIZE);
static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
	struct rb_node **link = &cm.listen_service_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	__be64 service_id = cm_id_priv->id.service_id;
	__be64 service_mask = cm_id_priv->id.service_mask;

		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
		data_cmp = cm_compare_data(cm_id_priv->compare_data,
					   cur_cm_id_priv->compare_data);
		if ((cur_cm_id_priv->id.service_mask & service_id) ==
		    (service_mask & cur_cm_id_priv->id.service_id) &&
		    (cm_id_priv->id.device == cur_cm_id_priv->id.device) &&
			return cur_cm_id_priv;

		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
			link = &(*link)->rb_left;
		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
			link = &(*link)->rb_right;
		else if (service_id < cur_cm_id_priv->id.service_id)
			link = &(*link)->rb_left;
		else if (service_id > cur_cm_id_priv->id.service_id)
			link = &(*link)->rb_right;
		else if (data_cmp < 0)
			link = &(*link)->rb_left;
			link = &(*link)->rb_right;

	rb_link_node(&cm_id_priv->service_node, parent, link);
	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
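
/*
 * The listen tree is ordered by (device, service_id, compare_data): the
 * insert above walks left/right first on the device pointer, then on the
 * service id, and finally on the masked private-data comparison, and it
 * returns the existing entry instead of inserting when a matching listen
 * (same device, overlapping service id/mask, equal compare data) is found.
 */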
static struct cm_id_private * cm_find_listen(struct ib_device *device,
	struct rb_node *node = cm.listen_service_table.rb_node;
	struct cm_id_private *cm_id_priv;

		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
		data_cmp = cm_compare_private_data(private_data,
						   cm_id_priv->compare_data);
		if ((cm_id_priv->id.service_mask & service_id) ==
		    cm_id_priv->id.service_id &&
		    (cm_id_priv->id.device == device) && !data_cmp)

		if (device < cm_id_priv->id.device)
			node = node->rb_left;
		else if (device > cm_id_priv->id.device)
			node = node->rb_right;
		else if (service_id < cm_id_priv->id.service_id)
			node = node->rb_left;
		else if (service_id > cm_id_priv->id.service_id)
			node = node->rb_right;
		else if (data_cmp < 0)
			node = node->rb_left;
			node = node->rb_right;
static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
	struct rb_node **link = &cm.remote_id_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_id = timewait_info->work.remote_id;

		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
		if (remote_id < cur_timewait_info->work.remote_id)
			link = &(*link)->rb_left;
		else if (remote_id > cur_timewait_info->work.remote_id)
			link = &(*link)->rb_right;
		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_left;
		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_right;
			return cur_timewait_info;

	timewait_info->inserted_remote_id = 1;
	rb_link_node(&timewait_info->remote_id_node, parent, link);
	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
	struct rb_node *node = cm.remote_id_table.rb_node;
	struct cm_timewait_info *timewait_info;

		timewait_info = rb_entry(node, struct cm_timewait_info,
		if (remote_id < timewait_info->work.remote_id)
			node = node->rb_left;
		else if (remote_id > timewait_info->work.remote_id)
			node = node->rb_right;
		else if (remote_ca_guid < timewait_info->remote_ca_guid)
			node = node->rb_left;
		else if (remote_ca_guid > timewait_info->remote_ca_guid)
			node = node->rb_right;
			return timewait_info;
static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
	struct rb_node **link = &cm.remote_qp_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_qpn = timewait_info->remote_qpn;

		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
		if (remote_qpn < cur_timewait_info->remote_qpn)
			link = &(*link)->rb_left;
		else if (remote_qpn > cur_timewait_info->remote_qpn)
			link = &(*link)->rb_right;
		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_left;
		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_right;
			return cur_timewait_info;

	timewait_info->inserted_remote_qp = 1;
	rb_link_node(&timewait_info->remote_qp_node, parent, link);
	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
	struct rb_node **link = &cm.remote_sidr_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	union ib_gid *port_gid = &cm_id_priv->av.dgid;
	__be32 remote_id = cm_id_priv->id.remote_id;

		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
		if (remote_id < cur_cm_id_priv->id.remote_id)
			link = &(*link)->rb_left;
		else if (remote_id > cur_cm_id_priv->id.remote_id)
			link = &(*link)->rb_right;
			cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
				link = &(*link)->rb_left;
				link = &(*link)->rb_right;
				return cur_cm_id_priv;

	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
			       enum ib_cm_sidr_status status)
	struct ib_cm_sidr_rep_param param;

	memset(&param, 0, sizeof param);
	param.status = status;
	ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
				 ib_cm_handler cm_handler,
	struct cm_id_private *cm_id_priv;

	cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
		return ERR_PTR(-ENOMEM);

	cm_id_priv->id.state = IB_CM_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.remote_cm_qpn = 1;
	ret = cm_alloc_id(cm_id_priv);

	spin_lock_init(&cm_id_priv->lock);
	init_completion(&cm_id_priv->comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	atomic_set(&cm_id_priv->work_count, -1);
	atomic_set(&cm_id_priv->refcount, 1);
	return &cm_id_priv->id;

	return ERR_PTR(-ENOMEM);
EXPORT_SYMBOL(ib_create_cm_id);
static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
	struct cm_work *work;

	if (list_empty(&cm_id_priv->work_list))

	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
	list_del(&work->list);
static void cm_free_work(struct cm_work *work)
	if (work->mad_recv_wc)
		ib_free_recv_mad(work->mad_recv_wc);
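
/*
 * cm_convert_to_ms() below turns an IBA packet lifetime/timeout exponent
 * into milliseconds: the spec value means 4.096us * 2^iba_time, and since
 * 4.096us is roughly 1/256 ms, shifting by (iba_time - 8) approximates the
 * result.  For example, iba_time = 20 is ~4.3s and 1 << 12 = 4096ms.
 */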
static inline int cm_convert_to_ms(int iba_time)
	/* approximate conversion to ms from 4.096us x 2^iba_time */
	return 1 << max(iba_time - 8, 0);
static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
	if (timewait_info->inserted_remote_id) {
		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
		timewait_info->inserted_remote_id = 0;

	if (timewait_info->inserted_remote_qp) {
		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
		timewait_info->inserted_remote_qp = 0;
static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
	struct cm_timewait_info *timewait_info;

	timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
		return ERR_PTR(-ENOMEM);

	timewait_info->work.local_id = local_id;
	INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
	return timewait_info;
static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
	spin_lock_irqsave(&cm.lock, flags);
	cm_cleanup_timewait(cm_id_priv->timewait_info);
	list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
	spin_unlock_irqrestore(&cm.lock, flags);

	/*
	 * The cm_id could be destroyed by the user before we exit timewait.
	 * To protect against this, we search for the cm_id after exiting
	 * timewait before notifying the user that we've exited timewait.
	 */
	cm_id_priv->id.state = IB_CM_TIMEWAIT;
	wait_time = cm_convert_to_ms(cm_id_priv->av.packet_life_time + 1);
	queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
			   msecs_to_jiffies(wait_time));
	cm_id_priv->timewait_info = NULL;
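
/*
 * The timewait delay above is cm_convert_to_ms(packet_life_time + 1), i.e.
 * roughly twice the path's packet lifetime, after which the delayed work
 * queued on cm.wq delivers the IB_CM_TIMEWAIT_EXIT event set up in
 * cm_create_timewait_info().
 */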
static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
	cm_id_priv->id.state = IB_CM_IDLE;
	if (cm_id_priv->timewait_info) {
		spin_lock_irqsave(&cm.lock, flags);
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irqrestore(&cm.lock, flags);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
		cm_id->state = IB_CM_IDLE;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		spin_lock_irqsave(&cm.lock, flags);
		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
		spin_unlock_irqrestore(&cm.lock, flags);
	case IB_CM_SIDR_REQ_SENT:
		cm_id->state = IB_CM_IDLE;
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	case IB_CM_SIDR_REQ_RCVD:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
			       &cm_id_priv->av.port->cm_dev->ca_guid,
			       sizeof cm_id_priv->av.port->cm_dev->ca_guid,
		if (err == -ENOMEM) {
			/* Do not reject to allow future retries. */
			cm_reset_to_idle(cm_id_priv);
			spin_unlock_irqrestore(&cm_id_priv->lock, flags);
			spin_unlock_irqrestore(&cm_id_priv->lock, flags);
			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_MRA_REP_SENT:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
	case IB_CM_ESTABLISHED:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_dreq(cm_id, NULL, 0);
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	case IB_CM_DREQ_RCVD:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_drep(cm_id, NULL, 0);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_free_id(cm_id->local_id);
	cm_deref_id(cm_id_priv);
	wait_for_completion(&cm_id_priv->comp);
	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
	kfree(cm_id_priv->compare_data);
	kfree(cm_id_priv->private_data);
void ib_destroy_cm_id(struct ib_cm_id *cm_id)
	cm_destroy_id(cm_id, 0);
EXPORT_SYMBOL(ib_destroy_cm_id);
int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
		 struct ib_cm_compare_data *compare_data)
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;

	service_mask = service_mask ? service_mask :
		       __constant_cpu_to_be64(~0ULL);
	service_id &= service_mask;
	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
	    (service_id != IB_CM_ASSIGN_SERVICE_ID))

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	if (cm_id->state != IB_CM_IDLE)

		cm_id_priv->compare_data = kzalloc(sizeof *compare_data,
		if (!cm_id_priv->compare_data)
		cm_mask_copy(cm_id_priv->compare_data->data,
			     compare_data->data, compare_data->mask);
		memcpy(cm_id_priv->compare_data->mask, compare_data->mask,

	cm_id->state = IB_CM_LISTEN;

	spin_lock_irqsave(&cm.lock, flags);
	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
		cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
		cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
		cm_id->service_id = service_id;
		cm_id->service_mask = service_mask;
	cur_cm_id_priv = cm_insert_listen(cm_id_priv);
	spin_unlock_irqrestore(&cm.lock, flags);

	if (cur_cm_id_priv) {
		cm_id->state = IB_CM_IDLE;
		kfree(cm_id_priv->compare_data);
		cm_id_priv->compare_data = NULL;
EXPORT_SYMBOL(ib_cm_listen);
static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
			  enum cm_msg_sequence msg_seq)
	hi_tid  = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
	low_tid = (u64) ((__force u32)cm_id_priv->id.local_id |
	return cpu_to_be64(hi_tid | low_tid);
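
/*
 * The transaction ID built above places the MAD agent's hi_tid in the upper
 * 32 bits; the low 32 bits start from the connection's local_id and, given
 * the msg_seq parameter, presumably also encode which message sequence
 * (e.g. REQ vs. DREQ) the TID belongs to.
 */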
static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
			      __be16 attr_id, __be64 tid)
	hdr->base_version = IB_MGMT_BASE_VERSION;
	hdr->mgmt_class = IB_MGMT_CLASS_CM;
	hdr->class_version = IB_CM_CLASS_VERSION;
	hdr->method = IB_MGMT_METHOD_SEND;
	hdr->attr_id = attr_id;
static void cm_format_req(struct cm_req_msg *req_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_req_param *param)
	cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));

	req_msg->local_comm_id = cm_id_priv->id.local_id;
	req_msg->service_id = param->service_id;
	req_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
	cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
	cm_req_set_resp_res(req_msg, param->responder_resources);
	cm_req_set_init_depth(req_msg, param->initiator_depth);
	cm_req_set_remote_resp_timeout(req_msg,
				       param->remote_cm_response_timeout);
	cm_req_set_qp_type(req_msg, param->qp_type);
	cm_req_set_flow_ctrl(req_msg, param->flow_control);
	cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
	cm_req_set_local_resp_timeout(req_msg,
				      param->local_cm_response_timeout);
	cm_req_set_retry_count(req_msg, param->retry_count);
	req_msg->pkey = param->primary_path->pkey;
	cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
	cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
	cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
	cm_req_set_srq(req_msg, param->srq);

	req_msg->primary_local_lid = param->primary_path->slid;
	req_msg->primary_remote_lid = param->primary_path->dlid;
	req_msg->primary_local_gid = param->primary_path->sgid;
	req_msg->primary_remote_gid = param->primary_path->dgid;
	cm_req_set_primary_flow_label(req_msg, param->primary_path->flow_label);
	cm_req_set_primary_packet_rate(req_msg, param->primary_path->rate);
	req_msg->primary_traffic_class = param->primary_path->traffic_class;
	req_msg->primary_hop_limit = param->primary_path->hop_limit;
	cm_req_set_primary_sl(req_msg, param->primary_path->sl);
	cm_req_set_primary_subnet_local(req_msg, 1); /* local only... */
	cm_req_set_primary_local_ack_timeout(req_msg,
		min(31, param->primary_path->packet_life_time + 1));

	if (param->alternate_path) {
		req_msg->alt_local_lid = param->alternate_path->slid;
		req_msg->alt_remote_lid = param->alternate_path->dlid;
		req_msg->alt_local_gid = param->alternate_path->sgid;
		req_msg->alt_remote_gid = param->alternate_path->dgid;
		cm_req_set_alt_flow_label(req_msg,
					  param->alternate_path->flow_label);
		cm_req_set_alt_packet_rate(req_msg, param->alternate_path->rate);
		req_msg->alt_traffic_class = param->alternate_path->traffic_class;
		req_msg->alt_hop_limit = param->alternate_path->hop_limit;
		cm_req_set_alt_sl(req_msg, param->alternate_path->sl);
		cm_req_set_alt_subnet_local(req_msg, 1); /* local only... */
		cm_req_set_alt_local_ack_timeout(req_msg,
			min(31, param->alternate_path->packet_life_time + 1));

	if (param->private_data && param->private_data_len)
		memcpy(req_msg->private_data, param->private_data,
		       param->private_data_len);
static int cm_validate_req_param(struct ib_cm_req_param *param)
	/* peer-to-peer not supported */
	if (param->peer_to_peer)

	if (!param->primary_path)

	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC)

	if (param->private_data &&
	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)

	if (param->alternate_path &&
	    (param->alternate_path->pkey != param->primary_path->pkey ||
	     param->alternate_path->mtu != param->primary_path->mtu))
int ib_send_cm_req(struct ib_cm_id *cm_id,
		   struct ib_cm_req_param *param)
	struct cm_id_private *cm_id_priv;
	struct cm_req_msg *req_msg;

	ret = cm_validate_req_param(param);

	/* Verify that we're not in timewait. */
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_IDLE) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);

	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
	if (param->alternate_path) {
		ret = cm_init_av_by_path(param->alternate_path,
					 &cm_id_priv->alt_av);

	cm_id->service_id = param->service_id;
	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	cm_id_priv->timeout_ms = cm_convert_to_ms(
				    param->primary_path->packet_life_time) * 2 +
				    param->remote_cm_response_timeout);
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->retry_count = param->retry_count;
	cm_id_priv->path_mtu = param->primary_path->mtu;
	cm_id_priv->pkey = param->primary_path->pkey;
	cm_id_priv->qp_type = param->qp_type;

	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);

	req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
	cm_format_req(req_msg, cm_id_priv, param);
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
	cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

	cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	BUG_ON(cm_id->state != IB_CM_IDLE);
	cm_id->state = IB_CM_REQ_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

error2:	cm_free_msg(cm_id_priv->msg);
error1:	kfree(cm_id_priv->timewait_info);
EXPORT_SYMBOL(ib_send_cm_req);
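
/*
 * Note the timeout computed above: timeout_ms covers one round trip on the
 * primary path (2 * packet life time, converted from the IBA exponent to
 * milliseconds) plus the remote CM response timeout term, and it is applied
 * to the REQ send buffer via cm_id_priv->msg->timeout_ms before posting.
 */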
static int cm_issue_rej(struct cm_port *port,
			struct ib_mad_recv_wc *mad_recv_wc,
			enum ib_cm_rej_reason reason,
			enum cm_msg_response msg_rejected,
			void *ari, u8 ari_length)
	struct ib_mad_send_buf *msg = NULL;
	struct cm_rej_msg *rej_msg, *rcv_msg;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);

	/* We just need common CM header information. Cast to any message. */
	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
	rej_msg = (struct cm_rej_msg *) msg->mad;

	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
	rej_msg->remote_comm_id = rcv_msg->local_comm_id;
	rej_msg->local_comm_id = rcv_msg->remote_comm_id;
	cm_rej_set_msg_rejected(rej_msg, msg_rejected);
	rej_msg->reason = cpu_to_be16(reason);

	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);

	ret = ib_post_send_mad(msg, NULL);
static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
				    __be32 local_qpn, __be32 remote_qpn)
	return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
		((local_ca_guid == remote_ca_guid) &&
		 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
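
/*
 * cm_is_active_peer() encodes the peer tie-break rule: the side with the
 * numerically larger CA GUID (or, if the GUIDs match, the larger QPN) is
 * treated as the active side of the connection.
 */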
static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
				     struct ib_sa_path_rec *primary_path,
				     struct ib_sa_path_rec *alt_path)
	memset(primary_path, 0, sizeof *primary_path);
	primary_path->dgid = req_msg->primary_local_gid;
	primary_path->sgid = req_msg->primary_remote_gid;
	primary_path->dlid = req_msg->primary_local_lid;
	primary_path->slid = req_msg->primary_remote_lid;
	primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
	primary_path->hop_limit = req_msg->primary_hop_limit;
	primary_path->traffic_class = req_msg->primary_traffic_class;
	primary_path->reversible = 1;
	primary_path->pkey = req_msg->pkey;
	primary_path->sl = cm_req_get_primary_sl(req_msg);
	primary_path->mtu_selector = IB_SA_EQ;
	primary_path->mtu = cm_req_get_path_mtu(req_msg);
	primary_path->rate_selector = IB_SA_EQ;
	primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
	primary_path->packet_life_time_selector = IB_SA_EQ;
	primary_path->packet_life_time =
		cm_req_get_primary_local_ack_timeout(req_msg);
	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);

	if (req_msg->alt_local_lid) {
		memset(alt_path, 0, sizeof *alt_path);
		alt_path->dgid = req_msg->alt_local_gid;
		alt_path->sgid = req_msg->alt_remote_gid;
		alt_path->dlid = req_msg->alt_local_lid;
		alt_path->slid = req_msg->alt_remote_lid;
		alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
		alt_path->hop_limit = req_msg->alt_hop_limit;
		alt_path->traffic_class = req_msg->alt_traffic_class;
		alt_path->reversible = 1;
		alt_path->pkey = req_msg->pkey;
		alt_path->sl = cm_req_get_alt_sl(req_msg);
		alt_path->mtu_selector = IB_SA_EQ;
		alt_path->mtu = cm_req_get_path_mtu(req_msg);
		alt_path->rate_selector = IB_SA_EQ;
		alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
		alt_path->packet_life_time_selector = IB_SA_EQ;
		alt_path->packet_life_time =
			cm_req_get_alt_local_ack_timeout(req_msg);
		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
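
/*
 * Note the swap above: the REQ was formatted by the remote (active) side,
 * so its "local" fields describe the peer.  When building path records for
 * the passive side, local/remote LIDs and GIDs are therefore reversed
 * (e.g. primary_path->dgid comes from req_msg->primary_local_gid), and the
 * IBA ack-timeout value is converted back to a packet lifetime by
 * subtracting one where it is non-zero.
 */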
static void cm_format_req_event(struct cm_work *work,
				struct cm_id_private *cm_id_priv,
				struct ib_cm_id *listen_id)
	struct cm_req_msg *req_msg;
	struct ib_cm_req_event_param *param;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.req_rcvd;
	param->listen_id = listen_id;
	param->port = cm_id_priv->av.port->port_num;
	param->primary_path = &work->path[0];
	if (req_msg->alt_local_lid)
		param->alternate_path = &work->path[1];
		param->alternate_path = NULL;
	param->remote_ca_guid = req_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
	param->qp_type = cm_req_get_qp_type(req_msg);
	param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
	param->responder_resources = cm_req_get_init_depth(req_msg);
	param->initiator_depth = cm_req_get_resp_res(req_msg);
	param->local_cm_response_timeout =
					cm_req_get_remote_resp_timeout(req_msg);
	param->flow_control = cm_req_get_flow_ctrl(req_msg);
	param->remote_cm_response_timeout =
					cm_req_get_local_resp_timeout(req_msg);
	param->retry_count = cm_req_get_retry_count(req_msg);
	param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	param->srq = cm_req_get_srq(req_msg);
	work->cm_event.private_data = &req_msg->private_data;
static void cm_process_work(struct cm_id_private *cm_id_priv,
			    struct cm_work *work)
	unsigned long flags;

	/* We will typically only have the current event to report. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);

	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		work = cm_dequeue_work(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
	cm_deref_id(cm_id_priv);
		cm_destroy_id(&cm_id_priv->id, ret);
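
/*
 * cm_process_work() above delivers the current event and then drains any
 * events that were queued on cm_id_priv->work_list while work_count was
 * elevated; if the consumer's cm_handler returns non-zero for any event,
 * the loop stops and the cm_id is destroyed via cm_destroy_id().
 */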
static void cm_format_mra(struct cm_mra_msg *mra_msg,
			  struct cm_id_private *cm_id_priv,
			  enum cm_msg_response msg_mraed, u8 service_timeout,
			  const void *private_data, u8 private_data_len)
	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
	cm_mra_set_msg_mraed(mra_msg, msg_mraed);
	mra_msg->local_comm_id = cm_id_priv->id.local_id;
	mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_mra_set_service_timeout(mra_msg, service_timeout);

	if (private_data && private_data_len)
		memcpy(mra_msg->private_data, private_data, private_data_len);
static void cm_format_rej(struct cm_rej_msg *rej_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_rej_reason reason,
			  const void *private_data,
			  u8 private_data_len)
	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
	rej_msg->remote_comm_id = cm_id_priv->id.remote_id;

	switch(cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		rej_msg->local_comm_id = 0;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
	case IB_CM_MRA_REQ_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);

	rej_msg->reason = cpu_to_be16(reason);
	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);

	if (private_data && private_data_len)
		memcpy(rej_msg->private_data, private_data, private_data_len);
static void cm_dup_req_handler(struct cm_work *work,
			       struct cm_id_private *cm_id_priv)
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;

	/* Quick state check to discard duplicate REQs. */
	if (cm_id_priv->id.state == IB_CM_REQ_RCVD)

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_MRA_REQ_SENT:
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	case IB_CM_TIMEWAIT:
		cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = ib_post_send_mad(msg, NULL);

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:	cm_free_msg(msg);
static struct cm_id_private * cm_match_req(struct cm_work *work,
					   struct cm_id_private *cm_id_priv)
	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
	struct cm_timewait_info *timewait_info;
	struct cm_req_msg *req_msg;
	unsigned long flags;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	/* Check for duplicate REQ and stale connections. */
	spin_lock_irqsave(&cm.lock, flags);
	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
		timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);

	if (timewait_info) {
		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
					   timewait_info->work.remote_id);
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irqrestore(&cm.lock, flags);
		if (cur_cm_id_priv) {
			cm_dup_req_handler(work, cur_cm_id_priv);
			cm_deref_id(cur_cm_id_priv);
			cm_issue_rej(work->port, work->mad_recv_wc,
				     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
		listen_cm_id_priv = NULL;

	/* Find matching listen request. */
	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
					   req_msg->service_id,
					   req_msg->private_data);
	if (!listen_cm_id_priv) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irqrestore(&cm.lock, flags);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,

	atomic_inc(&listen_cm_id_priv->refcount);
	atomic_inc(&cm_id_priv->refcount);
	cm_id_priv->id.state = IB_CM_REQ_RCVD;
	atomic_inc(&cm_id_priv->work_count);
	spin_unlock_irqrestore(&cm.lock, flags);

	return listen_cm_id_priv;
static int cm_req_handler(struct cm_work *work)
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
	struct cm_req_msg *req_msg;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
		return PTR_ERR(cm_id);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	cm_id_priv->id.remote_id = req_msg->local_comm_id;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
	cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);

	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
	if (!listen_cm_id_priv) {
		kfree(cm_id_priv->timewait_info);

	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = listen_cm_id_priv->id.context;
	cm_id_priv->id.service_id = req_msg->service_id;
	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);

	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
	ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
		ib_get_cached_gid(work->port->cm_dev->device,
				  work->port->port_num, 0, &work->path[0].sgid);
		ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
			       &work->path[0].sgid, sizeof work->path[0].sgid,

	if (req_msg->alt_local_lid) {
		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
			ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
				       &work->path[0].sgid,
				       sizeof work->path[0].sgid, NULL, 0);

	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->timeout_ms = cm_convert_to_ms(
					cm_req_get_local_resp_timeout(req_msg));
	cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
	cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
	cm_id_priv->pkey = req_msg->pkey;
	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);

	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(listen_cm_id_priv);

	atomic_dec(&cm_id_priv->refcount);
	cm_deref_id(listen_cm_id_priv);
	ib_destroy_cm_id(cm_id);
static void cm_format_rep(struct cm_rep_msg *rep_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_rep_param *param)
	cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
	rep_msg->local_comm_id = cm_id_priv->id.local_id;
	rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
	cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
	rep_msg->resp_resources = param->responder_resources;
	rep_msg->initiator_depth = param->initiator_depth;
	cm_rep_set_target_ack_delay(rep_msg, param->target_ack_delay);
	cm_rep_set_failover(rep_msg, param->failover_accepted);
	cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
	cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
	cm_rep_set_srq(rep_msg, param->srq);
	rep_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;

	if (param->private_data && param->private_data_len)
		memcpy(rep_msg->private_data, param->private_data,
		       param->private_data_len);
int ib_send_cm_rep(struct ib_cm_id *cm_id,
		   struct ib_cm_rep_param *param)
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct cm_rep_msg *rep_msg;
	unsigned long flags;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REQ_RCVD &&
	    cm_id->state != IB_CM_MRA_REQ_SENT) {

	ret = cm_alloc_msg(cm_id_priv, &msg);

	rep_msg = (struct cm_rep_msg *) msg->mad;
	cm_format_rep(rep_msg, cm_id_priv, param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;

	ret = ib_post_send_mad(msg, NULL);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id->state = IB_CM_REP_SENT;
	cm_id_priv->msg = msg;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
EXPORT_SYMBOL(ib_send_cm_rep);
static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
			  struct cm_id_private *cm_id_priv,
			  const void *private_data,
			  u8 private_data_len)
	cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
	rtu_msg->local_comm_id = cm_id_priv->id.local_id;
	rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(rtu_msg->private_data, private_data, private_data_len);
int ib_send_cm_rtu(struct ib_cm_id *cm_id,
		   const void *private_data,
		   u8 private_data_len)
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;

	if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)

	data = cm_copy_private_data(private_data, private_data_len);
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REP_RCVD &&
	    cm_id->state != IB_CM_MRA_REP_SENT) {

	ret = cm_alloc_msg(cm_id_priv, &msg);

	cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
		      private_data, private_data_len);

	ret = ib_post_send_mad(msg, NULL);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id->state = IB_CM_ESTABLISHED;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
EXPORT_SYMBOL(ib_send_cm_rtu);
static void cm_format_rep_event(struct cm_work *work)
	struct cm_rep_msg *rep_msg;
	struct ib_cm_rep_event_param *param;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rep_rcvd;
	param->remote_ca_guid = rep_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
	param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
	param->responder_resources = rep_msg->initiator_depth;
	param->initiator_depth = rep_msg->resp_resources;
	param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
	param->failover_accepted = cm_rep_get_failover(rep_msg);
	param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
	param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
	param->srq = cm_rep_get_srq(rep_msg);
	work->cm_event.private_data = &rep_msg->private_data;
static void cm_dup_rep_handler(struct cm_work *work)
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;

	rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
				   rep_msg->local_comm_id);

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
		cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = ib_post_send_mad(msg, NULL);

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:	cm_free_msg(msg);
deref:	cm_deref_id(cm_id_priv);
static int cm_rep_handler(struct cm_work *work)
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	unsigned long flags;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
		cm_dup_rep_handler(work);

	cm_format_rep_event(work);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);

	spin_lock(&cm.lock);
	/* Check for duplicate REP. */
	if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
		spin_unlock(&cm.lock);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	/* Check for a stale connection. */
	if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
		rb_erase(&cm_id_priv->timewait_info->remote_id_node,
			 &cm.remote_id_table);
		cm_id_priv->timewait_info->inserted_remote_id = 0;
		spin_unlock(&cm.lock);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,

	spin_unlock(&cm.lock);

	cm_id_priv->id.state = IB_CM_REP_RCVD;
	cm_id_priv->id.remote_id = rep_msg->local_comm_id;
	cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
	cm_id_priv->initiator_depth = rep_msg->resp_resources;
	cm_id_priv->responder_resources = rep_msg->initiator_depth;
	cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);

	/* todo: handle peer_to_peer */

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

		cm_process_work(cm_id_priv, work);
		cm_deref_id(cm_id_priv);

	cm_deref_id(cm_id_priv);
static int cm_establish_handler(struct cm_work *work)
	struct cm_id_private *cm_id_priv;
	unsigned long flags;

	/* See comment in cm_establish about lookup. */
	cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

		cm_process_work(cm_id_priv, work);
		cm_deref_id(cm_id_priv);
	cm_deref_id(cm_id_priv);
static int cm_rtu_handler(struct cm_work *work)
	struct cm_id_private *cm_id_priv;
	struct cm_rtu_msg *rtu_msg;
	unsigned long flags;

	rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
				   rtu_msg->local_comm_id);

	work->cm_event.private_data = &rtu_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_REP_SENT &&
	    cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_id_priv->id.state = IB_CM_ESTABLISHED;

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

		cm_process_work(cm_id_priv, work);
		cm_deref_id(cm_id_priv);
	cm_deref_id(cm_id_priv);
static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
			   struct cm_id_private *cm_id_priv,
			   const void *private_data,
			   u8 private_data_len)
	cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
	dreq_msg->local_comm_id = cm_id_priv->id.local_id;
	dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);

	if (private_data && private_data_len)
		memcpy(dreq_msg->private_data, private_data, private_data_len);
int ib_send_cm_dreq(struct ib_cm_id *cm_id,
		    const void *private_data,
		    u8 private_data_len)
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;

	if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED) {

	ret = cm_alloc_msg(cm_id_priv, &msg);
		cm_enter_timewait(cm_id_priv);

	cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;

	ret = ib_post_send_mad(msg, NULL);
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id->state = IB_CM_DREQ_SENT;
	cm_id_priv->msg = msg;
out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
EXPORT_SYMBOL(ib_send_cm_dreq);
static void cm_format_drep(struct cm_drep_msg *drep_msg,
			   struct cm_id_private *cm_id_priv,
			   const void *private_data,
			   u8 private_data_len)
	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
	drep_msg->local_comm_id = cm_id_priv->id.local_id;
	drep_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(drep_msg->private_data, private_data, private_data_len);
int ib_send_cm_drep(struct ib_cm_id *cm_id,
		    const void *private_data,
		    u8 private_data_len)
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;

	if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)

	data = cm_copy_private_data(private_data, private_data_len);
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_DREQ_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_set_private_data(cm_id_priv, data, private_data_len);
	cm_enter_timewait(cm_id_priv);

	ret = cm_alloc_msg(cm_id_priv, &msg);

	cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);

	ret = ib_post_send_mad(msg, NULL);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
EXPORT_SYMBOL(ib_send_cm_drep);
static int cm_issue_drep(struct cm_port *port,
			 struct ib_mad_recv_wc *mad_recv_wc)
	struct ib_mad_send_buf *msg = NULL;
	struct cm_dreq_msg *dreq_msg;
	struct cm_drep_msg *drep_msg;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);

	dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
	drep_msg = (struct cm_drep_msg *) msg->mad;

	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
	drep_msg->remote_comm_id = dreq_msg->local_comm_id;
	drep_msg->local_comm_id = dreq_msg->remote_comm_id;

	ret = ib_post_send_mad(msg, NULL);
static int cm_dreq_handler(struct cm_work *work)
	struct cm_id_private *cm_id_priv;
	struct cm_dreq_msg *dreq_msg;
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;

	dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
				   dreq_msg->local_comm_id);
		cm_issue_drep(work->port, work->mad_recv_wc);

	work->cm_event.private_data = &dreq_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))

	switch (cm_id_priv->id.state) {
	case IB_CM_REP_SENT:
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	case IB_CM_ESTABLISHED:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_TIMEWAIT:
		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))

		cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
			       cm_id_priv->private_data,
			       cm_id_priv->private_data_len);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

		if (ib_post_send_mad(msg, NULL))

	cm_id_priv->id.state = IB_CM_DREQ_RCVD;
	cm_id_priv->tid = dreq_msg->hdr.tid;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

		cm_process_work(cm_id_priv, work);
		cm_deref_id(cm_id_priv);

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
deref:	cm_deref_id(cm_id_priv);
static int cm_drep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_drep_msg *drep_msg;
	unsigned long flags;
	int ret;

	drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
				   drep_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &drep_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
	    cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_enter_timewait(cm_id_priv);

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
int ib_send_cm_rej(struct ib_cm_id *cm_id,
		   enum ib_cm_rej_reason reason,
		   void *ari,
		   u8 ari_length,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (!ret)
			cm_format_rej((struct cm_rej_msg *) msg->mad,
				      cm_id_priv, reason, ari, ari_length,
				      private_data, private_data_len);

		cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (!ret)
			cm_format_rej((struct cm_rej_msg *) msg->mad,
				      cm_id_priv, reason, ari, ari_length,
				      private_data, private_data_len);

		cm_enter_timewait(cm_id_priv);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	if (ret)
		goto out;

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rej);
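
/*
 * REJ reception: cm_format_rej_event() copies the ARI and reason into
 * the event reported to the consumer, and cm_acquire_rejected_id()
 * maps the REJ back to a local cm_id, including the timeout case
 * where only the ARI identifies the stale connection.
 */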
static void cm_format_rej_event(struct cm_work *work)
{
	struct cm_rej_msg *rej_msg;
	struct ib_cm_rej_event_param *param;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rej_rcvd;
	param->ari = rej_msg->ari;
	param->ari_length = cm_rej_get_reject_info_len(rej_msg);
	param->reason = __be16_to_cpu(rej_msg->reason);
	work->cm_event.private_data = &rej_msg->private_data;
}

static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	__be32 remote_id;

	remote_id = rej_msg->local_comm_id;

	if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
		spin_lock_irqsave(&cm.lock, flags);
		timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
						  remote_id);
		if (!timewait_info) {
			spin_unlock_irqrestore(&cm.lock, flags);
			return NULL;
		}
		cm_id_priv = idr_find(&cm.local_id_table, (__force int)
				      (timewait_info->work.local_id ^
				       cm.random_id_operand));
		if (cm_id_priv) {
			if (cm_id_priv->id.remote_id == remote_id)
				atomic_inc(&cm_id_priv->refcount);
			else
				cm_id_priv = NULL;
		}
		spin_unlock_irqrestore(&cm.lock, flags);
	} else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
	else
		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);

	return cm_id_priv;
}
static int cm_rej_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rej_msg *rej_msg;
	unsigned long flags;
	int ret;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_rejected_id(rej_msg);
	if (!cm_id_priv)
		return -EINVAL;

	cm_format_rej_event(work);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* fall through */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
		if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
			cm_enter_timewait(cm_id_priv);
		else
			cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* fall through */
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_ESTABLISHED:
		cm_enter_timewait(cm_id_priv);
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}

	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
int ib_send_cm_mra(struct ib_cm_id *cm_id,
		   u8 service_timeout,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	void *data;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch(cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
		cm_id->state = IB_CM_MRA_REQ_SENT;
		break;
	case IB_CM_REP_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
		cm_id->state = IB_CM_MRA_REP_SENT;
		break;
	case IB_CM_ESTABLISHED:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_OTHER, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
		cm_id->lap_state = IB_CM_MRA_LAP_SENT;
		break;
	default:
		ret = -EINVAL;
		goto error1;
	}
	cm_id_priv->service_timeout = service_timeout;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error1:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;

error2:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	cm_free_msg(msg);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_mra);
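
/*
 * MRA reception: the peer is asking for more time, so cm_mra_handler()
 * extends the timeout of the outstanding REQ, REP, or LAP MAD via
 * ib_modify_mad() and records the corresponding MRA-received state.
 */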
static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
{
	switch (cm_mra_get_msg_mraed(mra_msg)) {
	case CM_MSG_RESPONSE_REQ:
		return cm_acquire_id(mra_msg->remote_comm_id, 0);
	case CM_MSG_RESPONSE_REP:
	case CM_MSG_RESPONSE_OTHER:
		return cm_acquire_id(mra_msg->remote_comm_id,
				     mra_msg->local_comm_id);
	default:
		return NULL;
	}
}
static int cm_mra_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_mra_msg *mra_msg;
	unsigned long flags;
	int timeout, ret;

	mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_mraed_id(mra_msg);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &mra_msg->private_data;
	work->cm_event.param.mra_rcvd.service_timeout =
					cm_mra_get_service_timeout(mra_msg);
	timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
		  cm_convert_to_ms(cm_id_priv->av.packet_life_time);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
		break;
	case IB_CM_REP_SENT:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
		break;
	case IB_CM_ESTABLISHED:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
		    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
		break;
	default:
		goto out;
	}

	cm_id_priv->msg->context[1] = (void *) (unsigned long)
				      cm_id_priv->id.state;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static void cm_format_lap(struct cm_lap_msg *lap_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_sa_path_rec *alternate_path,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
	lap_msg->local_comm_id = cm_id_priv->id.local_id;
	lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
	/* todo: need remote CM response timeout */
	cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
	lap_msg->alt_local_lid = alternate_path->slid;
	lap_msg->alt_remote_lid = alternate_path->dlid;
	lap_msg->alt_local_gid = alternate_path->sgid;
	lap_msg->alt_remote_gid = alternate_path->dgid;
	cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
	cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
	lap_msg->alt_hop_limit = alternate_path->hop_limit;
	cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
	cm_lap_set_sl(lap_msg, alternate_path->sl);
	cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
	cm_lap_set_local_ack_timeout(lap_msg,
		min(31, alternate_path->packet_life_time + 1));

	if (private_data && private_data_len)
		memcpy(lap_msg->private_data, private_data, private_data_len);
}
int ib_send_cm_lap(struct ib_cm_id *cm_id,
		   struct ib_sa_path_rec *alternate_path,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED ||
	    (cm_id->lap_state != IB_CM_LAP_UNINIT &&
	     cm_id->lap_state != IB_CM_LAP_IDLE)) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
	if (ret)
		goto out;

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
		      alternate_path, private_data, private_data_len);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->lap_state = IB_CM_LAP_SENT;
	cm_id_priv->msg = msg;

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_lap);
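
/*
 * A received LAP describes the alternate path from the sender's point
 * of view, so cm_format_path_from_lap() swaps local/remote LIDs and
 * GIDs when rebuilding the ib_sa_path_rec reported to the consumer.
 */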
static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
				    struct ib_sa_path_rec *path,
				    struct cm_lap_msg *lap_msg)
{
	memset(path, 0, sizeof *path);
	path->dgid = lap_msg->alt_local_gid;
	path->sgid = lap_msg->alt_remote_gid;
	path->dlid = lap_msg->alt_local_lid;
	path->slid = lap_msg->alt_remote_lid;
	path->flow_label = cm_lap_get_flow_label(lap_msg);
	path->hop_limit = lap_msg->alt_hop_limit;
	path->traffic_class = cm_lap_get_traffic_class(lap_msg);
	path->reversible = 1;
	path->pkey = cm_id_priv->pkey;
	path->sl = cm_lap_get_sl(lap_msg);
	path->mtu_selector = IB_SA_EQ;
	path->mtu = cm_id_priv->path_mtu;
	path->rate_selector = IB_SA_EQ;
	path->rate = cm_lap_get_packet_rate(lap_msg);
	path->packet_life_time_selector = IB_SA_EQ;
	path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
	path->packet_life_time -= (path->packet_life_time > 0);
}
static int cm_lap_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_lap_msg *lap_msg;
	struct ib_cm_lap_event_param *param;
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	/* todo: verify LAP request and send reject APR if invalid. */
	lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
				   lap_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	param = &work->cm_event.param.lap_rcvd;
	param->alternate_path = &work->path[0];
	cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
	work->cm_event.private_data = &lap_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
		goto unlock;

	switch (cm_id_priv->id.lap_state) {
	case IB_CM_LAP_UNINIT:
	case IB_CM_LAP_IDLE:
		break;
	case IB_CM_MRA_LAP_SENT:
		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
			goto unlock;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_OTHER,
			      cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

		if (ib_post_send_mad(msg, NULL))
			cm_free_msg(msg);
		goto deref;
	default:
		goto unlock;
	}

	cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
	cm_id_priv->tid = lap_msg->hdr.tid;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static void cm_format_apr(struct cm_apr_msg *apr_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_apr_status status,
			  void *info,
			  u8 info_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
	apr_msg->local_comm_id = cm_id_priv->id.local_id;
	apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
	apr_msg->ap_status = (u8) status;

	if (info && info_length) {
		apr_msg->info_length = info_length;
		memcpy(apr_msg->info, info, info_length);
	}

	if (private_data && private_data_len)
		memcpy(apr_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_apr(struct ib_cm_id *cm_id,
		   enum ib_cm_apr_status status,
		   void *info,
		   u8 info_length,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
	    (info && info_length > IB_CM_APR_INFO_LENGTH))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED ||
	    (cm_id->lap_state != IB_CM_LAP_RCVD &&
	     cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
		      info, info_length, private_data, private_data_len);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->lap_state = IB_CM_LAP_IDLE;
out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_apr);
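
/*
 * APR reception: the alternate path response ends the LAP exchange.
 * cm_apr_handler() returns the lap_state to idle, cancels the
 * outstanding LAP MAD, and queues the event for the consumer.
 */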
static int cm_apr_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_apr_msg *apr_msg;
	unsigned long flags;
	int ret;

	apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
				   apr_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
	work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
	work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
	work->cm_event.private_data = &apr_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
	    (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
	     cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	cm_id_priv->msg = NULL;

	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static int cm_timewait_handler(struct cm_work *work)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;
	int ret;

	timewait_info = (struct cm_timewait_info *)work;
	spin_lock_irq(&cm.lock);
	list_del(&timewait_info->list);
	spin_unlock_irq(&cm.lock);

	cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
				   timewait_info->work.remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
	    cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_req_param *param)
{
	cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
	sidr_req_msg->request_id = cm_id_priv->id.local_id;
	sidr_req_msg->pkey = cpu_to_be16(param->path->pkey);
	sidr_req_msg->service_id = param->service_id;

	if (param->private_data && param->private_data_len)
		memcpy(sidr_req_msg->private_data, param->private_data,
		       param->private_data_len);
}
int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (!param->path || (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
	if (ret)
		goto out;

	cm_id->service_id = param->service_id;
	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	cm_id_priv->timeout_ms = param->timeout_ms;
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
			   param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state == IB_CM_IDLE)
		ret = ib_post_send_mad(msg, NULL);
	else
		ret = -EINVAL;

	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		goto out;
	}
	cm_id->state = IB_CM_SIDR_REQ_SENT;
	cm_id_priv->msg = msg;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
out:
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_req);
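
/*
 * SIDR request reception: a new cm_id is created for each request,
 * keyed in the remote SIDR table by the sender's LID and request ID,
 * and the request is dispatched to the matching listener, if any.
 */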
static void cm_format_sidr_req_event(struct cm_work *work,
				     struct ib_cm_id *listen_id)
{
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_cm_sidr_req_event_param *param;

	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_req_rcvd;
	param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
	param->listen_id = listen_id;
	param->port = work->port->port_num;
	work->cm_event.private_data = &sidr_req_msg->private_data;
}
static int cm_sidr_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_wc *wc;
	unsigned long flags;

	cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	/* Record SGID/SLID and request ID for lookup. */
	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	wc = work->mad_recv_wc->wc;
	cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
	cm_id_priv->av.dgid.global.interface_id = 0;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_id_priv->id.remote_id = sidr_req_msg->request_id;
	cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
	cm_id_priv->tid = sidr_req_msg->hdr.tid;
	atomic_inc(&cm_id_priv->work_count);

	spin_lock_irqsave(&cm.lock, flags);
	cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
	if (cur_cm_id_priv) {
		spin_unlock_irqrestore(&cm.lock, flags);
		goto out; /* Duplicate message. */
	}
	cur_cm_id_priv = cm_find_listen(cm_id->device,
					sidr_req_msg->service_id,
					sidr_req_msg->private_data);
	if (!cur_cm_id_priv) {
		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
		spin_unlock_irqrestore(&cm.lock, flags);
		/* todo: reply with no match */
		goto out; /* No match. */
	}
	atomic_inc(&cur_cm_id_priv->refcount);
	spin_unlock_irqrestore(&cm.lock, flags);

	cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = cur_cm_id_priv->id.context;
	cm_id_priv->id.service_id = sidr_req_msg->service_id;
	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);

	cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(cur_cm_id_priv);
	return 0;
out:
	ib_destroy_cm_id(&cm_id_priv->id);
	return -EINVAL;
}
static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_rep_param *param)
{
	cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
			  cm_id_priv->tid);
	sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
	sidr_rep_msg->status = param->status;
	cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
	sidr_rep_msg->service_id = cm_id_priv->id.service_id;
	sidr_rep_msg->qkey = cpu_to_be32(param->qkey);

	if (param->info && param->info_length)
		memcpy(sidr_rep_msg->info, param->info, param->info_length);

	if (param->private_data && param->private_data_len)
		memcpy(sidr_rep_msg->private_data, param->private_data,
		       param->private_data_len);
}
int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
	    (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
			   param);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}
	cm_id->state = IB_CM_IDLE;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	spin_lock_irqsave(&cm.lock, flags);
	rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	spin_unlock_irqrestore(&cm.lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_rep);
);
2879 static void cm_format_sidr_rep_event(struct cm_work
*work
)
2881 struct cm_sidr_rep_msg
*sidr_rep_msg
;
2882 struct ib_cm_sidr_rep_event_param
*param
;
2884 sidr_rep_msg
= (struct cm_sidr_rep_msg
*)
2885 work
->mad_recv_wc
->recv_buf
.mad
;
2886 param
= &work
->cm_event
.param
.sidr_rep_rcvd
;
2887 param
->status
= sidr_rep_msg
->status
;
2888 param
->qkey
= be32_to_cpu(sidr_rep_msg
->qkey
);
2889 param
->qpn
= be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg
));
2890 param
->info
= &sidr_rep_msg
->info
;
2891 param
->info_len
= sidr_rep_msg
->info_length
;
2892 work
->cm_event
.private_data
= &sidr_rep_msg
->private_data
;
static int cm_sidr_rep_handler(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct cm_id_private *cm_id_priv;
	unsigned long flags;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_format_sidr_rep_event(work);
	cm_process_work(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static void cm_process_send_error(struct ib_mad_send_buf *msg,
				  enum ib_wc_status wc_status)
{
	struct cm_id_private *cm_id_priv;
	struct ib_cm_event cm_event;
	enum ib_cm_state state;
	unsigned long flags;
	int ret;

	memset(&cm_event, 0, sizeof cm_event);
	cm_id_priv = msg->context[0];

	/* Discard old sends or ones without a response. */
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	state = (enum ib_cm_state) (unsigned long) msg->context[1];
	if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
		goto discard;

	switch (state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REQ_ERROR;
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REP_ERROR;
		break;
	case IB_CM_DREQ_SENT:
		cm_enter_timewait(cm_id_priv);
		cm_event.event = IB_CM_DREQ_ERROR;
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id_priv->id.state = IB_CM_IDLE;
		cm_event.event = IB_CM_SIDR_REQ_ERROR;
		break;
	default:
		goto discard;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_event.param.send_status = wc_status;

	/* No other events can occur on the cm_id at this point. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
	cm_free_msg(msg);
	if (ret)
		ib_destroy_cm_id(&cm_id_priv->id);
	return;
discard:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_free_msg(msg);
}
static void cm_send_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_send_buf *msg = mad_send_wc->send_buf;

	switch (mad_send_wc->status) {
	case IB_WC_SUCCESS:
	case IB_WC_WR_FLUSH_ERR:
		cm_free_msg(msg);
		break;
	default:
		if (msg->context[0] && msg->context[1])
			cm_process_send_error(msg, mad_send_wc->status);
		else
			cm_free_msg(msg);
		break;
	}
}
static void cm_work_handler(struct work_struct *_work)
{
	struct cm_work *work = container_of(_work, struct cm_work, work.work);
	int ret;

	switch (work->cm_event.event) {
	case IB_CM_REQ_RECEIVED:
		ret = cm_req_handler(work);
		break;
	case IB_CM_MRA_RECEIVED:
		ret = cm_mra_handler(work);
		break;
	case IB_CM_REJ_RECEIVED:
		ret = cm_rej_handler(work);
		break;
	case IB_CM_REP_RECEIVED:
		ret = cm_rep_handler(work);
		break;
	case IB_CM_RTU_RECEIVED:
		ret = cm_rtu_handler(work);
		break;
	case IB_CM_USER_ESTABLISHED:
		ret = cm_establish_handler(work);
		break;
	case IB_CM_DREQ_RECEIVED:
		ret = cm_dreq_handler(work);
		break;
	case IB_CM_DREP_RECEIVED:
		ret = cm_drep_handler(work);
		break;
	case IB_CM_SIDR_REQ_RECEIVED:
		ret = cm_sidr_req_handler(work);
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		ret = cm_sidr_rep_handler(work);
		break;
	case IB_CM_LAP_RECEIVED:
		ret = cm_lap_handler(work);
		break;
	case IB_CM_APR_RECEIVED:
		ret = cm_apr_handler(work);
		break;
	case IB_CM_TIMEWAIT_EXIT:
		ret = cm_timewait_handler(work);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	if (ret)
		cm_free_work(work);
}
static int cm_establish(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;
	int ret = 0;

	work = kmalloc(sizeof *work, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state)
	{
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_id->state = IB_CM_ESTABLISHED;
		break;
	case IB_CM_ESTABLISHED:
		ret = -EISCONN;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret) {
		kfree(work);
		goto out;
	}

	/*
	 * The CM worker thread may try to destroy the cm_id before it
	 * can execute this work item.  To prevent potential deadlock,
	 * we need to find the cm_id once we're in the context of the
	 * worker thread, rather than holding a reference on it.
	 */
	INIT_DELAYED_WORK(&work->work, cm_work_handler);
	work->local_id = cm_id->local_id;
	work->remote_id = cm_id->remote_id;
	work->mad_recv_wc = NULL;
	work->cm_event.event = IB_CM_USER_ESTABLISHED;
	queue_delayed_work(cm.wq, &work->work, 0);
out:
	return ret;
}
static int cm_migrate(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	int ret = 0;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state == IB_CM_ESTABLISHED &&
	    (cm_id->lap_state == IB_CM_LAP_UNINIT ||
	     cm_id->lap_state == IB_CM_LAP_IDLE)) {
		cm_id->lap_state = IB_CM_LAP_IDLE;
		cm_id_priv->av = cm_id_priv->alt_av;
	} else
		ret = -EINVAL;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	return ret;
}
int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
{
	int ret;

	switch (event) {
	case IB_EVENT_COMM_EST:
		ret = cm_establish(cm_id);
		break;
	case IB_EVENT_PATH_MIG:
		ret = cm_migrate(cm_id);
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_notify);
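
/*
 * cm_recv_handler() is the MAD-layer receive callback.  It maps the
 * attribute ID to a CM event, allocates a cm_work item with room for
 * any path records carried by REQ/LAP messages, and defers all real
 * processing to cm_work_handler() on the CM workqueue.
 */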
static void cm_recv_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_recv_wc *mad_recv_wc)
{
	struct cm_work *work;
	enum ib_cm_event_type event;
	int paths = 0;

	switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
	case CM_REQ_ATTR_ID:
		paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
						    alt_local_lid != 0);
		event = IB_CM_REQ_RECEIVED;
		break;
	case CM_MRA_ATTR_ID:
		event = IB_CM_MRA_RECEIVED;
		break;
	case CM_REJ_ATTR_ID:
		event = IB_CM_REJ_RECEIVED;
		break;
	case CM_REP_ATTR_ID:
		event = IB_CM_REP_RECEIVED;
		break;
	case CM_RTU_ATTR_ID:
		event = IB_CM_RTU_RECEIVED;
		break;
	case CM_DREQ_ATTR_ID:
		event = IB_CM_DREQ_RECEIVED;
		break;
	case CM_DREP_ATTR_ID:
		event = IB_CM_DREP_RECEIVED;
		break;
	case CM_SIDR_REQ_ATTR_ID:
		event = IB_CM_SIDR_REQ_RECEIVED;
		break;
	case CM_SIDR_REP_ATTR_ID:
		event = IB_CM_SIDR_REP_RECEIVED;
		break;
	case CM_LAP_ATTR_ID:
		paths = 1;
		event = IB_CM_LAP_RECEIVED;
		break;
	case CM_APR_ATTR_ID:
		event = IB_CM_APR_RECEIVED;
		break;
	default:
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
		       GFP_KERNEL);
	if (!work) {
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	INIT_DELAYED_WORK(&work->work, cm_work_handler);
	work->cm_event.event = event;
	work->mad_recv_wc = mad_recv_wc;
	work->port = (struct cm_port *)mad_agent->context;
	queue_delayed_work(cm.wq, &work->work, 0);
}
static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
				struct ib_qp_attr *qp_attr,
				int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
				IB_QP_PKEY_INDEX | IB_QP_PORT;
		qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
		if (cm_id_priv->responder_resources)
			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
						    IB_ACCESS_REMOTE_ATOMIC;
		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
		qp_attr->port_num = cm_id_priv->av.port->port_num;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
				IB_QP_DEST_QPN | IB_QP_RQ_PSN;
		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
		qp_attr->path_mtu = cm_id_priv->path_mtu;
		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
		if (cm_id_priv->qp_type == IB_QPT_RC) {
			*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
					 IB_QP_MIN_RNR_TIMER;
			qp_attr->max_dest_rd_atomic =
					cm_id_priv->responder_resources;
			qp_attr->min_rnr_timer = 0;
		}
		if (cm_id_priv->alt_av.ah_attr.dlid) {
			*qp_attr_mask |= IB_QP_ALT_PATH;
			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
			qp_attr->alt_timeout =
					cm_id_priv->alt_av.packet_life_time + 1;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
			*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
			qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
			if (cm_id_priv->qp_type == IB_QPT_RC) {
				*qp_attr_mask |= IB_QP_TIMEOUT |
						 IB_QP_RETRY_CNT |
						 IB_QP_RNR_RETRY |
						 IB_QP_MAX_QP_RD_ATOMIC;
				qp_attr->timeout =
					cm_id_priv->av.packet_life_time + 1;
				qp_attr->retry_cnt = cm_id_priv->retry_count;
				qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
				qp_attr->max_rd_atomic =
					cm_id_priv->initiator_depth;
			}
			if (cm_id_priv->alt_av.ah_attr.dlid) {
				*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
				qp_attr->path_mig_state = IB_MIG_REARM;
			}
		} else {
			*qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
			qp_attr->alt_timeout =
				cm_id_priv->alt_av.packet_life_time + 1;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
			qp_attr->path_mig_state = IB_MIG_REARM;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTR:
		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);
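
/*
 * Device registration: cm_add_one() registers a CM MAD agent on every
 * physical port of an IB device and advertises IB_PORT_CM_SUP in the
 * port capability mask; cm_remove_one() undoes both when the device
 * goes away.
 */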
static void cm_add_one(struct ib_device *device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_mad_reg_req reg_req = {
		.mgmt_class = IB_MGMT_CLASS_CM,
		.mgmt_class_version = IB_CM_CLASS_VERSION
	};
	struct ib_port_modify port_modify = {
		.set_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int ret;
	u8 i;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) *
			 device->phys_port_cnt, GFP_KERNEL);
	if (!cm_dev)
		return;

	cm_dev->device = device;
	cm_dev->ca_guid = device->node_guid;

	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
	for (i = 1; i <= device->phys_port_cnt; i++) {
		port = &cm_dev->port[i-1];
		port->cm_dev = cm_dev;
		port->port_num = i;
		port->mad_agent = ib_register_mad_agent(device, i,
							IB_QPT_GSI,
							&reg_req,
							0,
							cm_send_handler,
							cm_recv_handler,
							port);
		if (IS_ERR(port->mad_agent))
			goto error1;

		ret = ib_modify_port(device, i, 0, &port_modify);
		if (ret)
			goto error2;
	}
	ib_set_client_data(device, &cm_client, cm_dev);

	write_lock_irqsave(&cm.device_lock, flags);
	list_add_tail(&cm_dev->list, &cm.device_list);
	write_unlock_irqrestore(&cm.device_lock, flags);
	return;

error2:
	ib_unregister_mad_agent(port->mad_agent);
error1:
	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
	while (--i) {
		port = &cm_dev->port[i-1];
		ib_modify_port(device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
	}
	kfree(cm_dev);
}
static void cm_remove_one(struct ib_device *device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int i;

	cm_dev = ib_get_client_data(device, &cm_client);
	if (!cm_dev)
		return;

	write_lock_irqsave(&cm.device_lock, flags);
	list_del(&cm_dev->list);
	write_unlock_irqrestore(&cm.device_lock, flags);

	for (i = 1; i <= device->phys_port_cnt; i++) {
		port = &cm_dev->port[i-1];
		ib_modify_port(device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
	}
	kfree(cm_dev);
}
static int __init ib_cm_init(void)
{
	int ret;

	memset(&cm, 0, sizeof cm);
	INIT_LIST_HEAD(&cm.device_list);
	rwlock_init(&cm.device_lock);
	spin_lock_init(&cm.lock);
	cm.listen_service_table = RB_ROOT;
	cm.listen_service_id = __constant_be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
	cm.remote_id_table = RB_ROOT;
	cm.remote_qp_table = RB_ROOT;
	cm.remote_sidr_table = RB_ROOT;
	idr_init(&cm.local_id_table);
	get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
	idr_pre_get(&cm.local_id_table, GFP_KERNEL);
	INIT_LIST_HEAD(&cm.timewait_list);

	cm.wq = create_workqueue("ib_cm");
	if (!cm.wq)
		return -ENOMEM;

	ret = ib_register_client(&cm_client);
	if (ret)
		goto error;

	return 0;
error:
	destroy_workqueue(cm.wq);
	return ret;
}
static void __exit ib_cm_cleanup(void)
{
	struct cm_timewait_info *timewait_info, *tmp;

	spin_lock_irq(&cm.lock);
	list_for_each_entry(timewait_info, &cm.timewait_list, list)
		cancel_delayed_work(&timewait_info->work.work);
	spin_unlock_irq(&cm.lock);

	destroy_workqueue(cm.wq);

	list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
		list_del(&timewait_info->list);
		kfree(timewait_info);
	}

	ib_unregister_client(&cm_client);
	idr_destroy(&cm.local_id_table);
}

module_init(ib_cm_init);
module_exit(ib_cm_cleanup);