/*
 * Copyright (c) 2004-2006 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: cm.c 4311 2005-12-05 18:42:01Z sean.hefty $
 */
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include "cm_msgs.h"
MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");
static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device);

static struct ib_client cm_client = {
        .name   = "cm",
        .add    = cm_add_one,
        .remove = cm_remove_one
};

static struct ib_cm {
        spinlock_t lock;
        struct list_head device_list;
        rwlock_t device_lock;
        struct rb_root listen_service_table;
        u64 listen_service_id;
        /* struct rb_root peer_service_table; todo: fix peer to peer */
        struct rb_root remote_qp_table;
        struct rb_root remote_id_table;
        struct rb_root remote_sidr_table;
        struct idr local_id_table;
        __be32 random_id_operand;
        struct workqueue_struct *wq;
} cm;
struct cm_port {
        struct cm_device *cm_dev;
        struct ib_mad_agent *mad_agent;
        u8 port_num;
};

struct cm_device {
        struct list_head list;
        struct ib_device *device;
        __be64 ca_guid;
        struct cm_port port[0];
};

struct cm_av {
        struct cm_port *port;
        union ib_gid dgid;
        struct ib_ah_attr ah_attr;
        u16 pkey_index;
        u8 packet_life_time;
};
struct cm_work {
        struct work_struct work;
        struct list_head list;
        struct cm_port *port;
        struct ib_mad_recv_wc *mad_recv_wc;     /* Received MADs */
        __be32 local_id;                        /* Established / timewait */
        __be32 remote_id;
        struct ib_cm_event cm_event;
        struct ib_sa_path_rec path[0];
};
struct cm_timewait_info {
        struct cm_work work;                    /* Must be first. */
        struct rb_node remote_qp_node;
        struct rb_node remote_id_node;
        __be64 remote_ca_guid;
        __be32 remote_qpn;
        u8 inserted_remote_qp;
        u8 inserted_remote_id;
};
struct cm_id_private {
        struct ib_cm_id id;

        struct rb_node service_node;
        struct rb_node sidr_id_node;
        spinlock_t lock;        /* Do not acquire inside cm.lock */
        struct completion comp;
        atomic_t refcount;

        struct ib_mad_send_buf *msg;
        struct cm_timewait_info *timewait_info;
        /* todo: use alternate port on send failure */
        struct cm_av av;
        struct cm_av alt_av;
        struct ib_cm_compare_data *compare_data;

        void *private_data;
        __be64 tid;
        __be32 local_qpn;
        __be32 remote_qpn;
        enum ib_qp_type qp_type;
        __be32 sq_psn;
        __be32 rq_psn;
        int timeout_ms;
        enum ib_mtu path_mtu;
        u8 private_data_len;
        u8 max_cm_retries;
        u8 peer_to_peer;
        u8 responder_resources;
        u8 initiator_depth;
        u8 local_ack_timeout;
        u8 retry_count;
        u8 rnr_retry_count;
        u8 service_timeout;

        struct list_head work_list;
        atomic_t work_count;
};
static void cm_work_handler(void *data);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
        if (atomic_dec_and_test(&cm_id_priv->refcount))
                complete(&cm_id_priv->comp);
}
static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
                        struct ib_mad_send_buf **msg)
{
        struct ib_mad_agent *mad_agent;
        struct ib_mad_send_buf *m;
        struct ib_ah *ah;

        mad_agent = cm_id_priv->av.port->mad_agent;
        ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
        if (IS_ERR(ah))
                return PTR_ERR(ah);

        m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
                               cm_id_priv->av.pkey_index,
                               0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
                               GFP_ATOMIC);
        if (IS_ERR(m)) {
                ib_destroy_ah(ah);
                return PTR_ERR(m);
        }

        /* Timeout set by caller if response is expected. */
        m->ah = ah;
        m->retries = cm_id_priv->max_cm_retries;

        atomic_inc(&cm_id_priv->refcount);
        m->context[0] = cm_id_priv;
        *msg = m;
        return 0;
}
static int cm_alloc_response_msg(struct cm_port *port,
                                 struct ib_mad_recv_wc *mad_recv_wc,
                                 struct ib_mad_send_buf **msg)
{
        struct ib_mad_send_buf *m;
        struct ib_ah *ah;

        ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
                                  mad_recv_wc->recv_buf.grh, port->port_num);
        if (IS_ERR(ah))
                return PTR_ERR(ah);

        m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
                               0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
                               GFP_ATOMIC);
        if (IS_ERR(m)) {
                ib_destroy_ah(ah);
                return PTR_ERR(m);
        }
        m->ah = ah;
        *msg = m;
        return 0;
}
static void cm_free_msg(struct ib_mad_send_buf *msg)
{
        ib_destroy_ah(msg->ah);
        if (msg->context[0])
                cm_deref_id(msg->context[0]);
        ib_free_send_mad(msg);
}
static void * cm_copy_private_data(const void *private_data,
                                   u8 private_data_len)
{
        void *data;

        if (!private_data || !private_data_len)
                return NULL;

        data = kmalloc(private_data_len, GFP_KERNEL);
        if (!data)
                return ERR_PTR(-ENOMEM);

        memcpy(data, private_data, private_data_len);
        return data;
}
static void cm_set_private_data(struct cm_id_private *cm_id_priv,
                                void *private_data, u8 private_data_len)
{
        if (cm_id_priv->private_data && cm_id_priv->private_data_len)
                kfree(cm_id_priv->private_data);

        cm_id_priv->private_data = private_data;
        cm_id_priv->private_data_len = private_data_len;
}
static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
                                    struct ib_grh *grh, struct cm_av *av)
{
        av->port = port;
        av->pkey_index = wc->pkey_index;
        ib_init_ah_from_wc(port->cm_dev->device, port->port_num, wc,
                           grh, &av->ah_attr);
}
static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
{
        struct cm_device *cm_dev;
        struct cm_port *port = NULL;
        unsigned long flags;
        int ret;
        u8 p;

        read_lock_irqsave(&cm.device_lock, flags);
        list_for_each_entry(cm_dev, &cm.device_list, list) {
                if (!ib_find_cached_gid(cm_dev->device, &path->sgid,
                                        &p, NULL)) {
                        port = &cm_dev->port[p-1];
                        break;
                }
        }
        read_unlock_irqrestore(&cm.device_lock, flags);

        if (!port)
                return -EINVAL;

        ret = ib_find_cached_pkey(cm_dev->device, port->port_num,
                                  be16_to_cpu(path->pkey), &av->pkey_index);
        if (ret)
                return ret;

        av->port = port;
        ib_init_ah_from_path(cm_dev->device, port->port_num, path,
                             &av->ah_attr);
        av->packet_life_time = path->packet_life_time;
        return 0;
}
static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
        unsigned long flags;
        int ret, id;
        static int next_id;

        do {
                spin_lock_irqsave(&cm.lock, flags);
                ret = idr_get_new_above(&cm.local_id_table, cm_id_priv,
                                        next_id++, &id);
                spin_unlock_irqrestore(&cm.lock, flags);
        } while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) );

        cm_id_priv->id.local_id = (__force __be32) (id ^ cm.random_id_operand);
        return ret;
}
static void cm_free_id(__be32 local_id)
{
        unsigned long flags;

        spin_lock_irqsave(&cm.lock, flags);
        idr_remove(&cm.local_id_table,
                   (__force int) (local_id ^ cm.random_id_operand));
        spin_unlock_irqrestore(&cm.lock, flags);
}
static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
{
        struct cm_id_private *cm_id_priv;

        cm_id_priv = idr_find(&cm.local_id_table,
                              (__force int) (local_id ^ cm.random_id_operand));
        if (cm_id_priv) {
                if (cm_id_priv->id.remote_id == remote_id)
                        atomic_inc(&cm_id_priv->refcount);
                else
                        cm_id_priv = NULL;
        }

        return cm_id_priv;
}
static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
{
        struct cm_id_private *cm_id_priv;
        unsigned long flags;

        spin_lock_irqsave(&cm.lock, flags);
        cm_id_priv = cm_get_id(local_id, remote_id);
        spin_unlock_irqrestore(&cm.lock, flags);

        return cm_id_priv;
}
static void cm_mask_copy(u8 *dst, u8 *src, u8 *mask)
{
        int i;

        for (i = 0; i < IB_CM_COMPARE_SIZE / sizeof(unsigned long); i++)
                ((unsigned long *) dst)[i] = ((unsigned long *) src)[i] &
                                             ((unsigned long *) mask)[i];
}
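
/*
 * Illustration (not part of the original source): cm_mask_copy() keeps
 * only the bits selected by the mask.  For example, with src[0] = 0xAB
 * and mask[0] = 0x0F,
 *
 *      cm_mask_copy(dst, src, mask);   => dst[0] == 0x0B
 *
 * The compare helpers below apply each listener's mask to the peer's
 * data, so two listens collide only where their masked private data
 * bytes agree.
 */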
static int cm_compare_data(struct ib_cm_compare_data *src_data,
                           struct ib_cm_compare_data *dst_data)
{
        u8 src[IB_CM_COMPARE_SIZE];
        u8 dst[IB_CM_COMPARE_SIZE];

        if (!src_data || !dst_data)
                return 0;

        cm_mask_copy(src, src_data->data, dst_data->mask);
        cm_mask_copy(dst, dst_data->data, src_data->mask);
        return memcmp(src, dst, IB_CM_COMPARE_SIZE);
}
static int cm_compare_private_data(u8 *private_data,
                                   struct ib_cm_compare_data *dst_data)
{
        u8 src[IB_CM_COMPARE_SIZE];

        if (!dst_data)
                return 0;

        cm_mask_copy(src, private_data, dst_data->mask);
        return memcmp(src, dst_data->data, IB_CM_COMPARE_SIZE);
}
static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
{
        struct rb_node **link = &cm.listen_service_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_id_private *cur_cm_id_priv;
        __be64 service_id = cm_id_priv->id.service_id;
        __be64 service_mask = cm_id_priv->id.service_mask;
        int data_cmp;

        while (*link) {
                parent = *link;
                cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
                                          service_node);
                data_cmp = cm_compare_data(cm_id_priv->compare_data,
                                           cur_cm_id_priv->compare_data);
                if ((cur_cm_id_priv->id.service_mask & service_id) ==
                    (service_mask & cur_cm_id_priv->id.service_id) &&
                    (cm_id_priv->id.device == cur_cm_id_priv->id.device) &&
                    !data_cmp)
                        return cur_cm_id_priv;

                if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
                        link = &(*link)->rb_left;
                else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
                        link = &(*link)->rb_right;
                else if (service_id < cur_cm_id_priv->id.service_id)
                        link = &(*link)->rb_left;
                else if (service_id > cur_cm_id_priv->id.service_id)
                        link = &(*link)->rb_right;
                else if (data_cmp < 0)
                        link = &(*link)->rb_left;
                else
                        link = &(*link)->rb_right;
        }
        rb_link_node(&cm_id_priv->service_node, parent, link);
        rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
        return NULL;
}
static struct cm_id_private * cm_find_listen(struct ib_device *device,
                                             __be64 service_id,
                                             u8 *private_data)
{
        struct rb_node *node = cm.listen_service_table.rb_node;
        struct cm_id_private *cm_id_priv;
        int data_cmp;

        while (node) {
                cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
                data_cmp = cm_compare_private_data(private_data,
                                                   cm_id_priv->compare_data);
                if ((cm_id_priv->id.service_mask & service_id) ==
                     cm_id_priv->id.service_id &&
                    (cm_id_priv->id.device == device) && !data_cmp)
                        return cm_id_priv;

                if (device < cm_id_priv->id.device)
                        node = node->rb_left;
                else if (device > cm_id_priv->id.device)
                        node = node->rb_right;
                else if (service_id < cm_id_priv->id.service_id)
                        node = node->rb_left;
                else if (service_id > cm_id_priv->id.service_id)
                        node = node->rb_right;
                else if (data_cmp < 0)
                        node = node->rb_left;
                else
                        node = node->rb_right;
        }
        return NULL;
}
static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
                                                     *timewait_info)
{
        struct rb_node **link = &cm.remote_id_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_timewait_info *cur_timewait_info;
        __be64 remote_ca_guid = timewait_info->remote_ca_guid;
        __be32 remote_id = timewait_info->work.remote_id;

        while (*link) {
                parent = *link;
                cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
                                             remote_id_node);
                if (remote_id < cur_timewait_info->work.remote_id)
                        link = &(*link)->rb_left;
                else if (remote_id > cur_timewait_info->work.remote_id)
                        link = &(*link)->rb_right;
                else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
                        link = &(*link)->rb_left;
                else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
                        link = &(*link)->rb_right;
                else
                        return cur_timewait_info;
        }
        timewait_info->inserted_remote_id = 1;
        rb_link_node(&timewait_info->remote_id_node, parent, link);
        rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
        return NULL;
}
static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
                                                   __be32 remote_id)
{
        struct rb_node *node = cm.remote_id_table.rb_node;
        struct cm_timewait_info *timewait_info;

        while (node) {
                timewait_info = rb_entry(node, struct cm_timewait_info,
                                         remote_id_node);
                if (remote_id < timewait_info->work.remote_id)
                        node = node->rb_left;
                else if (remote_id > timewait_info->work.remote_id)
                        node = node->rb_right;
                else if (remote_ca_guid < timewait_info->remote_ca_guid)
                        node = node->rb_left;
                else if (remote_ca_guid > timewait_info->remote_ca_guid)
                        node = node->rb_right;
                else
                        return timewait_info;
        }
        return NULL;
}
static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
                                                      *timewait_info)
{
        struct rb_node **link = &cm.remote_qp_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_timewait_info *cur_timewait_info;
        __be64 remote_ca_guid = timewait_info->remote_ca_guid;
        __be32 remote_qpn = timewait_info->remote_qpn;

        while (*link) {
                parent = *link;
                cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
                                             remote_qp_node);
                if (remote_qpn < cur_timewait_info->remote_qpn)
                        link = &(*link)->rb_left;
                else if (remote_qpn > cur_timewait_info->remote_qpn)
                        link = &(*link)->rb_right;
                else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
                        link = &(*link)->rb_left;
                else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
                        link = &(*link)->rb_right;
                else
                        return cur_timewait_info;
        }
        timewait_info->inserted_remote_qp = 1;
        rb_link_node(&timewait_info->remote_qp_node, parent, link);
        rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
        return NULL;
}
static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
                                                    *cm_id_priv)
{
        struct rb_node **link = &cm.remote_sidr_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_id_private *cur_cm_id_priv;
        union ib_gid *port_gid = &cm_id_priv->av.dgid;
        __be32 remote_id = cm_id_priv->id.remote_id;

        while (*link) {
                parent = *link;
                cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
                                          sidr_id_node);
                if (remote_id < cur_cm_id_priv->id.remote_id)
                        link = &(*link)->rb_left;
                else if (remote_id > cur_cm_id_priv->id.remote_id)
                        link = &(*link)->rb_right;
                else {
                        int cmp;
                        cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
                                     sizeof *port_gid);
                        if (cmp < 0)
                                link = &(*link)->rb_left;
                        else if (cmp > 0)
                                link = &(*link)->rb_right;
                        else
                                return cur_cm_id_priv;
                }
        }
        rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
        rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
        return NULL;
}
static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
                               enum ib_cm_sidr_status status)
{
        struct ib_cm_sidr_rep_param param;

        memset(&param, 0, sizeof param);
        param.status = status;
        ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
}
struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
                                 ib_cm_handler cm_handler,
                                 void *context)
{
        struct cm_id_private *cm_id_priv;
        int ret;

        cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
        if (!cm_id_priv)
                return ERR_PTR(-ENOMEM);

        cm_id_priv->id.state = IB_CM_IDLE;
        cm_id_priv->id.device = device;
        cm_id_priv->id.cm_handler = cm_handler;
        cm_id_priv->id.context = context;
        cm_id_priv->id.remote_cm_qpn = 1;
        ret = cm_alloc_id(cm_id_priv);
        if (ret)
                goto error;

        spin_lock_init(&cm_id_priv->lock);
        init_completion(&cm_id_priv->comp);
        INIT_LIST_HEAD(&cm_id_priv->work_list);
        atomic_set(&cm_id_priv->work_count, -1);
        atomic_set(&cm_id_priv->refcount, 1);
        return &cm_id_priv->id;

error:
        kfree(cm_id_priv);
        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_cm_id);
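
/*
 * Usage sketch (illustrative, not part of the original file): a consumer
 * allocates a cm_id bound to a handler, uses it to listen or connect,
 * and releases it with ib_destroy_cm_id().  my_cm_handler and ctx below
 * are assumptions for the example.
 *
 *      static int my_cm_handler(struct ib_cm_id *id,
 *                               struct ib_cm_event *event)
 *      {
 *              return 0;       // non-zero return destroys the cm_id
 *      }
 *
 *      struct ib_cm_id *id = ib_create_cm_id(device, my_cm_handler, ctx);
 *      if (IS_ERR(id))
 *              return PTR_ERR(id);
 *      ...
 *      ib_destroy_cm_id(id);
 */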
static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
        struct cm_work *work;

        if (list_empty(&cm_id_priv->work_list))
                return NULL;

        work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
        list_del(&work->list);
        return work;
}
static void cm_free_work(struct cm_work *work)
{
        if (work->mad_recv_wc)
                ib_free_recv_mad(work->mad_recv_wc);
        kfree(work);
}
static inline int cm_convert_to_ms(int iba_time)
{
        /* approximate conversion to ms from 4.096us x 2^iba_time */
        return 1 << max(iba_time - 8, 0);
}
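
/*
 * Worked example (illustrative): IBA encodes times as 4.096us * 2^t.
 * For t = 14 the exact value is 4.096us * 2^14 ~= 67.1ms, while the
 * approximation above yields 1 << (14 - 8) = 64ms.  Since 4.096us is
 * 2^12 ns and 1ms ~= 2^20 ns, dropping 8 from the exponent converts
 * the units to milliseconds with only a few percent of error.
 */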
static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
{
        unsigned long flags;

        if (!timewait_info->inserted_remote_id &&
            !timewait_info->inserted_remote_qp)
                return;

        spin_lock_irqsave(&cm.lock, flags);
        if (timewait_info->inserted_remote_id) {
                rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
                timewait_info->inserted_remote_id = 0;
        }

        if (timewait_info->inserted_remote_qp) {
                rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
                timewait_info->inserted_remote_qp = 0;
        }
        spin_unlock_irqrestore(&cm.lock, flags);
}
static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
{
        struct cm_timewait_info *timewait_info;

        timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
        if (!timewait_info)
                return ERR_PTR(-ENOMEM);

        timewait_info->work.local_id = local_id;
        INIT_WORK(&timewait_info->work.work, cm_work_handler,
                  &timewait_info->work);
        timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
        return timewait_info;
}
static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
        int wait_time;

        cm_cleanup_timewait(cm_id_priv->timewait_info);

        /*
         * The cm_id could be destroyed by the user before we exit timewait.
         * To protect against this, we search for the cm_id after exiting
         * timewait before notifying the user that we've exited timewait.
         */
        cm_id_priv->id.state = IB_CM_TIMEWAIT;
        wait_time = cm_convert_to_ms(cm_id_priv->local_ack_timeout);
        queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
                           msecs_to_jiffies(wait_time));
        cm_id_priv->timewait_info = NULL;
}
static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
        cm_id_priv->id.state = IB_CM_IDLE;
        if (cm_id_priv->timewait_info) {
                cm_cleanup_timewait(cm_id_priv->timewait_info);
                kfree(cm_id_priv->timewait_info);
                cm_id_priv->timewait_info = NULL;
        }
}
static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
        struct cm_id_private *cm_id_priv;
        struct cm_work *work;
        unsigned long flags;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
retest:
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id->state) {
        case IB_CM_LISTEN:
                cm_id->state = IB_CM_IDLE;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                spin_lock_irqsave(&cm.lock, flags);
                rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
                spin_unlock_irqrestore(&cm.lock, flags);
                break;
        case IB_CM_SIDR_REQ_SENT:
                cm_id->state = IB_CM_IDLE;
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                break;
        case IB_CM_SIDR_REQ_RCVD:
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
                break;
        case IB_CM_REQ_SENT:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
                               &cm_id_priv->av.port->cm_dev->ca_guid,
                               sizeof cm_id_priv->av.port->cm_dev->ca_guid,
                               NULL, 0);
                break;
        case IB_CM_REQ_RCVD:
                if (err == -ENOMEM) {
                        /* Do not reject to allow future retries. */
                        cm_reset_to_idle(cm_id_priv);
                        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                } else {
                        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                        ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
                                       NULL, 0, NULL, 0);
                }
                break;
        case IB_CM_MRA_REQ_RCVD:
        case IB_CM_REP_SENT:
        case IB_CM_MRA_REP_RCVD:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                /* Fall through */
        case IB_CM_MRA_REQ_SENT:
        case IB_CM_REP_RCVD:
        case IB_CM_MRA_REP_SENT:
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
                               NULL, 0, NULL, 0);
                break;
        case IB_CM_ESTABLISHED:
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ib_send_cm_dreq(cm_id, NULL, 0);
                goto retest;
        case IB_CM_DREQ_SENT:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                cm_enter_timewait(cm_id_priv);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                break;
        case IB_CM_DREQ_RCVD:
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ib_send_cm_drep(cm_id, NULL, 0);
                break;
        default:
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                break;
        }

        cm_free_id(cm_id->local_id);
        cm_deref_id(cm_id_priv);
        wait_for_completion(&cm_id_priv->comp);
        while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
                cm_free_work(work);
        kfree(cm_id_priv->compare_data);
        kfree(cm_id_priv->private_data);
        kfree(cm_id_priv);
}

void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
        cm_destroy_id(cm_id, 0);
}
EXPORT_SYMBOL(ib_destroy_cm_id);
int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
                 struct ib_cm_compare_data *compare_data)
{
        struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
        unsigned long flags;
        int ret = 0;

        service_mask = service_mask ? service_mask :
                       __constant_cpu_to_be64(~0ULL);
        service_id &= service_mask;
        if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
            (service_id != IB_CM_ASSIGN_SERVICE_ID))
                return -EINVAL;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        if (cm_id->state != IB_CM_IDLE)
                return -EINVAL;

        if (compare_data) {
                cm_id_priv->compare_data = kzalloc(sizeof *compare_data,
                                                   GFP_KERNEL);
                if (!cm_id_priv->compare_data)
                        return -ENOMEM;
                cm_mask_copy(cm_id_priv->compare_data->data,
                             compare_data->data, compare_data->mask);
                memcpy(cm_id_priv->compare_data->mask, compare_data->mask,
                       IB_CM_COMPARE_SIZE);
        }

        cm_id->state = IB_CM_LISTEN;

        spin_lock_irqsave(&cm.lock, flags);
        if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
                cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
                cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
        } else {
                cm_id->service_id = service_id;
                cm_id->service_mask = service_mask;
        }
        cur_cm_id_priv = cm_insert_listen(cm_id_priv);
        spin_unlock_irqrestore(&cm.lock, flags);

        if (cur_cm_id_priv) {
                cm_id->state = IB_CM_IDLE;
                kfree(cm_id_priv->compare_data);
                cm_id_priv->compare_data = NULL;
                ret = -EBUSY;
        }
        return ret;
}
EXPORT_SYMBOL(ib_cm_listen);
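
/*
 * Usage sketch (illustrative, not from the original source): a passive
 * side typically listens on a fixed service ID; MY_SERVICE_ID below is
 * a made-up constant.
 *
 *      id = ib_create_cm_id(device, my_cm_handler, ctx);
 *      ret = ib_cm_listen(id, MY_SERVICE_ID, 0, NULL);
 *
 * A zero service_mask matches the service ID exactly, while passing
 * IB_CM_ASSIGN_SERVICE_ID asks the CM to pick an unused ID instead.
 */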
static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
                          enum cm_msg_sequence msg_seq)
{
        u64 hi_tid, low_tid;

        hi_tid  = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
        low_tid = (u64) ((__force u32)cm_id_priv->id.local_id |
                         (msg_seq << 30));
        return cpu_to_be64(hi_tid | low_tid);
}
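
/*
 * Layout note with a worked example (illustrative): the 64-bit TID is
 * the MAD agent's hi_tid in the upper 32 bits and the local comm ID in
 * the lower bits, with the 2-bit message sequence at bits 30-31.  For
 * instance, assuming CM_MSG_SEQUENCE_DREQ == 2, hi_tid = 0x5 and
 * local_id = 0x1234 produce a TID of 0x0000000580001234.
 */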
static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
                              __be16 attr_id, __be64 tid)
{
        hdr->base_version  = IB_MGMT_BASE_VERSION;
        hdr->mgmt_class    = IB_MGMT_CLASS_CM;
        hdr->class_version = IB_CM_CLASS_VERSION;
        hdr->method        = IB_MGMT_METHOD_SEND;
        hdr->attr_id       = attr_id;
        hdr->tid           = tid;
}
static void cm_format_req(struct cm_req_msg *req_msg,
                          struct cm_id_private *cm_id_priv,
                          struct ib_cm_req_param *param)
{
        cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
                          cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));

        req_msg->local_comm_id = cm_id_priv->id.local_id;
        req_msg->service_id = param->service_id;
        req_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
        cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
        cm_req_set_resp_res(req_msg, param->responder_resources);
        cm_req_set_init_depth(req_msg, param->initiator_depth);
        cm_req_set_remote_resp_timeout(req_msg,
                                       param->remote_cm_response_timeout);
        cm_req_set_qp_type(req_msg, param->qp_type);
        cm_req_set_flow_ctrl(req_msg, param->flow_control);
        cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
        cm_req_set_local_resp_timeout(req_msg,
                                      param->local_cm_response_timeout);
        cm_req_set_retry_count(req_msg, param->retry_count);
        req_msg->pkey = param->primary_path->pkey;
        cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
        cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
        cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
        cm_req_set_srq(req_msg, param->srq);

        req_msg->primary_local_lid = param->primary_path->slid;
        req_msg->primary_remote_lid = param->primary_path->dlid;
        req_msg->primary_local_gid = param->primary_path->sgid;
        req_msg->primary_remote_gid = param->primary_path->dgid;
        cm_req_set_primary_flow_label(req_msg, param->primary_path->flow_label);
        cm_req_set_primary_packet_rate(req_msg, param->primary_path->rate);
        req_msg->primary_traffic_class = param->primary_path->traffic_class;
        req_msg->primary_hop_limit = param->primary_path->hop_limit;
        cm_req_set_primary_sl(req_msg, param->primary_path->sl);
        cm_req_set_primary_subnet_local(req_msg, 1); /* local only... */
        cm_req_set_primary_local_ack_timeout(req_msg,
                min(31, param->primary_path->packet_life_time + 1));

        if (param->alternate_path) {
                req_msg->alt_local_lid = param->alternate_path->slid;
                req_msg->alt_remote_lid = param->alternate_path->dlid;
                req_msg->alt_local_gid = param->alternate_path->sgid;
                req_msg->alt_remote_gid = param->alternate_path->dgid;
                cm_req_set_alt_flow_label(req_msg,
                                          param->alternate_path->flow_label);
                cm_req_set_alt_packet_rate(req_msg, param->alternate_path->rate);
                req_msg->alt_traffic_class = param->alternate_path->traffic_class;
                req_msg->alt_hop_limit = param->alternate_path->hop_limit;
                cm_req_set_alt_sl(req_msg, param->alternate_path->sl);
                cm_req_set_alt_subnet_local(req_msg, 1); /* local only... */
                cm_req_set_alt_local_ack_timeout(req_msg,
                        min(31, param->alternate_path->packet_life_time + 1));
        }

        if (param->private_data && param->private_data_len)
                memcpy(req_msg->private_data, param->private_data,
                       param->private_data_len);
}
static int cm_validate_req_param(struct ib_cm_req_param *param)
{
        /* peer-to-peer not supported */
        if (param->peer_to_peer)
                return -EINVAL;

        if (!param->primary_path)
                return -EINVAL;

        if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC)
                return -EINVAL;

        if (param->private_data &&
            param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
                return -EINVAL;

        if (param->alternate_path &&
            (param->alternate_path->pkey != param->primary_path->pkey ||
             param->alternate_path->mtu != param->primary_path->mtu))
                return -EINVAL;

        return 0;
}
int ib_send_cm_req(struct ib_cm_id *cm_id,
                   struct ib_cm_req_param *param)
{
        struct cm_id_private *cm_id_priv;
        struct cm_req_msg *req_msg;
        unsigned long flags;
        int ret;

        ret = cm_validate_req_param(param);
        if (ret)
                return ret;

        /* Verify that we're not in timewait. */
        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_IDLE) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ret = -EINVAL;
                goto out;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
                                                            id.local_id);
        if (IS_ERR(cm_id_priv->timewait_info)) {
                ret = PTR_ERR(cm_id_priv->timewait_info);
                goto out;
        }

        ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
        if (ret)
                goto error1;
        if (param->alternate_path) {
                ret = cm_init_av_by_path(param->alternate_path,
                                         &cm_id_priv->alt_av);
                if (ret)
                        goto error1;
        }
        cm_id->service_id = param->service_id;
        cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
        cm_id_priv->timeout_ms = cm_convert_to_ms(
                                    param->primary_path->packet_life_time) * 2 +
                                 cm_convert_to_ms(
                                    param->remote_cm_response_timeout);
        cm_id_priv->max_cm_retries = param->max_cm_retries;
        cm_id_priv->initiator_depth = param->initiator_depth;
        cm_id_priv->responder_resources = param->responder_resources;
        cm_id_priv->retry_count = param->retry_count;
        cm_id_priv->path_mtu = param->primary_path->mtu;
        cm_id_priv->qp_type = param->qp_type;

        ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
        if (ret)
                goto error1;

        req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
        cm_format_req(req_msg, cm_id_priv, param);
        cm_id_priv->tid = req_msg->hdr.tid;
        cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
        cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

        cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
        cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
        cm_id_priv->local_ack_timeout =
                                cm_req_get_primary_local_ack_timeout(req_msg);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        ret = ib_post_send_mad(cm_id_priv->msg, NULL);
        if (ret) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                goto error2;
        }
        BUG_ON(cm_id->state != IB_CM_IDLE);
        cm_id->state = IB_CM_REQ_SENT;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return 0;

error2: cm_free_msg(cm_id_priv->msg);
error1: kfree(cm_id_priv->timewait_info);
out:    return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);
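
/*
 * Usage sketch (illustrative; the field values are assumptions): the
 * active side fills an ib_cm_req_param with a resolved path record and
 * its QP information, then posts the REQ.  An IB_CM_REP_RECEIVED (or
 * REJ/timeout) event is later delivered to the cm_id's handler.
 *
 *      struct ib_cm_req_param req = {
 *              .primary_path               = &path_rec,
 *              .service_id                 = MY_SERVICE_ID,
 *              .qp_num                     = my_qp->qp_num,
 *              .qp_type                    = IB_QPT_RC,
 *              .responder_resources        = 4,
 *              .initiator_depth            = 4,
 *              .retry_count                = 7,
 *              .rnr_retry_count            = 7,
 *              .max_cm_retries             = 15,
 *              .remote_cm_response_timeout = 20,
 *              .local_cm_response_timeout  = 20,
 *      };
 *      ret = ib_send_cm_req(id, &req);
 */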
static int cm_issue_rej(struct cm_port *port,
                        struct ib_mad_recv_wc *mad_recv_wc,
                        enum ib_cm_rej_reason reason,
                        enum cm_msg_response msg_rejected,
                        void *ari, u8 ari_length)
{
        struct ib_mad_send_buf *msg = NULL;
        struct cm_rej_msg *rej_msg, *rcv_msg;
        int ret;

        ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
        if (ret)
                return ret;

        /* We just need common CM header information.  Cast to any message. */
        rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
        rej_msg = (struct cm_rej_msg *) msg->mad;

        cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
        rej_msg->remote_comm_id = rcv_msg->local_comm_id;
        rej_msg->local_comm_id = rcv_msg->remote_comm_id;
        cm_rej_set_msg_rejected(rej_msg, msg_rejected);
        rej_msg->reason = cpu_to_be16(reason);

        if (ari && ari_length) {
                cm_rej_set_reject_info_len(rej_msg, ari_length);
                memcpy(rej_msg->ari, ari, ari_length);
        }

        ret = ib_post_send_mad(msg, NULL);
        if (ret)
                cm_free_msg(msg);

        return ret;
}
static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
                                    __be32 local_qpn, __be32 remote_qpn)
{
        return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
                ((local_ca_guid == remote_ca_guid) &&
                 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
}
static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
                                     struct ib_sa_path_rec *primary_path,
                                     struct ib_sa_path_rec *alt_path)
{
        memset(primary_path, 0, sizeof *primary_path);
        primary_path->dgid = req_msg->primary_local_gid;
        primary_path->sgid = req_msg->primary_remote_gid;
        primary_path->dlid = req_msg->primary_local_lid;
        primary_path->slid = req_msg->primary_remote_lid;
        primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
        primary_path->hop_limit = req_msg->primary_hop_limit;
        primary_path->traffic_class = req_msg->primary_traffic_class;
        primary_path->reversible = 1;
        primary_path->pkey = req_msg->pkey;
        primary_path->sl = cm_req_get_primary_sl(req_msg);
        primary_path->mtu_selector = IB_SA_EQ;
        primary_path->mtu = cm_req_get_path_mtu(req_msg);
        primary_path->rate_selector = IB_SA_EQ;
        primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
        primary_path->packet_life_time_selector = IB_SA_EQ;
        primary_path->packet_life_time =
                cm_req_get_primary_local_ack_timeout(req_msg);
        primary_path->packet_life_time -= (primary_path->packet_life_time > 0);

        if (req_msg->alt_local_lid) {
                memset(alt_path, 0, sizeof *alt_path);
                alt_path->dgid = req_msg->alt_local_gid;
                alt_path->sgid = req_msg->alt_remote_gid;
                alt_path->dlid = req_msg->alt_local_lid;
                alt_path->slid = req_msg->alt_remote_lid;
                alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
                alt_path->hop_limit = req_msg->alt_hop_limit;
                alt_path->traffic_class = req_msg->alt_traffic_class;
                alt_path->reversible = 1;
                alt_path->pkey = req_msg->pkey;
                alt_path->sl = cm_req_get_alt_sl(req_msg);
                alt_path->mtu_selector = IB_SA_EQ;
                alt_path->mtu = cm_req_get_path_mtu(req_msg);
                alt_path->rate_selector = IB_SA_EQ;
                alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
                alt_path->packet_life_time_selector = IB_SA_EQ;
                alt_path->packet_life_time =
                        cm_req_get_alt_local_ack_timeout(req_msg);
                alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
        }
}
static void cm_format_req_event(struct cm_work *work,
                                struct cm_id_private *cm_id_priv,
                                struct ib_cm_id *listen_id)
{
        struct cm_req_msg *req_msg;
        struct ib_cm_req_event_param *param;

        req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
        param = &work->cm_event.param.req_rcvd;
        param->listen_id = listen_id;
        param->port = cm_id_priv->av.port->port_num;
        param->primary_path = &work->path[0];
        if (req_msg->alt_local_lid)
                param->alternate_path = &work->path[1];
        else
                param->alternate_path = NULL;
        param->remote_ca_guid = req_msg->local_ca_guid;
        param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
        param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
        param->qp_type = cm_req_get_qp_type(req_msg);
        param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
        param->responder_resources = cm_req_get_init_depth(req_msg);
        param->initiator_depth = cm_req_get_resp_res(req_msg);
        param->local_cm_response_timeout =
                cm_req_get_remote_resp_timeout(req_msg);
        param->flow_control = cm_req_get_flow_ctrl(req_msg);
        param->remote_cm_response_timeout =
                cm_req_get_local_resp_timeout(req_msg);
        param->retry_count = cm_req_get_retry_count(req_msg);
        param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
        param->srq = cm_req_get_srq(req_msg);
        work->cm_event.private_data = &req_msg->private_data;
}
static void cm_process_work(struct cm_id_private *cm_id_priv,
                            struct cm_work *work)
{
        unsigned long flags;
        int ret;

        /* We will typically only have the current event to report. */
        ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
        cm_free_work(work);

        while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                work = cm_dequeue_work(cm_id_priv);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                BUG_ON(!work);
                ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
                                                &work->cm_event);
                cm_free_work(work);
        }
        cm_deref_id(cm_id_priv);
        if (ret)
                cm_destroy_id(&cm_id_priv->id, ret);
}
static void cm_format_mra(struct cm_mra_msg *mra_msg,
                          struct cm_id_private *cm_id_priv,
                          enum cm_msg_response msg_mraed, u8 service_timeout,
                          const void *private_data, u8 private_data_len)
{
        cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
        cm_mra_set_msg_mraed(mra_msg, msg_mraed);
        mra_msg->local_comm_id = cm_id_priv->id.local_id;
        mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
        cm_mra_set_service_timeout(mra_msg, service_timeout);

        if (private_data && private_data_len)
                memcpy(mra_msg->private_data, private_data, private_data_len);
}
static void cm_format_rej(struct cm_rej_msg *rej_msg,
                          struct cm_id_private *cm_id_priv,
                          enum ib_cm_rej_reason reason,
                          void *ari,
                          u8 ari_length,
                          const void *private_data,
                          u8 private_data_len)
{
        cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
        rej_msg->remote_comm_id = cm_id_priv->id.remote_id;

        switch(cm_id_priv->id.state) {
        case IB_CM_REQ_RCVD:
                rej_msg->local_comm_id = 0;
                cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
                break;
        case IB_CM_MRA_REQ_SENT:
                rej_msg->local_comm_id = cm_id_priv->id.local_id;
                cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
                break;
        case IB_CM_REP_RCVD:
        case IB_CM_MRA_REP_SENT:
                rej_msg->local_comm_id = cm_id_priv->id.local_id;
                cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
                break;
        default:
                rej_msg->local_comm_id = cm_id_priv->id.local_id;
                cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
                break;
        }

        rej_msg->reason = cpu_to_be16(reason);
        if (ari && ari_length) {
                cm_rej_set_reject_info_len(rej_msg, ari_length);
                memcpy(rej_msg->ari, ari, ari_length);
        }

        if (private_data && private_data_len)
                memcpy(rej_msg->private_data, private_data, private_data_len);
}
static void cm_dup_req_handler(struct cm_work *work,
                               struct cm_id_private *cm_id_priv)
{
        struct ib_mad_send_buf *msg = NULL;
        unsigned long flags;
        int ret;

        /* Quick state check to discard duplicate REQs. */
        if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
                return;

        ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
        if (ret)
                return;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->id.state) {
        case IB_CM_MRA_REQ_SENT:
                cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
                              CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
                              cm_id_priv->private_data,
                              cm_id_priv->private_data_len);
                break;
        case IB_CM_TIMEWAIT:
                cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
                              IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
                break;
        default:
                goto unlock;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        ret = ib_post_send_mad(msg, NULL);
        if (ret)
                goto free;
        return;

unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:   cm_free_msg(msg);
}
static struct cm_id_private * cm_match_req(struct cm_work *work,
                                           struct cm_id_private *cm_id_priv)
{
        struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
        struct cm_timewait_info *timewait_info;
        struct cm_req_msg *req_msg;
        unsigned long flags;

        req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

        /* Check for duplicate REQ and stale connections. */
        spin_lock_irqsave(&cm.lock, flags);
        timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
        if (!timewait_info)
                timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);

        if (timewait_info) {
                cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
                                           timewait_info->work.remote_id);
                spin_unlock_irqrestore(&cm.lock, flags);
                if (cur_cm_id_priv) {
                        cm_dup_req_handler(work, cur_cm_id_priv);
                        cm_deref_id(cur_cm_id_priv);
                } else
                        cm_issue_rej(work->port, work->mad_recv_wc,
                                     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
                                     NULL, 0);
                goto error;
        }

        /* Find matching listen request. */
        listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
                                           req_msg->service_id,
                                           req_msg->private_data);
        if (!listen_cm_id_priv) {
                spin_unlock_irqrestore(&cm.lock, flags);
                cm_issue_rej(work->port, work->mad_recv_wc,
                             IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
                             NULL, 0);
                goto error;
        }
        atomic_inc(&listen_cm_id_priv->refcount);
        atomic_inc(&cm_id_priv->refcount);
        cm_id_priv->id.state = IB_CM_REQ_RCVD;
        atomic_inc(&cm_id_priv->work_count);
        spin_unlock_irqrestore(&cm.lock, flags);
        return listen_cm_id_priv;

error:  cm_cleanup_timewait(cm_id_priv->timewait_info);
        return NULL;
}
static int cm_req_handler(struct cm_work *work)
{
        struct ib_cm_id *cm_id;
        struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
        struct cm_req_msg *req_msg;
        int ret;

        req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

        cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
        if (IS_ERR(cm_id))
                return PTR_ERR(cm_id);

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        cm_id_priv->id.remote_id = req_msg->local_comm_id;
        cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
                                work->mad_recv_wc->recv_buf.grh,
                                &cm_id_priv->av);
        cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
                                                            id.local_id);
        if (IS_ERR(cm_id_priv->timewait_info)) {
                ret = PTR_ERR(cm_id_priv->timewait_info);
                goto destroy;
        }
        cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
        cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
        cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);

        listen_cm_id_priv = cm_match_req(work, cm_id_priv);
        if (!listen_cm_id_priv) {
                ret = -EINVAL;
                kfree(cm_id_priv->timewait_info);
                goto destroy;
        }

        cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
        cm_id_priv->id.context = listen_cm_id_priv->id.context;
        cm_id_priv->id.service_id = req_msg->service_id;
        cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);

        cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
        ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
        if (ret) {
                ib_get_cached_gid(work->port->cm_dev->device,
                                  work->port->port_num, 0, &work->path[0].sgid);
                ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
                               &work->path[0].sgid, sizeof work->path[0].sgid,
                               NULL, 0);
                goto rejected;
        }
        if (req_msg->alt_local_lid) {
                ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
                if (ret) {
                        ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
                                       &work->path[0].sgid,
                                       sizeof work->path[0].sgid, NULL, 0);
                        goto rejected;
                }
        }
        cm_id_priv->tid = req_msg->hdr.tid;
        cm_id_priv->timeout_ms = cm_convert_to_ms(
                                        cm_req_get_local_resp_timeout(req_msg));
        cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
        cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
        cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
        cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
        cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
        cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
        cm_id_priv->local_ack_timeout =
                                cm_req_get_primary_local_ack_timeout(req_msg);
        cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
        cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
        cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);

        cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
        cm_process_work(cm_id_priv, work);
        cm_deref_id(listen_cm_id_priv);
        return 0;

rejected:
        atomic_dec(&cm_id_priv->refcount);
        cm_deref_id(listen_cm_id_priv);
destroy:
        ib_destroy_cm_id(cm_id);
        return ret;
}
static void cm_format_rep(struct cm_rep_msg *rep_msg,
                          struct cm_id_private *cm_id_priv,
                          struct ib_cm_rep_param *param)
{
        cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
        rep_msg->local_comm_id = cm_id_priv->id.local_id;
        rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
        cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
        cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
        rep_msg->resp_resources = param->responder_resources;
        rep_msg->initiator_depth = param->initiator_depth;
        cm_rep_set_target_ack_delay(rep_msg, param->target_ack_delay);
        cm_rep_set_failover(rep_msg, param->failover_accepted);
        cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
        cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
        cm_rep_set_srq(rep_msg, param->srq);
        rep_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;

        if (param->private_data && param->private_data_len)
                memcpy(rep_msg->private_data, param->private_data,
                       param->private_data_len);
}
int ib_send_cm_rep(struct ib_cm_id *cm_id,
                   struct ib_cm_rep_param *param)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        struct cm_rep_msg *rep_msg;
        unsigned long flags;
        int ret;

        if (param->private_data &&
            param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
                return -EINVAL;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_REQ_RCVD &&
            cm_id->state != IB_CM_MRA_REQ_SENT) {
                ret = -EINVAL;
                goto out;
        }

        ret = cm_alloc_msg(cm_id_priv, &msg);
        if (ret)
                goto out;

        rep_msg = (struct cm_rep_msg *) msg->mad;
        cm_format_rep(rep_msg, cm_id_priv, param);
        msg->timeout_ms = cm_id_priv->timeout_ms;
        msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;

        ret = ib_post_send_mad(msg, NULL);
        if (ret) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_free_msg(msg);
                return ret;
        }

        cm_id->state = IB_CM_REP_SENT;
        cm_id_priv->msg = msg;
        cm_id_priv->initiator_depth = param->initiator_depth;
        cm_id_priv->responder_resources = param->responder_resources;
        cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
        cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg);

out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_rep);
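
/*
 * Usage sketch (illustrative; values are assumptions): on the passive
 * side, a REQ arrives as an IB_CM_REQ_RECEIVED event on a newly created
 * cm_id; after transitioning its QP, the consumer accepts with a REP.
 *
 *      struct ib_cm_rep_param rep = {
 *              .qp_num              = my_qp->qp_num,
 *              .starting_psn        = my_psn,
 *              .responder_resources = event->param.req_rcvd.responder_resources,
 *              .initiator_depth     = event->param.req_rcvd.initiator_depth,
 *              .rnr_retry_count     = 7,
 *      };
 *      ret = ib_send_cm_rep(cm_id, &rep);
 *
 * The active side then completes the handshake with ib_send_cm_rtu()
 * when it receives IB_CM_REP_RECEIVED.
 */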
static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
                          struct cm_id_private *cm_id_priv,
                          const void *private_data,
                          u8 private_data_len)
{
        cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
        rtu_msg->local_comm_id = cm_id_priv->id.local_id;
        rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;

        if (private_data && private_data_len)
                memcpy(rtu_msg->private_data, private_data, private_data_len);
}
int ib_send_cm_rtu(struct ib_cm_id *cm_id,
                   const void *private_data,
                   u8 private_data_len)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        unsigned long flags;
        void *data;
        int ret;

        if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
                return -EINVAL;

        data = cm_copy_private_data(private_data, private_data_len);
        if (IS_ERR(data))
                return PTR_ERR(data);

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_REP_RCVD &&
            cm_id->state != IB_CM_MRA_REP_SENT) {
                ret = -EINVAL;
                goto error;
        }

        ret = cm_alloc_msg(cm_id_priv, &msg);
        if (ret)
                goto error;

        cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
                      private_data, private_data_len);

        ret = ib_post_send_mad(msg, NULL);
        if (ret) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_free_msg(msg);
                kfree(data);
                return ret;
        }

        cm_id->state = IB_CM_ESTABLISHED;
        cm_set_private_data(cm_id_priv, data, private_data_len);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return 0;

error:  spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        kfree(data);
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_rtu);
static void cm_format_rep_event(struct cm_work *work)
{
        struct cm_rep_msg *rep_msg;
        struct ib_cm_rep_event_param *param;

        rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
        param = &work->cm_event.param.rep_rcvd;
        param->remote_ca_guid = rep_msg->local_ca_guid;
        param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
        param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
        param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
        param->responder_resources = rep_msg->initiator_depth;
        param->initiator_depth = rep_msg->resp_resources;
        param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
        param->failover_accepted = cm_rep_get_failover(rep_msg);
        param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
        param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
        param->srq = cm_rep_get_srq(rep_msg);
        work->cm_event.private_data = &rep_msg->private_data;
}
static void cm_dup_rep_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        struct cm_rep_msg *rep_msg;
        struct ib_mad_send_buf *msg = NULL;
        unsigned long flags;
        int ret;

        rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
                                   rep_msg->local_comm_id);
        if (!cm_id_priv)
                return;

        ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
        if (ret)
                goto deref;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
                cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
                              cm_id_priv->private_data,
                              cm_id_priv->private_data_len);
        else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
                cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
                              CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
                              cm_id_priv->private_data,
                              cm_id_priv->private_data_len);
        else
                goto unlock;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        ret = ib_post_send_mad(msg, NULL);
        if (ret)
                goto free;
        goto deref;

unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:   cm_free_msg(msg);
deref:  cm_deref_id(cm_id_priv);
}
static int cm_rep_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        struct cm_rep_msg *rep_msg;
        unsigned long flags;
        int ret;

        rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
        if (!cm_id_priv) {
                cm_dup_rep_handler(work);
                return -EINVAL;
        }

        cm_format_rep_event(work);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->id.state) {
        case IB_CM_REQ_SENT:
        case IB_CM_MRA_REQ_RCVD:
                break;
        default:
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ret = -EINVAL;
                goto error;
        }

        cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
        cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
        cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);

        spin_lock(&cm.lock);
        /* Check for duplicate REP. */
        if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
                spin_unlock(&cm.lock);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ret = -EINVAL;
                goto error;
        }
        /* Check for a stale connection. */
        if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
                rb_erase(&cm_id_priv->timewait_info->remote_id_node,
                         &cm.remote_id_table);
                cm_id_priv->timewait_info->inserted_remote_id = 0;
                spin_unlock(&cm.lock);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_issue_rej(work->port, work->mad_recv_wc,
                             IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
                             NULL, 0);
                ret = -EINVAL;
                goto error;
        }
        spin_unlock(&cm.lock);

        cm_id_priv->id.state = IB_CM_REP_RCVD;
        cm_id_priv->id.remote_id = rep_msg->local_comm_id;
        cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
        cm_id_priv->initiator_depth = rep_msg->resp_resources;
        cm_id_priv->responder_resources = rep_msg->initiator_depth;
        cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
        cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);

        /* todo: handle peer_to_peer */

        ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;

error:
        cm_deref_id(cm_id_priv);
        return ret;
}
static int cm_establish_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        unsigned long flags;
        int ret;

        /* See comment in ib_cm_establish about lookup. */
        cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
        if (!cm_id_priv)
                return -EINVAL;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                goto out;
        }

        ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;
out:
        cm_deref_id(cm_id_priv);
        return -EINVAL;
}
static int cm_rtu_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        struct cm_rtu_msg *rtu_msg;
        unsigned long flags;
        int ret;

        rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
                                   rtu_msg->local_comm_id);
        if (!cm_id_priv)
                return -EINVAL;

        work->cm_event.private_data = &rtu_msg->private_data;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->id.state != IB_CM_REP_SENT &&
            cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                goto out;
        }
        cm_id_priv->id.state = IB_CM_ESTABLISHED;

        ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;
out:
        cm_deref_id(cm_id_priv);
        return -EINVAL;
}
static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
                           struct cm_id_private *cm_id_priv,
                           const void *private_data,
                           u8 private_data_len)
{
        cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
                          cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
        dreq_msg->local_comm_id = cm_id_priv->id.local_id;
        dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
        cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);

        if (private_data && private_data_len)
                memcpy(dreq_msg->private_data, private_data, private_data_len);
}
int ib_send_cm_dreq(struct ib_cm_id *cm_id,
                    const void *private_data,
                    u8 private_data_len)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        unsigned long flags;
        int ret;

        if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
                return -EINVAL;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_ESTABLISHED) {
                ret = -EINVAL;
                goto out;
        }

        ret = cm_alloc_msg(cm_id_priv, &msg);
        if (ret) {
                cm_enter_timewait(cm_id_priv);
                goto out;
        }

        cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
                       private_data, private_data_len);
        msg->timeout_ms = cm_id_priv->timeout_ms;
        msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;

        ret = ib_post_send_mad(msg, NULL);
        if (ret) {
                cm_enter_timewait(cm_id_priv);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_free_msg(msg);
                return ret;
        }

        cm_id->state = IB_CM_DREQ_SENT;
        cm_id_priv->msg = msg;
out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_dreq);
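
/*
 * Teardown sketch (illustrative): either side of an established
 * connection sends a DREQ; the peer answers with a DREP and both ends
 * pass through the timewait state before the cm_id may be reused.
 *
 *      ret = ib_send_cm_dreq(cm_id, NULL, 0);  // initiator
 *      ...
 *      // in the peer's handler, on IB_CM_DREQ_RECEIVED:
 *      ib_send_cm_drep(cm_id, NULL, 0);
 */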
static void cm_format_drep(struct cm_drep_msg *drep_msg,
                           struct cm_id_private *cm_id_priv,
                           const void *private_data,
                           u8 private_data_len)
{
        cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
        drep_msg->local_comm_id = cm_id_priv->id.local_id;
        drep_msg->remote_comm_id = cm_id_priv->id.remote_id;

        if (private_data && private_data_len)
                memcpy(drep_msg->private_data, private_data, private_data_len);
}
int ib_send_cm_drep(struct ib_cm_id *cm_id,
                    const void *private_data,
                    u8 private_data_len)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        unsigned long flags;
        void *data;
        int ret;

        if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
                return -EINVAL;

        data = cm_copy_private_data(private_data, private_data_len);
        if (IS_ERR(data))
                return PTR_ERR(data);

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_DREQ_RCVD) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                kfree(data);
                return -EINVAL;
        }

        cm_set_private_data(cm_id_priv, data, private_data_len);
        cm_enter_timewait(cm_id_priv);

        ret = cm_alloc_msg(cm_id_priv, &msg);
        if (ret)
                goto out;

        cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
                       private_data, private_data_len);

        ret = ib_post_send_mad(msg, NULL);
        if (ret) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_free_msg(msg);
                return ret;
        }

out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_drep);
static int cm_dreq_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        struct cm_dreq_msg *dreq_msg;
        struct ib_mad_send_buf *msg = NULL;
        unsigned long flags;
        int ret;

        dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
                                   dreq_msg->local_comm_id);
        if (!cm_id_priv)
                return -EINVAL;

        work->cm_event.private_data = &dreq_msg->private_data;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
                goto unlock;

        switch (cm_id_priv->id.state) {
        case IB_CM_REP_SENT:
        case IB_CM_DREQ_SENT:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                break;
        case IB_CM_ESTABLISHED:
        case IB_CM_MRA_REP_RCVD:
                break;
        case IB_CM_TIMEWAIT:
                if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
                        goto unlock;

                cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
                               cm_id_priv->private_data,
                               cm_id_priv->private_data_len);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);

                if (ib_post_send_mad(msg, NULL))
                        cm_free_msg(msg);
                goto deref;
        default:
                goto unlock;
        }
        cm_id_priv->id.state = IB_CM_DREQ_RCVD;
        cm_id_priv->tid = dreq_msg->hdr.tid;
        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;

unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
deref:  cm_deref_id(cm_id_priv);
        return -EINVAL;
}
static int cm_drep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_drep_msg *drep_msg;
	unsigned long flags;
	int ret;

	drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
				   drep_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &drep_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
	    cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_enter_timewait(cm_id_priv);

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
int ib_send_cm_rej(struct ib_cm_id *cm_id,
		   enum ib_cm_rej_reason reason,
		   void *ari,
		   u8 ari_length,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (!ret)
			cm_format_rej((struct cm_rej_msg *) msg->mad,
				      cm_id_priv, reason, ari, ari_length,
				      private_data, private_data_len);

		cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (!ret)
			cm_format_rej((struct cm_rej_msg *) msg->mad,
				      cm_id_priv, reason, ari, ari_length,
				      private_data, private_data_len);

		cm_enter_timewait(cm_id_priv);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	if (ret)
		goto out;

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rej);
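/*
 * Usage sketch (illustrative, not part of this file): a listener that
 * cannot accept a new connection rejects it directly from its
 * IB_CM_REQ_RECEIVED callback; IB_CM_REJ_CONSUMER_DEFINED may carry an
 * optional consumer ARI blob (omitted here):
 *
 *	case IB_CM_REQ_RECEIVED:
 *		ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
 *			       NULL, 0, NULL, 0);
 *		break;
 */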
static void cm_format_rej_event(struct cm_work *work)
{
	struct cm_rej_msg *rej_msg;
	struct ib_cm_rej_event_param *param;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rej_rcvd;
	param->ari = rej_msg->ari;
	param->ari_length = cm_rej_get_reject_info_len(rej_msg);
	param->reason = __be16_to_cpu(rej_msg->reason);
	work->cm_event.private_data = &rej_msg->private_data;
}
static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	__be32 remote_id;

	remote_id = rej_msg->local_comm_id;

	if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
		spin_lock_irqsave(&cm.lock, flags);
		timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
						   remote_id);
		if (!timewait_info) {
			spin_unlock_irqrestore(&cm.lock, flags);
			return NULL;
		}
		cm_id_priv = idr_find(&cm.local_id_table, (__force int)
				      (timewait_info->work.local_id ^
				       cm.random_id_operand));
		if (cm_id_priv) {
			if (cm_id_priv->id.remote_id == remote_id)
				atomic_inc(&cm_id_priv->refcount);
			else
				cm_id_priv = NULL;
		}
		spin_unlock_irqrestore(&cm.lock, flags);
	} else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
	else
		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);

	return cm_id_priv;
}
static int cm_rej_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rej_msg *rej_msg;
	unsigned long flags;
	int ret;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_rejected_id(rej_msg);
	if (!cm_id_priv)
		return -EINVAL;

	cm_format_rej_event(work);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* fall through */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
		if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
			cm_enter_timewait(cm_id_priv);
		else
			cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* fall through */
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_ESTABLISHED:
		cm_enter_timewait(cm_id_priv);
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}

	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
int ib_send_cm_mra(struct ib_cm_id *cm_id,
		   u8 service_timeout,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	void *data;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch(cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
		cm_id->state = IB_CM_MRA_REQ_SENT;
		break;
	case IB_CM_REP_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
		cm_id->state = IB_CM_MRA_REP_SENT;
		break;
	case IB_CM_ESTABLISHED:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_OTHER, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
		cm_id->lap_state = IB_CM_MRA_LAP_SENT;
		break;
	default:
		ret = -EINVAL;
		goto error1;
	}
	cm_id_priv->service_timeout = service_timeout;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error1:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;

error2:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	cm_free_msg(msg);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_mra);
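/*
 * Usage sketch (illustrative, not part of this file): a consumer that
 * needs longer than the remote's CM response timeout to process a REQ
 * stretches it with an MRA before deferring the real work.  The service
 * timeout is an exponent code (roughly 4.096us * 2^n); the value and the
 * workqueue below are arbitrary illustrations:
 *
 *	case IB_CM_REQ_RECEIVED:
 *		ib_send_cm_mra(cm_id, 20, NULL, 0);	// ~4 seconds
 *		queue_work(my_wq, &slow_accept_work);	// hypothetical
 *		break;
 */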
static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
{
	switch (cm_mra_get_msg_mraed(mra_msg)) {
	case CM_MSG_RESPONSE_REQ:
		return cm_acquire_id(mra_msg->remote_comm_id, 0);
	case CM_MSG_RESPONSE_REP:
	case CM_MSG_RESPONSE_OTHER:
		return cm_acquire_id(mra_msg->remote_comm_id,
				     mra_msg->local_comm_id);
	default:
		return NULL;
	}
}
static int cm_mra_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_mra_msg *mra_msg;
	unsigned long flags;
	int timeout, ret;

	mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_mraed_id(mra_msg);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &mra_msg->private_data;
	work->cm_event.param.mra_rcvd.service_timeout =
					cm_mra_get_service_timeout(mra_msg);
	timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
		  cm_convert_to_ms(cm_id_priv->av.packet_life_time);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
		break;
	case IB_CM_REP_SENT:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
		break;
	case IB_CM_ESTABLISHED:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
		    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
		break;
	default:
		goto out;
	}

	cm_id_priv->msg->context[1] = (void *) (unsigned long)
				      cm_id_priv->id.state;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static void cm_format_lap(struct cm_lap_msg *lap_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_sa_path_rec *alternate_path,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
	lap_msg->local_comm_id = cm_id_priv->id.local_id;
	lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
	/* todo: need remote CM response timeout */
	cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
	lap_msg->alt_local_lid = alternate_path->slid;
	lap_msg->alt_remote_lid = alternate_path->dlid;
	lap_msg->alt_local_gid = alternate_path->sgid;
	lap_msg->alt_remote_gid = alternate_path->dgid;
	cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
	cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
	lap_msg->alt_hop_limit = alternate_path->hop_limit;
	cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
	cm_lap_set_sl(lap_msg, alternate_path->sl);
	cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
	cm_lap_set_local_ack_timeout(lap_msg,
		min(31, alternate_path->packet_life_time + 1));

	if (private_data && private_data_len)
		memcpy(lap_msg->private_data, private_data, private_data_len);
}
int ib_send_cm_lap(struct ib_cm_id *cm_id,
		   struct ib_sa_path_rec *alternate_path,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED ||
	    cm_id->lap_state != IB_CM_LAP_IDLE) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
		      alternate_path, private_data, private_data_len);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->lap_state = IB_CM_LAP_SENT;
	cm_id_priv->msg = msg;

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_lap);
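/*
 * Usage sketch (illustrative, not part of this file): once established,
 * the side that initiated the connection can load a new alternate path;
 * 'alt_path' is assumed to come from an SA path record query:
 *
 *	ret = ib_send_cm_lap(cm_id, &alt_path, NULL, 0);
 *
 * The peer answers with an APR (IB_CM_APR_RECEIVED), after which the
 * consumer re-arms migration via ib_cm_init_qp_attr()/ib_modify_qp().
 */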
static void cm_format_path_from_lap(struct ib_sa_path_rec *path,
				    struct cm_lap_msg *lap_msg)
{
	memset(path, 0, sizeof *path);
	path->dgid = lap_msg->alt_local_gid;
	path->sgid = lap_msg->alt_remote_gid;
	path->dlid = lap_msg->alt_local_lid;
	path->slid = lap_msg->alt_remote_lid;
	path->flow_label = cm_lap_get_flow_label(lap_msg);
	path->hop_limit = lap_msg->alt_hop_limit;
	path->traffic_class = cm_lap_get_traffic_class(lap_msg);
	path->reversible = 1;
	/* pkey is same as in REQ */
	path->sl = cm_lap_get_sl(lap_msg);
	path->mtu_selector = IB_SA_EQ;
	/* mtu is same as in REQ */
	path->rate_selector = IB_SA_EQ;
	path->rate = cm_lap_get_packet_rate(lap_msg);
	path->packet_life_time_selector = IB_SA_EQ;
	path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
	path->packet_life_time -= (path->packet_life_time > 0);
}
static int cm_lap_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_lap_msg *lap_msg;
	struct ib_cm_lap_event_param *param;
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	/* todo: verify LAP request and send reject APR if invalid. */
	lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
				   lap_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	param = &work->cm_event.param.lap_rcvd;
	param->alternate_path = &work->path[0];
	cm_format_path_from_lap(param->alternate_path, lap_msg);
	work->cm_event.private_data = &lap_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
		goto unlock;

	switch (cm_id_priv->id.lap_state) {
	case IB_CM_LAP_IDLE:
		break;
	case IB_CM_MRA_LAP_SENT:
		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
			goto unlock;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_OTHER,
			      cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

		if (ib_post_send_mad(msg, NULL))
			cm_free_msg(msg);
		goto deref;
	default:
		goto unlock;
	}

	cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
	cm_id_priv->tid = lap_msg->hdr.tid;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static void cm_format_apr(struct cm_apr_msg *apr_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_apr_status status,
			  void *info,
			  u8 info_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
	apr_msg->local_comm_id = cm_id_priv->id.local_id;
	apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
	apr_msg->ap_status = (u8) status;

	if (info && info_length) {
		apr_msg->info_length = info_length;
		memcpy(apr_msg->info, info, info_length);
	}

	if (private_data && private_data_len)
		memcpy(apr_msg->private_data, private_data, private_data_len);
}
int ib_send_cm_apr(struct ib_cm_id *cm_id,
		   enum ib_cm_apr_status status,
		   void *info,
		   u8 info_length,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
	    (info && info_length > IB_CM_APR_INFO_LENGTH))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED ||
	    (cm_id->lap_state != IB_CM_LAP_RCVD &&
	     cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
		      info, info_length, private_data, private_data_len);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->lap_state = IB_CM_LAP_IDLE;
out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_apr);
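/*
 * Usage sketch (illustrative, not part of this file): the passive side
 * accepts or rejects the proposed alternate path from its
 * IB_CM_LAP_RECEIVED callback:
 *
 *	case IB_CM_LAP_RECEIVED:
 *		// load event->param.lap_rcvd.alternate_path into the QP
 *		// first (IB_QP_ALT_PATH), then acknowledge:
 *		ib_send_cm_apr(cm_id, IB_CM_APR_SUCCESS,
 *			       NULL, 0, NULL, 0);
 *		break;
 */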
static int cm_apr_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_apr_msg *apr_msg;
	unsigned long flags;
	int ret;

	apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
				   apr_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
	work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
	work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
	work->cm_event.private_data = &apr_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
	    (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
	     cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	cm_id_priv->msg = NULL;

	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static int cm_timewait_handler(struct cm_work *work)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	timewait_info = (struct cm_timewait_info *)work;
	cm_cleanup_timewait(timewait_info);

	cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
				   timewait_info->work.remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
	    cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_req_param *param)
{
	cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
	sidr_req_msg->request_id = cm_id_priv->id.local_id;
	sidr_req_msg->pkey = cpu_to_be16(param->path->pkey);
	sidr_req_msg->service_id = param->service_id;

	if (param->private_data && param->private_data_len)
		memcpy(sidr_req_msg->private_data, param->private_data,
		       param->private_data_len);
}
int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (!param->path || (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
	if (ret)
		goto out;

	cm_id->service_id = param->service_id;
	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	cm_id_priv->timeout_ms = param->timeout_ms;
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
			   param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state == IB_CM_IDLE)
		ret = ib_post_send_mad(msg, NULL);
	else
		ret = -EINVAL;

	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		goto out;
	}
	cm_id->state = IB_CM_SIDR_REQ_SENT;
	cm_id_priv->msg = msg;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
out:
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_req);
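/*
 * Usage sketch (illustrative, not part of this file): SIDR resolves a
 * service ID to a remote UD QPN/QKey without building a connection.
 * Field values below are placeholders; 'path_rec' is assumed to come
 * from an SA query:
 *
 *	struct ib_cm_sidr_req_param param = {
 *		.path		= &path_rec,
 *		.service_id	= my_service_id,	// hypothetical
 *		.timeout_ms	= 1000,
 *		.max_cm_retries	= 3,
 *	};
 *	ret = ib_send_cm_sidr_req(cm_id, &param);
 */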
static void cm_format_sidr_req_event(struct cm_work *work,
				     struct ib_cm_id *listen_id)
{
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_cm_sidr_req_event_param *param;

	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_req_rcvd;
	param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
	param->listen_id = listen_id;
	param->port = work->port->port_num;
	work->cm_event.private_data = &sidr_req_msg->private_data;
}
static int cm_sidr_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_wc *wc;
	unsigned long flags;

	cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	/* Record SGID/SLID and request ID for lookup. */
	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	wc = work->mad_recv_wc->wc;
	cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
	cm_id_priv->av.dgid.global.interface_id = 0;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_id_priv->id.remote_id = sidr_req_msg->request_id;
	cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
	cm_id_priv->tid = sidr_req_msg->hdr.tid;
	atomic_inc(&cm_id_priv->work_count);

	spin_lock_irqsave(&cm.lock, flags);
	cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
	if (cur_cm_id_priv) {
		spin_unlock_irqrestore(&cm.lock, flags);
		goto out; /* Duplicate message. */
	}
	cur_cm_id_priv = cm_find_listen(cm_id->device,
					sidr_req_msg->service_id,
					sidr_req_msg->private_data);
	if (!cur_cm_id_priv) {
		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
		spin_unlock_irqrestore(&cm.lock, flags);
		/* todo: reply with no match */
		goto out; /* No match. */
	}
	atomic_inc(&cur_cm_id_priv->refcount);
	spin_unlock_irqrestore(&cm.lock, flags);

	cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = cur_cm_id_priv->id.context;
	cm_id_priv->id.service_id = sidr_req_msg->service_id;
	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);

	cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(cur_cm_id_priv);
	return 0;
out:
	ib_destroy_cm_id(&cm_id_priv->id);
	return -EINVAL;
}
static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_rep_param *param)
{
	cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
			  cm_id_priv->tid);
	sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
	sidr_rep_msg->status = param->status;
	cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
	sidr_rep_msg->service_id = cm_id_priv->id.service_id;
	sidr_rep_msg->qkey = cpu_to_be32(param->qkey);

	if (param->info && param->info_length)
		memcpy(sidr_rep_msg->info, param->info, param->info_length);

	if (param->private_data && param->private_data_len)
		memcpy(sidr_rep_msg->private_data, param->private_data,
		       param->private_data_len);
}
int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
	    (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
			   param);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}
	cm_id->state = IB_CM_IDLE;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	spin_lock_irqsave(&cm.lock, flags);
	rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	spin_unlock_irqrestore(&cm.lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_rep);
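/*
 * Usage sketch (illustrative, not part of this file): the server answers
 * a SIDR REQ from its IB_CM_SIDR_REQ_RECEIVED callback with the UD QP
 * clients should target.  The QP and qkey names are hypothetical:
 *
 *	struct ib_cm_sidr_rep_param rep = {
 *		.qp_num	= my_ud_qp->qp_num,
 *		.qkey	= MY_SERVICE_QKEY,
 *		.status	= IB_SIDR_SUCCESS,
 *	};
 *	ib_send_cm_sidr_rep(cm_id, &rep);
 */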
static void cm_format_sidr_rep_event(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct ib_cm_sidr_rep_event_param *param;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_rep_rcvd;
	param->status = sidr_rep_msg->status;
	param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
	param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
	param->info = &sidr_rep_msg->info;
	param->info_len = sidr_rep_msg->info_length;
	work->cm_event.private_data = &sidr_rep_msg->private_data;
}
static int cm_sidr_rep_handler(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct cm_id_private *cm_id_priv;
	unsigned long flags;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_format_sidr_rep_event(work);
	cm_process_work(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static void cm_process_send_error(struct ib_mad_send_buf *msg,
				  enum ib_wc_status wc_status)
{
	struct cm_id_private *cm_id_priv;
	struct ib_cm_event cm_event;
	enum ib_cm_state state;
	unsigned long flags;
	int ret;

	memset(&cm_event, 0, sizeof cm_event);
	cm_id_priv = msg->context[0];

	/* Discard old sends or ones without a response. */
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	state = (enum ib_cm_state) (unsigned long) msg->context[1];
	if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
		goto discard;

	switch (state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REQ_ERROR;
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REP_ERROR;
		break;
	case IB_CM_DREQ_SENT:
		cm_enter_timewait(cm_id_priv);
		cm_event.event = IB_CM_DREQ_ERROR;
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id_priv->id.state = IB_CM_IDLE;
		cm_event.event = IB_CM_SIDR_REQ_ERROR;
		break;
	default:
		goto discard;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_event.param.send_status = wc_status;

	/* No other events can occur on the cm_id at this point. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
	cm_free_msg(msg);
	if (ret)
		ib_destroy_cm_id(&cm_id_priv->id);
	return;
discard:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_free_msg(msg);
}
static void cm_send_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_send_buf *msg = mad_send_wc->send_buf;

	switch (mad_send_wc->status) {
	case IB_WC_SUCCESS:
	case IB_WC_WR_FLUSH_ERR:
		cm_free_msg(msg);
		break;
	default:
		if (msg->context[0] && msg->context[1])
			cm_process_send_error(msg, mad_send_wc->status);
		else
			cm_free_msg(msg);
		break;
	}
}
static void cm_work_handler(void *data)
{
	struct cm_work *work = data;
	int ret;

	switch (work->cm_event.event) {
	case IB_CM_REQ_RECEIVED:
		ret = cm_req_handler(work);
		break;
	case IB_CM_MRA_RECEIVED:
		ret = cm_mra_handler(work);
		break;
	case IB_CM_REJ_RECEIVED:
		ret = cm_rej_handler(work);
		break;
	case IB_CM_REP_RECEIVED:
		ret = cm_rep_handler(work);
		break;
	case IB_CM_RTU_RECEIVED:
		ret = cm_rtu_handler(work);
		break;
	case IB_CM_USER_ESTABLISHED:
		ret = cm_establish_handler(work);
		break;
	case IB_CM_DREQ_RECEIVED:
		ret = cm_dreq_handler(work);
		break;
	case IB_CM_DREP_RECEIVED:
		ret = cm_drep_handler(work);
		break;
	case IB_CM_SIDR_REQ_RECEIVED:
		ret = cm_sidr_req_handler(work);
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		ret = cm_sidr_rep_handler(work);
		break;
	case IB_CM_LAP_RECEIVED:
		ret = cm_lap_handler(work);
		break;
	case IB_CM_APR_RECEIVED:
		ret = cm_apr_handler(work);
		break;
	case IB_CM_TIMEWAIT_EXIT:
		ret = cm_timewait_handler(work);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	if (ret)
		cm_free_work(work);
}
int ib_cm_establish(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;
	int ret = 0;

	work = kmalloc(sizeof *work, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state)
	{
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_id->state = IB_CM_ESTABLISHED;
		break;
	case IB_CM_ESTABLISHED:
		ret = -EISCONN;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret) {
		kfree(work);
		goto out;
	}

	/*
	 * The CM worker thread may try to destroy the cm_id before it
	 * can execute this work item.  To prevent potential deadlock,
	 * we need to find the cm_id once we're in the context of the
	 * worker thread, rather than holding a reference on it.
	 */
	INIT_WORK(&work->work, cm_work_handler, work);
	work->local_id = cm_id->local_id;
	work->remote_id = cm_id->remote_id;
	work->mad_recv_wc = NULL;
	work->cm_event.event = IB_CM_USER_ESTABLISHED;
	queue_work(cm.wq, &work->work);
out:
	return ret;
}
EXPORT_SYMBOL(ib_cm_establish);
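/*
 * Usage sketch (illustrative, not part of this file): a passive-side
 * consumer that sees incoming data before the RTU arrives can tell the
 * CM directly so the id leaves the REP-sent state:
 *
 *	if (first_recv_completion)	// hypothetical trigger
 *		ib_cm_establish(cm_id);
 */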
static void cm_recv_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_recv_wc *mad_recv_wc)
{
	struct cm_work *work;
	enum ib_cm_event_type event;
	int paths = 0;

	switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
	case CM_REQ_ATTR_ID:
		paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
						    alt_local_lid != 0);
		event = IB_CM_REQ_RECEIVED;
		break;
	case CM_MRA_ATTR_ID:
		event = IB_CM_MRA_RECEIVED;
		break;
	case CM_REJ_ATTR_ID:
		event = IB_CM_REJ_RECEIVED;
		break;
	case CM_REP_ATTR_ID:
		event = IB_CM_REP_RECEIVED;
		break;
	case CM_RTU_ATTR_ID:
		event = IB_CM_RTU_RECEIVED;
		break;
	case CM_DREQ_ATTR_ID:
		event = IB_CM_DREQ_RECEIVED;
		break;
	case CM_DREP_ATTR_ID:
		event = IB_CM_DREP_RECEIVED;
		break;
	case CM_SIDR_REQ_ATTR_ID:
		event = IB_CM_SIDR_REQ_RECEIVED;
		break;
	case CM_SIDR_REP_ATTR_ID:
		event = IB_CM_SIDR_REP_RECEIVED;
		break;
	case CM_LAP_ATTR_ID:
		paths = 1;
		event = IB_CM_LAP_RECEIVED;
		break;
	case CM_APR_ATTR_ID:
		event = IB_CM_APR_RECEIVED;
		break;
	default:
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
		       GFP_KERNEL);
	if (!work) {
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	INIT_WORK(&work->work, cm_work_handler, work);
	work->cm_event.event = event;
	work->mad_recv_wc = mad_recv_wc;
	work->port = (struct cm_port *)mad_agent->context;
	queue_work(cm.wq, &work->work);
}
static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
				struct ib_qp_attr *qp_attr,
				int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
				IB_QP_PKEY_INDEX | IB_QP_PORT;
		qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
					   IB_ACCESS_REMOTE_WRITE;
		if (cm_id_priv->responder_resources)
			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
						    IB_ACCESS_REMOTE_ATOMIC;
		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
		qp_attr->port_num = cm_id_priv->av.port->port_num;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
				IB_QP_DEST_QPN | IB_QP_RQ_PSN;
		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
		qp_attr->path_mtu = cm_id_priv->path_mtu;
		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
		if (cm_id_priv->qp_type == IB_QPT_RC) {
			*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
					 IB_QP_MIN_RNR_TIMER;
			qp_attr->max_dest_rd_atomic =
					cm_id_priv->responder_resources;
			qp_attr->min_rnr_timer = 0;
		}
		if (cm_id_priv->alt_av.ah_attr.dlid) {
			*qp_attr_mask |= IB_QP_ALT_PATH;
			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
*cm_id_priv
,
3206 struct ib_qp_attr
*qp_attr
,
3209 unsigned long flags
;
3212 spin_lock_irqsave(&cm_id_priv
->lock
, flags
);
3213 switch (cm_id_priv
->id
.state
) {
3214 case IB_CM_REP_RCVD
:
3215 case IB_CM_MRA_REP_SENT
:
3216 case IB_CM_REP_SENT
:
3217 case IB_CM_MRA_REP_RCVD
:
3218 case IB_CM_ESTABLISHED
:
3219 *qp_attr_mask
= IB_QP_STATE
| IB_QP_SQ_PSN
;
3220 qp_attr
->sq_psn
= be32_to_cpu(cm_id_priv
->sq_psn
);
3221 if (cm_id_priv
->qp_type
== IB_QPT_RC
) {
3222 *qp_attr_mask
|= IB_QP_TIMEOUT
| IB_QP_RETRY_CNT
|
3224 IB_QP_MAX_QP_RD_ATOMIC
;
3225 qp_attr
->timeout
= cm_id_priv
->local_ack_timeout
;
3226 qp_attr
->retry_cnt
= cm_id_priv
->retry_count
;
3227 qp_attr
->rnr_retry
= cm_id_priv
->rnr_retry_count
;
3228 qp_attr
->max_rd_atomic
= cm_id_priv
->initiator_depth
;
3230 if (cm_id_priv
->alt_av
.ah_attr
.dlid
) {
3231 *qp_attr_mask
|= IB_QP_PATH_MIG_STATE
;
3232 qp_attr
->path_mig_state
= IB_MIG_REARM
;
3240 spin_unlock_irqrestore(&cm_id_priv
->lock
, flags
);
int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTR:
		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);
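/*
 * Usage sketch (illustrative, not part of this file): the usual consumer
 * sequence walks the QP through INIT/RTR/RTS, letting the CM fill in the
 * path and resource attributes it negotiated.  Error handling omitted:
 *
 *	struct ib_qp_attr qp_attr;
 *	int qp_attr_mask;
 *
 *	qp_attr.qp_state = IB_QPS_INIT;
 *	ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
 *	ib_modify_qp(qp, &qp_attr, qp_attr_mask);
 *
 *	qp_attr.qp_state = IB_QPS_RTR;
 *	ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
 *	ib_modify_qp(qp, &qp_attr, qp_attr_mask);
 *
 *	qp_attr.qp_state = IB_QPS_RTS;
 *	ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
 *	ib_modify_qp(qp, &qp_attr, qp_attr_mask);
 */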
static void cm_add_one(struct ib_device *device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_mad_reg_req reg_req = {
		.mgmt_class = IB_MGMT_CLASS_CM,
		.mgmt_class_version = IB_CM_CLASS_VERSION
	};
	struct ib_port_modify port_modify = {
		.set_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int ret;
	u8 i;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) *
			 device->phys_port_cnt, GFP_KERNEL);
	if (!cm_dev)
		return;

	cm_dev->device = device;
	cm_dev->ca_guid = device->node_guid;

	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
	for (i = 1; i <= device->phys_port_cnt; i++) {
		port = &cm_dev->port[i-1];
		port->cm_dev = cm_dev;
		port->port_num = i;
		port->mad_agent = ib_register_mad_agent(device, i,
							IB_QPT_GSI,
							&reg_req,
							0,
							cm_send_handler,
							cm_recv_handler,
							port);
		if (IS_ERR(port->mad_agent))
			goto error1;

		ret = ib_modify_port(device, i, 0, &port_modify);
		if (ret)
			goto error2;
	}
	ib_set_client_data(device, &cm_client, cm_dev);

	write_lock_irqsave(&cm.device_lock, flags);
	list_add_tail(&cm_dev->list, &cm.device_list);
	write_unlock_irqrestore(&cm.device_lock, flags);
	return;

error2:
	ib_unregister_mad_agent(port->mad_agent);
error1:
	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
	while (--i) {
		port = &cm_dev->port[i-1];
		ib_modify_port(device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
	}
	kfree(cm_dev);
}
static void cm_remove_one(struct ib_device *device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int i;

	cm_dev = ib_get_client_data(device, &cm_client);
	if (!cm_dev)
		return;

	write_lock_irqsave(&cm.device_lock, flags);
	list_del(&cm_dev->list);
	write_unlock_irqrestore(&cm.device_lock, flags);

	for (i = 1; i <= device->phys_port_cnt; i++) {
		port = &cm_dev->port[i-1];
		ib_modify_port(device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
	}
	kfree(cm_dev);
}
static int __init ib_cm_init(void)
{
	int ret;

	memset(&cm, 0, sizeof cm);
	INIT_LIST_HEAD(&cm.device_list);
	rwlock_init(&cm.device_lock);
	spin_lock_init(&cm.lock);
	cm.listen_service_table = RB_ROOT;
	cm.listen_service_id = __constant_be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
	cm.remote_id_table = RB_ROOT;
	cm.remote_qp_table = RB_ROOT;
	cm.remote_sidr_table = RB_ROOT;
	idr_init(&cm.local_id_table);
	get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
	idr_pre_get(&cm.local_id_table, GFP_KERNEL);

	cm.wq = create_workqueue("ib_cm");
	if (!cm.wq)
		return -ENOMEM;

	ret = ib_register_client(&cm_client);
	if (ret)
		goto error;

	return 0;
error:
	destroy_workqueue(cm.wq);
	return ret;
}

static void __exit ib_cm_cleanup(void)
{
	destroy_workqueue(cm.wq);
	ib_unregister_client(&cm_client);
	idr_destroy(&cm.local_id_table);
}

module_init(ib_cm_init);
module_exit(ib_cm_cleanup);