2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
5 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 #include <linux/sched.h>
37 #include <linux/pci.h>
38 #include <linux/errno.h>
39 #include <linux/kernel.h>
41 #include <linux/slab.h>
42 #include <linux/mlx4/cmd.h>
43 #include <linux/mlx4/qp.h>
48 #define MLX4_MAC_VALID (1ull << 63)
49 #define MLX4_MAC_MASK 0x7fffffffffffffffULL
53 struct list_head list
;
59 struct list_head list
;
73 struct list_head list
;
75 enum mlx4_protocol prot
;
76 enum mlx4_steer_type steer
;
80 RES_QP_BUSY
= RES_ANY_BUSY
,
82 /* QP number was allocated */
85 /* ICM memory for QP context was mapped */
88 /* QP is in hw ownership */
93 struct res_common com
;
98 struct list_head mcg_list
;
103 enum res_mtt_states
{
104 RES_MTT_BUSY
= RES_ANY_BUSY
,
108 static inline const char *mtt_states_str(enum res_mtt_states state
)
111 case RES_MTT_BUSY
: return "RES_MTT_BUSY";
112 case RES_MTT_ALLOCATED
: return "RES_MTT_ALLOCATED";
113 default: return "Unknown";
118 struct res_common com
;
123 enum res_mpt_states
{
124 RES_MPT_BUSY
= RES_ANY_BUSY
,
131 struct res_common com
;
137 RES_EQ_BUSY
= RES_ANY_BUSY
,
143 struct res_common com
;
148 RES_CQ_BUSY
= RES_ANY_BUSY
,
154 struct res_common com
;
159 enum res_srq_states
{
160 RES_SRQ_BUSY
= RES_ANY_BUSY
,
166 struct res_common com
;
172 enum res_counter_states
{
173 RES_COUNTER_BUSY
= RES_ANY_BUSY
,
174 RES_COUNTER_ALLOCATED
,
178 struct res_common com
;
182 enum res_xrcdn_states
{
183 RES_XRCD_BUSY
= RES_ANY_BUSY
,
188 struct res_common com
;
193 static const char *ResourceType(enum mlx4_resource rt
)
196 case RES_QP
: return "RES_QP";
197 case RES_CQ
: return "RES_CQ";
198 case RES_SRQ
: return "RES_SRQ";
199 case RES_MPT
: return "RES_MPT";
200 case RES_MTT
: return "RES_MTT";
201 case RES_MAC
: return "RES_MAC";
202 case RES_EQ
: return "RES_EQ";
203 case RES_COUNTER
: return "RES_COUNTER";
204 case RES_XRCD
: return "RES_XRCD";
205 default: return "Unknown resource type !!!";
209 int mlx4_init_resource_tracker(struct mlx4_dev
*dev
)
211 struct mlx4_priv
*priv
= mlx4_priv(dev
);
215 priv
->mfunc
.master
.res_tracker
.slave_list
=
216 kzalloc(dev
->num_slaves
* sizeof(struct slave_list
),
218 if (!priv
->mfunc
.master
.res_tracker
.slave_list
)
221 for (i
= 0 ; i
< dev
->num_slaves
; i
++) {
222 for (t
= 0; t
< MLX4_NUM_OF_RESOURCE_TYPE
; ++t
)
223 INIT_LIST_HEAD(&priv
->mfunc
.master
.res_tracker
.
224 slave_list
[i
].res_list
[t
]);
225 mutex_init(&priv
->mfunc
.master
.res_tracker
.slave_list
[i
].mutex
);
228 mlx4_dbg(dev
, "Started init_resource_tracker: %ld slaves\n",
230 for (i
= 0 ; i
< MLX4_NUM_OF_RESOURCE_TYPE
; i
++)
231 INIT_RADIX_TREE(&priv
->mfunc
.master
.res_tracker
.res_tree
[i
],
232 GFP_ATOMIC
|__GFP_NOWARN
);
234 spin_lock_init(&priv
->mfunc
.master
.res_tracker
.lock
);
238 void mlx4_free_resource_tracker(struct mlx4_dev
*dev
,
239 enum mlx4_res_tracker_free_type type
)
241 struct mlx4_priv
*priv
= mlx4_priv(dev
);
244 if (priv
->mfunc
.master
.res_tracker
.slave_list
) {
245 if (type
!= RES_TR_FREE_STRUCTS_ONLY
)
246 for (i
= 0 ; i
< dev
->num_slaves
; i
++)
247 if (type
== RES_TR_FREE_ALL
||
248 dev
->caps
.function
!= i
)
249 mlx4_delete_all_resources_for_slave(dev
, i
);
251 if (type
!= RES_TR_FREE_SLAVES_ONLY
) {
252 kfree(priv
->mfunc
.master
.res_tracker
.slave_list
);
253 priv
->mfunc
.master
.res_tracker
.slave_list
= NULL
;
258 static void update_ud_gid(struct mlx4_dev
*dev
,
259 struct mlx4_qp_context
*qp_ctx
, u8 slave
)
261 u32 ts
= (be32_to_cpu(qp_ctx
->flags
) >> 16) & 0xff;
263 if (MLX4_QP_ST_UD
== ts
)
264 qp_ctx
->pri_path
.mgid_index
= 0x80 | slave
;
266 mlx4_dbg(dev
, "slave %d, new gid index: 0x%x ",
267 slave
, qp_ctx
->pri_path
.mgid_index
);
270 static int mpt_mask(struct mlx4_dev
*dev
)
272 return dev
->caps
.num_mpts
- 1;
275 static void *find_res(struct mlx4_dev
*dev
, int res_id
,
276 enum mlx4_resource type
)
278 struct mlx4_priv
*priv
= mlx4_priv(dev
);
280 return radix_tree_lookup(&priv
->mfunc
.master
.res_tracker
.res_tree
[type
],
284 static int get_res(struct mlx4_dev
*dev
, int slave
, int res_id
,
285 enum mlx4_resource type
,
288 struct res_common
*r
;
291 spin_lock_irq(mlx4_tlock(dev
));
292 r
= find_res(dev
, res_id
, type
);
298 if (r
->state
== RES_ANY_BUSY
) {
303 if (r
->owner
!= slave
) {
308 r
->from_state
= r
->state
;
309 r
->state
= RES_ANY_BUSY
;
310 mlx4_dbg(dev
, "res %s id 0x%x to busy\n",
311 ResourceType(type
), r
->res_id
);
314 *((struct res_common
**)res
) = r
;
317 spin_unlock_irq(mlx4_tlock(dev
));
321 int mlx4_get_slave_from_resource_id(struct mlx4_dev
*dev
,
322 enum mlx4_resource type
,
323 int res_id
, int *slave
)
326 struct res_common
*r
;
332 spin_lock(mlx4_tlock(dev
));
334 r
= find_res(dev
, id
, type
);
339 spin_unlock(mlx4_tlock(dev
));
344 static void put_res(struct mlx4_dev
*dev
, int slave
, int res_id
,
345 enum mlx4_resource type
)
347 struct res_common
*r
;
349 spin_lock_irq(mlx4_tlock(dev
));
350 r
= find_res(dev
, res_id
, type
);
352 r
->state
= r
->from_state
;
353 spin_unlock_irq(mlx4_tlock(dev
));
356 static struct res_common
*alloc_qp_tr(int id
)
360 ret
= kzalloc(sizeof *ret
, GFP_KERNEL
);
364 ret
->com
.res_id
= id
;
365 ret
->com
.state
= RES_QP_RESERVED
;
367 INIT_LIST_HEAD(&ret
->mcg_list
);
368 spin_lock_init(&ret
->mcg_spl
);
373 static struct res_common
*alloc_mtt_tr(int id
, int order
)
377 ret
= kzalloc(sizeof *ret
, GFP_KERNEL
);
381 ret
->com
.res_id
= id
;
383 ret
->com
.state
= RES_MTT_ALLOCATED
;
384 atomic_set(&ret
->ref_count
, 0);
389 static struct res_common
*alloc_mpt_tr(int id
, int key
)
393 ret
= kzalloc(sizeof *ret
, GFP_KERNEL
);
397 ret
->com
.res_id
= id
;
398 ret
->com
.state
= RES_MPT_RESERVED
;
404 static struct res_common
*alloc_eq_tr(int id
)
408 ret
= kzalloc(sizeof *ret
, GFP_KERNEL
);
412 ret
->com
.res_id
= id
;
413 ret
->com
.state
= RES_EQ_RESERVED
;
418 static struct res_common
*alloc_cq_tr(int id
)
422 ret
= kzalloc(sizeof *ret
, GFP_KERNEL
);
426 ret
->com
.res_id
= id
;
427 ret
->com
.state
= RES_CQ_ALLOCATED
;
428 atomic_set(&ret
->ref_count
, 0);
433 static struct res_common
*alloc_srq_tr(int id
)
437 ret
= kzalloc(sizeof *ret
, GFP_KERNEL
);
441 ret
->com
.res_id
= id
;
442 ret
->com
.state
= RES_SRQ_ALLOCATED
;
443 atomic_set(&ret
->ref_count
, 0);
448 static struct res_common
*alloc_counter_tr(int id
)
450 struct res_counter
*ret
;
452 ret
= kzalloc(sizeof *ret
, GFP_KERNEL
);
456 ret
->com
.res_id
= id
;
457 ret
->com
.state
= RES_COUNTER_ALLOCATED
;
462 static struct res_common
*alloc_xrcdn_tr(int id
)
464 struct res_xrcdn
*ret
;
466 ret
= kzalloc(sizeof *ret
, GFP_KERNEL
);
470 ret
->com
.res_id
= id
;
471 ret
->com
.state
= RES_XRCD_ALLOCATED
;
476 static struct res_common
*alloc_tr(int id
, enum mlx4_resource type
, int slave
,
479 struct res_common
*ret
;
483 ret
= alloc_qp_tr(id
);
486 ret
= alloc_mpt_tr(id
, extra
);
489 ret
= alloc_mtt_tr(id
, extra
);
492 ret
= alloc_eq_tr(id
);
495 ret
= alloc_cq_tr(id
);
498 ret
= alloc_srq_tr(id
);
501 printk(KERN_ERR
"implementation missing\n");
504 ret
= alloc_counter_tr(id
);
507 ret
= alloc_xrcdn_tr(id
);
518 static int add_res_range(struct mlx4_dev
*dev
, int slave
, int base
, int count
,
519 enum mlx4_resource type
, int extra
)
523 struct mlx4_priv
*priv
= mlx4_priv(dev
);
524 struct res_common
**res_arr
;
525 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
526 struct radix_tree_root
*root
= &tracker
->res_tree
[type
];
528 res_arr
= kzalloc(count
* sizeof *res_arr
, GFP_KERNEL
);
532 for (i
= 0; i
< count
; ++i
) {
533 res_arr
[i
] = alloc_tr(base
+ i
, type
, slave
, extra
);
535 for (--i
; i
>= 0; --i
)
543 spin_lock_irq(mlx4_tlock(dev
));
544 for (i
= 0; i
< count
; ++i
) {
545 if (find_res(dev
, base
+ i
, type
)) {
549 err
= radix_tree_insert(root
, base
+ i
, res_arr
[i
]);
552 list_add_tail(&res_arr
[i
]->list
,
553 &tracker
->slave_list
[slave
].res_list
[type
]);
555 spin_unlock_irq(mlx4_tlock(dev
));
561 for (--i
; i
>= base
; --i
)
562 radix_tree_delete(&tracker
->res_tree
[type
], i
);
564 spin_unlock_irq(mlx4_tlock(dev
));
566 for (i
= 0; i
< count
; ++i
)
574 static int remove_qp_ok(struct res_qp
*res
)
576 if (res
->com
.state
== RES_QP_BUSY
)
578 else if (res
->com
.state
!= RES_QP_RESERVED
)
584 static int remove_mtt_ok(struct res_mtt
*res
, int order
)
586 if (res
->com
.state
== RES_MTT_BUSY
||
587 atomic_read(&res
->ref_count
)) {
588 printk(KERN_DEBUG
"%s-%d: state %s, ref_count %d\n",
590 mtt_states_str(res
->com
.state
),
591 atomic_read(&res
->ref_count
));
593 } else if (res
->com
.state
!= RES_MTT_ALLOCATED
)
595 else if (res
->order
!= order
)
601 static int remove_mpt_ok(struct res_mpt
*res
)
603 if (res
->com
.state
== RES_MPT_BUSY
)
605 else if (res
->com
.state
!= RES_MPT_RESERVED
)
611 static int remove_eq_ok(struct res_eq
*res
)
613 if (res
->com
.state
== RES_MPT_BUSY
)
615 else if (res
->com
.state
!= RES_MPT_RESERVED
)
621 static int remove_counter_ok(struct res_counter
*res
)
623 if (res
->com
.state
== RES_COUNTER_BUSY
)
625 else if (res
->com
.state
!= RES_COUNTER_ALLOCATED
)
631 static int remove_xrcdn_ok(struct res_xrcdn
*res
)
633 if (res
->com
.state
== RES_XRCD_BUSY
)
635 else if (res
->com
.state
!= RES_XRCD_ALLOCATED
)
641 static int remove_cq_ok(struct res_cq
*res
)
643 if (res
->com
.state
== RES_CQ_BUSY
)
645 else if (res
->com
.state
!= RES_CQ_ALLOCATED
)
651 static int remove_srq_ok(struct res_srq
*res
)
653 if (res
->com
.state
== RES_SRQ_BUSY
)
655 else if (res
->com
.state
!= RES_SRQ_ALLOCATED
)
661 static int remove_ok(struct res_common
*res
, enum mlx4_resource type
, int extra
)
665 return remove_qp_ok((struct res_qp
*)res
);
667 return remove_cq_ok((struct res_cq
*)res
);
669 return remove_srq_ok((struct res_srq
*)res
);
671 return remove_mpt_ok((struct res_mpt
*)res
);
673 return remove_mtt_ok((struct res_mtt
*)res
, extra
);
677 return remove_eq_ok((struct res_eq
*)res
);
679 return remove_counter_ok((struct res_counter
*)res
);
681 return remove_xrcdn_ok((struct res_xrcdn
*)res
);
687 static int rem_res_range(struct mlx4_dev
*dev
, int slave
, int base
, int count
,
688 enum mlx4_resource type
, int extra
)
692 struct mlx4_priv
*priv
= mlx4_priv(dev
);
693 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
694 struct res_common
*r
;
696 spin_lock_irq(mlx4_tlock(dev
));
697 for (i
= base
; i
< base
+ count
; ++i
) {
698 r
= radix_tree_lookup(&tracker
->res_tree
[type
], i
);
703 if (r
->owner
!= slave
) {
707 err
= remove_ok(r
, type
, extra
);
712 for (i
= base
; i
< base
+ count
; ++i
) {
713 r
= radix_tree_lookup(&tracker
->res_tree
[type
], i
);
714 radix_tree_delete(&tracker
->res_tree
[type
], i
);
721 spin_unlock_irq(mlx4_tlock(dev
));
726 static int qp_res_start_move_to(struct mlx4_dev
*dev
, int slave
, int qpn
,
727 enum res_qp_states state
, struct res_qp
**qp
,
730 struct mlx4_priv
*priv
= mlx4_priv(dev
);
731 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
735 spin_lock_irq(mlx4_tlock(dev
));
736 r
= radix_tree_lookup(&tracker
->res_tree
[RES_QP
], qpn
);
739 else if (r
->com
.owner
!= slave
)
744 mlx4_dbg(dev
, "%s: failed RES_QP, 0x%x\n",
745 __func__
, r
->com
.res_id
);
749 case RES_QP_RESERVED
:
750 if (r
->com
.state
== RES_QP_MAPPED
&& !alloc
)
753 mlx4_dbg(dev
, "failed RES_QP, 0x%x\n", r
->com
.res_id
);
758 if ((r
->com
.state
== RES_QP_RESERVED
&& alloc
) ||
759 r
->com
.state
== RES_QP_HW
)
762 mlx4_dbg(dev
, "failed RES_QP, 0x%x\n",
770 if (r
->com
.state
!= RES_QP_MAPPED
)
778 r
->com
.from_state
= r
->com
.state
;
779 r
->com
.to_state
= state
;
780 r
->com
.state
= RES_QP_BUSY
;
786 spin_unlock_irq(mlx4_tlock(dev
));
791 static int mr_res_start_move_to(struct mlx4_dev
*dev
, int slave
, int index
,
792 enum res_mpt_states state
, struct res_mpt
**mpt
)
794 struct mlx4_priv
*priv
= mlx4_priv(dev
);
795 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
799 spin_lock_irq(mlx4_tlock(dev
));
800 r
= radix_tree_lookup(&tracker
->res_tree
[RES_MPT
], index
);
803 else if (r
->com
.owner
!= slave
)
811 case RES_MPT_RESERVED
:
812 if (r
->com
.state
!= RES_MPT_MAPPED
)
817 if (r
->com
.state
!= RES_MPT_RESERVED
&&
818 r
->com
.state
!= RES_MPT_HW
)
823 if (r
->com
.state
!= RES_MPT_MAPPED
)
831 r
->com
.from_state
= r
->com
.state
;
832 r
->com
.to_state
= state
;
833 r
->com
.state
= RES_MPT_BUSY
;
839 spin_unlock_irq(mlx4_tlock(dev
));
844 static int eq_res_start_move_to(struct mlx4_dev
*dev
, int slave
, int index
,
845 enum res_eq_states state
, struct res_eq
**eq
)
847 struct mlx4_priv
*priv
= mlx4_priv(dev
);
848 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
852 spin_lock_irq(mlx4_tlock(dev
));
853 r
= radix_tree_lookup(&tracker
->res_tree
[RES_EQ
], index
);
856 else if (r
->com
.owner
!= slave
)
864 case RES_EQ_RESERVED
:
865 if (r
->com
.state
!= RES_EQ_HW
)
870 if (r
->com
.state
!= RES_EQ_RESERVED
)
879 r
->com
.from_state
= r
->com
.state
;
880 r
->com
.to_state
= state
;
881 r
->com
.state
= RES_EQ_BUSY
;
887 spin_unlock_irq(mlx4_tlock(dev
));
892 static int cq_res_start_move_to(struct mlx4_dev
*dev
, int slave
, int cqn
,
893 enum res_cq_states state
, struct res_cq
**cq
)
895 struct mlx4_priv
*priv
= mlx4_priv(dev
);
896 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
900 spin_lock_irq(mlx4_tlock(dev
));
901 r
= radix_tree_lookup(&tracker
->res_tree
[RES_CQ
], cqn
);
904 else if (r
->com
.owner
!= slave
)
912 case RES_CQ_ALLOCATED
:
913 if (r
->com
.state
!= RES_CQ_HW
)
915 else if (atomic_read(&r
->ref_count
))
922 if (r
->com
.state
!= RES_CQ_ALLOCATED
)
933 r
->com
.from_state
= r
->com
.state
;
934 r
->com
.to_state
= state
;
935 r
->com
.state
= RES_CQ_BUSY
;
941 spin_unlock_irq(mlx4_tlock(dev
));
946 static int srq_res_start_move_to(struct mlx4_dev
*dev
, int slave
, int index
,
947 enum res_cq_states state
, struct res_srq
**srq
)
949 struct mlx4_priv
*priv
= mlx4_priv(dev
);
950 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
954 spin_lock_irq(mlx4_tlock(dev
));
955 r
= radix_tree_lookup(&tracker
->res_tree
[RES_SRQ
], index
);
958 else if (r
->com
.owner
!= slave
)
966 case RES_SRQ_ALLOCATED
:
967 if (r
->com
.state
!= RES_SRQ_HW
)
969 else if (atomic_read(&r
->ref_count
))
974 if (r
->com
.state
!= RES_SRQ_ALLOCATED
)
983 r
->com
.from_state
= r
->com
.state
;
984 r
->com
.to_state
= state
;
985 r
->com
.state
= RES_SRQ_BUSY
;
991 spin_unlock_irq(mlx4_tlock(dev
));
996 static void res_abort_move(struct mlx4_dev
*dev
, int slave
,
997 enum mlx4_resource type
, int id
)
999 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1000 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1001 struct res_common
*r
;
1003 spin_lock_irq(mlx4_tlock(dev
));
1004 r
= radix_tree_lookup(&tracker
->res_tree
[type
], id
);
1005 if (r
&& (r
->owner
== slave
))
1006 r
->state
= r
->from_state
;
1007 spin_unlock_irq(mlx4_tlock(dev
));
1010 static void res_end_move(struct mlx4_dev
*dev
, int slave
,
1011 enum mlx4_resource type
, int id
)
1013 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1014 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1015 struct res_common
*r
;
1017 spin_lock_irq(mlx4_tlock(dev
));
1018 r
= radix_tree_lookup(&tracker
->res_tree
[type
], id
);
1019 if (r
&& (r
->owner
== slave
))
1020 r
->state
= r
->to_state
;
1021 spin_unlock_irq(mlx4_tlock(dev
));
/*
 * True when @qpn falls inside the device's reserved QP range.
 * NOTE(review): the @slave argument is currently unused — the check is
 * purely device-global.
 */
static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
	return mlx4_is_qp_reserved(dev, qpn);
}
1029 static int qp_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1030 u64 in_param
, u64
*out_param
)
1039 case RES_OP_RESERVE
:
1040 count
= get_param_l(&in_param
);
1041 align
= get_param_h(&in_param
);
1042 err
= __mlx4_qp_reserve_range(dev
, count
, align
, &base
);
1046 err
= add_res_range(dev
, slave
, base
, count
, RES_QP
, 0);
1048 __mlx4_qp_release_range(dev
, base
, count
);
1051 set_param_l(out_param
, base
);
1053 case RES_OP_MAP_ICM
:
1054 qpn
= get_param_l(&in_param
) & 0x7fffff;
1055 if (valid_reserved(dev
, slave
, qpn
)) {
1056 err
= add_res_range(dev
, slave
, qpn
, 1, RES_QP
, 0);
1061 err
= qp_res_start_move_to(dev
, slave
, qpn
, RES_QP_MAPPED
,
1066 if (!valid_reserved(dev
, slave
, qpn
)) {
1067 err
= __mlx4_qp_alloc_icm(dev
, qpn
);
1069 res_abort_move(dev
, slave
, RES_QP
, qpn
);
1074 res_end_move(dev
, slave
, RES_QP
, qpn
);
1084 static int mtt_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1085 u64 in_param
, u64
*out_param
)
1091 if (op
!= RES_OP_RESERVE_AND_MAP
)
1094 order
= get_param_l(&in_param
);
1095 base
= __mlx4_alloc_mtt_range(dev
, order
);
1099 err
= add_res_range(dev
, slave
, base
, 1, RES_MTT
, order
);
1101 __mlx4_free_mtt_range(dev
, base
, order
);
1103 set_param_l(out_param
, base
);
1108 static int mpt_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1109 u64 in_param
, u64
*out_param
)
1114 struct res_mpt
*mpt
;
1117 case RES_OP_RESERVE
:
1118 index
= __mlx4_mr_reserve(dev
);
1121 id
= index
& mpt_mask(dev
);
1123 err
= add_res_range(dev
, slave
, id
, 1, RES_MPT
, index
);
1125 __mlx4_mr_release(dev
, index
);
1128 set_param_l(out_param
, index
);
1130 case RES_OP_MAP_ICM
:
1131 index
= get_param_l(&in_param
);
1132 id
= index
& mpt_mask(dev
);
1133 err
= mr_res_start_move_to(dev
, slave
, id
,
1134 RES_MPT_MAPPED
, &mpt
);
1138 err
= __mlx4_mr_alloc_icm(dev
, mpt
->key
);
1140 res_abort_move(dev
, slave
, RES_MPT
, id
);
1144 res_end_move(dev
, slave
, RES_MPT
, id
);
1150 static int cq_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1151 u64 in_param
, u64
*out_param
)
1157 case RES_OP_RESERVE_AND_MAP
:
1158 err
= __mlx4_cq_alloc_icm(dev
, &cqn
);
1162 err
= add_res_range(dev
, slave
, cqn
, 1, RES_CQ
, 0);
1164 __mlx4_cq_free_icm(dev
, cqn
);
1168 set_param_l(out_param
, cqn
);
1178 static int srq_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1179 u64 in_param
, u64
*out_param
)
1185 case RES_OP_RESERVE_AND_MAP
:
1186 err
= __mlx4_srq_alloc_icm(dev
, &srqn
);
1190 err
= add_res_range(dev
, slave
, srqn
, 1, RES_SRQ
, 0);
1192 __mlx4_srq_free_icm(dev
, srqn
);
1196 set_param_l(out_param
, srqn
);
1206 static int mac_add_to_slave(struct mlx4_dev
*dev
, int slave
, u64 mac
, int port
)
1208 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1209 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1210 struct mac_res
*res
;
1212 res
= kzalloc(sizeof *res
, GFP_KERNEL
);
1216 res
->port
= (u8
) port
;
1217 list_add_tail(&res
->list
,
1218 &tracker
->slave_list
[slave
].res_list
[RES_MAC
]);
1222 static void mac_del_from_slave(struct mlx4_dev
*dev
, int slave
, u64 mac
,
1225 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1226 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1227 struct list_head
*mac_list
=
1228 &tracker
->slave_list
[slave
].res_list
[RES_MAC
];
1229 struct mac_res
*res
, *tmp
;
1231 list_for_each_entry_safe(res
, tmp
, mac_list
, list
) {
1232 if (res
->mac
== mac
&& res
->port
== (u8
) port
) {
1233 list_del(&res
->list
);
1240 static void rem_slave_macs(struct mlx4_dev
*dev
, int slave
)
1242 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1243 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1244 struct list_head
*mac_list
=
1245 &tracker
->slave_list
[slave
].res_list
[RES_MAC
];
1246 struct mac_res
*res
, *tmp
;
1248 list_for_each_entry_safe(res
, tmp
, mac_list
, list
) {
1249 list_del(&res
->list
);
1250 __mlx4_unregister_mac(dev
, res
->port
, res
->mac
);
1255 static int mac_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1256 u64 in_param
, u64
*out_param
)
1262 if (op
!= RES_OP_RESERVE_AND_MAP
)
1265 port
= get_param_l(out_param
);
1268 err
= __mlx4_register_mac(dev
, port
, mac
);
1270 set_param_l(out_param
, err
);
1275 err
= mac_add_to_slave(dev
, slave
, mac
, port
);
1277 __mlx4_unregister_mac(dev
, port
, mac
);
1282 static int vlan_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1283 u64 in_param
, u64
*out_param
)
1288 static int counter_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1289 u64 in_param
, u64
*out_param
)
1294 if (op
!= RES_OP_RESERVE
)
1297 err
= __mlx4_counter_alloc(dev
, &index
);
1301 err
= add_res_range(dev
, slave
, index
, 1, RES_COUNTER
, 0);
1303 __mlx4_counter_free(dev
, index
);
1305 set_param_l(out_param
, index
);
1310 static int xrcdn_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1311 u64 in_param
, u64
*out_param
)
1316 if (op
!= RES_OP_RESERVE
)
1319 err
= __mlx4_xrcd_alloc(dev
, &xrcdn
);
1323 err
= add_res_range(dev
, slave
, xrcdn
, 1, RES_XRCD
, 0);
1325 __mlx4_xrcd_free(dev
, xrcdn
);
1327 set_param_l(out_param
, xrcdn
);
1332 int mlx4_ALLOC_RES_wrapper(struct mlx4_dev
*dev
, int slave
,
1333 struct mlx4_vhcr
*vhcr
,
1334 struct mlx4_cmd_mailbox
*inbox
,
1335 struct mlx4_cmd_mailbox
*outbox
,
1336 struct mlx4_cmd_info
*cmd
)
1339 int alop
= vhcr
->op_modifier
;
1341 switch (vhcr
->in_modifier
) {
1343 err
= qp_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
1344 vhcr
->in_param
, &vhcr
->out_param
);
1348 err
= mtt_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
1349 vhcr
->in_param
, &vhcr
->out_param
);
1353 err
= mpt_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
1354 vhcr
->in_param
, &vhcr
->out_param
);
1358 err
= cq_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
1359 vhcr
->in_param
, &vhcr
->out_param
);
1363 err
= srq_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
1364 vhcr
->in_param
, &vhcr
->out_param
);
1368 err
= mac_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
1369 vhcr
->in_param
, &vhcr
->out_param
);
1373 err
= vlan_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
1374 vhcr
->in_param
, &vhcr
->out_param
);
1378 err
= counter_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
1379 vhcr
->in_param
, &vhcr
->out_param
);
1383 err
= xrcdn_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
1384 vhcr
->in_param
, &vhcr
->out_param
);
1395 static int qp_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1404 case RES_OP_RESERVE
:
1405 base
= get_param_l(&in_param
) & 0x7fffff;
1406 count
= get_param_h(&in_param
);
1407 err
= rem_res_range(dev
, slave
, base
, count
, RES_QP
, 0);
1410 __mlx4_qp_release_range(dev
, base
, count
);
1412 case RES_OP_MAP_ICM
:
1413 qpn
= get_param_l(&in_param
) & 0x7fffff;
1414 err
= qp_res_start_move_to(dev
, slave
, qpn
, RES_QP_RESERVED
,
1419 if (!valid_reserved(dev
, slave
, qpn
))
1420 __mlx4_qp_free_icm(dev
, qpn
);
1422 res_end_move(dev
, slave
, RES_QP
, qpn
);
1424 if (valid_reserved(dev
, slave
, qpn
))
1425 err
= rem_res_range(dev
, slave
, qpn
, 1, RES_QP
, 0);
1434 static int mtt_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1435 u64 in_param
, u64
*out_param
)
1441 if (op
!= RES_OP_RESERVE_AND_MAP
)
1444 base
= get_param_l(&in_param
);
1445 order
= get_param_h(&in_param
);
1446 err
= rem_res_range(dev
, slave
, base
, 1, RES_MTT
, order
);
1448 __mlx4_free_mtt_range(dev
, base
, order
);
1452 static int mpt_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1458 struct res_mpt
*mpt
;
1461 case RES_OP_RESERVE
:
1462 index
= get_param_l(&in_param
);
1463 id
= index
& mpt_mask(dev
);
1464 err
= get_res(dev
, slave
, id
, RES_MPT
, &mpt
);
1468 put_res(dev
, slave
, id
, RES_MPT
);
1470 err
= rem_res_range(dev
, slave
, id
, 1, RES_MPT
, 0);
1473 __mlx4_mr_release(dev
, index
);
1475 case RES_OP_MAP_ICM
:
1476 index
= get_param_l(&in_param
);
1477 id
= index
& mpt_mask(dev
);
1478 err
= mr_res_start_move_to(dev
, slave
, id
,
1479 RES_MPT_RESERVED
, &mpt
);
1483 __mlx4_mr_free_icm(dev
, mpt
->key
);
1484 res_end_move(dev
, slave
, RES_MPT
, id
);
1494 static int cq_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1495 u64 in_param
, u64
*out_param
)
1501 case RES_OP_RESERVE_AND_MAP
:
1502 cqn
= get_param_l(&in_param
);
1503 err
= rem_res_range(dev
, slave
, cqn
, 1, RES_CQ
, 0);
1507 __mlx4_cq_free_icm(dev
, cqn
);
1518 static int srq_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1519 u64 in_param
, u64
*out_param
)
1525 case RES_OP_RESERVE_AND_MAP
:
1526 srqn
= get_param_l(&in_param
);
1527 err
= rem_res_range(dev
, slave
, srqn
, 1, RES_SRQ
, 0);
1531 __mlx4_srq_free_icm(dev
, srqn
);
1542 static int mac_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1543 u64 in_param
, u64
*out_param
)
1549 case RES_OP_RESERVE_AND_MAP
:
1550 port
= get_param_l(out_param
);
1551 mac_del_from_slave(dev
, slave
, in_param
, port
);
1552 __mlx4_unregister_mac(dev
, port
, in_param
);
1563 static int vlan_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1564 u64 in_param
, u64
*out_param
)
1569 static int counter_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1570 u64 in_param
, u64
*out_param
)
1575 if (op
!= RES_OP_RESERVE
)
1578 index
= get_param_l(&in_param
);
1579 err
= rem_res_range(dev
, slave
, index
, 1, RES_COUNTER
, 0);
1583 __mlx4_counter_free(dev
, index
);
1588 static int xrcdn_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1589 u64 in_param
, u64
*out_param
)
1594 if (op
!= RES_OP_RESERVE
)
1597 xrcdn
= get_param_l(&in_param
);
1598 err
= rem_res_range(dev
, slave
, xrcdn
, 1, RES_XRCD
, 0);
1602 __mlx4_xrcd_free(dev
, xrcdn
);
1607 int mlx4_FREE_RES_wrapper(struct mlx4_dev
*dev
, int slave
,
1608 struct mlx4_vhcr
*vhcr
,
1609 struct mlx4_cmd_mailbox
*inbox
,
1610 struct mlx4_cmd_mailbox
*outbox
,
1611 struct mlx4_cmd_info
*cmd
)
1614 int alop
= vhcr
->op_modifier
;
1616 switch (vhcr
->in_modifier
) {
1618 err
= qp_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
1623 err
= mtt_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
1624 vhcr
->in_param
, &vhcr
->out_param
);
1628 err
= mpt_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
1633 err
= cq_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
1634 vhcr
->in_param
, &vhcr
->out_param
);
1638 err
= srq_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
1639 vhcr
->in_param
, &vhcr
->out_param
);
1643 err
= mac_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
1644 vhcr
->in_param
, &vhcr
->out_param
);
1648 err
= vlan_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
1649 vhcr
->in_param
, &vhcr
->out_param
);
1653 err
= counter_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
1654 vhcr
->in_param
, &vhcr
->out_param
);
1658 err
= xrcdn_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
1659 vhcr
->in_param
, &vhcr
->out_param
);
1667 /* ugly but other choices are uglier */
1668 static int mr_phys_mpt(struct mlx4_mpt_entry
*mpt
)
1670 return (be32_to_cpu(mpt
->flags
) >> 9) & 1;
1673 static int mr_get_mtt_addr(struct mlx4_mpt_entry
*mpt
)
1675 return (int)be64_to_cpu(mpt
->mtt_addr
) & 0xfffffff8;
1678 static int mr_get_mtt_size(struct mlx4_mpt_entry
*mpt
)
1680 return be32_to_cpu(mpt
->mtt_sz
);
1683 static int qp_get_mtt_addr(struct mlx4_qp_context
*qpc
)
1685 return be32_to_cpu(qpc
->mtt_base_addr_l
) & 0xfffffff8;
1688 static int srq_get_mtt_addr(struct mlx4_srq_context
*srqc
)
1690 return be32_to_cpu(srqc
->mtt_base_addr_l
) & 0xfffffff8;
1693 static int qp_get_mtt_size(struct mlx4_qp_context
*qpc
)
1695 int page_shift
= (qpc
->log_page_size
& 0x3f) + 12;
1696 int log_sq_size
= (qpc
->sq_size_stride
>> 3) & 0xf;
1697 int log_sq_sride
= qpc
->sq_size_stride
& 7;
1698 int log_rq_size
= (qpc
->rq_size_stride
>> 3) & 0xf;
1699 int log_rq_stride
= qpc
->rq_size_stride
& 7;
1700 int srq
= (be32_to_cpu(qpc
->srqn
) >> 24) & 1;
1701 int rss
= (be32_to_cpu(qpc
->flags
) >> 13) & 1;
1702 int xrc
= (be32_to_cpu(qpc
->local_qpn
) >> 23) & 1;
1707 int page_offset
= (be32_to_cpu(qpc
->params2
) >> 6) & 0x3f;
1709 sq_size
= 1 << (log_sq_size
+ log_sq_sride
+ 4);
1710 rq_size
= (srq
|rss
|xrc
) ? 0 : (1 << (log_rq_size
+ log_rq_stride
+ 4));
1711 total_mem
= sq_size
+ rq_size
;
1713 roundup_pow_of_two((total_mem
+ (page_offset
<< 6)) >>
1719 static int check_mtt_range(struct mlx4_dev
*dev
, int slave
, int start
,
1720 int size
, struct res_mtt
*mtt
)
1722 int res_start
= mtt
->com
.res_id
;
1723 int res_size
= (1 << mtt
->order
);
1725 if (start
< res_start
|| start
+ size
> res_start
+ res_size
)
1730 int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev
*dev
, int slave
,
1731 struct mlx4_vhcr
*vhcr
,
1732 struct mlx4_cmd_mailbox
*inbox
,
1733 struct mlx4_cmd_mailbox
*outbox
,
1734 struct mlx4_cmd_info
*cmd
)
1737 int index
= vhcr
->in_modifier
;
1738 struct res_mtt
*mtt
;
1739 struct res_mpt
*mpt
;
1740 int mtt_base
= mr_get_mtt_addr(inbox
->buf
) / dev
->caps
.mtt_entry_sz
;
1744 id
= index
& mpt_mask(dev
);
1745 err
= mr_res_start_move_to(dev
, slave
, id
, RES_MPT_HW
, &mpt
);
1749 phys
= mr_phys_mpt(inbox
->buf
);
1751 err
= get_res(dev
, slave
, mtt_base
, RES_MTT
, &mtt
);
1755 err
= check_mtt_range(dev
, slave
, mtt_base
,
1756 mr_get_mtt_size(inbox
->buf
), mtt
);
1763 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
1768 atomic_inc(&mtt
->ref_count
);
1769 put_res(dev
, slave
, mtt
->com
.res_id
, RES_MTT
);
1772 res_end_move(dev
, slave
, RES_MPT
, id
);
1777 put_res(dev
, slave
, mtt
->com
.res_id
, RES_MTT
);
1779 res_abort_move(dev
, slave
, RES_MPT
, id
);
1784 int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev
*dev
, int slave
,
1785 struct mlx4_vhcr
*vhcr
,
1786 struct mlx4_cmd_mailbox
*inbox
,
1787 struct mlx4_cmd_mailbox
*outbox
,
1788 struct mlx4_cmd_info
*cmd
)
1791 int index
= vhcr
->in_modifier
;
1792 struct res_mpt
*mpt
;
1795 id
= index
& mpt_mask(dev
);
1796 err
= mr_res_start_move_to(dev
, slave
, id
, RES_MPT_MAPPED
, &mpt
);
1800 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
1805 atomic_dec(&mpt
->mtt
->ref_count
);
1807 res_end_move(dev
, slave
, RES_MPT
, id
);
1811 res_abort_move(dev
, slave
, RES_MPT
, id
);
1816 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev
*dev
, int slave
,
1817 struct mlx4_vhcr
*vhcr
,
1818 struct mlx4_cmd_mailbox
*inbox
,
1819 struct mlx4_cmd_mailbox
*outbox
,
1820 struct mlx4_cmd_info
*cmd
)
1823 int index
= vhcr
->in_modifier
;
1824 struct res_mpt
*mpt
;
1827 id
= index
& mpt_mask(dev
);
1828 err
= get_res(dev
, slave
, id
, RES_MPT
, &mpt
);
1832 if (mpt
->com
.from_state
!= RES_MPT_HW
) {
1837 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
1840 put_res(dev
, slave
, id
, RES_MPT
);
1844 static int qp_get_rcqn(struct mlx4_qp_context
*qpc
)
1846 return be32_to_cpu(qpc
->cqn_recv
) & 0xffffff;
1849 static int qp_get_scqn(struct mlx4_qp_context
*qpc
)
1851 return be32_to_cpu(qpc
->cqn_send
) & 0xffffff;
1854 static u32
qp_get_srqn(struct mlx4_qp_context
*qpc
)
1856 return be32_to_cpu(qpc
->srqn
) & 0x1ffffff;
1859 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev
*dev
, int slave
,
1860 struct mlx4_vhcr
*vhcr
,
1861 struct mlx4_cmd_mailbox
*inbox
,
1862 struct mlx4_cmd_mailbox
*outbox
,
1863 struct mlx4_cmd_info
*cmd
)
1866 int qpn
= vhcr
->in_modifier
& 0x7fffff;
1867 struct res_mtt
*mtt
;
1869 struct mlx4_qp_context
*qpc
= inbox
->buf
+ 8;
1870 int mtt_base
= qp_get_mtt_addr(qpc
) / dev
->caps
.mtt_entry_sz
;
1871 int mtt_size
= qp_get_mtt_size(qpc
);
1874 int rcqn
= qp_get_rcqn(qpc
);
1875 int scqn
= qp_get_scqn(qpc
);
1876 u32 srqn
= qp_get_srqn(qpc
) & 0xffffff;
1877 int use_srq
= (qp_get_srqn(qpc
) >> 24) & 1;
1878 struct res_srq
*srq
;
1879 int local_qpn
= be32_to_cpu(qpc
->local_qpn
) & 0xffffff;
1881 err
= qp_res_start_move_to(dev
, slave
, qpn
, RES_QP_HW
, &qp
, 0);
1884 qp
->local_qpn
= local_qpn
;
1886 err
= get_res(dev
, slave
, mtt_base
, RES_MTT
, &mtt
);
1890 err
= check_mtt_range(dev
, slave
, mtt_base
, mtt_size
, mtt
);
1894 err
= get_res(dev
, slave
, rcqn
, RES_CQ
, &rcq
);
1899 err
= get_res(dev
, slave
, scqn
, RES_CQ
, &scq
);
1906 err
= get_res(dev
, slave
, srqn
, RES_SRQ
, &srq
);
1911 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
1914 atomic_inc(&mtt
->ref_count
);
1916 atomic_inc(&rcq
->ref_count
);
1918 atomic_inc(&scq
->ref_count
);
1922 put_res(dev
, slave
, scqn
, RES_CQ
);
1925 atomic_inc(&srq
->ref_count
);
1926 put_res(dev
, slave
, srqn
, RES_SRQ
);
1929 put_res(dev
, slave
, rcqn
, RES_CQ
);
1930 put_res(dev
, slave
, mtt_base
, RES_MTT
);
1931 res_end_move(dev
, slave
, RES_QP
, qpn
);
1937 put_res(dev
, slave
, srqn
, RES_SRQ
);
1940 put_res(dev
, slave
, scqn
, RES_CQ
);
1942 put_res(dev
, slave
, rcqn
, RES_CQ
);
1944 put_res(dev
, slave
, mtt_base
, RES_MTT
);
1946 res_abort_move(dev
, slave
, RES_QP
, qpn
);
1951 static int eq_get_mtt_addr(struct mlx4_eq_context
*eqc
)
1953 return be32_to_cpu(eqc
->mtt_base_addr_l
) & 0xfffffff8;
1956 static int eq_get_mtt_size(struct mlx4_eq_context
*eqc
)
1958 int log_eq_size
= eqc
->log_eq_size
& 0x1f;
1959 int page_shift
= (eqc
->log_page_size
& 0x3f) + 12;
1961 if (log_eq_size
+ 5 < page_shift
)
1964 return 1 << (log_eq_size
+ 5 - page_shift
);
1967 static int cq_get_mtt_addr(struct mlx4_cq_context
*cqc
)
1969 return be32_to_cpu(cqc
->mtt_base_addr_l
) & 0xfffffff8;
1972 static int cq_get_mtt_size(struct mlx4_cq_context
*cqc
)
1974 int log_cq_size
= (be32_to_cpu(cqc
->logsize_usrpage
) >> 24) & 0x1f;
1975 int page_shift
= (cqc
->log_page_size
& 0x3f) + 12;
1977 if (log_cq_size
+ 5 < page_shift
)
1980 return 1 << (log_cq_size
+ 5 - page_shift
);
1983 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev
*dev
, int slave
,
1984 struct mlx4_vhcr
*vhcr
,
1985 struct mlx4_cmd_mailbox
*inbox
,
1986 struct mlx4_cmd_mailbox
*outbox
,
1987 struct mlx4_cmd_info
*cmd
)
1990 int eqn
= vhcr
->in_modifier
;
1991 int res_id
= (slave
<< 8) | eqn
;
1992 struct mlx4_eq_context
*eqc
= inbox
->buf
;
1993 int mtt_base
= eq_get_mtt_addr(eqc
) / dev
->caps
.mtt_entry_sz
;
1994 int mtt_size
= eq_get_mtt_size(eqc
);
1996 struct res_mtt
*mtt
;
1998 err
= add_res_range(dev
, slave
, res_id
, 1, RES_EQ
, 0);
2001 err
= eq_res_start_move_to(dev
, slave
, res_id
, RES_EQ_HW
, &eq
);
2005 err
= get_res(dev
, slave
, mtt_base
, RES_MTT
, &mtt
);
2009 err
= check_mtt_range(dev
, slave
, mtt_base
, mtt_size
, mtt
);
2013 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
2017 atomic_inc(&mtt
->ref_count
);
2019 put_res(dev
, slave
, mtt
->com
.res_id
, RES_MTT
);
2020 res_end_move(dev
, slave
, RES_EQ
, res_id
);
2024 put_res(dev
, slave
, mtt
->com
.res_id
, RES_MTT
);
2026 res_abort_move(dev
, slave
, RES_EQ
, res_id
);
2028 rem_res_range(dev
, slave
, res_id
, 1, RES_EQ
, 0);
2032 static int get_containing_mtt(struct mlx4_dev
*dev
, int slave
, int start
,
2033 int len
, struct res_mtt
**res
)
2035 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2036 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
2037 struct res_mtt
*mtt
;
2040 spin_lock_irq(mlx4_tlock(dev
));
2041 list_for_each_entry(mtt
, &tracker
->slave_list
[slave
].res_list
[RES_MTT
],
2043 if (!check_mtt_range(dev
, slave
, start
, len
, mtt
)) {
2045 mtt
->com
.from_state
= mtt
->com
.state
;
2046 mtt
->com
.state
= RES_MTT_BUSY
;
2051 spin_unlock_irq(mlx4_tlock(dev
));
2056 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev
*dev
, int slave
,
2057 struct mlx4_vhcr
*vhcr
,
2058 struct mlx4_cmd_mailbox
*inbox
,
2059 struct mlx4_cmd_mailbox
*outbox
,
2060 struct mlx4_cmd_info
*cmd
)
2062 struct mlx4_mtt mtt
;
2063 __be64
*page_list
= inbox
->buf
;
2064 u64
*pg_list
= (u64
*)page_list
;
2066 struct res_mtt
*rmtt
= NULL
;
2067 int start
= be64_to_cpu(page_list
[0]);
2068 int npages
= vhcr
->in_modifier
;
2071 err
= get_containing_mtt(dev
, slave
, start
, npages
, &rmtt
);
2075 /* Call the SW implementation of write_mtt:
2076 * - Prepare a dummy mtt struct
2077 * - Translate inbox contents to simple addresses in host endianess */
2078 mtt
.offset
= 0; /* TBD this is broken but I don't handle it since
2079 we don't really use it */
2082 for (i
= 0; i
< npages
; ++i
)
2083 pg_list
[i
+ 2] = (be64_to_cpu(page_list
[i
+ 2]) & ~1ULL);
2085 err
= __mlx4_write_mtt(dev
, &mtt
, be64_to_cpu(page_list
[0]), npages
,
2086 ((u64
*)page_list
+ 2));
2089 put_res(dev
, slave
, rmtt
->com
.res_id
, RES_MTT
);
2094 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev
*dev
, int slave
,
2095 struct mlx4_vhcr
*vhcr
,
2096 struct mlx4_cmd_mailbox
*inbox
,
2097 struct mlx4_cmd_mailbox
*outbox
,
2098 struct mlx4_cmd_info
*cmd
)
2100 int eqn
= vhcr
->in_modifier
;
2101 int res_id
= eqn
| (slave
<< 8);
2105 err
= eq_res_start_move_to(dev
, slave
, res_id
, RES_EQ_RESERVED
, &eq
);
2109 err
= get_res(dev
, slave
, eq
->mtt
->com
.res_id
, RES_MTT
, NULL
);
2113 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
2117 atomic_dec(&eq
->mtt
->ref_count
);
2118 put_res(dev
, slave
, eq
->mtt
->com
.res_id
, RES_MTT
);
2119 res_end_move(dev
, slave
, RES_EQ
, res_id
);
2120 rem_res_range(dev
, slave
, res_id
, 1, RES_EQ
, 0);
2125 put_res(dev
, slave
, eq
->mtt
->com
.res_id
, RES_MTT
);
2127 res_abort_move(dev
, slave
, RES_EQ
, res_id
);
2132 int mlx4_GEN_EQE(struct mlx4_dev
*dev
, int slave
, struct mlx4_eqe
*eqe
)
2134 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2135 struct mlx4_slave_event_eq_info
*event_eq
;
2136 struct mlx4_cmd_mailbox
*mailbox
;
2137 u32 in_modifier
= 0;
2142 if (!priv
->mfunc
.master
.slave_state
)
2145 event_eq
= &priv
->mfunc
.master
.slave_state
[slave
].event_eq
[eqe
->type
];
2147 /* Create the event only if the slave is registered */
2148 if (event_eq
->eqn
< 0)
2151 mutex_lock(&priv
->mfunc
.master
.gen_eqe_mutex
[slave
]);
2152 res_id
= (slave
<< 8) | event_eq
->eqn
;
2153 err
= get_res(dev
, slave
, res_id
, RES_EQ
, &req
);
2157 if (req
->com
.from_state
!= RES_EQ_HW
) {
2162 mailbox
= mlx4_alloc_cmd_mailbox(dev
);
2163 if (IS_ERR(mailbox
)) {
2164 err
= PTR_ERR(mailbox
);
2168 if (eqe
->type
== MLX4_EVENT_TYPE_CMD
) {
2170 eqe
->event
.cmd
.token
= cpu_to_be16(event_eq
->token
);
2173 memcpy(mailbox
->buf
, (u8
*) eqe
, 28);
2175 in_modifier
= (slave
& 0xff) | ((event_eq
->eqn
& 0xff) << 16);
2177 err
= mlx4_cmd(dev
, mailbox
->dma
, in_modifier
, 0,
2178 MLX4_CMD_GEN_EQE
, MLX4_CMD_TIME_CLASS_B
,
2181 put_res(dev
, slave
, res_id
, RES_EQ
);
2182 mutex_unlock(&priv
->mfunc
.master
.gen_eqe_mutex
[slave
]);
2183 mlx4_free_cmd_mailbox(dev
, mailbox
);
2187 put_res(dev
, slave
, res_id
, RES_EQ
);
2190 mutex_unlock(&priv
->mfunc
.master
.gen_eqe_mutex
[slave
]);
2194 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev
*dev
, int slave
,
2195 struct mlx4_vhcr
*vhcr
,
2196 struct mlx4_cmd_mailbox
*inbox
,
2197 struct mlx4_cmd_mailbox
*outbox
,
2198 struct mlx4_cmd_info
*cmd
)
2200 int eqn
= vhcr
->in_modifier
;
2201 int res_id
= eqn
| (slave
<< 8);
2205 err
= get_res(dev
, slave
, res_id
, RES_EQ
, &eq
);
2209 if (eq
->com
.from_state
!= RES_EQ_HW
) {
2214 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
2217 put_res(dev
, slave
, res_id
, RES_EQ
);
2221 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev
*dev
, int slave
,
2222 struct mlx4_vhcr
*vhcr
,
2223 struct mlx4_cmd_mailbox
*inbox
,
2224 struct mlx4_cmd_mailbox
*outbox
,
2225 struct mlx4_cmd_info
*cmd
)
2228 int cqn
= vhcr
->in_modifier
;
2229 struct mlx4_cq_context
*cqc
= inbox
->buf
;
2230 int mtt_base
= cq_get_mtt_addr(cqc
) / dev
->caps
.mtt_entry_sz
;
2232 struct res_mtt
*mtt
;
2234 err
= cq_res_start_move_to(dev
, slave
, cqn
, RES_CQ_HW
, &cq
);
2237 err
= get_res(dev
, slave
, mtt_base
, RES_MTT
, &mtt
);
2240 err
= check_mtt_range(dev
, slave
, mtt_base
, cq_get_mtt_size(cqc
), mtt
);
2243 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
2246 atomic_inc(&mtt
->ref_count
);
2248 put_res(dev
, slave
, mtt
->com
.res_id
, RES_MTT
);
2249 res_end_move(dev
, slave
, RES_CQ
, cqn
);
2253 put_res(dev
, slave
, mtt
->com
.res_id
, RES_MTT
);
2255 res_abort_move(dev
, slave
, RES_CQ
, cqn
);
2259 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev
*dev
, int slave
,
2260 struct mlx4_vhcr
*vhcr
,
2261 struct mlx4_cmd_mailbox
*inbox
,
2262 struct mlx4_cmd_mailbox
*outbox
,
2263 struct mlx4_cmd_info
*cmd
)
2266 int cqn
= vhcr
->in_modifier
;
2269 err
= cq_res_start_move_to(dev
, slave
, cqn
, RES_CQ_ALLOCATED
, &cq
);
2272 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
2275 atomic_dec(&cq
->mtt
->ref_count
);
2276 res_end_move(dev
, slave
, RES_CQ
, cqn
);
2280 res_abort_move(dev
, slave
, RES_CQ
, cqn
);
2284 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev
*dev
, int slave
,
2285 struct mlx4_vhcr
*vhcr
,
2286 struct mlx4_cmd_mailbox
*inbox
,
2287 struct mlx4_cmd_mailbox
*outbox
,
2288 struct mlx4_cmd_info
*cmd
)
2290 int cqn
= vhcr
->in_modifier
;
2294 err
= get_res(dev
, slave
, cqn
, RES_CQ
, &cq
);
2298 if (cq
->com
.from_state
!= RES_CQ_HW
)
2301 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
2303 put_res(dev
, slave
, cqn
, RES_CQ
);
2308 static int handle_resize(struct mlx4_dev
*dev
, int slave
,
2309 struct mlx4_vhcr
*vhcr
,
2310 struct mlx4_cmd_mailbox
*inbox
,
2311 struct mlx4_cmd_mailbox
*outbox
,
2312 struct mlx4_cmd_info
*cmd
,
2316 struct res_mtt
*orig_mtt
;
2317 struct res_mtt
*mtt
;
2318 struct mlx4_cq_context
*cqc
= inbox
->buf
;
2319 int mtt_base
= cq_get_mtt_addr(cqc
) / dev
->caps
.mtt_entry_sz
;
2321 err
= get_res(dev
, slave
, cq
->mtt
->com
.res_id
, RES_MTT
, &orig_mtt
);
2325 if (orig_mtt
!= cq
->mtt
) {
2330 err
= get_res(dev
, slave
, mtt_base
, RES_MTT
, &mtt
);
2334 err
= check_mtt_range(dev
, slave
, mtt_base
, cq_get_mtt_size(cqc
), mtt
);
2337 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
2340 atomic_dec(&orig_mtt
->ref_count
);
2341 put_res(dev
, slave
, orig_mtt
->com
.res_id
, RES_MTT
);
2342 atomic_inc(&mtt
->ref_count
);
2344 put_res(dev
, slave
, mtt
->com
.res_id
, RES_MTT
);
2348 put_res(dev
, slave
, mtt
->com
.res_id
, RES_MTT
);
2350 put_res(dev
, slave
, orig_mtt
->com
.res_id
, RES_MTT
);
2356 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev
*dev
, int slave
,
2357 struct mlx4_vhcr
*vhcr
,
2358 struct mlx4_cmd_mailbox
*inbox
,
2359 struct mlx4_cmd_mailbox
*outbox
,
2360 struct mlx4_cmd_info
*cmd
)
2362 int cqn
= vhcr
->in_modifier
;
2366 err
= get_res(dev
, slave
, cqn
, RES_CQ
, &cq
);
2370 if (cq
->com
.from_state
!= RES_CQ_HW
)
2373 if (vhcr
->op_modifier
== 0) {
2374 err
= handle_resize(dev
, slave
, vhcr
, inbox
, outbox
, cmd
, cq
);
2378 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
2380 put_res(dev
, slave
, cqn
, RES_CQ
);
2385 static int srq_get_mtt_size(struct mlx4_srq_context
*srqc
)
2387 int log_srq_size
= (be32_to_cpu(srqc
->state_logsize_srqn
) >> 24) & 0xf;
2388 int log_rq_stride
= srqc
->logstride
& 7;
2389 int page_shift
= (srqc
->log_page_size
& 0x3f) + 12;
2391 if (log_srq_size
+ log_rq_stride
+ 4 < page_shift
)
2394 return 1 << (log_srq_size
+ log_rq_stride
+ 4 - page_shift
);
2397 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev
*dev
, int slave
,
2398 struct mlx4_vhcr
*vhcr
,
2399 struct mlx4_cmd_mailbox
*inbox
,
2400 struct mlx4_cmd_mailbox
*outbox
,
2401 struct mlx4_cmd_info
*cmd
)
2404 int srqn
= vhcr
->in_modifier
;
2405 struct res_mtt
*mtt
;
2406 struct res_srq
*srq
;
2407 struct mlx4_srq_context
*srqc
= inbox
->buf
;
2408 int mtt_base
= srq_get_mtt_addr(srqc
) / dev
->caps
.mtt_entry_sz
;
2410 if (srqn
!= (be32_to_cpu(srqc
->state_logsize_srqn
) & 0xffffff))
2413 err
= srq_res_start_move_to(dev
, slave
, srqn
, RES_SRQ_HW
, &srq
);
2416 err
= get_res(dev
, slave
, mtt_base
, RES_MTT
, &mtt
);
2419 err
= check_mtt_range(dev
, slave
, mtt_base
, srq_get_mtt_size(srqc
),
2424 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
2428 atomic_inc(&mtt
->ref_count
);
2430 put_res(dev
, slave
, mtt
->com
.res_id
, RES_MTT
);
2431 res_end_move(dev
, slave
, RES_SRQ
, srqn
);
2435 put_res(dev
, slave
, mtt
->com
.res_id
, RES_MTT
);
2437 res_abort_move(dev
, slave
, RES_SRQ
, srqn
);
2442 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev
*dev
, int slave
,
2443 struct mlx4_vhcr
*vhcr
,
2444 struct mlx4_cmd_mailbox
*inbox
,
2445 struct mlx4_cmd_mailbox
*outbox
,
2446 struct mlx4_cmd_info
*cmd
)
2449 int srqn
= vhcr
->in_modifier
;
2450 struct res_srq
*srq
;
2452 err
= srq_res_start_move_to(dev
, slave
, srqn
, RES_SRQ_ALLOCATED
, &srq
);
2455 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
2458 atomic_dec(&srq
->mtt
->ref_count
);
2460 atomic_dec(&srq
->cq
->ref_count
);
2461 res_end_move(dev
, slave
, RES_SRQ
, srqn
);
2466 res_abort_move(dev
, slave
, RES_SRQ
, srqn
);
2471 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev
*dev
, int slave
,
2472 struct mlx4_vhcr
*vhcr
,
2473 struct mlx4_cmd_mailbox
*inbox
,
2474 struct mlx4_cmd_mailbox
*outbox
,
2475 struct mlx4_cmd_info
*cmd
)
2478 int srqn
= vhcr
->in_modifier
;
2479 struct res_srq
*srq
;
2481 err
= get_res(dev
, slave
, srqn
, RES_SRQ
, &srq
);
2484 if (srq
->com
.from_state
!= RES_SRQ_HW
) {
2488 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
2490 put_res(dev
, slave
, srqn
, RES_SRQ
);
2494 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev
*dev
, int slave
,
2495 struct mlx4_vhcr
*vhcr
,
2496 struct mlx4_cmd_mailbox
*inbox
,
2497 struct mlx4_cmd_mailbox
*outbox
,
2498 struct mlx4_cmd_info
*cmd
)
2501 int srqn
= vhcr
->in_modifier
;
2502 struct res_srq
*srq
;
2504 err
= get_res(dev
, slave
, srqn
, RES_SRQ
, &srq
);
2508 if (srq
->com
.from_state
!= RES_SRQ_HW
) {
2513 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
2515 put_res(dev
, slave
, srqn
, RES_SRQ
);
2519 int mlx4_GEN_QP_wrapper(struct mlx4_dev
*dev
, int slave
,
2520 struct mlx4_vhcr
*vhcr
,
2521 struct mlx4_cmd_mailbox
*inbox
,
2522 struct mlx4_cmd_mailbox
*outbox
,
2523 struct mlx4_cmd_info
*cmd
)
2526 int qpn
= vhcr
->in_modifier
& 0x7fffff;
2529 err
= get_res(dev
, slave
, qpn
, RES_QP
, &qp
);
2532 if (qp
->com
.from_state
!= RES_QP_HW
) {
2537 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
2539 put_res(dev
, slave
, qpn
, RES_QP
);
2543 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev
*dev
, int slave
,
2544 struct mlx4_vhcr
*vhcr
,
2545 struct mlx4_cmd_mailbox
*inbox
,
2546 struct mlx4_cmd_mailbox
*outbox
,
2547 struct mlx4_cmd_info
*cmd
)
2549 struct mlx4_qp_context
*qpc
= inbox
->buf
+ 8;
2551 update_ud_gid(dev
, qpc
, (u8
)slave
);
2553 return mlx4_GEN_QP_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
2556 int mlx4_2RST_QP_wrapper(struct mlx4_dev
*dev
, int slave
,
2557 struct mlx4_vhcr
*vhcr
,
2558 struct mlx4_cmd_mailbox
*inbox
,
2559 struct mlx4_cmd_mailbox
*outbox
,
2560 struct mlx4_cmd_info
*cmd
)
2563 int qpn
= vhcr
->in_modifier
& 0x7fffff;
2566 err
= qp_res_start_move_to(dev
, slave
, qpn
, RES_QP_MAPPED
, &qp
, 0);
2569 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
2573 atomic_dec(&qp
->mtt
->ref_count
);
2574 atomic_dec(&qp
->rcq
->ref_count
);
2575 atomic_dec(&qp
->scq
->ref_count
);
2577 atomic_dec(&qp
->srq
->ref_count
);
2578 res_end_move(dev
, slave
, RES_QP
, qpn
);
2582 res_abort_move(dev
, slave
, RES_QP
, qpn
);
2587 static struct res_gid
*find_gid(struct mlx4_dev
*dev
, int slave
,
2588 struct res_qp
*rqp
, u8
*gid
)
2590 struct res_gid
*res
;
2592 list_for_each_entry(res
, &rqp
->mcg_list
, list
) {
2593 if (!memcmp(res
->gid
, gid
, 16))
2599 static int add_mcg_res(struct mlx4_dev
*dev
, int slave
, struct res_qp
*rqp
,
2600 u8
*gid
, enum mlx4_protocol prot
,
2601 enum mlx4_steer_type steer
)
2603 struct res_gid
*res
;
2606 res
= kzalloc(sizeof *res
, GFP_KERNEL
);
2610 spin_lock_irq(&rqp
->mcg_spl
);
2611 if (find_gid(dev
, slave
, rqp
, gid
)) {
2615 memcpy(res
->gid
, gid
, 16);
2618 list_add_tail(&res
->list
, &rqp
->mcg_list
);
2621 spin_unlock_irq(&rqp
->mcg_spl
);
2626 static int rem_mcg_res(struct mlx4_dev
*dev
, int slave
, struct res_qp
*rqp
,
2627 u8
*gid
, enum mlx4_protocol prot
,
2628 enum mlx4_steer_type steer
)
2630 struct res_gid
*res
;
2633 spin_lock_irq(&rqp
->mcg_spl
);
2634 res
= find_gid(dev
, slave
, rqp
, gid
);
2635 if (!res
|| res
->prot
!= prot
|| res
->steer
!= steer
)
2638 list_del(&res
->list
);
2642 spin_unlock_irq(&rqp
->mcg_spl
);
2647 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev
*dev
, int slave
,
2648 struct mlx4_vhcr
*vhcr
,
2649 struct mlx4_cmd_mailbox
*inbox
,
2650 struct mlx4_cmd_mailbox
*outbox
,
2651 struct mlx4_cmd_info
*cmd
)
2653 struct mlx4_qp qp
; /* dummy for calling attach/detach */
2654 u8
*gid
= inbox
->buf
;
2655 enum mlx4_protocol prot
= (vhcr
->in_modifier
>> 28) & 0x7;
2659 int attach
= vhcr
->op_modifier
;
2660 int block_loopback
= vhcr
->in_modifier
>> 31;
2661 u8 steer_type_mask
= 2;
2662 enum mlx4_steer_type type
= (gid
[7] & steer_type_mask
) >> 1;
2664 qpn
= vhcr
->in_modifier
& 0xffffff;
2665 err
= get_res(dev
, slave
, qpn
, RES_QP
, &rqp
);
2671 err
= add_mcg_res(dev
, slave
, rqp
, gid
, prot
, type
);
2675 err
= mlx4_qp_attach_common(dev
, &qp
, gid
,
2676 block_loopback
, prot
, type
);
2680 err
= rem_mcg_res(dev
, slave
, rqp
, gid
, prot
, type
);
2683 err
= mlx4_qp_detach_common(dev
, &qp
, gid
, prot
, type
);
2686 put_res(dev
, slave
, qpn
, RES_QP
);
2690 /* ignore error return below, already in error */
2691 (void) rem_mcg_res(dev
, slave
, rqp
, gid
, prot
, type
);
2693 put_res(dev
, slave
, qpn
, RES_QP
);
2699 BUSY_MAX_RETRIES
= 10
2702 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev
*dev
, int slave
,
2703 struct mlx4_vhcr
*vhcr
,
2704 struct mlx4_cmd_mailbox
*inbox
,
2705 struct mlx4_cmd_mailbox
*outbox
,
2706 struct mlx4_cmd_info
*cmd
)
2709 int index
= vhcr
->in_modifier
& 0xffff;
2711 err
= get_res(dev
, slave
, index
, RES_COUNTER
, NULL
);
2715 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
2716 put_res(dev
, slave
, index
, RES_COUNTER
);
2720 static void detach_qp(struct mlx4_dev
*dev
, int slave
, struct res_qp
*rqp
)
2722 struct res_gid
*rgid
;
2723 struct res_gid
*tmp
;
2724 struct mlx4_qp qp
; /* dummy for calling attach/detach */
2726 list_for_each_entry_safe(rgid
, tmp
, &rqp
->mcg_list
, list
) {
2727 qp
.qpn
= rqp
->local_qpn
;
2728 (void) mlx4_qp_detach_common(dev
, &qp
, rgid
->gid
, rgid
->prot
,
2730 list_del(&rgid
->list
);
2735 static int _move_all_busy(struct mlx4_dev
*dev
, int slave
,
2736 enum mlx4_resource type
, int print
)
2738 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2739 struct mlx4_resource_tracker
*tracker
=
2740 &priv
->mfunc
.master
.res_tracker
;
2741 struct list_head
*rlist
= &tracker
->slave_list
[slave
].res_list
[type
];
2742 struct res_common
*r
;
2743 struct res_common
*tmp
;
2747 spin_lock_irq(mlx4_tlock(dev
));
2748 list_for_each_entry_safe(r
, tmp
, rlist
, list
) {
2749 if (r
->owner
== slave
) {
2751 if (r
->state
== RES_ANY_BUSY
) {
2754 "%s id 0x%x is busy\n",
2759 r
->from_state
= r
->state
;
2760 r
->state
= RES_ANY_BUSY
;
2766 spin_unlock_irq(mlx4_tlock(dev
));
2771 static int move_all_busy(struct mlx4_dev
*dev
, int slave
,
2772 enum mlx4_resource type
)
2774 unsigned long begin
;
2779 busy
= _move_all_busy(dev
, slave
, type
, 0);
2780 if (time_after(jiffies
, begin
+ 5 * HZ
))
2787 busy
= _move_all_busy(dev
, slave
, type
, 1);
2791 static void rem_slave_qps(struct mlx4_dev
*dev
, int slave
)
2793 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2794 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
2795 struct list_head
*qp_list
=
2796 &tracker
->slave_list
[slave
].res_list
[RES_QP
];
2804 err
= move_all_busy(dev
, slave
, RES_QP
);
2806 mlx4_warn(dev
, "rem_slave_qps: Could not move all qps to busy"
2807 "for slave %d\n", slave
);
2809 spin_lock_irq(mlx4_tlock(dev
));
2810 list_for_each_entry_safe(qp
, tmp
, qp_list
, com
.list
) {
2811 spin_unlock_irq(mlx4_tlock(dev
));
2812 if (qp
->com
.owner
== slave
) {
2813 qpn
= qp
->com
.res_id
;
2814 detach_qp(dev
, slave
, qp
);
2815 state
= qp
->com
.from_state
;
2816 while (state
!= 0) {
2818 case RES_QP_RESERVED
:
2819 spin_lock_irq(mlx4_tlock(dev
));
2820 radix_tree_delete(&tracker
->res_tree
[RES_QP
],
2822 list_del(&qp
->com
.list
);
2823 spin_unlock_irq(mlx4_tlock(dev
));
2828 if (!valid_reserved(dev
, slave
, qpn
))
2829 __mlx4_qp_free_icm(dev
, qpn
);
2830 state
= RES_QP_RESERVED
;
2834 err
= mlx4_cmd(dev
, in_param
,
2837 MLX4_CMD_TIME_CLASS_A
,
2840 mlx4_dbg(dev
, "rem_slave_qps: failed"
2841 " to move slave %d qpn %d to"
2844 atomic_dec(&qp
->rcq
->ref_count
);
2845 atomic_dec(&qp
->scq
->ref_count
);
2846 atomic_dec(&qp
->mtt
->ref_count
);
2848 atomic_dec(&qp
->srq
->ref_count
);
2849 state
= RES_QP_MAPPED
;
2856 spin_lock_irq(mlx4_tlock(dev
));
2858 spin_unlock_irq(mlx4_tlock(dev
));
2861 static void rem_slave_srqs(struct mlx4_dev
*dev
, int slave
)
2863 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2864 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
2865 struct list_head
*srq_list
=
2866 &tracker
->slave_list
[slave
].res_list
[RES_SRQ
];
2867 struct res_srq
*srq
;
2868 struct res_srq
*tmp
;
2875 err
= move_all_busy(dev
, slave
, RES_SRQ
);
2877 mlx4_warn(dev
, "rem_slave_srqs: Could not move all srqs to "
2878 "busy for slave %d\n", slave
);
2880 spin_lock_irq(mlx4_tlock(dev
));
2881 list_for_each_entry_safe(srq
, tmp
, srq_list
, com
.list
) {
2882 spin_unlock_irq(mlx4_tlock(dev
));
2883 if (srq
->com
.owner
== slave
) {
2884 srqn
= srq
->com
.res_id
;
2885 state
= srq
->com
.from_state
;
2886 while (state
!= 0) {
2888 case RES_SRQ_ALLOCATED
:
2889 __mlx4_srq_free_icm(dev
, srqn
);
2890 spin_lock_irq(mlx4_tlock(dev
));
2891 radix_tree_delete(&tracker
->res_tree
[RES_SRQ
],
2893 list_del(&srq
->com
.list
);
2894 spin_unlock_irq(mlx4_tlock(dev
));
2901 err
= mlx4_cmd(dev
, in_param
, srqn
, 1,
2903 MLX4_CMD_TIME_CLASS_A
,
2906 mlx4_dbg(dev
, "rem_slave_srqs: failed"
2907 " to move slave %d srq %d to"
2911 atomic_dec(&srq
->mtt
->ref_count
);
2913 atomic_dec(&srq
->cq
->ref_count
);
2914 state
= RES_SRQ_ALLOCATED
;
2922 spin_lock_irq(mlx4_tlock(dev
));
2924 spin_unlock_irq(mlx4_tlock(dev
));
2927 static void rem_slave_cqs(struct mlx4_dev
*dev
, int slave
)
2929 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2930 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
2931 struct list_head
*cq_list
=
2932 &tracker
->slave_list
[slave
].res_list
[RES_CQ
];
2941 err
= move_all_busy(dev
, slave
, RES_CQ
);
2943 mlx4_warn(dev
, "rem_slave_cqs: Could not move all cqs to "
2944 "busy for slave %d\n", slave
);
2946 spin_lock_irq(mlx4_tlock(dev
));
2947 list_for_each_entry_safe(cq
, tmp
, cq_list
, com
.list
) {
2948 spin_unlock_irq(mlx4_tlock(dev
));
2949 if (cq
->com
.owner
== slave
&& !atomic_read(&cq
->ref_count
)) {
2950 cqn
= cq
->com
.res_id
;
2951 state
= cq
->com
.from_state
;
2952 while (state
!= 0) {
2954 case RES_CQ_ALLOCATED
:
2955 __mlx4_cq_free_icm(dev
, cqn
);
2956 spin_lock_irq(mlx4_tlock(dev
));
2957 radix_tree_delete(&tracker
->res_tree
[RES_CQ
],
2959 list_del(&cq
->com
.list
);
2960 spin_unlock_irq(mlx4_tlock(dev
));
2967 err
= mlx4_cmd(dev
, in_param
, cqn
, 1,
2969 MLX4_CMD_TIME_CLASS_A
,
2972 mlx4_dbg(dev
, "rem_slave_cqs: failed"
2973 " to move slave %d cq %d to"
2976 atomic_dec(&cq
->mtt
->ref_count
);
2977 state
= RES_CQ_ALLOCATED
;
2985 spin_lock_irq(mlx4_tlock(dev
));
2987 spin_unlock_irq(mlx4_tlock(dev
));
2990 static void rem_slave_mrs(struct mlx4_dev
*dev
, int slave
)
2992 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2993 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
2994 struct list_head
*mpt_list
=
2995 &tracker
->slave_list
[slave
].res_list
[RES_MPT
];
2996 struct res_mpt
*mpt
;
2997 struct res_mpt
*tmp
;
3004 err
= move_all_busy(dev
, slave
, RES_MPT
);
3006 mlx4_warn(dev
, "rem_slave_mrs: Could not move all mpts to "
3007 "busy for slave %d\n", slave
);
3009 spin_lock_irq(mlx4_tlock(dev
));
3010 list_for_each_entry_safe(mpt
, tmp
, mpt_list
, com
.list
) {
3011 spin_unlock_irq(mlx4_tlock(dev
));
3012 if (mpt
->com
.owner
== slave
) {
3013 mptn
= mpt
->com
.res_id
;
3014 state
= mpt
->com
.from_state
;
3015 while (state
!= 0) {
3017 case RES_MPT_RESERVED
:
3018 __mlx4_mr_release(dev
, mpt
->key
);
3019 spin_lock_irq(mlx4_tlock(dev
));
3020 radix_tree_delete(&tracker
->res_tree
[RES_MPT
],
3022 list_del(&mpt
->com
.list
);
3023 spin_unlock_irq(mlx4_tlock(dev
));
3028 case RES_MPT_MAPPED
:
3029 __mlx4_mr_free_icm(dev
, mpt
->key
);
3030 state
= RES_MPT_RESERVED
;
3035 err
= mlx4_cmd(dev
, in_param
, mptn
, 0,
3037 MLX4_CMD_TIME_CLASS_A
,
3040 mlx4_dbg(dev
, "rem_slave_mrs: failed"
3041 " to move slave %d mpt %d to"
3045 atomic_dec(&mpt
->mtt
->ref_count
);
3046 state
= RES_MPT_MAPPED
;
3053 spin_lock_irq(mlx4_tlock(dev
));
3055 spin_unlock_irq(mlx4_tlock(dev
));
3058 static void rem_slave_mtts(struct mlx4_dev
*dev
, int slave
)
3060 struct mlx4_priv
*priv
= mlx4_priv(dev
);
3061 struct mlx4_resource_tracker
*tracker
=
3062 &priv
->mfunc
.master
.res_tracker
;
3063 struct list_head
*mtt_list
=
3064 &tracker
->slave_list
[slave
].res_list
[RES_MTT
];
3065 struct res_mtt
*mtt
;
3066 struct res_mtt
*tmp
;
3072 err
= move_all_busy(dev
, slave
, RES_MTT
);
3074 mlx4_warn(dev
, "rem_slave_mtts: Could not move all mtts to "
3075 "busy for slave %d\n", slave
);
3077 spin_lock_irq(mlx4_tlock(dev
));
3078 list_for_each_entry_safe(mtt
, tmp
, mtt_list
, com
.list
) {
3079 spin_unlock_irq(mlx4_tlock(dev
));
3080 if (mtt
->com
.owner
== slave
) {
3081 base
= mtt
->com
.res_id
;
3082 state
= mtt
->com
.from_state
;
3083 while (state
!= 0) {
3085 case RES_MTT_ALLOCATED
:
3086 __mlx4_free_mtt_range(dev
, base
,
3088 spin_lock_irq(mlx4_tlock(dev
));
3089 radix_tree_delete(&tracker
->res_tree
[RES_MTT
],
3091 list_del(&mtt
->com
.list
);
3092 spin_unlock_irq(mlx4_tlock(dev
));
3102 spin_lock_irq(mlx4_tlock(dev
));
3104 spin_unlock_irq(mlx4_tlock(dev
));
3107 static void rem_slave_eqs(struct mlx4_dev
*dev
, int slave
)
3109 struct mlx4_priv
*priv
= mlx4_priv(dev
);
3110 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
3111 struct list_head
*eq_list
=
3112 &tracker
->slave_list
[slave
].res_list
[RES_EQ
];
3119 struct mlx4_cmd_mailbox
*mailbox
;
3121 err
= move_all_busy(dev
, slave
, RES_EQ
);
3123 mlx4_warn(dev
, "rem_slave_eqs: Could not move all eqs to "
3124 "busy for slave %d\n", slave
);
3126 spin_lock_irq(mlx4_tlock(dev
));
3127 list_for_each_entry_safe(eq
, tmp
, eq_list
, com
.list
) {
3128 spin_unlock_irq(mlx4_tlock(dev
));
3129 if (eq
->com
.owner
== slave
) {
3130 eqn
= eq
->com
.res_id
;
3131 state
= eq
->com
.from_state
;
3132 while (state
!= 0) {
3134 case RES_EQ_RESERVED
:
3135 spin_lock_irq(mlx4_tlock(dev
));
3136 radix_tree_delete(&tracker
->res_tree
[RES_EQ
],
3138 list_del(&eq
->com
.list
);
3139 spin_unlock_irq(mlx4_tlock(dev
));
3145 mailbox
= mlx4_alloc_cmd_mailbox(dev
);
3146 if (IS_ERR(mailbox
)) {
3150 err
= mlx4_cmd_box(dev
, slave
, 0,
3153 MLX4_CMD_TIME_CLASS_A
,
3156 mlx4_dbg(dev
, "rem_slave_eqs: failed"
3157 " to move slave %d eqs %d to"
3158 " SW ownership\n", slave
, eqn
);
3159 mlx4_free_cmd_mailbox(dev
, mailbox
);
3160 atomic_dec(&eq
->mtt
->ref_count
);
3161 state
= RES_EQ_RESERVED
;
3169 spin_lock_irq(mlx4_tlock(dev
));
3171 spin_unlock_irq(mlx4_tlock(dev
));
3174 static void rem_slave_counters(struct mlx4_dev
*dev
, int slave
)
3176 struct mlx4_priv
*priv
= mlx4_priv(dev
);
3177 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
3178 struct list_head
*counter_list
=
3179 &tracker
->slave_list
[slave
].res_list
[RES_COUNTER
];
3180 struct res_counter
*counter
;
3181 struct res_counter
*tmp
;
3185 err
= move_all_busy(dev
, slave
, RES_COUNTER
);
3187 mlx4_warn(dev
, "rem_slave_counters: Could not move all counters to "
3188 "busy for slave %d\n", slave
);
3190 spin_lock_irq(mlx4_tlock(dev
));
3191 list_for_each_entry_safe(counter
, tmp
, counter_list
, com
.list
) {
3192 if (counter
->com
.owner
== slave
) {
3193 index
= counter
->com
.res_id
;
3194 radix_tree_delete(&tracker
->res_tree
[RES_COUNTER
], index
);
3195 list_del(&counter
->com
.list
);
3197 __mlx4_counter_free(dev
, index
);
3200 spin_unlock_irq(mlx4_tlock(dev
));
3203 static void rem_slave_xrcdns(struct mlx4_dev
*dev
, int slave
)
3205 struct mlx4_priv
*priv
= mlx4_priv(dev
);
3206 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
3207 struct list_head
*xrcdn_list
=
3208 &tracker
->slave_list
[slave
].res_list
[RES_XRCD
];
3209 struct res_xrcdn
*xrcd
;
3210 struct res_xrcdn
*tmp
;
3214 err
= move_all_busy(dev
, slave
, RES_XRCD
);
3216 mlx4_warn(dev
, "rem_slave_xrcdns: Could not move all xrcdns to "
3217 "busy for slave %d\n", slave
);
3219 spin_lock_irq(mlx4_tlock(dev
));
3220 list_for_each_entry_safe(xrcd
, tmp
, xrcdn_list
, com
.list
) {
3221 if (xrcd
->com
.owner
== slave
) {
3222 xrcdn
= xrcd
->com
.res_id
;
3223 radix_tree_delete(&tracker
->res_tree
[RES_XRCD
], xrcdn
);
3224 list_del(&xrcd
->com
.list
);
3226 __mlx4_xrcd_free(dev
, xrcdn
);
3229 spin_unlock_irq(mlx4_tlock(dev
));
3232 void mlx4_delete_all_resources_for_slave(struct mlx4_dev
*dev
, int slave
)
3234 struct mlx4_priv
*priv
= mlx4_priv(dev
);
3236 mutex_lock(&priv
->mfunc
.master
.res_tracker
.slave_list
[slave
].mutex
);
3238 rem_slave_macs(dev
, slave
);
3239 rem_slave_qps(dev
, slave
);
3240 rem_slave_srqs(dev
, slave
);
3241 rem_slave_cqs(dev
, slave
);
3242 rem_slave_mrs(dev
, slave
);
3243 rem_slave_eqs(dev
, slave
);
3244 rem_slave_mtts(dev
, slave
);
3245 rem_slave_counters(dev
, slave
);
3246 rem_slave_xrcdns(dev
, slave
);
3247 mutex_unlock(&priv
->mfunc
.master
.res_tracker
.slave_list
[slave
].mutex
);