/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"

#define MLX4_MAC_VALID		(1ull << 63)
struct mac_res {
	struct list_head list;
	u64 mac;
	u8 port;
};

struct res_common {
	struct list_head	list;
	struct rb_node		node;
	u64			res_id;
	int			owner;
	int			state;
	int			from_state;
	int			to_state;
	int			removing;
};

enum {
	RES_ANY_BUSY = 1
};

struct res_gid {
	struct list_head	list;
	u8			gid[16];
	enum mlx4_protocol	prot;
	enum mlx4_steer_type	steer;
};

enum res_qp_states {
	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */
	RES_QP_RESERVED,

	/* ICM memory for QP context was mapped */
	RES_QP_MAPPED,

	/* QP is in hw ownership */
	RES_QP_HW
};

struct res_qp {
	struct res_common	com;
	struct res_mtt		*mtt;
	struct res_cq		*rcq;
	struct res_cq		*scq;
	struct res_srq		*srq;
	struct list_head	mcg_list;
	spinlock_t		mcg_spl;
	int			local_qpn;
	atomic_t		ref_count;
};
enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,
	RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
	switch (state) {
	case RES_MTT_BUSY: return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
	default: return "Unknown";
	}
}

struct res_mtt {
	struct res_common	com;
	int			order;
	atomic_t		ref_count;
};
enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,
	RES_MPT_RESERVED,
	RES_MPT_MAPPED,
	RES_MPT_HW,
};

struct res_mpt {
	struct res_common	com;
	struct res_mtt		*mtt;
	int			key;
};

enum res_eq_states {
	RES_EQ_BUSY = RES_ANY_BUSY,
	RES_EQ_RESERVED,
	RES_EQ_HW,
};

struct res_eq {
	struct res_common	com;
	struct res_mtt		*mtt;
};

enum res_cq_states {
	RES_CQ_BUSY = RES_ANY_BUSY,
	RES_CQ_ALLOCATED,
	RES_CQ_HW,
};

struct res_cq {
	struct res_common	com;
	struct res_mtt		*mtt;
	atomic_t		ref_count;
};

enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,
	RES_SRQ_ALLOCATED,
	RES_SRQ_HW,
};

struct res_srq {
	struct res_common	com;
	struct res_mtt		*mtt;
	struct res_cq		*cq;
	atomic_t		ref_count;
};
enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,
};

struct res_counter {
	struct res_common	com;
	int			port;
};

enum res_xrcdn_states {
	RES_XRCD_BUSY = RES_ANY_BUSY,
	RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
	struct res_common	com;
	int			port;
};

enum res_fs_rule_states {
	RES_FS_RULE_BUSY = RES_ANY_BUSY,
	RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
	struct res_common	com;
	int			qpn;
};
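
/*
 * Each resource type has its own rb-tree (res_tree[type]) keyed by
 * res_id; every tracked resource is also linked into the owning
 * slave's per-type list so it can be reclaimed in bulk.
 */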
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct res_common *res = container_of(node, struct res_common,
						      node);

		if (res_id < res->res_id)
			node = node->rb_left;
		else if (res_id > res->res_id)
			node = node->rb_right;
		else
			return res;
	}
	return NULL;
}
static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct res_common *this = container_of(*new, struct res_common,
						       node);

		parent = *new;
		if (res->res_id < this->res_id)
			new = &((*new)->rb_left);
		else if (res->res_id > this->res_id)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&res->node, parent, new);
	rb_insert_color(&res->node, root);

	return 0;
}
static const char *ResourceType(enum mlx4_resource rt)
{
	switch (rt) {
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	case RES_FS_RULE: return "RES_FS_RULE";
	case RES_XRCD: return "RES_XRCD";
	default: return "Unknown resource type !!!";
	}
}
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;
	int t;

	priv->mfunc.master.res_tracker.slave_list =
		kzalloc(dev->num_slaves * sizeof(struct slave_list),
			GFP_KERNEL);
	if (!priv->mfunc.master.res_tracker.slave_list)
		return -ENOMEM;

	for (i = 0 ; i < dev->num_slaves; i++) {
		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
	}

	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
		 dev->num_slaves);
	for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
	return 0;
}
void mlx4_free_resource_tracker(struct mlx4_dev *dev,
				enum mlx4_res_tracker_free_type type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (priv->mfunc.master.res_tracker.slave_list) {
		if (type != RES_TR_FREE_STRUCTS_ONLY)
			for (i = 0 ; i < dev->num_slaves; i++)
				if (type == RES_TR_FREE_ALL ||
				    dev->caps.function != i)
					mlx4_delete_all_resources_for_slave(dev, i);

		if (type != RES_TR_FREE_SLAVES_ONLY) {
			kfree(priv->mfunc.master.res_tracker.slave_list);
			priv->mfunc.master.res_tracker.slave_list = NULL;
		}
	}
}
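
/*
 * Slaves use virtual pkey indices.  The index the slave wrote at byte
 * 35 of the QP context is translated here to the physical pkey index
 * of the port selected by the schedule-queue bits at byte 64.
 */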
static void update_pkey_index(struct mlx4_dev *dev, int slave,
			      struct mlx4_cmd_mailbox *inbox)
{
	u8 sched = *(u8 *)(inbox->buf + 64);
	u8 orig_index = *(u8 *)(inbox->buf + 35);
	u8 new_index;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port;

	port = (sched >> 6 & 1) + 1;

	new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
	*(u8 *)(inbox->buf + 35) = new_index;
}
static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
		       u8 slave)
{
	struct mlx4_qp_context	*qp_ctx = inbox->buf + 8;
	enum mlx4_qp_optpar	optpar = be32_to_cpu(*(__be32 *) inbox->buf);
	u32			ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;

	if (MLX4_QP_ST_UD == ts)
		qp_ctx->pri_path.mgid_index = 0x80 | slave;

	if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) {
		if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
			qp_ctx->pri_path.mgid_index = slave & 0x7F;
		if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
			qp_ctx->alt_path.mgid_index = slave & 0x7F;
	}
}
static int mpt_mask(struct mlx4_dev *dev)
{
	return dev->caps.num_mpts - 1;
}

static void *find_res(struct mlx4_dev *dev, u64 res_id,
		      enum mlx4_resource type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
				  res_id);
}
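
/*
 * get_res() takes a busy reference on a tracked resource: the current
 * state is saved in from_state and the entry is parked in RES_ANY_BUSY
 * until put_res() restores it.  Only the owning slave may do this.
 */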
static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
		   enum mlx4_resource type,
		   void *res)
{
	struct res_common *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (!r) {
		err = -ENONET;
		goto exit;
	}

	if (r->state == RES_ANY_BUSY) {
		err = -EBUSY;
		goto exit;
	}

	if (r->owner != slave) {
		err = -EPERM;
		goto exit;
	}

	r->from_state = r->state;
	r->state = RES_ANY_BUSY;

	if (res)
		*((struct res_common **)res) = r;

exit:
	spin_unlock_irq(mlx4_tlock(dev));
	return err;
}
int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource type,
				    u64 res_id, int *slave)
{
	struct res_common *r;
	int err = -ENOENT;
	int id = res_id;

	if (type == RES_QP)
		id &= 0x7fffff;
	spin_lock(mlx4_tlock(dev));

	r = find_res(dev, id, type);
	if (r) {
		*slave = r->owner;
		err = 0;
	}
	spin_unlock(mlx4_tlock(dev));

	return err;
}
static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
		    enum mlx4_resource type)
{
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (r)
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}
static struct res_common *alloc_qp_tr(int id)
{
	struct res_qp *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_QP_RESERVED;
	ret->local_qpn = id;
	INIT_LIST_HEAD(&ret->mcg_list);
	spin_lock_init(&ret->mcg_spl);
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
	struct res_mtt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->order = order;
	ret->com.state = RES_MTT_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}
static struct res_common *alloc_mpt_tr(int id, int key)
{
	struct res_mpt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_MPT_RESERVED;
	ret->key = key;

	return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
	struct res_eq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_EQ_RESERVED;

	return &ret->com;
}
static struct res_common *alloc_cq_tr(int id)
{
	struct res_cq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_CQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
	struct res_srq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_SRQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}
static struct res_common *alloc_counter_tr(int id)
{
	struct res_counter *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_COUNTER_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
	struct res_xrcdn *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_XRCD_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
	struct res_fs_rule *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_FS_RULE_ALLOCATED;
	ret->qpn = qpn;

	return &ret->com;
}
static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
				   int extra)
{
	struct res_common *ret;

	switch (type) {
	case RES_QP:
		ret = alloc_qp_tr(id);
		break;
	case RES_MPT:
		ret = alloc_mpt_tr(id, extra);
		break;
	case RES_MTT:
		ret = alloc_mtt_tr(id, extra);
		break;
	case RES_EQ:
		ret = alloc_eq_tr(id);
		break;
	case RES_CQ:
		ret = alloc_cq_tr(id);
		break;
	case RES_SRQ:
		ret = alloc_srq_tr(id);
		break;
	case RES_MAC:
		printk(KERN_ERR "implementation missing\n");
		return NULL;
	case RES_COUNTER:
		ret = alloc_counter_tr(id);
		break;
	case RES_XRCD:
		ret = alloc_xrcdn_tr(id);
		break;
	case RES_FS_RULE:
		ret = alloc_fs_rule_tr(id, extra);
		break;
	default:
		return NULL;
	}
	if (ret)
		ret->owner = slave;

	return ret;
}
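
/*
 * add_res_range() creates tracker entries for ids [base, base + count)
 * and inserts them into the type's rb-tree and the slave's list,
 * undoing all insertions if any id is already tracked.
 */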
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct res_common **res_arr;
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct rb_root *root = &tracker->res_tree[type];

	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
	if (!res_arr)
		return -ENOMEM;

	for (i = 0; i < count; ++i) {
		res_arr[i] = alloc_tr(base + i, type, slave, extra);
		if (!res_arr[i]) {
			for (--i; i >= 0; --i)
				kfree(res_arr[i]);

			kfree(res_arr);
			return -ENOMEM;
		}
	}

	spin_lock_irq(mlx4_tlock(dev));
	for (i = 0; i < count; ++i) {
		if (find_res(dev, base + i, type)) {
			err = -EEXIST;
			goto undo;
		}
		err = res_tracker_insert(root, res_arr[i]);
		if (err)
			goto undo;
		list_add_tail(&res_arr[i]->list,
			      &tracker->slave_list[slave].res_list[type]);
	}
	spin_unlock_irq(mlx4_tlock(dev));
	kfree(res_arr);

	return 0;

undo:
	for (--i; i >= base; --i)
		rb_erase(&res_arr[i]->node, root);

	spin_unlock_irq(mlx4_tlock(dev));

	for (i = 0; i < count; ++i)
		kfree(res_arr[i]);

	kfree(res_arr);

	return err;
}
static int remove_qp_ok(struct res_qp *res)
{
	if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
	    !list_empty(&res->mcg_list)) {
		pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
		       res->com.state, atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_QP_RESERVED) {
		return -EPERM;
	}

	return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
	if (res->com.state == RES_MTT_BUSY ||
	    atomic_read(&res->ref_count)) {
		printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
		       __func__, __LINE__,
		       mtt_states_str(res->com.state),
		       atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_MTT_ALLOCATED)
		return -EPERM;
	else if (res->order != order)
		return -EINVAL;

	return 0;
}
static int remove_mpt_ok(struct res_mpt *res)
{
	if (res->com.state == RES_MPT_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_MPT_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
	if (res->com.state == RES_EQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_EQ_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
	if (res->com.state == RES_COUNTER_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_COUNTER_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
	if (res->com.state == RES_XRCD_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_XRCD_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_fs_rule_ok(struct res_fs_rule *res)
{
	if (res->com.state == RES_FS_RULE_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_FS_RULE_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
	if (res->com.state == RES_CQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_CQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
	if (res->com.state == RES_SRQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_SRQ_ALLOCATED)
		return -EPERM;

	return 0;
}
static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
	switch (type) {
	case RES_QP:
		return remove_qp_ok((struct res_qp *)res);
	case RES_CQ:
		return remove_cq_ok((struct res_cq *)res);
	case RES_SRQ:
		return remove_srq_ok((struct res_srq *)res);
	case RES_MPT:
		return remove_mpt_ok((struct res_mpt *)res);
	case RES_MTT:
		return remove_mtt_ok((struct res_mtt *)res, extra);
	case RES_MAC:
		return -ENOSYS;
	case RES_EQ:
		return remove_eq_ok((struct res_eq *)res);
	case RES_COUNTER:
		return remove_counter_ok((struct res_counter *)res);
	case RES_XRCD:
		return remove_xrcdn_ok((struct res_xrcdn *)res);
	case RES_FS_RULE:
		return remove_fs_rule_ok((struct res_fs_rule *)res);
	default:
		return -EINVAL;
	}
}
static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	u64 i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		if (!r) {
			err = -ENOENT;
			goto out;
		}
		if (r->owner != slave) {
			err = -EPERM;
			goto out;
		}
		err = remove_ok(r, type, extra);
		if (err)
			goto out;
	}

	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		rb_erase(&r->node, &tracker->res_tree[type]);
		list_del(&r->list);
		kfree(r);
	}
	err = 0;

out:
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
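
/*
 * The *_res_start_move_to() helpers implement two-phase state changes:
 * they validate the requested transition, park the resource in its
 * *_BUSY state and record to_state.  res_end_move() commits the move;
 * res_abort_move() rolls back to from_state.
 */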
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
				enum res_qp_states state, struct res_qp **qp,
				int alloc)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_qp *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_QP_BUSY:
			mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
				 __func__, r->com.res_id);
			err = -EBUSY;
			break;

		case RES_QP_RESERVED:
			if (r->com.state == RES_QP_MAPPED && !alloc)
				break;

			mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
			err = -EINVAL;
			break;

		case RES_QP_MAPPED:
			if ((r->com.state == RES_QP_RESERVED && alloc) ||
			    r->com.state == RES_QP_HW)
				break;
			else {
				mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
					 r->com.res_id);
				err = -EINVAL;
			}

			break;

		case RES_QP_HW:
			if (r->com.state != RES_QP_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_QP_BUSY;
			if (qp)
				*qp = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_mpt_states state, struct res_mpt **mpt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mpt *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_MPT_BUSY:
			err = -EINVAL;
			break;

		case RES_MPT_RESERVED:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;

		case RES_MPT_MAPPED:
			if (r->com.state != RES_MPT_RESERVED &&
			    r->com.state != RES_MPT_HW)
				err = -EINVAL;
			break;

		case RES_MPT_HW:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_MPT_BUSY;
			if (mpt)
				*mpt = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_eq_states state, struct res_eq **eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_eq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_EQ_BUSY:
			err = -EINVAL;
			break;

		case RES_EQ_RESERVED:
			if (r->com.state != RES_EQ_HW)
				err = -EINVAL;
			break;

		case RES_EQ_HW:
			if (r->com.state != RES_EQ_RESERVED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_EQ_BUSY;
			if (eq)
				*eq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
				enum res_cq_states state, struct res_cq **cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_cq *r;
	int err;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_CQ_BUSY:
			err = -EBUSY;
			break;

		case RES_CQ_ALLOCATED:
			if (r->com.state != RES_CQ_HW)
				err = -EINVAL;
			else if (atomic_read(&r->ref_count))
				err = -EBUSY;
			else
				err = 0;
			break;

		case RES_CQ_HW:
			if (r->com.state != RES_CQ_ALLOCATED)
				err = -EINVAL;
			else
				err = 0;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_CQ_BUSY;
			if (cq)
				*cq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				 enum res_srq_states state, struct res_srq **srq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_srq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_SRQ_BUSY:
			err = -EINVAL;
			break;

		case RES_SRQ_ALLOCATED:
			if (r->com.state != RES_SRQ_HW)
				err = -EINVAL;
			else if (atomic_read(&r->ref_count))
				err = -EBUSY;
			break;

		case RES_SRQ_HW:
			if (r->com.state != RES_SRQ_ALLOCATED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_SRQ_BUSY;
			if (srq)
				*srq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static void res_abort_move(struct mlx4_dev *dev, int slave,
			   enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static void res_end_move(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->to_state;
	spin_unlock_irq(mlx4_tlock(dev));
}
static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
	return mlx4_is_qp_reserved(dev, qpn) &&
		(mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
}

static int fw_reserved(struct mlx4_dev *dev, int qpn)
{
	return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
}
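
/*
 * The allocation handlers below mirror the PF allocation paths on
 * behalf of a slave: reserve the resource via the __mlx4_* helper,
 * then record it in the tracker, rolling back on tracking failure.
 */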
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err;
	int count;
	int align;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		count = get_param_l(&in_param);
		align = get_param_h(&in_param);
		err = __mlx4_qp_reserve_range(dev, count, align, &base);
		if (err)
			return err;

		err = add_res_range(dev, slave, base, count, RES_QP, 0);
		if (err) {
			__mlx4_qp_release_range(dev, base, count);
			return err;
		}
		set_param_l(out_param, base);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		if (valid_reserved(dev, slave, qpn)) {
			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
			if (err)
				return err;
		}

		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
					   NULL, 1);
		if (err)
			return err;

		if (!fw_reserved(dev, qpn)) {
			err = __mlx4_qp_alloc_icm(dev, qpn);
			if (err) {
				res_abort_move(dev, slave, RES_QP, qpn);
				return err;
			}
		}

		res_end_move(dev, slave, RES_QP, qpn);
		break;

	default:
		err = -EINVAL;
		break;
	}
	return err;
}
static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	order = get_param_l(&in_param);
	base = __mlx4_alloc_mtt_range(dev, order);
	if (base == -1)
		return -ENOMEM;

	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
	if (err)
		__mlx4_free_mtt_range(dev, base, order);
	else
		set_param_l(out_param, base);

	return err;
}
static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = __mlx4_mpt_reserve(dev);
		if (index == -1)
			break;
		id = index & mpt_mask(dev);

		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
		if (err) {
			__mlx4_mpt_release(dev, index);
			break;
		}
		set_param_l(out_param, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_MAPPED, &mpt);
		if (err)
			return err;

		err = __mlx4_mpt_alloc_icm(dev, mpt->key);
		if (err) {
			res_abort_move(dev, slave, RES_MPT, id);
			return err;
		}

		res_end_move(dev, slave, RES_MPT, id);
		break;
	}
	return err;
}
static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = __mlx4_cq_alloc_icm(dev, &cqn);
		if (err)
			break;

		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err) {
			__mlx4_cq_free_icm(dev, cqn);
			break;
		}

		set_param_l(out_param, cqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}

static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = __mlx4_srq_alloc_icm(dev, &srqn);
		if (err)
			break;

		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err) {
			__mlx4_srq_free_icm(dev, srqn);
			break;
		}

		set_param_l(out_param, srqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}
static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct mac_res *res;

	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res)
		return -ENOMEM;
	res->mac = mac;
	res->port = (u8) port;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_MAC]);
	return 0;
}
static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
			       int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			list_del(&res->list);
			kfree(res);
			break;
		}
	}
}
static void rem_slave_macs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		list_del(&res->list);
		__mlx4_unregister_mac(dev, res->port, res->mac);
		kfree(res);
	}
}
static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int port;
	u64 mac;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	port = get_param_l(out_param);
	mac = in_param;

	err = __mlx4_register_mac(dev, port, mac);
	if (err >= 0) {
		set_param_l(out_param, err);
		err = 0;
	}

	if (!err) {
		err = mac_add_to_slave(dev, slave, mac, port);
		if (err)
			__mlx4_unregister_mac(dev, port, mac);
	}
	return err;
}
static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)
{
	return 0;
}

static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			     u64 in_param, u64 *out_param)
{
	u32 index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = __mlx4_counter_alloc(dev, &index);
	if (err)
		return err;

	err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
	if (err)
		__mlx4_counter_free(dev, index);
	else
		set_param_l(out_param, index);

	return err;
}

static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			   u64 in_param, u64 *out_param)
{
	u32 xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = __mlx4_xrcd_alloc(dev, &xrcdn);
	if (err)
		return err;

	err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		__mlx4_xrcd_free(dev, xrcdn);
	else
		set_param_l(out_param, xrcdn);

	return err;
}
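
/*
 * Dispatch a slave's ALLOC_RES command: vhcr->in_modifier selects the
 * resource type and vhcr->op_modifier the operation (reserve and/or
 * map ICM).
 */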
int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier) {
	case RES_QP:
		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MTT:
		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_CQ:
		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_VLAN:
		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);
		break;

	case RES_COUNTER:
		err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
					vhcr->in_param, &vhcr->out_param);
		break;

	case RES_XRCD:
		err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
				      vhcr->in_param, &vhcr->out_param);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
*dev
, int slave
, int op
, int cmd
,
1527 case RES_OP_RESERVE
:
1528 base
= get_param_l(&in_param
) & 0x7fffff;
1529 count
= get_param_h(&in_param
);
1530 err
= rem_res_range(dev
, slave
, base
, count
, RES_QP
, 0);
1533 __mlx4_qp_release_range(dev
, base
, count
);
1535 case RES_OP_MAP_ICM
:
1536 qpn
= get_param_l(&in_param
) & 0x7fffff;
1537 err
= qp_res_start_move_to(dev
, slave
, qpn
, RES_QP_RESERVED
,
1542 if (!fw_reserved(dev
, qpn
))
1543 __mlx4_qp_free_icm(dev
, qpn
);
1545 res_end_move(dev
, slave
, RES_QP
, qpn
);
1547 if (valid_reserved(dev
, slave
, qpn
))
1548 err
= rem_res_range(dev
, slave
, qpn
, 1, RES_QP
, 0);
1557 static int mtt_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1558 u64 in_param
, u64
*out_param
)
1564 if (op
!= RES_OP_RESERVE_AND_MAP
)
1567 base
= get_param_l(&in_param
);
1568 order
= get_param_h(&in_param
);
1569 err
= rem_res_range(dev
, slave
, base
, 1, RES_MTT
, order
);
1571 __mlx4_free_mtt_range(dev
, base
, order
);
static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = get_res(dev, slave, id, RES_MPT, &mpt);
		if (err)
			break;
		index = mpt->key;
		put_res(dev, slave, id, RES_MPT);

		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
		if (err)
			break;
		__mlx4_mpt_release(dev, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_RESERVED, &mpt);
		if (err)
			return err;

		__mlx4_mpt_free_icm(dev, mpt->key);
		res_end_move(dev, slave, RES_MPT, id);
		return err;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}
static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		cqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err)
			break;

		__mlx4_cq_free_icm(dev, cqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		srqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err)
			break;

		__mlx4_srq_free_icm(dev, srqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int port;
	int err = 0;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		port = get_param_l(out_param);
		mac_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_mac(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	return 0;
}

static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			    u64 in_param, u64 *out_param)
{
	int index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	index = get_param_l(&in_param);
	err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
	if (err)
		return err;

	__mlx4_counter_free(dev, index);

	return err;
}

static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)
{
	int xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	xrcdn = get_param_l(&in_param);
	err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		return err;

	__mlx4_xrcd_free(dev, xrcdn);

	return err;
}
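
/*
 * Dispatch a slave's FREE_RES command to the matching per-type free
 * handler; each handler untracks the resource before releasing it.
 */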
int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err = -EINVAL;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier) {
	case RES_QP:
		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param);
		break;

	case RES_MTT:
		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param);
		break;

	case RES_CQ:
		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_VLAN:
		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_COUNTER:
		err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
				       vhcr->in_param, &vhcr->out_param);
		break;

	case RES_XRCD:
		err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);
		break;

	default:
		break;
	}
	return err;
}
/* ugly but other choices are uglier */
static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
{
	return (be32_to_cpu(mpt->flags) >> 9) & 1;
}

static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
{
	return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
}

static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->mtt_sz);
}

static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
}

static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
}

static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
}

static int mr_is_region(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
}
static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
}

static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
{
	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
}

static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
{
	int page_shift = (qpc->log_page_size & 0x3f) + 12;
	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
	int log_sq_sride = qpc->sq_size_stride & 7;
	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
	int log_rq_stride = qpc->rq_size_stride & 7;
	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
	int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
	int sq_size;
	int rq_size;
	int total_mem;
	int total_pages;
	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;

	sq_size = 1 << (log_sq_size + log_sq_sride + 4);
	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
	total_mem = sq_size + rq_size;
	total_pages =
		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
				   page_shift);

	return total_pages;
}

static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
			   int size, struct res_mtt *mtt)
{
	int res_start = mtt->com.res_id;
	int res_size = (1 << mtt->order);

	if (start < res_start || start + size > res_start + res_size)
		return -EPERM;
	return 0;
}
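
/*
 * SW2HW_MPT moves an MR to hardware ownership.  The wrapper enforces
 * VF restrictions (no memory windows, no bind-enabled FMRs, PD bits
 * must match the calling slave) and pins the referenced MTT range
 * before forwarding the command to firmware.
 */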
int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_mpt *mpt;
	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
	int phys;
	int id;
	u32 pd;
	int pd_slave;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
	if (err)
		return err;

	/* Disable memory windows for VFs. */
	if (!mr_is_region(inbox->buf)) {
		err = -EPERM;
		goto ex_abort;
	}

	/* Make sure that the PD bits related to the slave id are zeros. */
	pd = mr_get_pd(inbox->buf);
	pd_slave = (pd >> 17) & 0x7f;
	if (pd_slave != 0 && pd_slave != slave) {
		err = -EPERM;
		goto ex_abort;
	}

	if (mr_is_fmr(inbox->buf)) {
		/* FMR and Bind Enable are forbidden in slave devices. */
		if (mr_is_bind_enabled(inbox->buf)) {
			err = -EPERM;
			goto ex_abort;
		}
		/* FMR and Memory Windows are also forbidden. */
		if (!mr_is_region(inbox->buf)) {
			err = -EPERM;
			goto ex_abort;
		}
	}

	phys = mr_phys_mpt(inbox->buf);
	if (!phys) {
		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
		if (err)
			goto ex_abort;

		err = check_mtt_range(dev, slave, mtt_base,
				      mr_get_mtt_size(inbox->buf), mtt);
		if (err)
			goto ex_put;

		mpt->mtt = mtt;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	if (!phys) {
		atomic_inc(&mtt->ref_count);
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
	}

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_put:
	if (!phys)
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}
int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	if (mpt->mtt)
		atomic_dec(&mpt->mtt->ref_count);

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}
int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = get_res(dev, slave, id, RES_MPT, &mpt);
	if (err)
		return err;

	if (mpt->com.from_state != RES_MPT_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

out:
	put_res(dev, slave, id, RES_MPT);
	return err;
}
static int qp_get_rcqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
}

static int qp_get_scqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_send) & 0xffffff;
}

static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->srqn) & 0x1ffffff;
}

static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
				  struct mlx4_qp_context *context)
{
	u32 qpn = vhcr->in_modifier & 0xffffff;
	u32 qkey = 0;

	if (mlx4_get_parav_qkey(dev, qpn, &qkey))
		return;

	/* adjust qkey in qp context */
	context->qkey = cpu_to_be32(qkey);
}
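
/*
 * RST2INIT pins everything the QP context references (the MTT range,
 * the receive and send CQs and, if used, the SRQ) so none of them can
 * be freed while the QP is in hardware ownership.
 */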
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_mtt *mtt;
	struct res_qp *qp;
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
	int mtt_size = qp_get_mtt_size(qpc);
	struct res_cq *rcq;
	struct res_cq *scq;
	int rcqn = qp_get_rcqn(qpc);
	int scqn = qp_get_scqn(qpc);
	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
	struct res_srq *srq;
	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
	if (err)
		return err;
	qp->local_qpn = local_qpn;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto ex_put_mtt;

	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
	if (err)
		goto ex_put_mtt;

	if (scqn != rcqn) {
		err = get_res(dev, slave, scqn, RES_CQ, &scq);
		if (err)
			goto ex_put_rcq;
	} else
		scq = rcq;

	if (use_srq) {
		err = get_res(dev, slave, srqn, RES_SRQ, &srq);
		if (err)
			goto ex_put_scq;
	}

	adjust_proxy_tun_qkey(dev, vhcr, qpc);
	update_pkey_index(dev, slave, inbox);
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_srq;
	atomic_inc(&mtt->ref_count);
	qp->mtt = mtt;
	atomic_inc(&rcq->ref_count);
	qp->rcq = rcq;
	atomic_inc(&scq->ref_count);
	qp->scq = scq;

	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);
	if (use_srq) {
		atomic_inc(&srq->ref_count);
		put_res(dev, slave, srqn, RES_SRQ);
		qp->srq = srq;
	}
	put_res(dev, slave, rcqn, RES_CQ);
	put_res(dev, slave, mtt_base, RES_MTT);
	res_end_move(dev, slave, RES_QP, qpn);

	return 0;

ex_put_srq:
	if (use_srq)
		put_res(dev, slave, srqn, RES_SRQ);
ex_put_scq:
	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);
ex_put_rcq:
	put_res(dev, slave, rcqn, RES_CQ);
ex_put_mtt:
	put_res(dev, slave, mtt_base, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}
static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
{
	return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
}

static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
{
	int log_eq_size = eqc->log_eq_size & 0x1f;
	int page_shift = (eqc->log_page_size & 0x3f) + 12;

	if (log_eq_size + 5 < page_shift)
		return 1;

	return 1 << (log_eq_size + 5 - page_shift);
}

static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
{
	return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
}

static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
{
	int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
	int page_shift = (cqc->log_page_size & 0x3f) + 12;

	if (log_cq_size + 5 < page_shift)
		return 1;

	return 1 << (log_cq_size + 5 - page_shift);
}
int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int eqn = vhcr->in_modifier;
	int res_id = (slave << 8) | eqn;
	struct mlx4_eq_context *eqc = inbox->buf;
	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
	int mtt_size = eq_get_mtt_size(eqc);
	struct res_eq *eq;
	struct res_mtt *mtt;

	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	if (err)
		return err;
	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
	if (err)
		goto out_add;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto out_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;

	atomic_inc(&mtt->ref_count);
	eq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_EQ, res_id);
out_add:
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	return err;
}
static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
			      int len, struct res_mtt **res)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mtt *mtt;
	int err = -EINVAL;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
			    com.list) {
		if (!check_mtt_range(dev, slave, start, len, mtt)) {
			*res = mtt;
			mtt->com.from_state = mtt->com.state;
			mtt->com.state = RES_MTT_BUSY;
			err = 0;
			break;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));
	return err;
}
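
/*
 * Slaves may only use gid index 0 in their address paths; the real,
 * slave-specific index is substituted later by update_gid().
 */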
static int verify_qp_parameters(struct mlx4_dev *dev,
				struct mlx4_cmd_mailbox *inbox,
				enum qp_transition transition, u8 slave)
{
	u32			qp_type;
	struct mlx4_qp_context	*qp_ctx;
	enum mlx4_qp_optpar	optpar;

	qp_ctx  = inbox->buf + 8;
	qp_type	= (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
	optpar	= be32_to_cpu(*(__be32 *) inbox->buf);

	switch (qp_type) {
	case MLX4_QP_ST_RC:
	case MLX4_QP_ST_UC:
		switch (transition) {
		case QP_TRANS_INIT2RTR:
		case QP_TRANS_RTR2RTS:
		case QP_TRANS_RTS2RTS:
		case QP_TRANS_SQD2SQD:
		case QP_TRANS_SQD2RTS:
			if (slave != mlx4_master_func_num(dev)) {
				/* slaves have only gid index 0 */
				if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
					if (qp_ctx->pri_path.mgid_index)
						return -EINVAL;
				if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
					if (qp_ctx->alt_path.mgid_index)
						return -EINVAL;
			}
			break;
		default:
			break;
		}

		break;
	default:
		break;
	}

	return 0;
}
int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_mtt mtt;
	__be64 *page_list = inbox->buf;
	u64 *pg_list = (u64 *)page_list;
	int i;
	struct res_mtt *rmtt = NULL;
	int start = be64_to_cpu(page_list[0]);
	int npages = vhcr->in_modifier;
	int err;

	err = get_containing_mtt(dev, slave, start, npages, &rmtt);
	if (err)
		return err;

	/* Call the SW implementation of write_mtt:
	 * - Prepare a dummy mtt struct
	 * - Translate inbox contents to simple addresses in host endianess */
	mtt.offset = 0;  /* TBD this is broken but I don't handle it since
			    we don't really use it */
	mtt.order = 0;
	mtt.page_shift = 0;
	for (i = 0; i < npages; ++i)
		pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);

	err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
			       ((u64 *)page_list + 2));

	if (rmtt)
		put_res(dev, slave, rmtt->com.res_id, RES_MTT);

	return err;
}
int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 8);
	struct res_eq *eq;
	int err;

	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
	if (err)
		return err;

	err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
	if (err)
		goto ex_abort;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	atomic_dec(&eq->mtt->ref_count);
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);

	return 0;

ex_put:
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_EQ, res_id);

	return err;
}
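
/*
 * mlx4_GEN_EQE() forwards an asynchronous event to a slave by asking
 * firmware to inject it into the EQ the slave registered for this
 * event type.
 */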
int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq_info *event_eq;
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_modifier = 0;
	int err;
	int res_id;
	struct res_eq *req;

	if (!priv->mfunc.master.slave_state)
		return -EINVAL;

	event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];

	/* Create the event only if the slave is registered */
	if (event_eq->eqn < 0)
		return 0;

	mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	res_id = (slave << 8) | event_eq->eqn;
	err = get_res(dev, slave, res_id, RES_EQ, &req);
	if (err)
		goto unlock;

	if (req->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto put;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto put;
	}

	if (eqe->type == MLX4_EVENT_TYPE_CMD) {
		++event_eq->token;
		eqe->event.cmd.token = cpu_to_be16(event_eq->token);
	}

	memcpy(mailbox->buf, (u8 *) eqe, 28);

	in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);

	err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
		       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_NATIVE);

	put_res(dev, slave, res_id, RES_EQ);
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;

put:
	put_res(dev, slave, res_id, RES_EQ);

unlock:
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	return err;
}
int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 8);
	struct res_eq *eq;
	int err;

	err = get_res(dev, slave, res_id, RES_EQ, &eq);
	if (err)
		return err;

	if (eq->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

ex_put:
	put_res(dev, slave, res_id, RES_EQ);
	return err;
}
int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
	struct res_cq *cq;
	struct res_mtt *mtt;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
	if (err)
		return err;
	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;
	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	if (err)
		goto out_put;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}
int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_move;
	atomic_dec(&cq->mtt->ref_count);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}
int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;
	int err;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);
	if (err)
		return err;

	if (cq->com.from_state != RES_CQ_HW)
		goto ex_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
	put_res(dev, slave, cqn, RES_CQ);

	return err;
}
static int handle_resize(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd,
			 struct res_cq *cq)
{
	int err;
	struct res_mtt *orig_mtt;
	struct res_mtt *mtt;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;

	err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
	if (err)
		return err;

	if (orig_mtt != cq->mtt) {
		err = -EINVAL;
		goto ex_put;
	}

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_put;

	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	if (err)
		goto ex_put1;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put1;
	atomic_dec(&orig_mtt->ref_count);
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	return 0;

ex_put1:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_put:
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);

	return err;
}
int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;
	int err;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);
	if (err)
		return err;

	if (cq->com.from_state != RES_CQ_HW)
		goto ex_put;

	if (vhcr->op_modifier == 0) {
		err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
	put_res(dev, slave, cqn, RES_CQ);

	return err;
}
static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
{
	int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
	int log_rq_stride = srqc->logstride & 7;
	int page_shift = (srqc->log_page_size & 0x3f) + 12;

	if (log_srq_size + log_rq_stride + 4 < page_shift)
		return 1;

	return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
}
int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_srq *srq;
	struct mlx4_srq_context *srqc = inbox->buf;
	int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;

	if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
		return -EINVAL;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
	if (err)
		return err;
	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;
	err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
			      mtt);
	if (err)
		goto ex_put_mtt;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_mtt;

	atomic_inc(&mtt->ref_count);
	srq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_SRQ, srqn);
	return 0;

ex_put_mtt:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_SRQ, srqn);

	return err;
}
int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;
	atomic_dec(&srq->mtt->ref_count);
	if (srq->cq)
		atomic_dec(&srq->cq->ref_count);
	res_end_move(dev, slave, RES_SRQ, srqn);

	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_SRQ, srqn);

	return err;
}
int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
	if (err)
		return err;
	if (srq->com.from_state != RES_SRQ_HW) {
		err = -EBUSY;
		goto out;
	}
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, srqn, RES_SRQ);
	return err;
}
int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
	if (err)
		return err;

	if (srq->com.from_state != RES_SRQ_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, srqn, RES_SRQ);
	return err;
}
int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
			struct mlx4_vhcr *vhcr,
			struct mlx4_cmd_mailbox *inbox,
			struct mlx4_cmd_mailbox *outbox,
			struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;

	err = get_res(dev, slave, qpn, RES_QP, &qp);
	if (err)
		return err;
	if (qp->com.from_state != RES_QP_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}
int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			      struct mlx4_vhcr *vhcr,
			      struct mlx4_cmd_mailbox *inbox,
			      struct mlx4_cmd_mailbox *outbox,
			      struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *context = inbox->buf + 8;
	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *qpc = inbox->buf + 8;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
	if (err)
		return err;

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, qpc);

	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
	if (err)
		return err;

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
	if (err)
		return err;

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			      struct mlx4_vhcr *vhcr,
			      struct mlx4_cmd_mailbox *inbox,
			      struct mlx4_cmd_mailbox *outbox,
			      struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *context = inbox->buf + 8;
	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
	if (err)
		return err;

	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_gid(dev, inbox, (u8)slave);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
	if (err)
		return err;

	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_gid(dev, inbox, (u8)slave);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	atomic_dec(&qp->mtt->ref_count);
	atomic_dec(&qp->rcq->ref_count);
	atomic_dec(&qp->scq->ref_count);
	if (qp->srq)
		atomic_dec(&qp->srq->ref_count);
	res_end_move(dev, slave, RES_QP, qpn);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}
static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
				struct res_qp *rqp, u8 *gid)
{
	struct res_gid *res;

	list_for_each_entry(res, &rqp->mcg_list, list) {
		if (!memcmp(res->gid, gid, 16))
			return res;
	}
	return NULL;
}
static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot,
		       enum mlx4_steer_type steer)
{
	struct res_gid *res;
	int err;

	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	spin_lock_irq(&rqp->mcg_spl);
	if (find_gid(dev, slave, rqp, gid)) {
		kfree(res);
		err = -EEXIST;
	} else {
		memcpy(res->gid, gid, 16);
		res->prot = prot;
		res->steer = steer;
		list_add_tail(&res->list, &rqp->mcg_list);
		err = 0;
	}
	spin_unlock_irq(&rqp->mcg_spl);

	return err;
}
static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot,
		       enum mlx4_steer_type steer)
{
	struct res_gid *res;
	int err;

	spin_lock_irq(&rqp->mcg_spl);
	res = find_gid(dev, slave, rqp, gid);
	if (!res || res->prot != prot || res->steer != steer) {
		err = -EINVAL;
	} else {
		list_del(&res->list);
		kfree(res);
		err = 0;
	}
	spin_unlock_irq(&rqp->mcg_spl);

	return err;
}

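/*
 * QP_ATTACH is paravirtualized only for B0 steering; the steering type
 * is carried in bit 1 of byte 7 of the GID, and the loopback-blocking
 * flag in bit 31 of the input modifier.
 */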
int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp qp; /* dummy for calling attach/detach */
	u8 *gid = inbox->buf;
	enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
	int err;
	int qpn;
	struct res_qp *rqp;
	int attach = vhcr->op_modifier;
	int block_loopback = vhcr->in_modifier >> 31;
	u8 steer_type_mask = 2;
	enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_B0)
		return -EINVAL;

	qpn = vhcr->in_modifier & 0xffffff;
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err)
		return err;

	qp.qpn = qpn;
	if (attach) {
		err = add_mcg_res(dev, slave, rqp, gid, prot, type);
		if (err)
			goto ex_put;

		err = mlx4_qp_attach_common(dev, &qp, gid,
					    block_loopback, prot, type);
		if (err)
			goto ex_rem;
	} else {
		err = rem_mcg_res(dev, slave, rqp, gid, prot, type);
		if (err)
			goto ex_put;
		err = mlx4_qp_detach_common(dev, &qp, gid, prot, type);
	}

	put_res(dev, slave, qpn, RES_QP);
	return 0;

ex_rem:
	/* ignore error return below, already in error */
	(void) rem_mcg_res(dev, slave, rqp, gid, prot, type);
ex_put:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}

/*
 * MAC validation for Flow Steering rules.
 * VF can attach rules only with a MAC address which is assigned to it.
 */
static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
				   struct list_head *rlist)
{
	struct mac_res *res, *tmp;
	__be64 be_mac;

	/* make sure it isn't multicast or broadcast MAC */
	if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
	    !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
		list_for_each_entry_safe(res, tmp, rlist, list) {
			be_mac = cpu_to_be64(res->mac << 16);
			if (!memcmp(&be_mac, eth_header->eth.dst_mac, ETH_ALEN))
				return 0;
		}
		pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
		       eth_header->eth.dst_mac, slave);
		return -EINVAL;
	}
	return 0;
}

/*
 * In case of missing eth header, append eth header with a MAC address
 * assigned to the VF.
 */
static int add_eth_header(struct mlx4_dev *dev, int slave,
			  struct mlx4_cmd_mailbox *inbox,
			  struct list_head *rlist, int header_id)
{
	struct mac_res *res, *tmp;
	u8 port;
	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	struct mlx4_net_trans_rule_hw_eth *eth_header;
	struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
	struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
	__be64 be_mac = 0;
	__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);

	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
	port = ctrl->port;
	eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);

	/* Clear a space in the inbox for eth header */
	switch (header_id) {
	case MLX4_NET_TRANS_RULE_ID_IPV4:
		ip_header =
			(struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
		memmove(ip_header, eth_header,
			sizeof(*ip_header) + sizeof(*l4_header));
		break;
	case MLX4_NET_TRANS_RULE_ID_TCP:
	case MLX4_NET_TRANS_RULE_ID_UDP:
		l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
			    (eth_header + 1);
		memmove(l4_header, eth_header, sizeof(*l4_header));
		break;
	default:
		return -EINVAL;
	}
	list_for_each_entry_safe(res, tmp, rlist, list) {
		if (port == res->port) {
			be_mac = cpu_to_be64(res->mac << 16);
			break;
		}
	}
	if (!be_mac) {
		pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n",
		       port);
		return -EINVAL;
	}

	memset(eth_header, 0, sizeof(*eth_header));
	eth_header->size = sizeof(*eth_header) >> 2;
	eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
	memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
	memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);

	return 0;
}

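/*
 * Attach a device-managed flow steering rule on behalf of a VF: the L2
 * header is validated (or synthesized) against the MACs registered to
 * the slave, the rule is installed in firmware, and the returned rule
 * id is registered with the tracker so the rule pins its QP until it
 * is detached.
 */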
int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
					 struct mlx4_vhcr *vhcr,
					 struct mlx4_cmd_mailbox *inbox,
					 struct mlx4_cmd_mailbox *outbox,
					 struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
	int err;
	int qpn;
	struct res_qp *rqp;
	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	struct _rule_hw *rule_header;
	int header_id;

	if (dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		return -EOPNOTSUPP;

	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
	qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err) {
		pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
		return err;
	}
	rule_header = (struct _rule_hw *)(ctrl + 1);
	header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));

	switch (header_id) {
	case MLX4_NET_TRANS_RULE_ID_ETH:
		if (validate_eth_header_mac(slave, rule_header, rlist)) {
			err = -EINVAL;
			goto err_put;
		}
		break;
	case MLX4_NET_TRANS_RULE_ID_IB:
		break;
	case MLX4_NET_TRANS_RULE_ID_IPV4:
	case MLX4_NET_TRANS_RULE_ID_TCP:
	case MLX4_NET_TRANS_RULE_ID_UDP:
		pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
		if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
			err = -EINVAL;
			goto err_put;
		}
		vhcr->in_modifier +=
			sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
		break;
	default:
		pr_err("Corrupted mailbox\n");
		err = -EINVAL;
		goto err_put;
	}

	err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
			   vhcr->in_modifier, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (err)
		goto err_put;

	err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
	if (err) {
		mlx4_err(dev, "Failed to add flow steering resources\n");
		/* detach rule */
		mlx4_cmd(dev, vhcr->out_param, 0, 0,
			 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
			 MLX4_CMD_NATIVE);
		goto err_put;
	}
	atomic_inc(&rqp->ref_count);
err_put:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}

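/*
 * Detach a device-managed flow steering rule: remove the tracked rule
 * first, then execute the firmware detach and drop the rule's
 * reference on its QP.
 */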
int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
					 struct mlx4_vhcr *vhcr,
					 struct mlx4_cmd_mailbox *inbox,
					 struct mlx4_cmd_mailbox *outbox,
					 struct mlx4_cmd_info *cmd)
{
	int err;
	struct res_qp *rqp;
	struct res_fs_rule *rrule;

	if (dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		return -EOPNOTSUPP;

	err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
	if (err)
		return err;
	/* Release the rule from busy state before removal */
	put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
	err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
	if (err)
		return err;

	err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
	if (err) {
		mlx4_err(dev, "Failed to remove flow steering resources\n");
		goto out;
	}

	err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);
	if (!err)
		atomic_dec(&rqp->ref_count);
out:
	put_res(dev, slave, rrule->qpn, RES_QP);
	return err;
}

enum {
	BUSY_MAX_RETRIES = 10
};

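/*
 * QUERY_IF_STAT is forwarded to firmware only after checking that the
 * counter index really belongs to the requesting slave.
 */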
int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier & 0xffff;

	err = get_res(dev, slave, index, RES_COUNTER, NULL);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	put_res(dev, slave, index, RES_COUNTER);
	return err;
}

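/* Detach any multicast groups still attached to a slave's QP. */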
static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
{
	struct res_gid *rgid;
	struct res_gid *tmp;
	struct mlx4_qp qp; /* dummy for calling attach/detach */

	list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
		qp.qpn = rqp->local_qpn;
		(void) mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
					     rgid->steer);
		list_del(&rgid->list);
		kfree(rgid);
	}
}

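/*
 * Claim every resource of @type owned by @slave by marking it busy in
 * the tracker. Returns the number of resources that could not be
 * claimed because a command still holds them busy.
 */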
static int _move_all_busy(struct mlx4_dev *dev, int slave,
			  enum mlx4_resource type, int print)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
	struct res_common *r;
	struct res_common *tmp;
	int busy;

	busy = 0;
	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(r, tmp, rlist, list) {
		if (r->owner == slave) {
			if (!r->removing) {
				if (r->state == RES_ANY_BUSY) {
					if (print)
						mlx4_dbg(dev,
							 "%s id 0x%llx is busy\n",
							 ResourceType(type),
							 r->res_id);
					++busy;
				} else {
					r->from_state = r->state;
					r->state = RES_ANY_BUSY;
					r->removing = 1;
				}
			}
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return busy;
}

static int move_all_busy(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type)
{
	unsigned long begin;
	int busy;

	begin = jiffies;
	do {
		busy = _move_all_busy(dev, slave, type, 0);
		if (time_after(jiffies, begin + 5 * HZ))
			break;
		if (busy)
			cond_resched();
	} while (busy);

	if (busy)
		busy = _move_all_busy(dev, slave, type, 1);

	return busy;
}

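/*
 * The rem_slave_*() helpers below walk a slave's resource list and
 * unwind each resource state by state: hardware-owned objects are
 * returned to software ownership with the appropriate firmware
 * command, ICM mappings are freed, and the tracker entry is finally
 * erased and released.
 */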
static void rem_slave_qps(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
	int state;
	u64 in_param;
	int qpn;
	int err;

	err = move_all_busy(dev, slave, RES_QP);
	if (err)
		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == slave) {
			qpn = qp->com.res_id;
			detach_qp(dev, slave, qp);
			state = qp->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_QP_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&qp->com.node,
						 &tracker->res_tree[RES_QP]);
					list_del(&qp->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(qp);
					state = 0;
					break;
				case RES_QP_MAPPED:
					if (!valid_reserved(dev, slave, qpn))
						__mlx4_qp_free_icm(dev, qpn);
					state = RES_QP_RESERVED;
					break;
				case RES_QP_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param,
						       qp->local_qpn, 2,
						       MLX4_CMD_2RST_QP,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
							 slave, qp->local_qpn);
					atomic_dec(&qp->rcq->ref_count);
					atomic_dec(&qp->scq->ref_count);
					atomic_dec(&qp->mtt->ref_count);
					if (qp->srq)
						atomic_dec(&qp->srq->ref_count);
					state = RES_QP_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *srq_list =
		&tracker->slave_list[slave].res_list[RES_SRQ];
	struct res_srq *srq;
	struct res_srq *tmp;
	int state;
	u64 in_param;
	int srqn;
	int err;

	err = move_all_busy(dev, slave, RES_SRQ);
	if (err)
		mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (srq->com.owner == slave) {
			srqn = srq->com.res_id;
			state = srq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_SRQ_ALLOCATED:
					__mlx4_srq_free_icm(dev, srqn);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&srq->com.node,
						 &tracker->res_tree[RES_SRQ]);
					list_del(&srq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(srq);
					state = 0;
					break;

				case RES_SRQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, srqn, 1,
						       MLX4_CMD_HW2SW_SRQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
							 slave, srqn);

					atomic_dec(&srq->mtt->ref_count);
					if (srq->cq)
						atomic_dec(&srq->cq->ref_count);
					state = RES_SRQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *cq_list =
		&tracker->slave_list[slave].res_list[RES_CQ];
	struct res_cq *cq;
	struct res_cq *tmp;
	int state;
	u64 in_param;
	int cqn;
	int err;

	err = move_all_busy(dev, slave, RES_CQ);
	if (err)
		mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
			cqn = cq->com.res_id;
			state = cq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_CQ_ALLOCATED:
					__mlx4_cq_free_icm(dev, cqn);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&cq->com.node,
						 &tracker->res_tree[RES_CQ]);
					list_del(&cq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(cq);
					state = 0;
					break;

				case RES_CQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, cqn, 1,
						       MLX4_CMD_HW2SW_CQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
							 slave, cqn);
					atomic_dec(&cq->mtt->ref_count);
					state = RES_CQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mpt_list =
		&tracker->slave_list[slave].res_list[RES_MPT];
	struct res_mpt *mpt;
	struct res_mpt *tmp;
	int state;
	u64 in_param;
	int mptn;
	int err;

	err = move_all_busy(dev, slave, RES_MPT);
	if (err)
		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mpt->com.owner == slave) {
			mptn = mpt->com.res_id;
			state = mpt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MPT_RESERVED:
					__mlx4_mpt_release(dev, mpt->key);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mpt->com.node,
						 &tracker->res_tree[RES_MPT]);
					list_del(&mpt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(mpt);
					state = 0;
					break;

				case RES_MPT_MAPPED:
					__mlx4_mpt_free_icm(dev, mpt->key);
					state = RES_MPT_RESERVED;
					break;

				case RES_MPT_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, mptn, 0,
						       MLX4_CMD_HW2SW_MPT,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
							 slave, mptn);
					if (mpt->mtt)
						atomic_dec(&mpt->mtt->ref_count);
					state = RES_MPT_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *mtt_list =
		&tracker->slave_list[slave].res_list[RES_MTT];
	struct res_mtt *mtt;
	struct res_mtt *tmp;
	int state;
	int base;
	int err;

	err = move_all_busy(dev, slave, RES_MTT);
	if (err)
		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mtt->com.owner == slave) {
			base = mtt->com.res_id;
			state = mtt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MTT_ALLOCATED:
					__mlx4_free_mtt_range(dev, base,
							      mtt->order);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mtt->com.node,
						 &tracker->res_tree[RES_MTT]);
					list_del(&mtt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(mtt);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *fs_rule_list =
		&tracker->slave_list[slave].res_list[RES_FS_RULE];
	struct res_fs_rule *fs_rule;
	struct res_fs_rule *tmp;
	int state;
	u64 base;
	int err;

	err = move_all_busy(dev, slave, RES_FS_RULE);
	if (err)
		mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (fs_rule->com.owner == slave) {
			base = fs_rule->com.res_id;
			state = fs_rule->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_FS_RULE_ALLOCATED:
					/* detach rule */
					err = mlx4_cmd(dev, base, 0, 0,
						       MLX4_QP_FLOW_STEERING_DETACH,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);

					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&fs_rule->com.node,
						 &tracker->res_tree[RES_FS_RULE]);
					list_del(&fs_rule->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(fs_rule);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *eq_list =
		&tracker->slave_list[slave].res_list[RES_EQ];
	struct res_eq *eq;
	struct res_eq *tmp;
	int err;
	int state;
	int eqn;
	struct mlx4_cmd_mailbox *mailbox;

	err = move_all_busy(dev, slave, RES_EQ);
	if (err)
		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (eq->com.owner == slave) {
			eqn = eq->com.res_id;
			state = eq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_EQ_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&eq->com.node,
						 &tracker->res_tree[RES_EQ]);
					list_del(&eq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(eq);
					state = 0;
					break;

				case RES_EQ_HW:
					mailbox = mlx4_alloc_cmd_mailbox(dev);
					if (IS_ERR(mailbox)) {
						cond_resched();
						continue;
					}
					err = mlx4_cmd_box(dev, slave, 0,
							   eqn & 0xff, 0,
							   MLX4_CMD_HW2SW_EQ,
							   MLX4_CMD_TIME_CLASS_A,
							   MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
							 slave, eqn);
					mlx4_free_cmd_mailbox(dev, mailbox);
					atomic_dec(&eq->mtt->ref_count);
					state = RES_EQ_RESERVED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_counters(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *counter_list =
		&tracker->slave_list[slave].res_list[RES_COUNTER];
	struct res_counter *counter;
	struct res_counter *tmp;
	int err;
	int index;

	err = move_all_busy(dev, slave, RES_COUNTER);
	if (err)
		mlx4_warn(dev, "rem_slave_counters: Could not move all counters to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
		if (counter->com.owner == slave) {
			index = counter->com.res_id;
			rb_erase(&counter->com.node,
				 &tracker->res_tree[RES_COUNTER]);
			list_del(&counter->com.list);
			kfree(counter);
			__mlx4_counter_free(dev, index);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *xrcdn_list =
		&tracker->slave_list[slave].res_list[RES_XRCD];
	struct res_xrcdn *xrcd;
	struct res_xrcdn *tmp;
	int err;
	int xrcdn;

	err = move_all_busy(dev, slave, RES_XRCD);
	if (err)
		mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
		if (xrcd->com.owner == slave) {
			xrcdn = xrcd->com.res_id;
			rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
			list_del(&xrcd->com.list);
			kfree(xrcd);
			__mlx4_xrcd_free(dev, xrcdn);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

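/*
 * Tear-down order matters: flow steering rules and QPs are removed
 * before the CQs, SRQs and MRs they reference, and MTTs are freed only
 * once every consumer has dropped its reference.
 */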
void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
	rem_slave_macs(dev, slave);
	rem_slave_fs_rule(dev, slave);
	rem_slave_qps(dev, slave);
	rem_slave_srqs(dev, slave);
	rem_slave_cqs(dev, slave);
	rem_slave_mrs(dev, slave);
	rem_slave_eqs(dev, slave);
	rem_slave_mtts(dev, slave);
	rem_slave_counters(dev, slave);
	rem_slave_xrcdns(dev, slave);
	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}