/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"

#define MLX4_MAC_VALID		(1ull << 63)
struct mac_res {
	struct list_head list;
	u64 mac;
	u8 port;
};

struct vlan_res {
	struct list_head list;
	u16 vlan;
	int ref_count;
	int vlan_index;
	u8 port;
};

struct res_common {
	struct list_head list;
	struct rb_node node;
	u64 res_id;
	int owner;
	int state;
	int from_state;
	int to_state;
	int removing;
};

enum {
	RES_ANY_BUSY = 1,
};

struct res_gid {
	struct list_head list;
	u8 gid[16];
	enum mlx4_protocol prot;
	enum mlx4_steer_type steer;
	u64 reg_id;
};
enum res_qp_states {
	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */
	RES_QP_RESERVED,

	/* ICM memory for QP context was mapped */
	RES_QP_MAPPED,

	/* QP is in hw ownership */
	RES_QP_HW
};

struct res_qp {
	struct res_common com;
	struct res_mtt *mtt;
	struct res_cq *rcq;
	struct res_cq *scq;
	struct res_srq *srq;
	struct list_head mcg_list;
	spinlock_t mcg_spl;
	int local_qpn;
	atomic_t ref_count;
	u32 qpc_flags;
	u8 sched_queue;
};
enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,
	RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
	switch (state) {
	case RES_MTT_BUSY: return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
	default: return "Unknown";
	}
}
struct res_mtt {
	struct res_common com;
	int order;
	atomic_t ref_count;
};
enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,
	RES_MPT_RESERVED,
	RES_MPT_MAPPED,
	RES_MPT_HW,
};

struct res_mpt {
	struct res_common com;
	struct res_mtt *mtt;
	int key;
};
enum res_eq_states {
	RES_EQ_BUSY = RES_ANY_BUSY,
	RES_EQ_RESERVED,
	RES_EQ_HW,
};

struct res_eq {
	struct res_common com;
	struct res_mtt *mtt;
};
enum res_cq_states {
	RES_CQ_BUSY = RES_ANY_BUSY,
	RES_CQ_ALLOCATED,
	RES_CQ_HW,
};

struct res_cq {
	struct res_common com;
	struct res_mtt *mtt;
	atomic_t ref_count;
};
enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,
	RES_SRQ_ALLOCATED,
	RES_SRQ_HW,
};

struct res_srq {
	struct res_common com;
	struct res_mtt *mtt;
	struct res_cq *cq;
	atomic_t ref_count;
};
enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,
};

struct res_counter {
	struct res_common com;
	int port;
};
enum res_xrcdn_states {
	RES_XRCD_BUSY = RES_ANY_BUSY,
	RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
	struct res_common com;
	int port;
};
enum res_fs_rule_states {
	RES_FS_RULE_BUSY = RES_ANY_BUSY,
	RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
	struct res_common com;
	int qpn;
};
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct res_common *res = container_of(node, struct res_common,
						      node);

		if (res_id < res->res_id)
			node = node->rb_left;
		else if (res_id > res->res_id)
			node = node->rb_right;
		else
			return res;
	}
	return NULL;
}
static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct res_common *this = container_of(*new, struct res_common,
						       node);

		parent = *new;
		if (res->res_id < this->res_id)
			new = &((*new)->rb_left);
		else if (res->res_id > this->res_id)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&res->node, parent, new);
	rb_insert_color(&res->node, root);

	return 0;
}
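
/*
 * Illustrative sketch (not part of the driver): the tracker keeps one
 * red-black tree per resource type, keyed by res_id, so claiming an id
 * is an insert and an ownership check is a lookup. Assuming a caller
 * that already holds mlx4_tlock(dev), usage looks roughly like:
 *
 *	struct res_common *res = alloc_tr(id, RES_QP, slave, 0);
 *	if (res && res_tracker_insert(&tracker->res_tree[RES_QP], res))
 *		kfree(res);		// id already present: -EEXIST
 *	...
 *	struct res_common *r =
 *		res_tracker_lookup(&tracker->res_tree[RES_QP], id);
 */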
static const char *ResourceType(enum mlx4_resource rt)
{
	switch (rt) {
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_VLAN: return "RES_VLAN";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	case RES_FS_RULE: return "RES_FS_RULE";
	case RES_XRCD: return "RES_XRCD";
	default: return "Unknown resource type !!!";
	}
}
static void rem_slave_vlans(struct mlx4_dev *dev, int slave);

int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;
	int t;

	priv->mfunc.master.res_tracker.slave_list =
		kzalloc(dev->num_slaves * sizeof(struct slave_list),
			GFP_KERNEL);
	if (!priv->mfunc.master.res_tracker.slave_list)
		return -ENOMEM;

	for (i = 0; i < dev->num_slaves; i++) {
		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
	}

	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
		 dev->num_slaves);
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
	return 0;
}
void mlx4_free_resource_tracker(struct mlx4_dev *dev,
				enum mlx4_res_tracker_free_type type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (priv->mfunc.master.res_tracker.slave_list) {
		if (type != RES_TR_FREE_STRUCTS_ONLY) {
			for (i = 0; i < dev->num_slaves; i++) {
				if (type == RES_TR_FREE_ALL ||
				    dev->caps.function != i)
					mlx4_delete_all_resources_for_slave(dev, i);
			}
			/* free master's vlans */
			i = dev->caps.function;
			mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
			rem_slave_vlans(dev, i);
			mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
		}

		if (type != RES_TR_FREE_SLAVES_ONLY) {
			kfree(priv->mfunc.master.res_tracker.slave_list);
			priv->mfunc.master.res_tracker.slave_list = NULL;
		}
	}
}
static void update_pkey_index(struct mlx4_dev *dev, int slave,
			      struct mlx4_cmd_mailbox *inbox)
{
	u8 sched = *(u8 *)(inbox->buf + 64);
	u8 orig_index = *(u8 *)(inbox->buf + 35);
	u8 new_index;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port;

	port = (sched >> 6 & 1) + 1;

	new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
	*(u8 *)(inbox->buf + 35) = new_index;
}
static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
		       u8 slave)
{
	struct mlx4_qp_context *qp_ctx = inbox->buf + 8;
	enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf);
	u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;

	if (MLX4_QP_ST_UD == ts)
		qp_ctx->pri_path.mgid_index = 0x80 | slave;

	if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) {
		if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
			qp_ctx->pri_path.mgid_index = slave & 0x7F;
		if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
			qp_ctx->alt_path.mgid_index = slave & 0x7F;
	}
}
static int update_vport_qp_param(struct mlx4_dev *dev,
				 struct mlx4_cmd_mailbox *inbox,
				 u8 slave, u32 qpn)
{
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_priv *priv;
	u32 qp_type;
	int port;

	port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
	priv = mlx4_priv(dev);
	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];

	if (MLX4_VGT != vp_oper->state.default_vlan) {
		qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
		if (MLX4_QP_ST_RC == qp_type ||
		    (MLX4_QP_ST_UD == qp_type &&
		     !mlx4_is_qp_reserved(dev, qpn)))
			return -EINVAL;

		/* the reserved QPs (special, proxy, tunnel)
		 * do not operate over vlans
		 */
		if (mlx4_is_qp_reserved(dev, qpn))
			return 0;

		/* force strip vlan by clear vsd */
		qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);

		if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
		    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
			qpc->pri_path.vlan_control =
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
		} else if (0 != vp_oper->state.default_vlan) {
			qpc->pri_path.vlan_control =
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
		} else { /* priority tagged */
			qpc->pri_path.vlan_control =
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
		}

		qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
		qpc->pri_path.vlan_index = vp_oper->vlan_idx;
		qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
		qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
		qpc->pri_path.sched_queue &= 0xC7;
		qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
	}
	if (vp_oper->state.spoofchk) {
		qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
		qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
	}
	return 0;
}
static int mpt_mask(struct mlx4_dev *dev)
{
	return dev->caps.num_mpts - 1;
}
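
/*
 * Sketch of the key/index split this mask implies (illustrative numbers,
 * not taken from real hardware): with num_mpts = 1 << 17, mpt_mask(dev)
 * is 0x1ffff, so an MPT key of 0x00a20005 is tracked under id 0x5 --
 * the low bits index the MPT table, the high bits carry the key's tag:
 *
 *	int id = index & mpt_mask(dev);	// 0x00a20005 & 0x1ffff == 0x5
 */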
static void *find_res(struct mlx4_dev *dev, u64 res_id,
		      enum mlx4_resource type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
				  res_id);
}
static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
		   enum mlx4_resource type,
		   void *res)
{
	struct res_common *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (!r) {
		err = -ENONET;
		goto exit;
	}

	if (r->state == RES_ANY_BUSY) {
		err = -EBUSY;
		goto exit;
	}

	if (r->owner != slave) {
		err = -EPERM;
		goto exit;
	}

	r->from_state = r->state;
	r->state = RES_ANY_BUSY;

	if (res)
		*((struct res_common **)res) = r;

exit:
	spin_unlock_irq(mlx4_tlock(dev));
	return err;
}
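
/*
 * Illustrative usage (a sketch, not driver code): get_res() parks the
 * entry in RES_ANY_BUSY so concurrent wrappers back off with -EBUSY,
 * and put_res() restores the saved state. A typical wrapper does:
 *
 *	struct res_mpt *mpt;
 *	int err = get_res(dev, slave, id, RES_MPT, &mpt);
 *	if (err)
 *		return err;
 *	... use mpt while other paths see the entry as busy ...
 *	put_res(dev, slave, id, RES_MPT);
 */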
int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource type,
				    u64 res_id, int *slave)
{
	struct res_common *r;
	int err = -ENOENT;
	int id = res_id;

	if (type == RES_QP)
		id &= 0x7fffff;
	spin_lock(mlx4_tlock(dev));

	r = find_res(dev, id, type);
	if (r) {
		*slave = r->owner;
		err = 0;
	}
	spin_unlock(mlx4_tlock(dev));

	return err;
}
static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
		    enum mlx4_resource type)
{
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (r)
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}
static struct res_common *alloc_qp_tr(int id)
{
	struct res_qp *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_QP_RESERVED;
	ret->local_qpn = id;
	INIT_LIST_HEAD(&ret->mcg_list);
	spin_lock_init(&ret->mcg_spl);
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}
static struct res_common *alloc_mtt_tr(int id, int order)
{
	struct res_mtt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->order = order;
	ret->com.state = RES_MTT_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}
static struct res_common *alloc_mpt_tr(int id, int key)
{
	struct res_mpt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_MPT_RESERVED;
	ret->key = key;

	return &ret->com;
}
static struct res_common *alloc_eq_tr(int id)
{
	struct res_eq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_EQ_RESERVED;

	return &ret->com;
}
static struct res_common *alloc_cq_tr(int id)
{
	struct res_cq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_CQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}
static struct res_common *alloc_srq_tr(int id)
{
	struct res_srq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_SRQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}
static struct res_common *alloc_counter_tr(int id)
{
	struct res_counter *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_COUNTER_ALLOCATED;

	return &ret->com;
}
static struct res_common *alloc_xrcdn_tr(int id)
{
	struct res_xrcdn *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_XRCD_ALLOCATED;

	return &ret->com;
}
static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
	struct res_fs_rule *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_FS_RULE_ALLOCATED;
	ret->qpn = qpn;

	return &ret->com;
}
static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
				   int extra)
{
	struct res_common *ret;

	switch (type) {
	case RES_QP:
		ret = alloc_qp_tr(id);
		break;
	case RES_MPT:
		ret = alloc_mpt_tr(id, extra);
		break;
	case RES_MTT:
		ret = alloc_mtt_tr(id, extra);
		break;
	case RES_EQ:
		ret = alloc_eq_tr(id);
		break;
	case RES_CQ:
		ret = alloc_cq_tr(id);
		break;
	case RES_SRQ:
		ret = alloc_srq_tr(id);
		break;
	case RES_MAC:
		printk(KERN_ERR "implementation missing\n");
		return NULL;
	case RES_COUNTER:
		ret = alloc_counter_tr(id);
		break;
	case RES_XRCD:
		ret = alloc_xrcdn_tr(id);
		break;
	case RES_FS_RULE:
		ret = alloc_fs_rule_tr(id, extra);
		break;
	default:
		return NULL;
	}
	if (ret)
		ret->owner = slave;

	return ret;
}
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct res_common **res_arr;
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct rb_root *root = &tracker->res_tree[type];

	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
	if (!res_arr)
		return -ENOMEM;

	for (i = 0; i < count; ++i) {
		res_arr[i] = alloc_tr(base + i, type, slave, extra);
		if (!res_arr[i]) {
			for (--i; i >= 0; --i)
				kfree(res_arr[i]);

			kfree(res_arr);
			return -ENOMEM;
		}
	}

	spin_lock_irq(mlx4_tlock(dev));
	for (i = 0; i < count; ++i) {
		if (find_res(dev, base + i, type)) {
			err = -EEXIST;
			goto undo;
		}
		err = res_tracker_insert(root, res_arr[i]);
		if (err)
			goto undo;
		list_add_tail(&res_arr[i]->list,
			      &tracker->slave_list[slave].res_list[type]);
	}
	spin_unlock_irq(mlx4_tlock(dev));
	kfree(res_arr);

	return 0;

undo:
	/* only entries [0, i) were inserted; walk the array index back down,
	 * not the resource id
	 */
	for (--i; i >= 0; --i)
		rb_erase(&res_arr[i]->node, root);

	spin_unlock_irq(mlx4_tlock(dev));

	for (i = 0; i < count; ++i)
		kfree(res_arr[i]);

	kfree(res_arr);

	return err;
}
static int remove_qp_ok(struct res_qp *res)
{
	if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
	    !list_empty(&res->mcg_list)) {
		pr_err("resource tracker: failed to remove qp, state %d, ref_count %d\n",
		       res->com.state, atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_QP_RESERVED) {
		return -EPERM;
	}

	return 0;
}
static int remove_mtt_ok(struct res_mtt *res, int order)
{
	if (res->com.state == RES_MTT_BUSY ||
	    atomic_read(&res->ref_count)) {
		printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
		       __func__, __LINE__,
		       mtt_states_str(res->com.state),
		       atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_MTT_ALLOCATED)
		return -EPERM;
	else if (res->order != order)
		return -EINVAL;

	return 0;
}
static int remove_mpt_ok(struct res_mpt *res)
{
	if (res->com.state == RES_MPT_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_MPT_RESERVED)
		return -EPERM;

	return 0;
}
static int remove_eq_ok(struct res_eq *res)
{
	/* check against the EQ states, not the (numerically identical)
	 * MPT states that were copy-pasted here originally
	 */
	if (res->com.state == RES_EQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_EQ_RESERVED)
		return -EPERM;

	return 0;
}
static int remove_counter_ok(struct res_counter *res)
{
	if (res->com.state == RES_COUNTER_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_COUNTER_ALLOCATED)
		return -EPERM;

	return 0;
}
static int remove_xrcdn_ok(struct res_xrcdn *res)
{
	if (res->com.state == RES_XRCD_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_XRCD_ALLOCATED)
		return -EPERM;

	return 0;
}
static int remove_fs_rule_ok(struct res_fs_rule *res)
{
	if (res->com.state == RES_FS_RULE_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_FS_RULE_ALLOCATED)
		return -EPERM;

	return 0;
}
static int remove_cq_ok(struct res_cq *res)
{
	if (res->com.state == RES_CQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_CQ_ALLOCATED)
		return -EPERM;

	return 0;
}
static int remove_srq_ok(struct res_srq *res)
{
	if (res->com.state == RES_SRQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_SRQ_ALLOCATED)
		return -EPERM;

	return 0;
}
static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
	switch (type) {
	case RES_QP:
		return remove_qp_ok((struct res_qp *)res);
	case RES_CQ:
		return remove_cq_ok((struct res_cq *)res);
	case RES_SRQ:
		return remove_srq_ok((struct res_srq *)res);
	case RES_MPT:
		return remove_mpt_ok((struct res_mpt *)res);
	case RES_MTT:
		return remove_mtt_ok((struct res_mtt *)res, extra);
	case RES_MAC:
		return -ENOSYS;
	case RES_EQ:
		return remove_eq_ok((struct res_eq *)res);
	case RES_COUNTER:
		return remove_counter_ok((struct res_counter *)res);
	case RES_XRCD:
		return remove_xrcdn_ok((struct res_xrcdn *)res);
	case RES_FS_RULE:
		return remove_fs_rule_ok((struct res_fs_rule *)res);
	default:
		return -EINVAL;
	}
}
static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	u64 i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		if (!r) {
			err = -ENOENT;
			goto out;
		}
		if (r->owner != slave) {
			err = -EPERM;
			goto out;
		}
		err = remove_ok(r, type, extra);
		if (err)
			goto out;
	}

	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		rb_erase(&r->node, &tracker->res_tree[type]);
		list_del(&r->list);
		kfree(r);
	}
	err = 0;

out:
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
				enum res_qp_states state, struct res_qp **qp,
				int alloc)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_qp *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_QP_BUSY:
			mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
				 __func__, r->com.res_id);
			err = -EBUSY;
			break;

		case RES_QP_RESERVED:
			if (r->com.state == RES_QP_MAPPED && !alloc)
				break;

			mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
			err = -EINVAL;
			break;

		case RES_QP_MAPPED:
			if ((r->com.state == RES_QP_RESERVED && alloc) ||
			    r->com.state == RES_QP_HW)
				break;
			else {
				mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
					 r->com.res_id);
				err = -EINVAL;
			}
			break;

		case RES_QP_HW:
			if (r->com.state != RES_QP_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_QP_BUSY;
			if (qp)
				*qp = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
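
/*
 * The QP life cycle enforced above, as a sketch: RESERVED <-> MAPPED <->
 * HW, with BUSY as the transient state while a command is in flight.
 * A hypothetical ICM-map step (mirroring qp_alloc_res() below) is:
 *
 *	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, NULL, 1);
 *	if (!err) {
 *		if (__mlx4_qp_alloc_icm(dev, qpn))
 *			res_abort_move(dev, slave, RES_QP, qpn); // back to from_state
 *		else
 *			res_end_move(dev, slave, RES_QP, qpn);   // commit to_state
 *	}
 */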
static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_mpt_states state, struct res_mpt **mpt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mpt *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_MPT_BUSY:
			err = -EINVAL;
			break;

		case RES_MPT_RESERVED:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;

		case RES_MPT_MAPPED:
			if (r->com.state != RES_MPT_RESERVED &&
			    r->com.state != RES_MPT_HW)
				err = -EINVAL;
			break;

		case RES_MPT_HW:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_MPT_BUSY;
			if (mpt)
				*mpt = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_eq_states state, struct res_eq **eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_eq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_EQ_BUSY:
			err = -EINVAL;
			break;

		case RES_EQ_RESERVED:
			if (r->com.state != RES_EQ_HW)
				err = -EINVAL;
			break;

		case RES_EQ_HW:
			if (r->com.state != RES_EQ_RESERVED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_EQ_BUSY;
			if (eq)
				*eq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
				enum res_cq_states state, struct res_cq **cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_cq *r;
	int err;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_CQ_BUSY:
			err = -EBUSY;
			break;

		case RES_CQ_ALLOCATED:
			if (r->com.state != RES_CQ_HW)
				err = -EINVAL;
			else if (atomic_read(&r->ref_count))
				err = -EBUSY;
			else
				err = 0;
			break;

		case RES_CQ_HW:
			if (r->com.state != RES_CQ_ALLOCATED)
				err = -EINVAL;
			else
				err = 0;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_CQ_BUSY;
			if (cq)
				*cq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				 enum res_srq_states state, struct res_srq **srq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_srq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_SRQ_BUSY:
			err = -EINVAL;
			break;

		case RES_SRQ_ALLOCATED:
			if (r->com.state != RES_SRQ_HW)
				err = -EINVAL;
			else if (atomic_read(&r->ref_count))
				err = -EBUSY;
			break;

		case RES_SRQ_HW:
			if (r->com.state != RES_SRQ_ALLOCATED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_SRQ_BUSY;
			if (srq)
				*srq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static void res_abort_move(struct mlx4_dev *dev, int slave,
			   enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}
static void res_end_move(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->to_state;
	spin_unlock_irq(mlx4_tlock(dev));
}
static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
	return mlx4_is_qp_reserved(dev, qpn) &&
		(mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
}
static int fw_reserved(struct mlx4_dev *dev, int qpn)
{
	return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
}
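
/*
 * Worked example (numbers are illustrative, not from a real device):
 * if dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] == 64, QPs 0..63
 * are firmware-owned, so fw_reserved(dev, 8) is true and the ICM
 * alloc/free steps are skipped for them, while valid_reserved()
 * additionally lets the master, or the guest that owns the proxy/tunnel
 * QP, operate on ids in the reserved range.
 */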
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err;
	int count;
	int align;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		count = get_param_l(&in_param);
		align = get_param_h(&in_param);
		err = __mlx4_qp_reserve_range(dev, count, align, &base);
		if (err)
			return err;

		err = add_res_range(dev, slave, base, count, RES_QP, 0);
		if (err) {
			__mlx4_qp_release_range(dev, base, count);
			return err;
		}
		set_param_l(out_param, base);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		if (valid_reserved(dev, slave, qpn)) {
			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
			if (err)
				return err;
		}

		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
					   NULL, 1);
		if (err)
			return err;

		if (!fw_reserved(dev, qpn)) {
			err = __mlx4_qp_alloc_icm(dev, qpn);
			if (err) {
				res_abort_move(dev, slave, RES_QP, qpn);
				return err;
			}
		}

		res_end_move(dev, slave, RES_QP, qpn);
		break;

	default:
		err = -EINVAL;
		break;
	}
	return err;
}
static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	order = get_param_l(&in_param);
	base = __mlx4_alloc_mtt_range(dev, order);
	if (base == -1)
		return -ENOMEM;

	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
	if (err)
		__mlx4_free_mtt_range(dev, base, order);
	else
		set_param_l(out_param, base);

	return err;
}
static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = __mlx4_mpt_reserve(dev);
		if (index == -1)
			break;
		id = index & mpt_mask(dev);

		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
		if (err) {
			__mlx4_mpt_release(dev, index);
			break;
		}
		set_param_l(out_param, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_MAPPED, &mpt);
		if (err)
			return err;

		err = __mlx4_mpt_alloc_icm(dev, mpt->key);
		if (err) {
			res_abort_move(dev, slave, RES_MPT, id);
			return err;
		}

		res_end_move(dev, slave, RES_MPT, id);
		break;
	}
	return err;
}
static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = __mlx4_cq_alloc_icm(dev, &cqn);
		if (err)
			break;

		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err) {
			__mlx4_cq_free_icm(dev, cqn);
			break;
		}

		set_param_l(out_param, cqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}
static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = __mlx4_srq_alloc_icm(dev, &srqn);
		if (err)
			break;

		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err) {
			__mlx4_srq_free_icm(dev, srqn);
			break;
		}

		set_param_l(out_param, srqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}
static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct mac_res *res;

	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res)
		return -ENOMEM;
	res->mac = mac;
	res->port = (u8) port;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_MAC]);
	return 0;
}
static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
			       int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			list_del(&res->list);
			kfree(res);
			break;
		}
	}
}
static void rem_slave_macs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		list_del(&res->list);
		__mlx4_unregister_mac(dev, res->port, res->mac);
		kfree(res);
	}
}
static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param, int in_port)
{
	int err = -EINVAL;
	int port;
	u64 mac;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	port = !in_port ? get_param_l(out_param) : in_port;
	mac = in_param;

	err = __mlx4_register_mac(dev, port, mac);
	if (err >= 0) {
		set_param_l(out_param, err);
		err = 0;
	}

	if (!err) {
		err = mac_add_to_slave(dev, slave, mac, port);
		if (err)
			__mlx4_unregister_mac(dev, port, mac);
	}
	return err;
}
static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
			     int port, int vlan_index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		if (res->vlan == vlan && res->port == (u8) port) {
			/* vlan found. update ref count */
			++res->ref_count;
			return 0;
		}
	}

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;
	res->vlan = vlan;
	res->port = (u8) port;
	res->vlan_index = vlan_index;
	res->ref_count = 1;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_VLAN]);
	return 0;
}
static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
				int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		if (res->vlan == vlan && res->port == (u8) port) {
			if (!--res->ref_count) {
				list_del(&res->list);
				kfree(res);
			}
			break;
		}
	}
}
static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;
	int i;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		list_del(&res->list);
		/* dereference the vlan the num times the slave referenced it */
		for (i = 0; i < res->ref_count; i++)
			__mlx4_unregister_vlan(dev, res->port, res->vlan);
		kfree(res);
	}
}
static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param, int in_port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	int err;
	u16 vlan;
	int vlan_index;
	int port;

	port = !in_port ? get_param_l(out_param) : in_port;

	if (!port || op != RES_OP_RESERVE_AND_MAP)
		return -EINVAL;

	/* upstream kernels had NOP for reg/unreg vlan. Continue this. */
	if (!in_port && port > 0 && port <= dev->caps.num_ports) {
		slave_state[slave].old_vlan_api = true;
		return 0;
	}

	vlan = (u16) in_param;

	err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
	if (!err) {
		set_param_l(out_param, (u32) vlan_index);
		err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
		if (err)
			__mlx4_unregister_vlan(dev, port, vlan);
	}
	return err;
}
static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			     u64 in_param, u64 *out_param)
{
	u32 index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = __mlx4_counter_alloc(dev, &index);
	if (err)
		return err;

	err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
	if (err)
		__mlx4_counter_free(dev, index);
	else
		set_param_l(out_param, index);

	return err;
}
static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			   u64 in_param, u64 *out_param)
{
	u32 xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = __mlx4_xrcd_alloc(dev, &xrcdn);
	if (err)
		return err;

	err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		__mlx4_xrcd_free(dev, xrcdn);
	else
		set_param_l(out_param, xrcdn);

	return err;
}
int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier & 0xFF) {
	case RES_QP:
		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MTT:
		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_CQ:
		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param,
				    (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_VLAN:
		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param,
				     (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_COUNTER:
		err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
					vhcr->in_param, &vhcr->out_param);
		break;

	case RES_XRCD:
		err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
				      vhcr->in_param, &vhcr->out_param);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param)
{
	int err;
	int count;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		base = get_param_l(&in_param) & 0x7fffff;
		count = get_param_h(&in_param);
		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
		if (err)
			break;
		__mlx4_qp_release_range(dev, base, count);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
					   NULL, 0);
		if (err)
			return err;

		if (!fw_reserved(dev, qpn))
			__mlx4_qp_free_icm(dev, qpn);

		res_end_move(dev, slave, RES_QP, qpn);

		if (valid_reserved(dev, slave, qpn))
			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}
static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	base = get_param_l(&in_param);
	order = get_param_h(&in_param);
	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
	if (!err)
		__mlx4_free_mtt_range(dev, base, order);
	return err;
}
static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = get_res(dev, slave, id, RES_MPT, &mpt);
		if (err)
			break;
		index = mpt->key;
		put_res(dev, slave, id, RES_MPT);

		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
		if (err)
			break;
		__mlx4_mpt_release(dev, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_RESERVED, &mpt);
		if (err)
			return err;

		__mlx4_mpt_free_icm(dev, mpt->key);
		res_end_move(dev, slave, RES_MPT, id);
		return err;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}
static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		cqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err)
			break;

		__mlx4_cq_free_icm(dev, cqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		srqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err)
			break;

		__mlx4_srq_free_icm(dev, srqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param, int in_port)
{
	int port;
	int err = 0;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		port = !in_port ? get_param_l(out_param) : in_port;
		mac_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_mac(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	int err = 0;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		if (slave_state[slave].old_vlan_api)
			return 0;
		if (!port)
			return -EINVAL;
		vlan_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_vlan(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			    u64 in_param, u64 *out_param)
{
	int index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	index = get_param_l(&in_param);
	err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
	if (err)
		return err;

	__mlx4_counter_free(dev, index);

	return err;
}
static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)
{
	int xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	xrcdn = get_param_l(&in_param);
	err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		return err;

	__mlx4_xrcd_free(dev, xrcdn);

	return err;
}
int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err = -EINVAL;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier & 0xFF) {
	case RES_QP:
		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param);
		break;

	case RES_MTT:
		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param);
		break;

	case RES_CQ:
		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param,
				   (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_VLAN:
		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param,
				    (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_COUNTER:
		err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
				       vhcr->in_param, &vhcr->out_param);
		break;

	case RES_XRCD:
		err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);
		break;

	default:
		break;
	}
	return err;
}
/* ugly but other choices are uglier */
static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
{
	return (be32_to_cpu(mpt->flags) >> 9) & 1;
}

static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
{
	return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
}

static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->mtt_sz);
}

static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
}

static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
}

static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
}

static int mr_is_region(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
}
static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
}

static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
{
	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
}
static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
{
	int page_shift = (qpc->log_page_size & 0x3f) + 12;
	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
	int log_sq_stride = qpc->sq_size_stride & 7;
	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
	int log_rq_stride = qpc->rq_size_stride & 7;
	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
	u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
	int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
	int sq_size;
	int rq_size;
	int total_mem;
	int total_pages;
	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;

	sq_size = 1 << (log_sq_size + log_sq_stride + 4);
	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
	total_mem = sq_size + rq_size;
	total_pages =
		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
				   page_shift);

	return total_pages;
}
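
/*
 * Worked example of the computation above (illustrative values): with
 * log_sq_size = 6, log_sq_stride = 2, no SRQ/RSS/XRC, log_rq_size = 4,
 * log_rq_stride = 2, page_shift = 12 and page_offset = 0:
 *
 *	sq_size   = 1 << (6 + 2 + 4) = 4096 bytes
 *	rq_size   = 1 << (4 + 2 + 4) = 1024 bytes
 *	total_mem = 5120, so (5120 + 0) >> 12 == 1, rounded up to 1,
 *
 * i.e. this QP's buffer needs one MTT entry at a 4 KB page size.
 */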
static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
			   int size, struct res_mtt *mtt)
{
	int res_start = mtt->com.res_id;
	int res_size = (1 << mtt->order);

	if (start < res_start || start + size > res_start + res_size)
		return -EPERM;
	return 0;
}
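
/*
 * Sketch of the containment rule above (illustrative numbers): an MTT
 * resource allocated at res_id 256 with order 4 covers entries
 * [256, 272), so start = 260 with size = 8 passes, while start = 268
 * with size = 8 would run past entry 272 and is rejected.
 */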
int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_mpt *mpt;
	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
	int phys;
	int id;
	u32 pd;
	int pd_slave;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
	if (err)
		return err;

	/* Disable memory windows for VFs. */
	if (!mr_is_region(inbox->buf)) {
		err = -EPERM;
		goto ex_abort;
	}

	/* Make sure that the PD bits related to the slave id are zeros. */
	pd = mr_get_pd(inbox->buf);
	pd_slave = (pd >> 17) & 0x7f;
	if (pd_slave != 0 && pd_slave != slave) {
		err = -EPERM;
		goto ex_abort;
	}

	if (mr_is_fmr(inbox->buf)) {
		/* FMR and Bind Enable are forbidden in slave devices. */
		if (mr_is_bind_enabled(inbox->buf)) {
			err = -EPERM;
			goto ex_abort;
		}
		/* FMR and Memory Windows are also forbidden. */
		if (!mr_is_region(inbox->buf)) {
			err = -EPERM;
			goto ex_abort;
		}
	}

	phys = mr_phys_mpt(inbox->buf);
	if (!phys) {
		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
		if (err)
			goto ex_abort;

		err = check_mtt_range(dev, slave, mtt_base,
				      mr_get_mtt_size(inbox->buf), mtt);
		if (err)
			goto ex_put;

		mpt->mtt = mtt;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	if (!phys) {
		atomic_inc(&mtt->ref_count);
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
	}

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_put:
	if (!phys)
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}
int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	if (mpt->mtt)
		atomic_dec(&mpt->mtt->ref_count);

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}
int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = get_res(dev, slave, id, RES_MPT, &mpt);
	if (err)
		return err;

	if (mpt->com.from_state != RES_MPT_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

out:
	put_res(dev, slave, id, RES_MPT);
	return err;
}
static int qp_get_rcqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
}

static int qp_get_scqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_send) & 0xffffff;
}

static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->srqn) & 0x1ffffff;
}
static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
				  struct mlx4_qp_context *context)
{
	u32 qpn = vhcr->in_modifier & 0xffffff;
	u32 qkey = 0;

	if (mlx4_get_parav_qkey(dev, qpn, &qkey))
		return;

	/* adjust qkey in qp context */
	context->qkey = cpu_to_be32(qkey);
}
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_mtt *mtt;
	struct res_qp *qp;
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
	int mtt_size = qp_get_mtt_size(qpc);
	struct res_cq *rcq;
	struct res_cq *scq;
	int rcqn = qp_get_rcqn(qpc);
	int scqn = qp_get_scqn(qpc);
	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
	struct res_srq *srq;
	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
	if (err)
		return err;
	qp->local_qpn = local_qpn;
	qp->sched_queue = 0;
	qp->qpc_flags = be32_to_cpu(qpc->flags);

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto ex_put_mtt;

	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
	if (err)
		goto ex_put_mtt;

	if (scqn != rcqn) {
		err = get_res(dev, slave, scqn, RES_CQ, &scq);
		if (err)
			goto ex_put_rcq;
	} else
		scq = rcq;

	if (use_srq) {
		err = get_res(dev, slave, srqn, RES_SRQ, &srq);
		if (err)
			goto ex_put_scq;
	}

	adjust_proxy_tun_qkey(dev, vhcr, qpc);
	update_pkey_index(dev, slave, inbox);
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_srq;
	atomic_inc(&mtt->ref_count);
	qp->mtt = mtt;
	atomic_inc(&rcq->ref_count);
	qp->rcq = rcq;
	atomic_inc(&scq->ref_count);
	qp->scq = scq;

	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);

	if (use_srq) {
		atomic_inc(&srq->ref_count);
		put_res(dev, slave, srqn, RES_SRQ);
		qp->srq = srq;
	}
	put_res(dev, slave, rcqn, RES_CQ);
	put_res(dev, slave, mtt_base, RES_MTT);
	res_end_move(dev, slave, RES_QP, qpn);

	return 0;

ex_put_srq:
	if (use_srq)
		put_res(dev, slave, srqn, RES_SRQ);
ex_put_scq:
	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);
ex_put_rcq:
	put_res(dev, slave, rcqn, RES_CQ);
ex_put_mtt:
	put_res(dev, slave, mtt_base, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}
static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
{
	return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
}

static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
{
	int log_eq_size = eqc->log_eq_size & 0x1f;
	int page_shift = (eqc->log_page_size & 0x3f) + 12;

	if (log_eq_size + 5 < page_shift)
		return 1;

	return 1 << (log_eq_size + 5 - page_shift);
}
static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
{
	return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
}

static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
{
	int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
	int page_shift = (cqc->log_page_size & 0x3f) + 12;

	if (log_cq_size + 5 < page_shift)
		return 1;

	return 1 << (log_cq_size + 5 - page_shift);
}
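
/*
 * Worked example for the EQ/CQ size helpers above (illustrative values):
 * a CQ with log_cq_size = 10 holds 1024 entries of 32 bytes each (hence
 * the "+ 5"), i.e. 32 KB; with page_shift = 12 that is
 * 1 << (10 + 5 - 12) = 8 MTT entries.
 */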
int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int eqn = vhcr->in_modifier;
	int res_id = (slave << 8) | eqn;
	struct mlx4_eq_context *eqc = inbox->buf;
	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
	int mtt_size = eq_get_mtt_size(eqc);
	struct res_eq *eq;
	struct res_mtt *mtt;

	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	if (err)
		return err;
	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
	if (err)
		goto out_add;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto out_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;

	atomic_inc(&mtt->ref_count);
	eq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_EQ, res_id);
out_add:
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	return err;
}
static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
			      int len, struct res_mtt **res)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mtt *mtt;
	int err = -EINVAL;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
			    com.list) {
		if (!check_mtt_range(dev, slave, start, len, mtt)) {
			*res = mtt;
			mtt->com.from_state = mtt->com.state;
			mtt->com.state = RES_MTT_BUSY;
			err = 0;
			break;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int verify_qp_parameters(struct mlx4_dev *dev,
				struct mlx4_cmd_mailbox *inbox,
				enum qp_transition transition, u8 slave)
{
	u32 qp_type;
	struct mlx4_qp_context *qp_ctx;
	enum mlx4_qp_optpar optpar;

	qp_ctx = inbox->buf + 8;
	qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
	optpar = be32_to_cpu(*(__be32 *) inbox->buf);

	switch (qp_type) {
	case MLX4_QP_ST_RC:
	case MLX4_QP_ST_UC:
		switch (transition) {
		case QP_TRANS_INIT2RTR:
		case QP_TRANS_RTR2RTS:
		case QP_TRANS_RTS2RTS:
		case QP_TRANS_SQD2SQD:
		case QP_TRANS_SQD2RTS:
			if (slave != mlx4_master_func_num(dev)) {
				/* slaves have only gid index 0 */
				if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
					if (qp_ctx->pri_path.mgid_index)
						return -EINVAL;
				if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
					if (qp_ctx->alt_path.mgid_index)
						return -EINVAL;
			}
			break;
		default:
			break;
		}

		break;
	default:
		break;
	}

	return 0;
}
int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_mtt mtt;
	__be64 *page_list = inbox->buf;
	u64 *pg_list = (u64 *)page_list;
	int i;
	struct res_mtt *rmtt = NULL;
	int start = be64_to_cpu(page_list[0]);
	int npages = vhcr->in_modifier;
	int err;

	err = get_containing_mtt(dev, slave, start, npages, &rmtt);
	if (err)
		return err;

	/* Call the SW implementation of write_mtt:
	 * - Prepare a dummy mtt struct
	 * - Translate inbox contents to simple addresses in host endianness */
	mtt.offset = 0;  /* TBD this is broken but I don't handle it since
			    we don't really use it */
	mtt.order = 0;
	mtt.page_shift = 0;
	for (i = 0; i < npages; ++i)
		pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);

	err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
			       ((u64 *)page_list + 2));

	if (rmtt)
		put_res(dev, slave, rmtt->com.res_id, RES_MTT);

	return err;
}
int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 8);
	struct res_eq *eq;
	int err;

	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
	if (err)
		return err;

	err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
	if (err)
		goto ex_abort;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	atomic_dec(&eq->mtt->ref_count);
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);

	return 0;

ex_put:
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_EQ, res_id);

	return err;
}
int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq_info *event_eq;
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_modifier = 0;
	int err;
	int res_id;
	struct res_eq *req;

	if (!priv->mfunc.master.slave_state)
		return -EINVAL;

	event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];

	/* Create the event only if the slave is registered */
	if (event_eq->eqn < 0)
		return 0;

	mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	res_id = (slave << 8) | event_eq->eqn;
	err = get_res(dev, slave, res_id, RES_EQ, &req);
	if (err)
		goto unlock;

	if (req->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto put;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto put;
	}

	if (eqe->type == MLX4_EVENT_TYPE_CMD) {
		++event_eq->token;
		eqe->event.cmd.token = cpu_to_be16(event_eq->token);
	}

	memcpy(mailbox->buf, (u8 *) eqe, 28);

	in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);

	err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
		       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_NATIVE);

	put_res(dev, slave, res_id, RES_EQ);
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;

put:
	put_res(dev, slave, res_id, RES_EQ);

unlock:
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	return err;
}
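
/*
 * Note on the id scheme used here, with an illustrative sketch: slave
 * EQs are tracked under res_id = (slave << 8) | eqn, so EQ 3 of slave 2
 * lives at res_id 0x203. The SW2HW/HW2SW EQ wrappers above use the same
 * encoding, which is what lets get_res() find the entry:
 *
 *	int res_id = (slave << 8) | event_eq->eqn;	// e.g. (2 << 8) | 3
 */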
int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 8);
	struct res_eq *eq;
	int err;

	err = get_res(dev, slave, res_id, RES_EQ, &eq);
	if (err)
		return err;

	if (eq->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

ex_put:
	put_res(dev, slave, res_id, RES_EQ);
	return err;
}
int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
	struct res_cq *cq;
	struct res_mtt *mtt;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
	if (err)
		return err;
	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;
	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	if (err)
		goto out_put;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}
int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_move;
	atomic_dec(&cq->mtt->ref_count);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}
int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;
	int err;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);
	if (err)
		return err;

	if (cq->com.from_state != RES_CQ_HW)
		goto ex_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
	put_res(dev, slave, cqn, RES_CQ);

	return err;
}
static int handle_resize(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd,
			 struct res_cq *cq)
{
	int err;
	struct res_mtt *orig_mtt;
	struct res_mtt *mtt;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;

	err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
	if (err)
		return err;

	if (orig_mtt != cq->mtt) {
		err = -EINVAL;
		goto ex_put;
	}

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_put;

	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	if (err)
		goto ex_put1;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put1;
	atomic_dec(&orig_mtt->ref_count);
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	return 0;

ex_put1:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_put:
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);

	return err;
}
int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;
	int err;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);
	if (err)
		return err;

	if (cq->com.from_state != RES_CQ_HW)
		goto ex_put;

	if (vhcr->op_modifier == 0) {
		err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
	put_res(dev, slave, cqn, RES_CQ);

	return err;
}
static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
{
	int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
	int log_rq_stride = srqc->logstride & 7;
	int page_shift = (srqc->log_page_size & 0x3f) + 12;

	if (log_srq_size + log_rq_stride + 4 < page_shift)
		return 1;

	return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
}
int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_srq *srq;
	struct mlx4_srq_context *srqc = inbox->buf;
	int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;

	if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
		return -EINVAL;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
	if (err)
		return err;
	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;
	err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
			      mtt);
	if (err)
		goto ex_put_mtt;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_mtt;

	atomic_inc(&mtt->ref_count);
	srq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_SRQ, srqn);
	return 0;

ex_put_mtt:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_SRQ, srqn);

	return err;
}
int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;
	atomic_dec(&srq->mtt->ref_count);
	if (srq->cq)
		atomic_dec(&srq->cq->ref_count);
	res_end_move(dev, slave, RES_SRQ, srqn);

	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_SRQ, srqn);

	return err;
}
int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
	if (err)
		return err;
	if (srq->com.from_state != RES_SRQ_HW) {
		err = -EBUSY;
		goto out;
	}
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, srqn, RES_SRQ);
	return err;
}
int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
	if (err)
		return err;

	if (srq->com.from_state != RES_SRQ_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, srqn, RES_SRQ);
	return err;
}
int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
			struct mlx4_vhcr *vhcr,
			struct mlx4_cmd_mailbox *inbox,
			struct mlx4_cmd_mailbox *outbox,
			struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;

	err = get_res(dev, slave, qpn, RES_QP, &qp);
	if (err)
		return err;
	if (qp->com.from_state != RES_QP_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}

int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			      struct mlx4_vhcr *vhcr,
			      struct mlx4_cmd_mailbox *inbox,
			      struct mlx4_cmd_mailbox *outbox,
			      struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *context = inbox->buf + 8;
	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}

int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;
	u8 orig_sched_queue;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
	if (err)
		return err;

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, qpc);
	orig_sched_queue = qpc->pri_path.sched_queue;
	err = update_vport_qp_param(dev, inbox, slave, qpn);
	if (err)
		return err;

	err = get_res(dev, slave, qpn, RES_QP, &qp);
	if (err)
		return err;
	if (qp->com.from_state != RES_QP_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	/* if no error, save sched queue value passed in by VF. This is
	 * essentially the QOS value provided by the VF. This will be useful
	 * if we allow dynamic changes from VST back to VGT
	 */
	if (!err)
		qp->sched_queue = orig_sched_queue;

	put_res(dev, slave, qpn, RES_QP);
	return err;
}

int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
	if (err)
		return err;

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}

int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
	if (err)
		return err;

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}

int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			      struct mlx4_vhcr *vhcr,
			      struct mlx4_cmd_mailbox *inbox,
			      struct mlx4_cmd_mailbox *outbox,
			      struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *context = inbox->buf + 8;
	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}

int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
	if (err)
		return err;

	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_gid(dev, inbox, (u8)slave);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}

int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
	if (err)
		return err;

	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_gid(dev, inbox, (u8)slave);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}

int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	atomic_dec(&qp->mtt->ref_count);
	atomic_dec(&qp->rcq->ref_count);
	atomic_dec(&qp->scq->ref_count);
	if (qp->srq)
		atomic_dec(&qp->srq->ref_count);
	res_end_move(dev, slave, RES_QP, qpn);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}

static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
				struct res_qp *rqp, u8 *gid)
{
	struct res_gid *res;

	list_for_each_entry(res, &rqp->mcg_list, list) {
		if (!memcmp(res->gid, gid, 16))
			return res;
	}
	return NULL;
}

static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot,
		       enum mlx4_steer_type steer, u64 reg_id)
{
	struct res_gid *res;
	int err;

	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	spin_lock_irq(&rqp->mcg_spl);
	if (find_gid(dev, slave, rqp, gid)) {
		kfree(res);
		err = -EEXIST;
	} else {
		memcpy(res->gid, gid, 16);
		res->prot = prot;
		res->steer = steer;
		res->reg_id = reg_id;
		list_add_tail(&res->list, &rqp->mcg_list);
		err = 0;
	}
	spin_unlock_irq(&rqp->mcg_spl);

	return err;
}

static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot,
		       enum mlx4_steer_type steer, u64 *reg_id)
{
	struct res_gid *res;
	int err;

	spin_lock_irq(&rqp->mcg_spl);
	res = find_gid(dev, slave, rqp, gid);
	if (!res || res->prot != prot || res->steer != steer)
		err = -EINVAL;
	else {
		*reg_id = res->reg_id;
		list_del(&res->list);
		kfree(res);
		err = 0;
	}
	spin_unlock_irq(&rqp->mcg_spl);

	return err;
}

static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
		     int block_loopback, enum mlx4_protocol prot,
		     enum mlx4_steer_type type, u64 *reg_id)
{
	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		return mlx4_trans_to_dmfs_attach(dev, qp, gid, gid[5],
						 block_loopback, prot,
						 reg_id);
	case MLX4_STEERING_MODE_B0:
		return mlx4_qp_attach_common(dev, qp, gid,
					     block_loopback, prot, type);
	default:
		return -EINVAL;
	}
}

static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
		     enum mlx4_protocol prot, enum mlx4_steer_type type,
		     u64 reg_id)
{
	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		return mlx4_flow_detach(dev, reg_id);
	case MLX4_STEERING_MODE_B0:
		return mlx4_qp_detach_common(dev, qp, gid, prot, type);
	default:
		return -EINVAL;
	}
}

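/*
 * Illustrative note: qp_attach()/qp_detach() hide the steering-mode
 * split from the wrapper below. Device-managed steering tracks each
 * rule by the 64-bit reg_id that mlx4_flow_detach() later consumes,
 * while B0 steering re-derives everything from the gid/prot/type
 * triplet and needs no registration handle.
 */
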
int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp qp; /* dummy for calling attach/detach */
	u8 *gid = inbox->buf;
	enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
	int err;
	int qpn;
	struct res_qp *rqp;
	u64 reg_id = 0;
	int attach = vhcr->op_modifier;
	int block_loopback = vhcr->in_modifier >> 31;
	u8 steer_type_mask = 2;
	enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;

	qpn = vhcr->in_modifier & 0xffffff;
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err)
		return err;

	qp.qpn = qpn;
	if (attach) {
		err = qp_attach(dev, &qp, gid, block_loopback, prot,
				type, &reg_id);
		if (err) {
			pr_err("Fail to attach rule to qp 0x%x\n", qpn);
			goto ex_put;
		}
		err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
		if (err)
			goto ex_detach;
	} else {
		err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
		if (err)
			goto ex_put;

		err = qp_detach(dev, &qp, gid, prot, type, reg_id);
		if (err)
			pr_err("Fail to detach rule from qp 0x%x reg_id = 0x%llx\n",
			       qpn, reg_id);
	}
	put_res(dev, slave, qpn, RES_QP);
	return err;

ex_detach:
	qp_detach(dev, &qp, gid, prot, type, reg_id);
ex_put:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}

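/*
 * Worked example for the in_modifier decoding above (illustrative):
 * bits [23:0] carry the QP number, bits [30:28] the protocol and
 * bit 31 the block-loopback flag, so in_modifier = 0x9000002a decodes
 * to block_loopback = 1, prot = (0x9 & 0x7) = 1 and qpn = 0x2a. The
 * steer type comes from bit 1 of gid[7]: (gid[7] & 2) >> 1.
 */
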
/*
 * MAC validation for Flow Steering rules.
 * A VF can attach rules only with a MAC address that is assigned to it.
 */
static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
				   struct list_head *rlist)
{
	struct mac_res *res, *tmp;
	__be64 be_mac;

	/* make sure it isn't a multicast or broadcast MAC */
	if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
	    !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
		list_for_each_entry_safe(res, tmp, rlist, list) {
			be_mac = cpu_to_be64(res->mac << 16);
			if (!memcmp(&be_mac, eth_header->eth.dst_mac, ETH_ALEN))
				return 0;
		}
		pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
		       eth_header->eth.dst_mac, slave);
		return -EINVAL;
	}
	return 0;
}

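/*
 * Illustrative note on the be_mac arithmetic above: res->mac keeps the
 * 48-bit MAC in the low bytes of a u64, so cpu_to_be64(res->mac << 16)
 * places the six MAC bytes first in memory. E.g. mac 0x0002c9112233
 * becomes the byte sequence 00 02 c9 11 22 33 00 00, which memcmp()
 * can compare against dst_mac over ETH_ALEN bytes.
 */
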
/*
 * In case of a missing eth header, append an eth header with a MAC
 * address assigned to the VF.
 */
static int add_eth_header(struct mlx4_dev *dev, int slave,
			  struct mlx4_cmd_mailbox *inbox,
			  struct list_head *rlist, int header_id)
{
	struct mac_res *res, *tmp;
	u8 port;
	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	struct mlx4_net_trans_rule_hw_eth *eth_header;
	struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
	struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
	__be64 be_mac = 0;
	__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);

	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
	port = ctrl->port;
	eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);

	/* Clear a space in the inbox for the eth header */
	switch (header_id) {
	case MLX4_NET_TRANS_RULE_ID_IPV4:
		ip_header =
			(struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
		memmove(ip_header, eth_header,
			sizeof(*ip_header) + sizeof(*l4_header));
		break;
	case MLX4_NET_TRANS_RULE_ID_TCP:
	case MLX4_NET_TRANS_RULE_ID_UDP:
		l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
			    (eth_header + 1);
		memmove(l4_header, eth_header, sizeof(*l4_header));
		break;
	default:
		return -EINVAL;
	}
	list_for_each_entry_safe(res, tmp, rlist, list) {
		if (port == res->port) {
			be_mac = cpu_to_be64(res->mac << 16);
			break;
		}
	}
	if (!be_mac) {
		pr_err("Failed adding eth header to FS rule, can't find matching MAC for port %d\n",
		       port);
		return -EINVAL;
	}

	memset(eth_header, 0, sizeof(*eth_header));
	eth_header->size = sizeof(*eth_header) >> 2;
	eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
	memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
	memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);

	return 0;
}

int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
					 struct mlx4_vhcr *vhcr,
					 struct mlx4_cmd_mailbox *inbox,
					 struct mlx4_cmd_mailbox *outbox,
					 struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
	int err;
	int qpn;
	struct res_qp *rqp;
	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	struct _rule_hw *rule_header;
	int header_id;

	if (dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		return -EOPNOTSUPP;

	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
	qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err) {
		pr_err("Steering rule with qpn 0x%x rejected.\n", qpn);
		return err;
	}
	rule_header = (struct _rule_hw *)(ctrl + 1);
	header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));

	switch (header_id) {
	case MLX4_NET_TRANS_RULE_ID_ETH:
		if (validate_eth_header_mac(slave, rule_header, rlist)) {
			err = -EINVAL;
			goto err_put;
		}
		break;
	case MLX4_NET_TRANS_RULE_ID_IB:
		break;
	case MLX4_NET_TRANS_RULE_ID_IPV4:
	case MLX4_NET_TRANS_RULE_ID_TCP:
	case MLX4_NET_TRANS_RULE_ID_UDP:
		pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
		if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
			err = -EINVAL;
			goto err_put;
		}
		vhcr->in_modifier +=
			sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
		break;
	default:
		pr_err("Corrupted mailbox.\n");
		err = -EINVAL;
		goto err_put;
	}

	err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
			   vhcr->in_modifier, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (err)
		goto err_put;

	err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
	if (err) {
		mlx4_err(dev, "Fail to add flow steering resources.\n");
		/* detach rule */
		mlx4_cmd(dev, vhcr->out_param, 0, 0,
			 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
			 MLX4_CMD_NATIVE);
		goto err_put;
	}
	atomic_inc(&rqp->ref_count);
err_put:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}

int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
					 struct mlx4_vhcr *vhcr,
					 struct mlx4_cmd_mailbox *inbox,
					 struct mlx4_cmd_mailbox *outbox,
					 struct mlx4_cmd_info *cmd)
{
	int err;
	struct res_qp *rqp;
	struct res_fs_rule *rrule;

	if (dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		return -EOPNOTSUPP;

	err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
	if (err)
		return err;
	/* Release the rule from busy state before removal */
	put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
	err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
	if (err)
		return err;

	err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
	if (err) {
		mlx4_err(dev, "Fail to remove flow steering resources.\n");
		goto out;
	}

	err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);
	if (!err)
		atomic_dec(&rqp->ref_count);
out:
	put_res(dev, slave, rrule->qpn, RES_QP);
	return err;
}

enum {
	BUSY_MAX_RETRIES = 10
};

int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier & 0xffff;

	err = get_res(dev, slave, index, RES_COUNTER, NULL);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	put_res(dev, slave, index, RES_COUNTER);
	return err;
}

static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
{
	struct res_gid *rgid;
	struct res_gid *tmp;
	struct mlx4_qp qp; /* dummy for calling attach/detach */

	list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
		switch (dev->caps.steering_mode) {
		case MLX4_STEERING_MODE_DEVICE_MANAGED:
			mlx4_flow_detach(dev, rgid->reg_id);
			break;
		case MLX4_STEERING_MODE_B0:
			qp.qpn = rqp->local_qpn;
			(void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
						     rgid->prot, rgid->steer);
			break;
		}
		list_del(&rgid->list);
		kfree(rgid);
	}
}

static int _move_all_busy(struct mlx4_dev *dev, int slave,
			  enum mlx4_resource type, int print)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
	struct res_common *r;
	struct res_common *tmp;
	int busy;

	busy = 0;
	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(r, tmp, rlist, list) {
		if (r->owner == slave) {
			if (!r->removing) {
				if (r->state == RES_ANY_BUSY) {
					if (print)
						mlx4_dbg(dev,
							 "%s id 0x%llx is busy\n",
							 ResourceType(type),
							 r->res_id);
					++busy;
				} else {
					r->from_state = r->state;
					r->state = RES_ANY_BUSY;
					r->removing = 1;
				}
			}
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return busy;
}

static int move_all_busy(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type)
{
	unsigned long begin;
	int busy;

	begin = jiffies;
	do {
		busy = _move_all_busy(dev, slave, type, 0);
		if (time_after(jiffies, begin + 5 * HZ))
			break;
		if (busy)
			cond_resched();
	} while (busy);

	if (busy)
		busy = _move_all_busy(dev, slave, type, 1);

	return busy;
}

static void rem_slave_qps(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
	int state;
	u64 in_param;
	int qpn;
	int err;

	err = move_all_busy(dev, slave, RES_QP);
	if (err)
		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy "
			  "for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == slave) {
			qpn = qp->com.res_id;
			detach_qp(dev, slave, qp);
			state = qp->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_QP_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&qp->com.node,
						 &tracker->res_tree[RES_QP]);
					list_del(&qp->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(qp);
					state = 0;
					break;
				case RES_QP_MAPPED:
					if (!valid_reserved(dev, slave, qpn))
						__mlx4_qp_free_icm(dev, qpn);
					state = RES_QP_RESERVED;
					break;
				case RES_QP_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param,
						       qp->local_qpn, 2,
						       MLX4_CMD_2RST_QP,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_qps: failed"
							 " to move slave %d qpn %d to"
							 " reset\n", slave,
							 qp->local_qpn);
					atomic_dec(&qp->rcq->ref_count);
					atomic_dec(&qp->scq->ref_count);
					atomic_dec(&qp->mtt->ref_count);
					if (qp->srq)
						atomic_dec(&qp->srq->ref_count);
					state = RES_QP_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *srq_list =
		&tracker->slave_list[slave].res_list[RES_SRQ];
	struct res_srq *srq;
	struct res_srq *tmp;
	int state;
	u64 in_param;
	int srqn;
	int err;

	err = move_all_busy(dev, slave, RES_SRQ);
	if (err)
		mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (srq->com.owner == slave) {
			srqn = srq->com.res_id;
			state = srq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_SRQ_ALLOCATED:
					__mlx4_srq_free_icm(dev, srqn);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&srq->com.node,
						 &tracker->res_tree[RES_SRQ]);
					list_del(&srq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(srq);
					state = 0;
					break;

				case RES_SRQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, srqn, 1,
						       MLX4_CMD_HW2SW_SRQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_srqs: failed"
							 " to move slave %d srq %d to"
							 " SW ownership\n",
							 slave, srqn);

					atomic_dec(&srq->mtt->ref_count);
					if (srq->cq)
						atomic_dec(&srq->cq->ref_count);
					state = RES_SRQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *cq_list =
		&tracker->slave_list[slave].res_list[RES_CQ];
	struct res_cq *cq;
	struct res_cq *tmp;
	int state;
	u64 in_param;
	int cqn;
	int err;

	err = move_all_busy(dev, slave, RES_CQ);
	if (err)
		mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
			cqn = cq->com.res_id;
			state = cq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_CQ_ALLOCATED:
					__mlx4_cq_free_icm(dev, cqn);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&cq->com.node,
						 &tracker->res_tree[RES_CQ]);
					list_del(&cq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(cq);
					state = 0;
					break;

				case RES_CQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, cqn, 1,
						       MLX4_CMD_HW2SW_CQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_cqs: failed"
							 " to move slave %d cq %d to"
							 " SW ownership\n",
							 slave, cqn);
					atomic_dec(&cq->mtt->ref_count);
					state = RES_CQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mpt_list =
		&tracker->slave_list[slave].res_list[RES_MPT];
	struct res_mpt *mpt;
	struct res_mpt *tmp;
	int state;
	u64 in_param;
	int mptn;
	int err;

	err = move_all_busy(dev, slave, RES_MPT);
	if (err)
		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mpt->com.owner == slave) {
			mptn = mpt->com.res_id;
			state = mpt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MPT_RESERVED:
					__mlx4_mpt_release(dev, mpt->key);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mpt->com.node,
						 &tracker->res_tree[RES_MPT]);
					list_del(&mpt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(mpt);
					state = 0;
					break;

				case RES_MPT_MAPPED:
					__mlx4_mpt_free_icm(dev, mpt->key);
					state = RES_MPT_RESERVED;
					break;

				case RES_MPT_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, mptn, 0,
						       MLX4_CMD_HW2SW_MPT,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_mrs: failed"
							 " to move slave %d mpt %d to"
							 " SW ownership\n",
							 slave, mptn);
					if (mpt->mtt)
						atomic_dec(&mpt->mtt->ref_count);
					state = RES_MPT_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *mtt_list =
		&tracker->slave_list[slave].res_list[RES_MTT];
	struct res_mtt *mtt;
	struct res_mtt *tmp;
	int state;
	int base;
	int err;

	err = move_all_busy(dev, slave, RES_MTT);
	if (err)
		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mtt->com.owner == slave) {
			base = mtt->com.res_id;
			state = mtt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MTT_ALLOCATED:
					__mlx4_free_mtt_range(dev, base,
							      mtt->order);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mtt->com.node,
						 &tracker->res_tree[RES_MTT]);
					list_del(&mtt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(mtt);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *fs_rule_list =
		&tracker->slave_list[slave].res_list[RES_FS_RULE];
	struct res_fs_rule *fs_rule;
	struct res_fs_rule *tmp;
	int state;
	u64 base;
	int err;

	err = move_all_busy(dev, slave, RES_FS_RULE);
	if (err)
		mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (fs_rule->com.owner == slave) {
			base = fs_rule->com.res_id;
			state = fs_rule->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_FS_RULE_ALLOCATED:
					/* detach rule */
					err = mlx4_cmd(dev, base, 0, 0,
						       MLX4_QP_FLOW_STEERING_DETACH,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);

					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&fs_rule->com.node,
						 &tracker->res_tree[RES_FS_RULE]);
					list_del(&fs_rule->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(fs_rule);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *eq_list =
		&tracker->slave_list[slave].res_list[RES_EQ];
	struct res_eq *eq;
	struct res_eq *tmp;
	int err;
	int state;
	int eqn;
	struct mlx4_cmd_mailbox *mailbox;

	err = move_all_busy(dev, slave, RES_EQ);
	if (err)
		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (eq->com.owner == slave) {
			eqn = eq->com.res_id;
			state = eq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_EQ_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&eq->com.node,
						 &tracker->res_tree[RES_EQ]);
					list_del(&eq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(eq);
					state = 0;
					break;

				case RES_EQ_HW:
					mailbox = mlx4_alloc_cmd_mailbox(dev);
					if (IS_ERR(mailbox)) {
						cond_resched();
						continue;
					}
					err = mlx4_cmd_box(dev, slave, 0,
							   eqn & 0xff, 0,
							   MLX4_CMD_HW2SW_EQ,
							   MLX4_CMD_TIME_CLASS_A,
							   MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_eqs: failed"
							 " to move slave %d eqs %d to"
							 " SW ownership\n", slave, eqn);
					mlx4_free_cmd_mailbox(dev, mailbox);
					atomic_dec(&eq->mtt->ref_count);
					state = RES_EQ_RESERVED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_counters(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *counter_list =
		&tracker->slave_list[slave].res_list[RES_COUNTER];
	struct res_counter *counter;
	struct res_counter *tmp;
	int err;
	int index;

	err = move_all_busy(dev, slave, RES_COUNTER);
	if (err)
		mlx4_warn(dev, "rem_slave_counters: Could not move all counters to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
		if (counter->com.owner == slave) {
			index = counter->com.res_id;
			rb_erase(&counter->com.node,
				 &tracker->res_tree[RES_COUNTER]);
			list_del(&counter->com.list);
			kfree(counter);
			__mlx4_counter_free(dev, index);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *xrcdn_list =
		&tracker->slave_list[slave].res_list[RES_XRCD];
	struct res_xrcdn *xrcd;
	struct res_xrcdn *tmp;
	int err;
	int xrcdn;

	err = move_all_busy(dev, slave, RES_XRCD);
	if (err)
		mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
		if (xrcd->com.owner == slave) {
			xrcdn = xrcd->com.res_id;
			rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
			list_del(&xrcd->com.list);
			kfree(xrcd);
			__mlx4_xrcd_free(dev, xrcdn);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
	rem_slave_vlans(dev, slave);
	rem_slave_macs(dev, slave);
	rem_slave_fs_rule(dev, slave);
	rem_slave_qps(dev, slave);
	rem_slave_srqs(dev, slave);
	rem_slave_cqs(dev, slave);
	rem_slave_mrs(dev, slave);
	rem_slave_eqs(dev, slave);
	rem_slave_mtts(dev, slave);
	rem_slave_counters(dev, slave);
	rem_slave_xrcdns(dev, slave);
	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}

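/*
 * Illustrative note: the order above appears deliberate. Flow-steering
 * rules and QPs are torn down first so that their references on CQs,
 * SRQs and MTTs are dropped before those resources are themselves
 * reclaimed; counters and XRC domains carry no cross-references and
 * can be freed last.
 */
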
void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
{
	struct mlx4_vf_immed_vlan_work *work =
		container_of(_work, struct mlx4_vf_immed_vlan_work, work);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_update_qp_context *upd_context;
	struct mlx4_dev *dev = &work->priv->dev;
	struct mlx4_resource_tracker *tracker =
		&work->priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[work->slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
	u64 qp_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
	int err;
	int port, errors = 0;
	u8 vlan_control;

	if (mlx4_is_slave(dev)) {
		mlx4_warn(dev, "Trying to update-qp in slave %d\n",
			  work->slave);
		goto out;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto out;
	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else if (!work->vlan_id)
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;

	upd_context = mailbox->buf;
	upd_context->primary_addr_path_mask = cpu_to_be64(qp_mask);
	upd_context->qp_context.pri_path.vlan_control = vlan_control;
	upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == work->slave) {
			if (qp->com.from_state != RES_QP_HW ||
			    !qp->sched_queue || /* no INIT2RTR trans yet */
			    mlx4_is_qp_reserved(dev, qp->local_qpn) ||
			    qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
			port = (qp->sched_queue >> 6 & 1) + 1;
			if (port != work->port) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
			upd_context->qp_context.pri_path.sched_queue =
				qp->sched_queue & 0xC7;
			upd_context->qp_context.pri_path.sched_queue |=
				((work->qos & 0x7) << 3);

			err = mlx4_cmd(dev, mailbox->dma,
				       qp->local_qpn & 0xffffff,
				       0, MLX4_CMD_UPDATE_QP,
				       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
			if (err) {
				mlx4_info(dev, "UPDATE_QP failed for slave %d, "
					  "port %d, qpn %d (%d)\n",
					  work->slave, port, qp->local_qpn,
					  err);
				errors++;
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
	mlx4_free_cmd_mailbox(dev, mailbox);

	if (errors)
		mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
			 errors, work->slave, work->port);

	/* unregister previous vlan_id if needed and we had no errors
	 * while updating the QPs
	 */
	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
	    NO_INDX != work->orig_vlan_ix)
		__mlx4_unregister_vlan(&work->priv->dev, work->port,
				       work->orig_vlan_id);
out:
	kfree(work);
	return;
}
