/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"

#define MLX4_MAC_VALID		(1ull << 63)
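/* Bookkeeping for resources owned by slaves (VFs). Each tracked object
 * embeds a res_common that records its id, owning slave and state, and
 * is linked both into a per-type red-black tree and into the owning
 * slave's per-type resource list.
 */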
struct mac_res {
	struct list_head list;
	u64 mac;
	int ref_count;
	u8 smac_index;
	u8 port;
};

struct vlan_res {
	struct list_head list;
	u16 vlan;
	int ref_count;
	int vlan_index;
	u8 port;
};

struct res_common {
	struct list_head	list;
	struct rb_node		node;
	u64			res_id;
	int			owner;
	int			state;
	int			from_state;
	int			to_state;
	int			removing;
};

enum {
	RES_ANY_BUSY = 1
};

struct res_gid {
	struct list_head	list;
	u8			gid[16];
	enum mlx4_protocol	prot;
	enum mlx4_steer_type	steer;
	u64			reg_id;
};

enum res_qp_states {
	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */
	RES_QP_RESERVED,

	/* ICM memory for QP context was mapped */
	RES_QP_MAPPED,

	/* QP is in hw ownership */
	RES_QP_HW
};

struct res_qp {
	struct res_common	com;
	struct res_mtt	       *mtt;
	struct res_cq	       *rcq;
	struct res_cq	       *scq;
	struct res_srq	       *srq;
	struct list_head	mcg_list;
	spinlock_t		mcg_spl;
	int			local_qpn;
	atomic_t		ref_count;
	u32			qpc_flags;
	/* saved qp params before VST enforcement in order to restore on VGT */
	u8			sched_queue;
	__be32			param3;
	u8			vlan_control;
	u8			fvl_rx;
	u8			pri_path_fl;
	u8			vlan_index;
	u8			feup;
};

enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,
	RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
	switch (state) {
	case RES_MTT_BUSY: return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
	default: return "Unknown";
	}
}

struct res_mtt {
	struct res_common	com;
	int			order;
	atomic_t		ref_count;
};

enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,
	RES_MPT_RESERVED,
	RES_MPT_MAPPED,
	RES_MPT_HW,
};

struct res_mpt {
	struct res_common	com;
	struct res_mtt	       *mtt;
	int			key;
};

enum res_eq_states {
	RES_EQ_BUSY = RES_ANY_BUSY,
	RES_EQ_RESERVED,
	RES_EQ_HW,
};

struct res_eq {
	struct res_common	com;
	struct res_mtt	       *mtt;
};

enum res_cq_states {
	RES_CQ_BUSY = RES_ANY_BUSY,
	RES_CQ_ALLOCATED,
	RES_CQ_HW,
};

struct res_cq {
	struct res_common	com;
	struct res_mtt	       *mtt;
	atomic_t		ref_count;
};

enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,
	RES_SRQ_ALLOCATED,
	RES_SRQ_HW,
};

struct res_srq {
	struct res_common	com;
	struct res_mtt	       *mtt;
	struct res_cq	       *cq;
	atomic_t		ref_count;
};

enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,
};

struct res_counter {
	struct res_common	com;
	int			port;
};

enum res_xrcdn_states {
	RES_XRCD_BUSY = RES_ANY_BUSY,
	RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
	struct res_common	com;
	int			port;
};

enum res_fs_rule_states {
	RES_FS_RULE_BUSY = RES_ANY_BUSY,
	RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
	struct res_common	com;
	int			qpn;
};
static int mlx4_is_eth(struct mlx4_dev *dev, int port)
{
	return dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB ? 0 : 1;
}
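/* Find a tracked resource by id in the per-type rb-tree; returns the
 * embedded res_common or NULL. Callers must hold the tracker lock
 * (mlx4_tlock).
 */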
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct res_common *res = container_of(node, struct res_common,
						      node);

		if (res_id < res->res_id)
			node = node->rb_left;
		else if (res_id > res->res_id)
			node = node->rb_right;
		else
			return res;
	}
	return NULL;
}
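/* Insert a resource into the per-type rb-tree, keyed by res_id; fails
 * with -EEXIST if the id is already tracked.
 */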
static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct res_common *this = container_of(*new, struct res_common,
						       node);

		parent = *new;
		if (res->res_id < this->res_id)
			new = &((*new)->rb_left);
		else if (res->res_id > this->res_id)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&res->node, parent, new);
	rb_insert_color(&res->node, root);

	return 0;
}
enum qp_transition {
	QP_TRANS_INIT2RTR,
	QP_TRANS_RTR2RTS,
	QP_TRANS_RTS2RTS,
	QP_TRANS_SQERR2RTS,
	QP_TRANS_SQD2SQD,
	QP_TRANS_SQD2RTS
};

/* For Debug uses */
static const char *ResourceType(enum mlx4_resource rt)
{
	switch (rt) {
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_VLAN: return "RES_VLAN";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	case RES_FS_RULE: return "RES_FS_RULE";
	case RES_XRCD: return "RES_XRCD";
	default: return "Unknown resource type !!!";
	};
}
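/* Quota enforcement for the grant/release pair below: a slave may
 * always consume up to its guaranteed share of a resource type; past
 * that, requests are served from the shared free pool only while the
 * pool stays above the total still reserved for other functions'
 * guarantees.
 */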
static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
				      enum mlx4_resource res_type, int count,
				      int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
	int err = -EINVAL;
	int allocated, free, reserved, guaranteed, from_free;

	if (slave > dev->num_vfs)
		return -EINVAL;

	spin_lock(&res_alloc->alloc_lock);
	allocated = (port > 0) ?
		res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
		res_alloc->allocated[slave];
	free = (port > 0) ? res_alloc->res_port_free[port - 1] :
		res_alloc->res_free;
	reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
		res_alloc->res_reserved;
	guaranteed = res_alloc->guaranteed[slave];

	if (allocated + count > res_alloc->quota[slave])
		goto out;

	if (allocated + count <= guaranteed) {
		err = 0;
	} else {
		/* portion may need to be obtained from free area */
		if (guaranteed - allocated > 0)
			from_free = count - (guaranteed - allocated);
		else
			from_free = count;

		if (free - from_free > reserved)
			err = 0;
	}

	if (!err) {
		/* grant the request */
		if (port > 0) {
			res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count;
			res_alloc->res_port_free[port - 1] -= count;
		} else {
			res_alloc->allocated[slave] += count;
			res_alloc->res_free -= count;
		}
	}

out:
	spin_unlock(&res_alloc->alloc_lock);
	return err;
}

static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
					 enum mlx4_resource res_type, int count,
					 int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];

	if (slave > dev->num_vfs)
		return;

	spin_lock(&res_alloc->alloc_lock);
	if (port > 0) {
		res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count;
		res_alloc->res_port_free[port - 1] += count;
	} else {
		res_alloc->allocated[slave] -= count;
		res_alloc->res_free += count;
	}

	spin_unlock(&res_alloc->alloc_lock);
	return;
}
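/* Split a resource type between the PF and the VFs: every function is
 * guaranteed num_instances / (2 * (num_vfs + 1)) instances, and may
 * allocate up to half of the total on top of its guarantee.
 */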
static inline void initialize_res_quotas(struct mlx4_dev *dev,
					 struct resource_allocator *res_alloc,
					 enum mlx4_resource res_type,
					 int vf, int num_instances)
{
	res_alloc->guaranteed[vf] = num_instances / (2 * (dev->num_vfs + 1));
	res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
	if (vf == mlx4_master_func_num(dev)) {
		res_alloc->res_free = num_instances;
		if (res_type == RES_MTT) {
			/* reserved mtts will be taken out of the PF allocation */
			res_alloc->res_free += dev->caps.reserved_mtts;
			res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
			res_alloc->quota[vf] += dev->caps.reserved_mtts;
		}
	}
}

void mlx4_init_quotas(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int pf;

	/* quotas for VFs are initialized in mlx4_slave_cap */
	if (mlx4_is_slave(dev))
		return;

	if (!mlx4_is_mfunc(dev)) {
		dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
			mlx4_num_reserved_sqps(dev);
		dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
		dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
		dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
		dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
		return;
	}

	pf = mlx4_master_func_num(dev);
	dev->quotas.qp =
		priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
	dev->quotas.cq =
		priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
	dev->quotas.srq =
		priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
	dev->quotas.mtt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
	dev->quotas.mpt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}
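/* Called on the PF at multi-function start-up: allocates the per-type
 * quota, guaranteed and allocated arrays for all functions, computes
 * the quotas, and initializes the rb-trees and per-slave lists.
 */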
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, j;
	int t;

	priv->mfunc.master.res_tracker.slave_list =
		kzalloc(dev->num_slaves * sizeof(struct slave_list),
			GFP_KERNEL);
	if (!priv->mfunc.master.res_tracker.slave_list)
		return -ENOMEM;

	for (i = 0 ; i < dev->num_slaves; i++) {
		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
	}

	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
		 dev->num_slaves);
	for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		struct resource_allocator *res_alloc =
			&priv->mfunc.master.res_tracker.res_alloc[i];
		res_alloc->quota = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
		res_alloc->guaranteed = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
		if (i == RES_MAC || i == RES_VLAN)
			res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
						       (dev->num_vfs + 1) * sizeof(int),
						       GFP_KERNEL);
		else
			res_alloc->allocated = kzalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);

		if (!res_alloc->quota || !res_alloc->guaranteed ||
		    !res_alloc->allocated)
			goto no_mem_err;

		spin_lock_init(&res_alloc->alloc_lock);
		for (t = 0; t < dev->num_vfs + 1; t++) {
			struct mlx4_active_ports actv_ports =
				mlx4_get_active_ports(dev, t);
			switch (i) {
			case RES_QP:
				initialize_res_quotas(dev, res_alloc, RES_QP,
						      t, dev->caps.num_qps -
						      dev->caps.reserved_qps -
						      mlx4_num_reserved_sqps(dev));
				break;
			case RES_CQ:
				initialize_res_quotas(dev, res_alloc, RES_CQ,
						      t, dev->caps.num_cqs -
						      dev->caps.reserved_cqs);
				break;
			case RES_SRQ:
				initialize_res_quotas(dev, res_alloc, RES_SRQ,
						      t, dev->caps.num_srqs -
						      dev->caps.reserved_srqs);
				break;
			case RES_MPT:
				initialize_res_quotas(dev, res_alloc, RES_MPT,
						      t, dev->caps.num_mpts -
						      dev->caps.reserved_mrws);
				break;
			case RES_MTT:
				initialize_res_quotas(dev, res_alloc, RES_MTT,
						      t, dev->caps.num_mtts -
						      dev->caps.reserved_mtts);
				break;
			case RES_MAC:
				if (t == mlx4_master_func_num(dev)) {
					int max_vfs_pport = 0;
					/* Calculate the max vfs per port for */
					/* both ports.			      */
					for (j = 0; j < dev->caps.num_ports;
					     j++) {
						struct mlx4_slaves_pport slaves_pport =
							mlx4_phys_to_slaves_pport(dev, j + 1);
						unsigned current_slaves =
							bitmap_weight(slaves_pport.slaves,
								      dev->caps.num_ports) - 1;
						if (max_vfs_pport < current_slaves)
							max_vfs_pport =
								current_slaves;
					}
					res_alloc->quota[t] =
						MLX4_MAX_MAC_NUM -
						2 * max_vfs_pport;
					res_alloc->guaranteed[t] = 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] =
							MLX4_MAX_MAC_NUM;
				} else {
					res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
					res_alloc->guaranteed[t] = 2;
				}
				break;
			case RES_VLAN:
				if (t == mlx4_master_func_num(dev)) {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
					res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] =
							res_alloc->quota[t];
				} else {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
					res_alloc->guaranteed[t] = 0;
				}
				break;
			case RES_COUNTER:
				res_alloc->quota[t] = dev->caps.max_counters;
				res_alloc->guaranteed[t] = 0;
				if (t == mlx4_master_func_num(dev))
					res_alloc->res_free = res_alloc->quota[t];
				break;
			default:
				break;
			}
			if (i == RES_MAC || i == RES_VLAN) {
				for (j = 0; j < dev->caps.num_ports; j++)
					if (test_bit(j, actv_ports.ports))
						res_alloc->res_port_rsvd[j] +=
							res_alloc->guaranteed[t];
			} else {
				res_alloc->res_reserved += res_alloc->guaranteed[t];
			}
		}
	}
	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
	return 0;

no_mem_err:
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
		priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
		priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
		priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
	}
	return -ENOMEM;
}
void mlx4_free_resource_tracker(struct mlx4_dev *dev,
				enum mlx4_res_tracker_free_type type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (priv->mfunc.master.res_tracker.slave_list) {
		if (type != RES_TR_FREE_STRUCTS_ONLY) {
			for (i = 0; i < dev->num_slaves; i++) {
				if (type == RES_TR_FREE_ALL ||
				    dev->caps.function != i)
					mlx4_delete_all_resources_for_slave(dev, i);
			}
			/* free master's vlans */
			i = dev->caps.function;
			mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
			rem_slave_vlans(dev, i);
			mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
		}

		if (type != RES_TR_FREE_SLAVES_ONLY) {
			for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
				priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
				priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
				priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
			}
			kfree(priv->mfunc.master.res_tracker.slave_list);
			priv->mfunc.master.res_tracker.slave_list = NULL;
		}
	}
}
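/* The helpers below rewrite mailbox contents on behalf of slaves:
 * pkey indexes and GIDs in the QP context are paravirtualized, so the
 * slave's view is translated to physical values before the command
 * reaches the firmware.
 */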
static void update_pkey_index(struct mlx4_dev *dev, int slave,
			      struct mlx4_cmd_mailbox *inbox)
{
	u8 sched = *(u8 *)(inbox->buf + 64);
	u8 orig_index = *(u8 *)(inbox->buf + 35);
	u8 new_index;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port;

	port = (sched >> 6 & 1) + 1;

	new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
	*(u8 *)(inbox->buf + 35) = new_index;
}

static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
		       u8 slave)
{
	struct mlx4_qp_context	*qp_ctx = inbox->buf + 8;
	enum mlx4_qp_optpar	optpar = be32_to_cpu(*(__be32 *) inbox->buf);
	u32			ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
	int port;

	if (MLX4_QP_ST_UD == ts) {
		port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
		if (mlx4_is_eth(dev, port))
			qp_ctx->pri_path.mgid_index =
				mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
		else
			qp_ctx->pri_path.mgid_index = slave | 0x80;

	} else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
		if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
			port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
			if (mlx4_is_eth(dev, port)) {
				qp_ctx->pri_path.mgid_index +=
					mlx4_get_base_gid_ix(dev, slave, port);
				qp_ctx->pri_path.mgid_index &= 0x7f;
			} else {
				qp_ctx->pri_path.mgid_index = slave & 0x7F;
			}
		}
		if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
			port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
			if (mlx4_is_eth(dev, port)) {
				qp_ctx->alt_path.mgid_index +=
					mlx4_get_base_gid_ix(dev, slave, port);
				qp_ctx->alt_path.mgid_index &= 0x7f;
			} else {
				qp_ctx->alt_path.mgid_index = slave & 0x7F;
			}
		}
	}
}
*dev
,
665 struct mlx4_cmd_mailbox
*inbox
,
668 struct mlx4_qp_context
*qpc
= inbox
->buf
+ 8;
669 struct mlx4_vport_oper_state
*vp_oper
;
670 struct mlx4_priv
*priv
;
673 port
= (qpc
->pri_path
.sched_queue
& 0x40) ? 2 : 1;
674 priv
= mlx4_priv(dev
);
675 vp_oper
= &priv
->mfunc
.master
.vf_oper
[slave
].vport
[port
];
677 if (MLX4_VGT
!= vp_oper
->state
.default_vlan
) {
678 /* the reserved QPs (special, proxy, tunnel)
679 * do not operate over vlans
681 if (mlx4_is_qp_reserved(dev
, qpn
))
684 /* force strip vlan by clear vsd */
685 qpc
->param3
&= ~cpu_to_be32(MLX4_STRIP_VLAN
);
687 if (vp_oper
->state
.link_state
== IFLA_VF_LINK_STATE_DISABLE
&&
688 dev
->caps
.flags2
& MLX4_DEV_CAP_FLAG2_UPDATE_QP
) {
689 qpc
->pri_path
.vlan_control
=
690 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED
|
691 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED
|
692 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED
|
693 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED
|
694 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED
|
695 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED
;
696 } else if (0 != vp_oper
->state
.default_vlan
) {
697 qpc
->pri_path
.vlan_control
=
698 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED
|
699 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED
|
700 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED
;
701 } else { /* priority tagged */
702 qpc
->pri_path
.vlan_control
=
703 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED
|
704 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED
;
707 qpc
->pri_path
.fvl_rx
|= MLX4_FVL_RX_FORCE_ETH_VLAN
;
708 qpc
->pri_path
.vlan_index
= vp_oper
->vlan_idx
;
709 qpc
->pri_path
.fl
|= MLX4_FL_CV
| MLX4_FL_ETH_HIDE_CQE_VLAN
;
710 qpc
->pri_path
.feup
|= MLX4_FEUP_FORCE_ETH_UP
| MLX4_FVL_FORCE_ETH_VLAN
;
711 qpc
->pri_path
.sched_queue
&= 0xC7;
712 qpc
->pri_path
.sched_queue
|= (vp_oper
->state
.default_qos
) << 3;
714 if (vp_oper
->state
.spoofchk
) {
715 qpc
->pri_path
.feup
|= MLX4_FSM_FORCE_ETH_SRC_MAC
;
716 qpc
->pri_path
.grh_mylmc
= (0x80 & qpc
->pri_path
.grh_mylmc
) + vp_oper
->mac_idx
;
721 static int mpt_mask(struct mlx4_dev
*dev
)
723 return dev
->caps
.num_mpts
- 1;
726 static void *find_res(struct mlx4_dev
*dev
, u64 res_id
,
727 enum mlx4_resource type
)
729 struct mlx4_priv
*priv
= mlx4_priv(dev
);
731 return res_tracker_lookup(&priv
->mfunc
.master
.res_tracker
.res_tree
[type
],
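/* get_res()/put_res() implement a simple busy protocol: taking a
 * resource moves it to RES_ANY_BUSY so concurrent users back off with
 * -EBUSY, and put_res() restores the saved state.
 */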
static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
		   enum mlx4_resource type,
		   void *res)
{
	struct res_common *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (!r) {
		err = -ENONET;
		goto exit;
	}

	if (r->state == RES_ANY_BUSY) {
		err = -EBUSY;
		goto exit;
	}

	if (r->owner != slave) {
		err = -EPERM;
		goto exit;
	}

	r->from_state = r->state;
	r->state = RES_ANY_BUSY;

	if (res)
		*((struct res_common **)res) = r;

exit:
	spin_unlock_irq(mlx4_tlock(dev));
	return err;
}

int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource type,
				    u64 res_id, int *slave)
{
	struct res_common *r;
	int err = -ENOENT;
	int id = res_id;

	if (type == RES_QP)
		id &= 0x7fffff;
	spin_lock(mlx4_tlock(dev));

	r = find_res(dev, id, type);
	if (r) {
		*slave = r->owner;
		err = 0;
	}
	spin_unlock(mlx4_tlock(dev));

	return err;
}

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
		    enum mlx4_resource type)
{
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (r)
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}
static struct res_common *alloc_qp_tr(int id)
{
	struct res_qp *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_QP_RESERVED;
	ret->local_qpn = id;
	INIT_LIST_HEAD(&ret->mcg_list);
	spin_lock_init(&ret->mcg_spl);
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
	struct res_mtt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->order = order;
	ret->com.state = RES_MTT_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
	struct res_mpt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_MPT_RESERVED;
	ret->key = key;

	return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
	struct res_eq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_EQ_RESERVED;

	return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
	struct res_cq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_CQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
	struct res_srq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_SRQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_counter_tr(int id)
{
	struct res_counter *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_COUNTER_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
	struct res_xrcdn *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_XRCD_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
	struct res_fs_rule *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_FS_RULE_ALLOCATED;
	ret->qpn = qpn;
	return &ret->com;
}

static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
				   int extra)
{
	struct res_common *ret;

	switch (type) {
	case RES_QP:
		ret = alloc_qp_tr(id);
		break;
	case RES_MPT:
		ret = alloc_mpt_tr(id, extra);
		break;
	case RES_MTT:
		ret = alloc_mtt_tr(id, extra);
		break;
	case RES_EQ:
		ret = alloc_eq_tr(id);
		break;
	case RES_CQ:
		ret = alloc_cq_tr(id);
		break;
	case RES_SRQ:
		ret = alloc_srq_tr(id);
		break;
	case RES_MAC:
		printk(KERN_ERR "implementation missing\n");
		return NULL;
	case RES_COUNTER:
		ret = alloc_counter_tr(id);
		break;
	case RES_XRCD:
		ret = alloc_xrcdn_tr(id);
		break;
	case RES_FS_RULE:
		ret = alloc_fs_rule_tr(id, extra);
		break;
	default:
		return NULL;
	}
	if (ret)
		ret->owner = slave;

	return ret;
}
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct res_common **res_arr;
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct rb_root *root = &tracker->res_tree[type];

	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
	if (!res_arr)
		return -ENOMEM;

	for (i = 0; i < count; ++i) {
		res_arr[i] = alloc_tr(base + i, type, slave, extra);
		if (!res_arr[i]) {
			for (--i; i >= 0; --i)
				kfree(res_arr[i]);

			kfree(res_arr);
			return -ENOMEM;
		}
	}

	spin_lock_irq(mlx4_tlock(dev));
	for (i = 0; i < count; ++i) {
		if (find_res(dev, base + i, type)) {
			err = -EEXIST;
			goto undo;
		}
		err = res_tracker_insert(root, res_arr[i]);
		if (err)
			goto undo;
		list_add_tail(&res_arr[i]->list,
			      &tracker->slave_list[slave].res_list[type]);
	}
	spin_unlock_irq(mlx4_tlock(dev));
	kfree(res_arr);

	return 0;

undo:
	/* i indexes res_arr, so unwind to 0 rather than comparing against
	 * the resource id base */
	for (--i; i >= 0; --i)
		rb_erase(&res_arr[i]->node, root);

	spin_unlock_irq(mlx4_tlock(dev));

	for (i = 0; i < count; ++i)
		kfree(res_arr[i]);

	kfree(res_arr);

	return err;
}
*res
)
1043 if (res
->com
.state
== RES_QP_BUSY
|| atomic_read(&res
->ref_count
) ||
1044 !list_empty(&res
->mcg_list
)) {
1045 pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
1046 res
->com
.state
, atomic_read(&res
->ref_count
));
1048 } else if (res
->com
.state
!= RES_QP_RESERVED
) {
1055 static int remove_mtt_ok(struct res_mtt
*res
, int order
)
1057 if (res
->com
.state
== RES_MTT_BUSY
||
1058 atomic_read(&res
->ref_count
)) {
1059 printk(KERN_DEBUG
"%s-%d: state %s, ref_count %d\n",
1061 mtt_states_str(res
->com
.state
),
1062 atomic_read(&res
->ref_count
));
1064 } else if (res
->com
.state
!= RES_MTT_ALLOCATED
)
1066 else if (res
->order
!= order
)
1072 static int remove_mpt_ok(struct res_mpt
*res
)
1074 if (res
->com
.state
== RES_MPT_BUSY
)
1076 else if (res
->com
.state
!= RES_MPT_RESERVED
)
1082 static int remove_eq_ok(struct res_eq
*res
)
1084 if (res
->com
.state
== RES_MPT_BUSY
)
1086 else if (res
->com
.state
!= RES_MPT_RESERVED
)
1092 static int remove_counter_ok(struct res_counter
*res
)
1094 if (res
->com
.state
== RES_COUNTER_BUSY
)
1096 else if (res
->com
.state
!= RES_COUNTER_ALLOCATED
)
1102 static int remove_xrcdn_ok(struct res_xrcdn
*res
)
1104 if (res
->com
.state
== RES_XRCD_BUSY
)
1106 else if (res
->com
.state
!= RES_XRCD_ALLOCATED
)
1112 static int remove_fs_rule_ok(struct res_fs_rule
*res
)
1114 if (res
->com
.state
== RES_FS_RULE_BUSY
)
1116 else if (res
->com
.state
!= RES_FS_RULE_ALLOCATED
)
1122 static int remove_cq_ok(struct res_cq
*res
)
1124 if (res
->com
.state
== RES_CQ_BUSY
)
1126 else if (res
->com
.state
!= RES_CQ_ALLOCATED
)
1132 static int remove_srq_ok(struct res_srq
*res
)
1134 if (res
->com
.state
== RES_SRQ_BUSY
)
1136 else if (res
->com
.state
!= RES_SRQ_ALLOCATED
)
1142 static int remove_ok(struct res_common
*res
, enum mlx4_resource type
, int extra
)
1146 return remove_qp_ok((struct res_qp
*)res
);
1148 return remove_cq_ok((struct res_cq
*)res
);
1150 return remove_srq_ok((struct res_srq
*)res
);
1152 return remove_mpt_ok((struct res_mpt
*)res
);
1154 return remove_mtt_ok((struct res_mtt
*)res
, extra
);
1158 return remove_eq_ok((struct res_eq
*)res
);
1160 return remove_counter_ok((struct res_counter
*)res
);
1162 return remove_xrcdn_ok((struct res_xrcdn
*)res
);
1164 return remove_fs_rule_ok((struct res_fs_rule
*)res
);
static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	u64 i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		if (!r) {
			err = -ENOENT;
			goto out;
		}
		if (r->owner != slave) {
			err = -EPERM;
			goto out;
		}
		err = remove_ok(r, type, extra);
		if (err)
			goto out;
	}

	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		rb_erase(&r->node, &tracker->res_tree[type]);
		list_del(&r->list);
		kfree(r);
	}
	err = 0;

out:
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
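/* The *_res_start_move_to() helpers below implement per-type state
 * machines: they validate that the requested transition is legal from
 * the current state, then park the resource in the BUSY state until
 * the caller either commits (res_end_move) or rolls back
 * (res_abort_move) the transition.
 */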
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
				enum res_qp_states state, struct res_qp **qp,
				int alloc)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_qp *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_QP_BUSY:
			mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
				 __func__, r->com.res_id);
			err = -EBUSY;
			break;

		case RES_QP_RESERVED:
			if (r->com.state == RES_QP_MAPPED && !alloc)
				break;

			mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
			err = -EINVAL;
			break;

		case RES_QP_MAPPED:
			if ((r->com.state == RES_QP_RESERVED && alloc) ||
			    r->com.state == RES_QP_HW)
				break;
			else {
				mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
					 r->com.res_id);
				err = -EINVAL;
			}

			break;

		case RES_QP_HW:
			if (r->com.state != RES_QP_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_QP_BUSY;
			if (qp)
				*qp = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
*dev
, int slave
, int index
,
1275 enum res_mpt_states state
, struct res_mpt
**mpt
)
1277 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1278 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1282 spin_lock_irq(mlx4_tlock(dev
));
1283 r
= res_tracker_lookup(&tracker
->res_tree
[RES_MPT
], index
);
1286 else if (r
->com
.owner
!= slave
)
1294 case RES_MPT_RESERVED
:
1295 if (r
->com
.state
!= RES_MPT_MAPPED
)
1299 case RES_MPT_MAPPED
:
1300 if (r
->com
.state
!= RES_MPT_RESERVED
&&
1301 r
->com
.state
!= RES_MPT_HW
)
1306 if (r
->com
.state
!= RES_MPT_MAPPED
)
1314 r
->com
.from_state
= r
->com
.state
;
1315 r
->com
.to_state
= state
;
1316 r
->com
.state
= RES_MPT_BUSY
;
1322 spin_unlock_irq(mlx4_tlock(dev
));
1327 static int eq_res_start_move_to(struct mlx4_dev
*dev
, int slave
, int index
,
1328 enum res_eq_states state
, struct res_eq
**eq
)
1330 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1331 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1335 spin_lock_irq(mlx4_tlock(dev
));
1336 r
= res_tracker_lookup(&tracker
->res_tree
[RES_EQ
], index
);
1339 else if (r
->com
.owner
!= slave
)
1347 case RES_EQ_RESERVED
:
1348 if (r
->com
.state
!= RES_EQ_HW
)
1353 if (r
->com
.state
!= RES_EQ_RESERVED
)
1362 r
->com
.from_state
= r
->com
.state
;
1363 r
->com
.to_state
= state
;
1364 r
->com
.state
= RES_EQ_BUSY
;
1370 spin_unlock_irq(mlx4_tlock(dev
));
1375 static int cq_res_start_move_to(struct mlx4_dev
*dev
, int slave
, int cqn
,
1376 enum res_cq_states state
, struct res_cq
**cq
)
1378 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1379 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1383 spin_lock_irq(mlx4_tlock(dev
));
1384 r
= res_tracker_lookup(&tracker
->res_tree
[RES_CQ
], cqn
);
1387 } else if (r
->com
.owner
!= slave
) {
1389 } else if (state
== RES_CQ_ALLOCATED
) {
1390 if (r
->com
.state
!= RES_CQ_HW
)
1392 else if (atomic_read(&r
->ref_count
))
1396 } else if (state
!= RES_CQ_HW
|| r
->com
.state
!= RES_CQ_ALLOCATED
) {
1403 r
->com
.from_state
= r
->com
.state
;
1404 r
->com
.to_state
= state
;
1405 r
->com
.state
= RES_CQ_BUSY
;
1410 spin_unlock_irq(mlx4_tlock(dev
));
1415 static int srq_res_start_move_to(struct mlx4_dev
*dev
, int slave
, int index
,
1416 enum res_srq_states state
, struct res_srq
**srq
)
1418 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1419 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1423 spin_lock_irq(mlx4_tlock(dev
));
1424 r
= res_tracker_lookup(&tracker
->res_tree
[RES_SRQ
], index
);
1427 } else if (r
->com
.owner
!= slave
) {
1429 } else if (state
== RES_SRQ_ALLOCATED
) {
1430 if (r
->com
.state
!= RES_SRQ_HW
)
1432 else if (atomic_read(&r
->ref_count
))
1434 } else if (state
!= RES_SRQ_HW
|| r
->com
.state
!= RES_SRQ_ALLOCATED
) {
1439 r
->com
.from_state
= r
->com
.state
;
1440 r
->com
.to_state
= state
;
1441 r
->com
.state
= RES_SRQ_BUSY
;
1446 spin_unlock_irq(mlx4_tlock(dev
));
1451 static void res_abort_move(struct mlx4_dev
*dev
, int slave
,
1452 enum mlx4_resource type
, int id
)
1454 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1455 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1456 struct res_common
*r
;
1458 spin_lock_irq(mlx4_tlock(dev
));
1459 r
= res_tracker_lookup(&tracker
->res_tree
[type
], id
);
1460 if (r
&& (r
->owner
== slave
))
1461 r
->state
= r
->from_state
;
1462 spin_unlock_irq(mlx4_tlock(dev
));
1465 static void res_end_move(struct mlx4_dev
*dev
, int slave
,
1466 enum mlx4_resource type
, int id
)
1468 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1469 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1470 struct res_common
*r
;
1472 spin_lock_irq(mlx4_tlock(dev
));
1473 r
= res_tracker_lookup(&tracker
->res_tree
[type
], id
);
1474 if (r
&& (r
->owner
== slave
))
1475 r
->state
= r
->to_state
;
1476 spin_unlock_irq(mlx4_tlock(dev
));
1479 static int valid_reserved(struct mlx4_dev
*dev
, int slave
, int qpn
)
1481 return mlx4_is_qp_reserved(dev
, qpn
) &&
1482 (mlx4_is_master(dev
) || mlx4_is_guest_proxy(dev
, slave
, qpn
));
1485 static int fw_reserved(struct mlx4_dev
*dev
, int qpn
)
1487 return qpn
< dev
->caps
.reserved_qps_cnt
[MLX4_QP_REGION_FW
];
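/* QP allocation is two-staged: RES_OP_RESERVE reserves a range of QP
 * numbers against the slave's quota, and RES_OP_MAP_ICM later maps ICM
 * for one QP; FW-reserved QPNs skip the ICM step.
 */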
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err;
	int count;
	int align;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		count = get_param_l(&in_param);
		align = get_param_h(&in_param);
		err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
		if (err)
			return err;

		err = __mlx4_qp_reserve_range(dev, count, align, &base);
		if (err) {
			mlx4_release_resource(dev, slave, RES_QP, count, 0);
			return err;
		}

		err = add_res_range(dev, slave, base, count, RES_QP, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_QP, count, 0);
			__mlx4_qp_release_range(dev, base, count);
			return err;
		}
		set_param_l(out_param, base);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		if (valid_reserved(dev, slave, qpn)) {
			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
			if (err)
				return err;
		}

		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
					   NULL, 1);
		if (err)
			return err;

		if (!fw_reserved(dev, qpn)) {
			err = __mlx4_qp_alloc_icm(dev, qpn);
			if (err) {
				res_abort_move(dev, slave, RES_QP, qpn);
				return err;
			}
		}

		res_end_move(dev, slave, RES_QP, qpn);
		break;

	default:
		err = -EINVAL;
		break;
	}
	return err;
}
static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	order = get_param_l(&in_param);

	err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
	if (err)
		return err;

	base = __mlx4_alloc_mtt_range(dev, order);
	if (base == -1) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		return -ENOMEM;
	}

	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
	if (err) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		__mlx4_free_mtt_range(dev, base, order);
	} else {
		set_param_l(out_param, base);
	}

	return err;
}

static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
		if (err)
			break;

		index = __mlx4_mpt_reserve(dev);
		if (index == -1) {
			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
			break;
		}
		id = index & mpt_mask(dev);

		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
		if (err) {
			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
			__mlx4_mpt_release(dev, index);
			break;
		}
		set_param_l(out_param, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_MAPPED, &mpt);
		if (err)
			return err;

		err = __mlx4_mpt_alloc_icm(dev, mpt->key);
		if (err) {
			res_abort_move(dev, slave, RES_MPT, id);
			return err;
		}

		res_end_move(dev, slave, RES_MPT, id);
		break;
	}
	return err;
}

static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
		if (err)
			break;

		err = __mlx4_cq_alloc_icm(dev, &cqn);
		if (err) {
			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
			break;
		}

		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
			__mlx4_cq_free_icm(dev, cqn);
			break;
		}

		set_param_l(out_param, cqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}

static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
		if (err)
			break;

		err = __mlx4_srq_alloc_icm(dev, &srqn);
		if (err) {
			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
			break;
		}

		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
			__mlx4_srq_free_icm(dev, srqn);
			break;
		}

		set_param_l(out_param, srqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}
static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
				     u8 smac_index, u64 *mac)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->smac_index == smac_index && res->port == (u8) port) {
			*mac = res->mac;
			return 0;
		}
	}
	return -ENOENT;
}

static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			/* mac found. update ref count */
			++res->ref_count;
			return 0;
		}
	}

	if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
		return -EINVAL;
	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res) {
		mlx4_release_resource(dev, slave, RES_MAC, 1, port);
		return -ENOMEM;
	}
	res->mac = mac;
	res->port = (u8) port;
	res->smac_index = smac_index;
	res->ref_count = 1;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_MAC]);
	return 0;
}

static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
			       int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			if (!--res->ref_count) {
				list_del(&res->list);
				mlx4_release_resource(dev, slave, RES_MAC, 1, port);
				kfree(res);
			}
			break;
		}
	}
}

static void rem_slave_macs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;
	int i;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		list_del(&res->list);
		/* dereference the mac the num times the slave referenced it */
		for (i = 0; i < res->ref_count; i++)
			__mlx4_unregister_mac(dev, res->port, res->mac);
		mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
		kfree(res);
	}
}

static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param, int in_port)
{
	int err = -EINVAL;
	int port;
	u64 mac;
	u8 smac_index;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	port = !in_port ? get_param_l(out_param) : in_port;
	port = mlx4_slave_convert_port(
			dev, slave, port);

	if (port < 0)
		return -EINVAL;
	mac = in_param;

	err = __mlx4_register_mac(dev, port, mac);
	if (err >= 0) {
		smac_index = err;
		set_param_l(out_param, err);
		err = 0;
	}

	if (!err) {
		err = mac_add_to_slave(dev, slave, mac, port, smac_index);
		if (err)
			__mlx4_unregister_mac(dev, port, mac);
	}
	return err;
}
static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
			     int port, int vlan_index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		if (res->vlan == vlan && res->port == (u8) port) {
			/* vlan found. update ref count */
			++res->ref_count;
			return 0;
		}
	}

	if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
		return -EINVAL;
	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res) {
		mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
		return -ENOMEM;
	}
	res->vlan = vlan;
	res->port = (u8) port;
	res->vlan_index = vlan_index;
	res->ref_count = 1;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_VLAN]);
	return 0;
}

static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
				int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		if (res->vlan == vlan && res->port == (u8) port) {
			if (!--res->ref_count) {
				list_del(&res->list);
				mlx4_release_resource(dev, slave, RES_VLAN,
						      1, port);
				kfree(res);
			}
			break;
		}
	}
}

static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;
	int i;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		list_del(&res->list);
		/* dereference the vlan the num times the slave referenced it */
		for (i = 0; i < res->ref_count; i++)
			__mlx4_unregister_vlan(dev, res->port, res->vlan);
		mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
		kfree(res);
	}
}

static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param, int in_port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	int err;
	u16 vlan;
	int vlan_index;
	int port;

	port = !in_port ? get_param_l(out_param) : in_port;

	if (!port || op != RES_OP_RESERVE_AND_MAP)
		return -EINVAL;

	port = mlx4_slave_convert_port(
			dev, slave, port);

	if (port < 0)
		return -EINVAL;
	/* upstream kernels had NOP for reg/unreg vlan. Continue this. */
	if (!in_port && port > 0 && port <= dev->caps.num_ports) {
		slave_state[slave].old_vlan_api = true;
		return 0;
	}

	vlan = (u16) in_param;

	err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
	if (!err) {
		set_param_l(out_param, (u32) vlan_index);
		err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
		if (err)
			__mlx4_unregister_vlan(dev, port, vlan);
	}
	return err;
}
static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			     u64 in_param, u64 *out_param)
{
	u32 index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
	if (err)
		return err;

	err = __mlx4_counter_alloc(dev, &index);
	if (err) {
		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
		return err;
	}

	err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
	if (err) {
		__mlx4_counter_free(dev, index);
		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
	} else {
		set_param_l(out_param, index);
	}

	return err;
}

static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			   u64 in_param, u64 *out_param)
{
	u32 xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = __mlx4_xrcd_alloc(dev, &xrcdn);
	if (err)
		return err;

	err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		__mlx4_xrcd_free(dev, xrcdn);
	else
		set_param_l(out_param, xrcdn);

	return err;
}
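/* Dispatch a slave's ALLOC_RES command to the per-type handler: the
 * low byte of the in_modifier selects the resource type and bits 8-15
 * carry the port where one is needed (MAC/VLAN).
 */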
int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier & 0xFF) {
	case RES_QP:
		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MTT:
		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_CQ:
		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param,
				    (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_VLAN:
		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param,
				     (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_COUNTER:
		err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
					vhcr->in_param, &vhcr->out_param);
		break;

	case RES_XRCD:
		err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
				      vhcr->in_param, &vhcr->out_param);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param)
{
	int err;
	int count;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		base = get_param_l(&in_param) & 0x7fffff;
		count = get_param_h(&in_param);
		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
		if (err)
			break;
		mlx4_release_resource(dev, slave, RES_QP, count, 0);
		__mlx4_qp_release_range(dev, base, count);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
					   NULL, 0);
		if (err)
			return err;

		if (!fw_reserved(dev, qpn))
			__mlx4_qp_free_icm(dev, qpn);

		res_end_move(dev, slave, RES_QP, qpn);

		if (valid_reserved(dev, slave, qpn))
			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	base = get_param_l(&in_param);
	order = get_param_h(&in_param);
	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
	if (!err) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		__mlx4_free_mtt_range(dev, base, order);
	}
	return err;
}

static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = get_res(dev, slave, id, RES_MPT, &mpt);
		if (err)
			break;
		index = mpt->key;
		put_res(dev, slave, id, RES_MPT);

		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
		if (err)
			break;
		mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
		__mlx4_mpt_release(dev, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_RESERVED, &mpt);
		if (err)
			return err;

		__mlx4_mpt_free_icm(dev, mpt->key);
		res_end_move(dev, slave, RES_MPT, id);
		return err;
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}
*dev
, int slave
, int op
, int cmd
,
2162 u64 in_param
, u64
*out_param
)
2168 case RES_OP_RESERVE_AND_MAP
:
2169 cqn
= get_param_l(&in_param
);
2170 err
= rem_res_range(dev
, slave
, cqn
, 1, RES_CQ
, 0);
2174 mlx4_release_resource(dev
, slave
, RES_CQ
, 1, 0);
2175 __mlx4_cq_free_icm(dev
, cqn
);
2186 static int srq_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2187 u64 in_param
, u64
*out_param
)
2193 case RES_OP_RESERVE_AND_MAP
:
2194 srqn
= get_param_l(&in_param
);
2195 err
= rem_res_range(dev
, slave
, srqn
, 1, RES_SRQ
, 0);
2199 mlx4_release_resource(dev
, slave
, RES_SRQ
, 1, 0);
2200 __mlx4_srq_free_icm(dev
, srqn
);
2211 static int mac_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2212 u64 in_param
, u64
*out_param
, int in_port
)
2218 case RES_OP_RESERVE_AND_MAP
:
2219 port
= !in_port
? get_param_l(out_param
) : in_port
;
2220 port
= mlx4_slave_convert_port(
2225 mac_del_from_slave(dev
, slave
, in_param
, port
);
2226 __mlx4_unregister_mac(dev
, port
, in_param
);
2237 static int vlan_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2238 u64 in_param
, u64
*out_param
, int port
)
2240 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2241 struct mlx4_slave_state
*slave_state
= priv
->mfunc
.master
.slave_state
;
2244 port
= mlx4_slave_convert_port(
2250 case RES_OP_RESERVE_AND_MAP
:
2251 if (slave_state
[slave
].old_vlan_api
)
2255 vlan_del_from_slave(dev
, slave
, in_param
, port
);
2256 __mlx4_unregister_vlan(dev
, port
, in_param
);
2266 static int counter_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2267 u64 in_param
, u64
*out_param
)
2272 if (op
!= RES_OP_RESERVE
)
2275 index
= get_param_l(&in_param
);
2276 err
= rem_res_range(dev
, slave
, index
, 1, RES_COUNTER
, 0);
2280 __mlx4_counter_free(dev
, index
);
2281 mlx4_release_resource(dev
, slave
, RES_COUNTER
, 1, 0);
2286 static int xrcdn_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2287 u64 in_param
, u64
*out_param
)
2292 if (op
!= RES_OP_RESERVE
)
2295 xrcdn
= get_param_l(&in_param
);
2296 err
= rem_res_range(dev
, slave
, xrcdn
, 1, RES_XRCD
, 0);
2300 __mlx4_xrcd_free(dev
, xrcdn
);
int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err = -EINVAL;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier & 0xFF) {
	case RES_QP:
		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param);
		break;

	case RES_MTT:
		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param);
		break;

	case RES_CQ:
		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param,
				   (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_VLAN:
		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param,
				    (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_COUNTER:
		err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
				       vhcr->in_param, &vhcr->out_param);
		break;

	case RES_XRCD:
		err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);
		break;

	default:
		break;
	}
	return err;
}
/* ugly but other choices are uglier */
static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
{
	return (be32_to_cpu(mpt->flags) >> 9) & 1;
}

static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
{
	return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
}

static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->mtt_sz);
}

static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
}

static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
}

static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
}

static int mr_is_region(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
}

static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
}

static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
{
	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
}
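/* Compute how many MTT pages a QP context implies: the sum of SQ and
 * RQ sizes (the RQ is absent for SRQ/RSS/XRC QPs), offset by the page
 * offset and rounded up to a power-of-two page count.
 */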
static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
{
	int page_shift = (qpc->log_page_size & 0x3f) + 12;
	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
	int log_sq_stride = qpc->sq_size_stride & 7;
	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
	int log_rq_stride = qpc->rq_size_stride & 7;
	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
	u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
	int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
	int sq_size;
	int rq_size;
	int total_mem;
	int total_pages;
	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;

	sq_size = 1 << (log_sq_size + log_sq_stride + 4);
	rq_size = (srq | rss | xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
	total_mem = sq_size + rq_size;
	total_pages =
		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
				   page_shift);

	return total_pages;
}
*dev
, int slave
, int start
,
2441 int size
, struct res_mtt
*mtt
)
2443 int res_start
= mtt
->com
.res_id
;
2444 int res_size
= (1 << mtt
->order
);
2446 if (start
< res_start
|| start
+ size
> res_start
+ res_size
)
2451 int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev
*dev
, int slave
,
2452 struct mlx4_vhcr
*vhcr
,
2453 struct mlx4_cmd_mailbox
*inbox
,
2454 struct mlx4_cmd_mailbox
*outbox
,
2455 struct mlx4_cmd_info
*cmd
)
2458 int index
= vhcr
->in_modifier
;
2459 struct res_mtt
*mtt
;
2460 struct res_mpt
*mpt
;
2461 int mtt_base
= mr_get_mtt_addr(inbox
->buf
) / dev
->caps
.mtt_entry_sz
;
2467 id
= index
& mpt_mask(dev
);
2468 err
= mr_res_start_move_to(dev
, slave
, id
, RES_MPT_HW
, &mpt
);
2472 /* Disable memory windows for VFs. */
2473 if (!mr_is_region(inbox
->buf
)) {
2478 /* Make sure that the PD bits related to the slave id are zeros. */
2479 pd
= mr_get_pd(inbox
->buf
);
2480 pd_slave
= (pd
>> 17) & 0x7f;
2481 if (pd_slave
!= 0 && pd_slave
!= slave
) {
2486 if (mr_is_fmr(inbox
->buf
)) {
2487 /* FMR and Bind Enable are forbidden in slave devices. */
2488 if (mr_is_bind_enabled(inbox
->buf
)) {
2492 /* FMR and Memory Windows are also forbidden. */
2493 if (!mr_is_region(inbox
->buf
)) {
2499 phys
= mr_phys_mpt(inbox
->buf
);
2501 err
= get_res(dev
, slave
, mtt_base
, RES_MTT
, &mtt
);
2505 err
= check_mtt_range(dev
, slave
, mtt_base
,
2506 mr_get_mtt_size(inbox
->buf
), mtt
);
2513 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
2518 atomic_inc(&mtt
->ref_count
);
2519 put_res(dev
, slave
, mtt
->com
.res_id
, RES_MTT
);
2522 res_end_move(dev
, slave
, RES_MPT
, id
);
2527 put_res(dev
, slave
, mtt
->com
.res_id
, RES_MTT
);
2529 res_abort_move(dev
, slave
, RES_MPT
, id
);
2534 int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev
*dev
, int slave
,
2535 struct mlx4_vhcr
*vhcr
,
2536 struct mlx4_cmd_mailbox
*inbox
,
2537 struct mlx4_cmd_mailbox
*outbox
,
2538 struct mlx4_cmd_info
*cmd
)
2541 int index
= vhcr
->in_modifier
;
2542 struct res_mpt
*mpt
;
2545 id
= index
& mpt_mask(dev
);
2546 err
= mr_res_start_move_to(dev
, slave
, id
, RES_MPT_MAPPED
, &mpt
);
2550 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
2555 atomic_dec(&mpt
->mtt
->ref_count
);
2557 res_end_move(dev
, slave
, RES_MPT
, id
);
2561 res_abort_move(dev
, slave
, RES_MPT
, id
);
2566 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev
*dev
, int slave
,
2567 struct mlx4_vhcr
*vhcr
,
2568 struct mlx4_cmd_mailbox
*inbox
,
2569 struct mlx4_cmd_mailbox
*outbox
,
2570 struct mlx4_cmd_info
*cmd
)
2573 int index
= vhcr
->in_modifier
;
2574 struct res_mpt
*mpt
;
2577 id
= index
& mpt_mask(dev
);
2578 err
= get_res(dev
, slave
, id
, RES_MPT
, &mpt
);
2582 if (mpt
->com
.from_state
!= RES_MPT_HW
) {
2587 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
2590 put_res(dev
, slave
, id
, RES_MPT
);
2594 static int qp_get_rcqn(struct mlx4_qp_context
*qpc
)
2596 return be32_to_cpu(qpc
->cqn_recv
) & 0xffffff;
2599 static int qp_get_scqn(struct mlx4_qp_context
*qpc
)
2601 return be32_to_cpu(qpc
->cqn_send
) & 0xffffff;
2604 static u32
qp_get_srqn(struct mlx4_qp_context
*qpc
)
2606 return be32_to_cpu(qpc
->srqn
) & 0x1ffffff;
2609 static void adjust_proxy_tun_qkey(struct mlx4_dev
*dev
, struct mlx4_vhcr
*vhcr
,
2610 struct mlx4_qp_context
*context
)
2612 u32 qpn
= vhcr
->in_modifier
& 0xffffff;
2615 if (mlx4_get_parav_qkey(dev
, qpn
, &qkey
))
2618 /* adjust qkey in qp context */
2619 context
->qkey
= cpu_to_be32(qkey
);
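/* RST2INIT transition: take references on every object the QP context
 * points at (MTT range, receive/send CQs and optionally the SRQ),
 * translate the paravirtualized pkey index and proxy qkey, and only
 * then pass the command to the firmware.
 */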
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_mtt *mtt;
	struct res_qp *qp;
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
	int mtt_size = qp_get_mtt_size(qpc);
	struct res_cq *rcq;
	struct res_cq *scq;
	int rcqn = qp_get_rcqn(qpc);
	int scqn = qp_get_scqn(qpc);
	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
	struct res_srq *srq;
	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
	if (err)
		return err;
	qp->local_qpn = local_qpn;
	qp->sched_queue = 0;
	qp->param3 = 0;
	qp->vlan_control = 0;
	qp->fvl_rx = 0;
	qp->pri_path_fl = 0;
	qp->vlan_index = 0;
	qp->feup = 0;
	qp->qpc_flags = be32_to_cpu(qpc->flags);

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto ex_put_mtt;

	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
	if (err)
		goto ex_put_mtt;

	if (scqn != rcqn) {
		err = get_res(dev, slave, scqn, RES_CQ, &scq);
		if (err)
			goto ex_put_rcq;
	} else
		scq = rcq;

	if (use_srq) {
		err = get_res(dev, slave, srqn, RES_SRQ, &srq);
		if (err)
			goto ex_put_scq;
	}

	adjust_proxy_tun_qkey(dev, vhcr, qpc);
	update_pkey_index(dev, slave, inbox);
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_srq;
	atomic_inc(&mtt->ref_count);
	qp->mtt = mtt;
	atomic_inc(&rcq->ref_count);
	qp->rcq = rcq;
	atomic_inc(&scq->ref_count);
	qp->scq = scq;

	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);

	if (use_srq) {
		atomic_inc(&srq->ref_count);
		put_res(dev, slave, srqn, RES_SRQ);
		qp->srq = srq;
	}
	put_res(dev, slave, rcqn, RES_CQ);
	put_res(dev, slave, mtt_base, RES_MTT);
	res_end_move(dev, slave, RES_QP, qpn);

	return 0;

ex_put_srq:
	if (use_srq)
		put_res(dev, slave, srqn, RES_SRQ);
ex_put_scq:
	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);
ex_put_rcq:
	put_res(dev, slave, rcqn, RES_CQ);
ex_put_mtt:
	put_res(dev, slave, mtt_base, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}
2724 static int eq_get_mtt_addr(struct mlx4_eq_context
*eqc
)
2726 return be32_to_cpu(eqc
->mtt_base_addr_l
) & 0xfffffff8;
2729 static int eq_get_mtt_size(struct mlx4_eq_context
*eqc
)
2731 int log_eq_size
= eqc
->log_eq_size
& 0x1f;
2732 int page_shift
= (eqc
->log_page_size
& 0x3f) + 12;
2734 if (log_eq_size
+ 5 < page_shift
)
2737 return 1 << (log_eq_size
+ 5 - page_shift
);
2740 static int cq_get_mtt_addr(struct mlx4_cq_context
*cqc
)
2742 return be32_to_cpu(cqc
->mtt_base_addr_l
) & 0xfffffff8;
2745 static int cq_get_mtt_size(struct mlx4_cq_context
*cqc
)
2747 int log_cq_size
= (be32_to_cpu(cqc
->logsize_usrpage
) >> 24) & 0x1f;
2748 int page_shift
= (cqc
->log_page_size
& 0x3f) + 12;
2750 if (log_cq_size
+ 5 < page_shift
)
2753 return 1 << (log_cq_size
+ 5 - page_shift
);
int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int eqn = vhcr->in_modifier;
	int res_id = (slave << 8) | eqn;
	struct mlx4_eq_context *eqc = inbox->buf;
	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
	int mtt_size = eq_get_mtt_size(eqc);
	struct res_eq *eq;
	struct res_mtt *mtt;

	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	if (err)
		return err;
	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
	if (err)
		goto out_add;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto out_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;

	atomic_inc(&mtt->ref_count);
	eq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_EQ, res_id);
out_add:
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	return err;
}
static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
			      int len, struct res_mtt **res)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mtt *mtt;
	int err = -EINVAL;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
			    com.list) {
		if (!check_mtt_range(dev, slave, start, len, mtt)) {
			*res = mtt;
			mtt->com.from_state = mtt->com.state;
			mtt->com.state = RES_MTT_BUSY;
			err = 0;
			break;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
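
/*
 * verify_qp_parameters() sanity-checks a QP state-transition mailbox on
 * behalf of a VF: whenever an address path is modified on an IB port,
 * the requested mgid_index must fall inside the GID range the master
 * assigned to that slave, otherwise the transition is refused.
 */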
static int verify_qp_parameters(struct mlx4_dev *dev,
				struct mlx4_cmd_mailbox *inbox,
				enum qp_transition transition, u8 slave)
{
	u32 qp_type;
	struct mlx4_qp_context *qp_ctx;
	enum mlx4_qp_optpar optpar;
	int port;
	int num_gids;

	qp_ctx	= inbox->buf + 8;
	qp_type	= (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
	optpar	= be32_to_cpu(*(__be32 *) inbox->buf);

	switch (qp_type) {
	case MLX4_QP_ST_RC:
	case MLX4_QP_ST_XRC:
	case MLX4_QP_ST_UC:
		switch (transition) {
		case QP_TRANS_INIT2RTR:
		case QP_TRANS_RTR2RTS:
		case QP_TRANS_RTS2RTS:
		case QP_TRANS_SQD2SQD:
		case QP_TRANS_SQD2RTS:
			if (slave != mlx4_master_func_num(dev)) {
				if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
					port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
					if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
						num_gids = mlx4_get_slave_num_gids(dev, slave, port);
					else
						num_gids = 1;
					if (qp_ctx->pri_path.mgid_index >= num_gids)
						return -EINVAL;
				}
				if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
					port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
					if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
						num_gids = mlx4_get_slave_num_gids(dev, slave, port);
					else
						num_gids = 1;
					if (qp_ctx->alt_path.mgid_index >= num_gids)
						return -EINVAL;
				}
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	return 0;
}
int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_mtt mtt;
	__be64 *page_list = inbox->buf;
	u64 *pg_list = (u64 *)page_list;
	int i;
	struct res_mtt *rmtt = NULL;
	int start = be64_to_cpu(page_list[0]);
	int npages = vhcr->in_modifier;
	int err;

	err = get_containing_mtt(dev, slave, start, npages, &rmtt);
	if (err)
		return err;

	/* Call the SW implementation of write_mtt:
	 * - Prepare a dummy mtt struct
	 * - Translate inbox contents to simple addresses in host endianness */
	mtt.offset = 0;  /* TBD this is broken but I don't handle it since
			    we don't really use it */
	mtt.order = 0;
	mtt.page_shift = 0;
	for (i = 0; i < npages; ++i)
		pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);

	err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
			       ((u64 *)page_list + 2));

	if (rmtt)
		put_res(dev, slave, rmtt->com.res_id, RES_MTT);

	return err;
}
int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 8);
	struct res_eq *eq;
	int err;

	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
	if (err)
		return err;

	err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
	if (err)
		goto ex_abort;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	atomic_dec(&eq->mtt->ref_count);
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);

	return 0;

ex_put:
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_EQ, res_id);

	return err;
}
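
/*
 * mlx4_GEN_EQE() is how the master injects an asynchronous event into a
 * slave: it copies the 28-byte EQE payload into a mailbox and fires the
 * GEN_EQE command with the slave number and the slave's registered EQN
 * packed into the in_modifier.  Nothing is sent if the slave never
 * registered an EQ for this event type (event_eq->eqn < 0).
 */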
int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq_info *event_eq;
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_modifier = 0;
	int err;
	int res_id;
	struct res_eq *req;

	if (!priv->mfunc.master.slave_state)
		return -EINVAL;

	event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];

	/* Create the event only if the slave is registered */
	if (event_eq->eqn < 0)
		return 0;

	mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	res_id = (slave << 8) | event_eq->eqn;
	err = get_res(dev, slave, res_id, RES_EQ, &req);
	if (err)
		goto unlock;

	if (req->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto put;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto put;
	}

	if (eqe->type == MLX4_EVENT_TYPE_CMD) {
		++event_eq->token;
		eqe->event.cmd.token = cpu_to_be16(event_eq->token);
	}

	memcpy(mailbox->buf, (u8 *) eqe, 28);

	in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);

	err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
		       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_NATIVE);

	put_res(dev, slave, res_id, RES_EQ);
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;

put:
	put_res(dev, slave, res_id, RES_EQ);

unlock:
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	return err;
}
int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 8);
	struct res_eq *eq;
	int err;

	err = get_res(dev, slave, res_id, RES_EQ, &eq);
	if (err)
		return err;

	if (eq->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

ex_put:
	put_res(dev, slave, res_id, RES_EQ);
	return err;
}
int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
	struct res_cq *cq;
	struct res_mtt *mtt;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
	if (err)
		return err;
	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;
	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	if (err)
		goto out_put;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}
int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_move;
	atomic_dec(&cq->mtt->ref_count);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}
int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;
	int err;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);
	if (err)
		return err;

	if (cq->com.from_state != RES_CQ_HW)
		goto ex_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
	put_res(dev, slave, cqn, RES_CQ);

	return err;
}
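
/*
 * MODIFY_CQ with op_modifier 0 is a CQ resize.  handle_resize() swaps
 * the CQ's tracked MTT: it drops the reference on the MTT currently
 * backing the CQ and takes one on the MTT described by the new context,
 * failing cleanly if either range is invalid.
 */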
static int handle_resize(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd,
			 struct res_cq *cq)
{
	int err;
	struct res_mtt *orig_mtt;
	struct res_mtt *mtt;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;

	err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
	if (err)
		return err;

	if (orig_mtt != cq->mtt) {
		err = -EINVAL;
		goto ex_put;
	}

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_put;

	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	if (err)
		goto ex_put1;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put1;
	atomic_dec(&orig_mtt->ref_count);
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	return 0;

ex_put1:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_put:
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);

	return err;
}
int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;
	int err;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);
	if (err)
		return err;

	if (cq->com.from_state != RES_CQ_HW)
		goto ex_put;

	if (vhcr->op_modifier == 0) {
		err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
	put_res(dev, slave, cqn, RES_CQ);

	return err;
}
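
/*
 * SRQ sizing mirrors the EQ/CQ helpers above, except the WQE size is
 * variable: the basic stride is 16 bytes (2^4), scaled by log_rq_stride,
 * so the MTT count is
 * 1 << (log_srq_size + log_rq_stride + 4 - page_shift).
 */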
static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
{
	int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
	int log_rq_stride = srqc->logstride & 7;
	int page_shift = (srqc->log_page_size & 0x3f) + 12;

	if (log_srq_size + log_rq_stride + 4 < page_shift)
		return 1;

	return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
}
int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_srq *srq;
	struct mlx4_srq_context *srqc = inbox->buf;
	int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;

	if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
		return -EINVAL;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
	if (err)
		return err;
	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;
	err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
			      mtt);
	if (err)
		goto ex_put_mtt;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_mtt;

	atomic_inc(&mtt->ref_count);
	srq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_SRQ, srqn);
	return 0;

ex_put_mtt:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_SRQ, srqn);

	return err;
}
int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;
	atomic_dec(&srq->mtt->ref_count);
	if (srq->cq)
		atomic_dec(&srq->cq->ref_count);
	res_end_move(dev, slave, RES_SRQ, srqn);

	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_SRQ, srqn);

	return err;
}
int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
	if (err)
		return err;
	if (srq->com.from_state != RES_SRQ_HW) {
		err = -EBUSY;
		goto out;
	}
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, srqn, RES_SRQ);
	return err;
}
int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
	if (err)
		return err;

	if (srq->com.from_state != RES_SRQ_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, srqn, RES_SRQ);
	return err;
}
int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
			struct mlx4_vhcr *vhcr,
			struct mlx4_cmd_mailbox *inbox,
			struct mlx4_cmd_mailbox *outbox,
			struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;

	err = get_res(dev, slave, qpn, RES_QP, &qp);
	if (err)
		return err;
	if (qp->com.from_state != RES_QP_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}
int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			      struct mlx4_vhcr *vhcr,
			      struct mlx4_cmd_mailbox *inbox,
			      struct mlx4_cmd_mailbox *outbox,
			      struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *context = inbox->buf + 8;
	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
				 struct mlx4_qp_context *qpc,
				 struct mlx4_cmd_mailbox *inbox)
{
	enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
	u8 pri_sched_queue;
	int port = mlx4_slave_convert_port(
		   dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;

	if (port < 0)
		return -EINVAL;

	pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
			  ((port & 1) << 6);

	if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH ||
	    mlx4_is_eth(dev, port + 1)) {
		qpc->pri_path.sched_queue = pri_sched_queue;
	}

	if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
		port = mlx4_slave_convert_port(
				dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
				+ 1) - 1;
		if (port < 0)
			return -EINVAL;
		qpc->alt_path.sched_queue =
			(qpc->alt_path.sched_queue & ~(1 << 6)) |
			(port & 1) << 6;
	}
	return 0;
}
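
/*
 * For RoCE QPs the source MAC is selected by an index into the port's
 * MAC table (pri_path.grh_mylmc).  roce_verify_mac() refuses the
 * transition when that index does not resolve to a MAC actually
 * registered to the slave on that port.
 */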
static int roce_verify_mac(struct mlx4_dev *dev, int slave,
			   struct mlx4_qp_context *qpc,
			   struct mlx4_cmd_mailbox *inbox)
{
	u64 mac;
	int port;
	u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
	u8 sched = *(u8 *)(inbox->buf + 64);
	u8 smac_ix;

	port = (sched >> 6 & 1) + 1;
	if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
		smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
		if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
			return -ENOENT;
	}
	return 0;
}
int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;
	u8 orig_sched_queue;
	__be32 orig_param3 = qpc->param3;
	u8 orig_vlan_control = qpc->pri_path.vlan_control;
	u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
	u8 orig_pri_path_fl = qpc->pri_path.fl;
	u8 orig_vlan_index = qpc->pri_path.vlan_index;
	u8 orig_feup = qpc->pri_path.feup;

	err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
	if (err)
		return err;
	err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
	if (err)
		return err;

	if (roce_verify_mac(dev, slave, qpc, inbox))
		return -EINVAL;

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, qpc);
	orig_sched_queue = qpc->pri_path.sched_queue;
	err = update_vport_qp_param(dev, inbox, slave, qpn);
	if (err)
		return err;

	err = get_res(dev, slave, qpn, RES_QP, &qp);
	if (err)
		return err;
	if (qp->com.from_state != RES_QP_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	/* if no error, save sched queue value passed in by VF. This is
	 * essentially the QOS value provided by the VF. This will be useful
	 * if we allow dynamic changes from VST back to VGT
	 */
	if (!err) {
		qp->sched_queue = orig_sched_queue;
		qp->param3	= orig_param3;
		qp->vlan_control = orig_vlan_control;
		qp->fvl_rx	= orig_fvl_rx;
		qp->pri_path_fl = orig_pri_path_fl;
		qp->vlan_index	= orig_vlan_index;
		qp->feup	= orig_feup;
	}
	put_res(dev, slave, qpn, RES_QP);
	return err;
}
int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = adjust_qp_sched_queue(dev, slave, context, inbox);
	if (err)
		return err;
	err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
	if (err)
		return err;

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = adjust_qp_sched_queue(dev, slave, context, inbox);
	if (err)
		return err;
	err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
	if (err)
		return err;

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			      struct mlx4_vhcr *vhcr,
			      struct mlx4_cmd_mailbox *inbox,
			      struct mlx4_cmd_mailbox *outbox,
			      struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *context = inbox->buf + 8;
	int err = adjust_qp_sched_queue(dev, slave, context, inbox);
	if (err)
		return err;
	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = adjust_qp_sched_queue(dev, slave, context, inbox);
	if (err)
		return err;
	err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
	if (err)
		return err;

	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_gid(dev, inbox, (u8)slave);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = adjust_qp_sched_queue(dev, slave, context, inbox);
	if (err)
		return err;
	err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
	if (err)
		return err;

	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_gid(dev, inbox, (u8)slave);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
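
/*
 * Moving a QP back to RESET undoes the reference counting done at
 * RST2INIT: the MTT, receive/send CQs and the optional SRQ each drop one
 * reference, and the tracked QP state falls back to RES_QP_MAPPED.
 */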
int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	atomic_dec(&qp->mtt->ref_count);
	atomic_dec(&qp->rcq->ref_count);
	atomic_dec(&qp->scq->ref_count);
	if (qp->srq)
		atomic_dec(&qp->srq->ref_count);
	res_end_move(dev, slave, RES_QP, qpn);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}
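
/*
 * Multicast attachments are tracked per QP on rqp->mcg_list so they can
 * be released at detach time (and force-detached in detach_qp() when a
 * slave goes away).  find_gid()/add_mcg_res()/rem_mcg_res() below
 * maintain that list under the QP's mcg spinlock.
 */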
static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
				struct res_qp *rqp, u8 *gid)
{
	struct res_gid *res;

	list_for_each_entry(res, &rqp->mcg_list, list) {
		if (!memcmp(res->gid, gid, 16))
			return res;
	}
	return NULL;
}
static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot,
		       enum mlx4_steer_type steer, u64 reg_id)
{
	struct res_gid *res;
	int err;

	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	spin_lock_irq(&rqp->mcg_spl);
	if (find_gid(dev, slave, rqp, gid)) {
		kfree(res);
		err = -EEXIST;
	} else {
		memcpy(res->gid, gid, 16);
		res->prot = prot;
		res->steer = steer;
		res->reg_id = reg_id;
		list_add_tail(&res->list, &rqp->mcg_list);
		err = 0;
	}
	spin_unlock_irq(&rqp->mcg_spl);

	return err;
}
static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot,
		       enum mlx4_steer_type steer, u64 *reg_id)
{
	struct res_gid *res;
	int err;

	spin_lock_irq(&rqp->mcg_spl);
	res = find_gid(dev, slave, rqp, gid);
	if (!res || res->prot != prot || res->steer != steer)
		err = -EINVAL;
	else {
		*reg_id = res->reg_id;
		list_del(&res->list);
		kfree(res);
		err = 0;
	}
	spin_unlock_irq(&rqp->mcg_spl);

	return err;
}
static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
		     u8 gid[16], int block_loopback, enum mlx4_protocol prot,
		     enum mlx4_steer_type type, u64 *reg_id)
{
	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		int port = mlx4_slave_convert_port(dev, slave, gid[5]);
		if (port < 0)
			return port;
		return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
						 block_loopback, prot,
						 reg_id);
	}
	case MLX4_STEERING_MODE_B0:
		if (prot == MLX4_PROT_ETH) {
			int port = mlx4_slave_convert_port(dev, slave, gid[5]);
			if (port < 0)
				return port;
			gid[5] = port;
		}
		return mlx4_qp_attach_common(dev, qp, gid,
					     block_loopback, prot, type);
	default:
		return -EINVAL;
	}
}
static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
		     u8 gid[16], enum mlx4_protocol prot,
		     enum mlx4_steer_type type, u64 reg_id)
{
	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		return mlx4_flow_detach(dev, reg_id);
	case MLX4_STEERING_MODE_B0:
		return mlx4_qp_detach_common(dev, qp, gid, prot, type);
	default:
		return -EINVAL;
	}
}
static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
			    u8 *gid, enum mlx4_protocol prot)
{
	int real_port;

	if (prot != MLX4_PROT_ETH)
		return 0;

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
	    dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
		if (real_port < 0)
			return -EINVAL;
		gid[5] = real_port;
	}

	return 0;
}
int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp qp; /* dummy for calling attach/detach */
	u8 *gid = inbox->buf;
	enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
	int err;
	int qpn;
	struct res_qp *rqp;
	u64 reg_id = 0;
	int attach = vhcr->op_modifier;
	int block_loopback = vhcr->in_modifier >> 31;
	u8 steer_type_mask = 2;
	enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;

	qpn = vhcr->in_modifier & 0xffffff;
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err)
		return err;

	qp.qpn = qpn;
	if (attach) {
		err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
				type, &reg_id);
		if (err) {
			pr_err("Fail to attach rule to qp 0x%x\n", qpn);
			goto ex_put;
		}
		err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
		if (err)
			goto ex_detach;
	} else {
		err = mlx4_adjust_port(dev, slave, gid, prot);
		if (err)
			goto ex_put;

		err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
		if (err)
			goto ex_put;

		err = qp_detach(dev, &qp, gid, prot, type, reg_id);
		if (err)
			pr_err("Fail to detach rule from qp 0x%x reg_id = 0x%llx\n",
			       qpn, reg_id);
	}
	put_res(dev, slave, qpn, RES_QP);
	return err;

ex_detach:
	qp_detach(dev, &qp, gid, prot, type, reg_id);
ex_put:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}
/*
 * MAC validation for Flow Steering rules.
 * VF can attach rules only with a MAC address which is assigned to it.
 */
static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
				   struct list_head *rlist)
{
	struct mac_res *res, *tmp;
	__be64 be_mac;

	/* make sure it isn't multicast or broadcast mac */
	if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
	    !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
		list_for_each_entry_safe(res, tmp, rlist, list) {
			be_mac = cpu_to_be64(res->mac << 16);
			if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
				return 0;
		}
		pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
		       eth_header->eth.dst_mac, slave);
		return -EINVAL;
	}
	return 0;
}
/*
 * In case of missing eth header, append eth header with a MAC address
 * assigned to the VF.
 */
static int add_eth_header(struct mlx4_dev *dev, int slave,
			  struct mlx4_cmd_mailbox *inbox,
			  struct list_head *rlist, int header_id)
{
	struct mac_res *res, *tmp;
	u8 port;
	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	struct mlx4_net_trans_rule_hw_eth *eth_header;
	struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
	struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
	__be64 be_mac = 0;
	__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);

	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
	port = ctrl->port;
	eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);

	/* Clear a space in the inbox for eth header */
	switch (header_id) {
	case MLX4_NET_TRANS_RULE_ID_IPV4:
		ip_header =
			(struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
		memmove(ip_header, eth_header,
			sizeof(*ip_header) + sizeof(*l4_header));
		break;
	case MLX4_NET_TRANS_RULE_ID_TCP:
	case MLX4_NET_TRANS_RULE_ID_UDP:
		l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
			    (eth_header + 1);
		memmove(l4_header, eth_header, sizeof(*l4_header));
		break;
	default:
		return -EINVAL;
	}
	list_for_each_entry_safe(res, tmp, rlist, list) {
		if (port == res->port) {
			be_mac = cpu_to_be64(res->mac << 16);
			break;
		}
	}
	if (!be_mac) {
		pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n",
		       port);
		return -EINVAL;
	}

	memset(eth_header, 0, sizeof(*eth_header));
	eth_header->size = sizeof(*eth_header) >> 2;
	eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
	memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
	memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);

	return 0;
}
int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
					 struct mlx4_vhcr *vhcr,
					 struct mlx4_cmd_mailbox *inbox,
					 struct mlx4_cmd_mailbox *outbox,
					 struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
	int err;
	int qpn;
	struct res_qp *rqp;
	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	struct _rule_hw *rule_header;
	int header_id;

	if (dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		return -EOPNOTSUPP;

	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
	ctrl->port = mlx4_slave_convert_port(dev, slave, ctrl->port);
	if (ctrl->port <= 0)
		return -EINVAL;
	qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err) {
		pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
		return err;
	}
	rule_header = (struct _rule_hw *)(ctrl + 1);
	header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));

	switch (header_id) {
	case MLX4_NET_TRANS_RULE_ID_ETH:
		if (validate_eth_header_mac(slave, rule_header, rlist)) {
			err = -EINVAL;
			goto err_put;
		}
		break;
	case MLX4_NET_TRANS_RULE_ID_IB:
		break;
	case MLX4_NET_TRANS_RULE_ID_IPV4:
	case MLX4_NET_TRANS_RULE_ID_TCP:
	case MLX4_NET_TRANS_RULE_ID_UDP:
		pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
		if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
			err = -EINVAL;
			goto err_put;
		}
		vhcr->in_modifier +=
			sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
		break;
	default:
		pr_err("Corrupted mailbox\n");
		err = -EINVAL;
		goto err_put;
	}

	err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
			   vhcr->in_modifier, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (err)
		goto err_put;

	err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
	if (err) {
		mlx4_err(dev, "Fail to add flow steering resources\n");
		/* detach rule */
		mlx4_cmd(dev, vhcr->out_param, 0, 0,
			 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
			 MLX4_CMD_NATIVE);
		goto err_put;
	}
	atomic_inc(&rqp->ref_count);
err_put:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}
int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
					 struct mlx4_vhcr *vhcr,
					 struct mlx4_cmd_mailbox *inbox,
					 struct mlx4_cmd_mailbox *outbox,
					 struct mlx4_cmd_info *cmd)
{
	int err;
	struct res_qp *rqp;
	struct res_fs_rule *rrule;

	if (dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		return -EOPNOTSUPP;

	err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
	if (err)
		return err;
	/* Release the rule from busy state before removal */
	put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
	err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
	if (err)
		return err;

	err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
	if (err) {
		mlx4_err(dev, "Fail to remove flow steering resources\n");
		goto out;
	}

	err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);
	if (!err)
		atomic_dec(&rqp->ref_count);
out:
	put_res(dev, slave, rrule->qpn, RES_QP);
	return err;
}
enum {
	BUSY_MAX_RETRIES = 10
};
int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier & 0xffff;

	err = get_res(dev, slave, index, RES_COUNTER, NULL);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	put_res(dev, slave, index, RES_COUNTER);
	return err;
}
static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
{
	struct res_gid *rgid;
	struct res_gid *tmp;
	struct mlx4_qp qp; /* dummy for calling attach/detach */

	list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
		switch (dev->caps.steering_mode) {
		case MLX4_STEERING_MODE_DEVICE_MANAGED:
			mlx4_flow_detach(dev, rgid->reg_id);
			break;
		case MLX4_STEERING_MODE_B0:
			qp.qpn = rqp->local_qpn;
			(void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
						     rgid->prot, rgid->steer);
			break;
		}
		list_del(&rgid->list);
		kfree(rgid);
	}
}
static int _move_all_busy(struct mlx4_dev *dev, int slave,
			  enum mlx4_resource type, int print)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
	struct res_common *r;
	struct res_common *tmp;
	int busy;

	busy = 0;
	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(r, tmp, rlist, list) {
		if (r->owner == slave) {
			if (!r->removing) {
				if (r->state == RES_ANY_BUSY) {
					if (print)
						mlx4_dbg(dev,
							 "%s id 0x%llx is busy\n",
							 resource_str(type),
							 r->res_id);
					++busy;
				} else {
					r->from_state = r->state;
					r->state = RES_ANY_BUSY;
					r->removing = 1;
				}
			}
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return busy;
}
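
/*
 * move_all_busy() repeatedly calls _move_all_busy() until every resource
 * of the given type owned by the slave has been claimed for removal,
 * giving up (and logging the stragglers via a final print pass) after
 * five seconds.
 */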
static int move_all_busy(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type)
{
	unsigned long begin;
	int busy;

	begin = jiffies;
	do {
		busy = _move_all_busy(dev, slave, type, 0);
		if (time_after(jiffies, begin + 5 * HZ))
			break;
		if (busy)
			cond_resched();
	} while (busy);

	if (busy)
		busy = _move_all_busy(dev, slave, type, 1);

	return busy;
}
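
/*
 * The rem_slave_*() functions below all follow the same pattern: claim
 * everything the slave owns via move_all_busy(), then walk each resource
 * backwards through its state machine (e.g. RES_QP_HW -> RES_QP_MAPPED
 * -> RES_QP_RESERVED -> freed), issuing the matching HW2SW/2RST command
 * at the hardware-owned stage and releasing ICM and allocation ranges on
 * the way down.
 */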
static void rem_slave_qps(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
	int state;
	u64 in_param;
	int qpn;
	int err;

	err = move_all_busy(dev, slave, RES_QP);
	if (err)
		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == slave) {
			qpn = qp->com.res_id;
			detach_qp(dev, slave, qp);
			state = qp->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_QP_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&qp->com.node,
						 &tracker->res_tree[RES_QP]);
					list_del(&qp->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					if (!valid_reserved(dev, slave, qpn)) {
						__mlx4_qp_release_range(dev, qpn, 1);
						mlx4_release_resource(dev, slave,
								      RES_QP, 1, 0);
					}
					kfree(qp);
					state = 0;
					break;
				case RES_QP_MAPPED:
					if (!valid_reserved(dev, slave, qpn))
						__mlx4_qp_free_icm(dev, qpn);
					state = RES_QP_RESERVED;
					break;
				case RES_QP_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param,
						       qp->local_qpn, 2,
						       MLX4_CMD_2RST_QP,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
							 slave, qp->local_qpn);
					atomic_dec(&qp->rcq->ref_count);
					atomic_dec(&qp->scq->ref_count);
					atomic_dec(&qp->mtt->ref_count);
					if (qp->srq)
						atomic_dec(&qp->srq->ref_count);
					state = RES_QP_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *srq_list =
		&tracker->slave_list[slave].res_list[RES_SRQ];
	struct res_srq *srq;
	struct res_srq *tmp;
	int state;
	u64 in_param;
	int srqn;
	int err;

	err = move_all_busy(dev, slave, RES_SRQ);
	if (err)
		mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (srq->com.owner == slave) {
			srqn = srq->com.res_id;
			state = srq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_SRQ_ALLOCATED:
					__mlx4_srq_free_icm(dev, srqn);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&srq->com.node,
						 &tracker->res_tree[RES_SRQ]);
					list_del(&srq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_SRQ, 1, 0);
					kfree(srq);
					state = 0;
					break;

				case RES_SRQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, srqn, 1,
						       MLX4_CMD_HW2SW_SRQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
							 slave, srqn);

					atomic_dec(&srq->mtt->ref_count);
					if (srq->cq)
						atomic_dec(&srq->cq->ref_count);
					state = RES_SRQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *cq_list =
		&tracker->slave_list[slave].res_list[RES_CQ];
	struct res_cq *cq;
	struct res_cq *tmp;
	int state;
	u64 in_param;
	int cqn;
	int err;

	err = move_all_busy(dev, slave, RES_CQ);
	if (err)
		mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
			cqn = cq->com.res_id;
			state = cq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_CQ_ALLOCATED:
					__mlx4_cq_free_icm(dev, cqn);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&cq->com.node,
						 &tracker->res_tree[RES_CQ]);
					list_del(&cq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_CQ, 1, 0);
					kfree(cq);
					state = 0;
					break;

				case RES_CQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, cqn, 1,
						       MLX4_CMD_HW2SW_CQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
							 slave, cqn);
					atomic_dec(&cq->mtt->ref_count);
					state = RES_CQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mpt_list =
		&tracker->slave_list[slave].res_list[RES_MPT];
	struct res_mpt *mpt;
	struct res_mpt *tmp;
	int state;
	u64 in_param;
	int mptn;
	int err;

	err = move_all_busy(dev, slave, RES_MPT);
	if (err)
		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mpt->com.owner == slave) {
			mptn = mpt->com.res_id;
			state = mpt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MPT_RESERVED:
					__mlx4_mpt_release(dev, mpt->key);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mpt->com.node,
						 &tracker->res_tree[RES_MPT]);
					list_del(&mpt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_MPT, 1, 0);
					kfree(mpt);
					state = 0;
					break;

				case RES_MPT_MAPPED:
					__mlx4_mpt_free_icm(dev, mpt->key);
					state = RES_MPT_RESERVED;
					break;

				case RES_MPT_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, mptn, 0,
						       MLX4_CMD_HW2SW_MPT,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
							 slave, mptn);
					if (mpt->mtt)
						atomic_dec(&mpt->mtt->ref_count);
					state = RES_MPT_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *mtt_list =
		&tracker->slave_list[slave].res_list[RES_MTT];
	struct res_mtt *mtt;
	struct res_mtt *tmp;
	int state;
	int base;
	int err;

	err = move_all_busy(dev, slave, RES_MTT);
	if (err)
		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mtt->com.owner == slave) {
			base = mtt->com.res_id;
			state = mtt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MTT_ALLOCATED:
					__mlx4_free_mtt_range(dev, base,
							      mtt->order);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mtt->com.node,
						 &tracker->res_tree[RES_MTT]);
					list_del(&mtt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave, RES_MTT,
							      1 << mtt->order, 0);
					kfree(mtt);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *fs_rule_list =
		&tracker->slave_list[slave].res_list[RES_FS_RULE];
	struct res_fs_rule *fs_rule;
	struct res_fs_rule *tmp;
	int state;
	u64 base;
	int err;

	err = move_all_busy(dev, slave, RES_FS_RULE);
	if (err)
		mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (fs_rule->com.owner == slave) {
			base = fs_rule->com.res_id;
			state = fs_rule->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_FS_RULE_ALLOCATED:
					/* detach rule */
					err = mlx4_cmd(dev, base, 0, 0,
						       MLX4_QP_FLOW_STEERING_DETACH,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);

					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&fs_rule->com.node,
						 &tracker->res_tree[RES_FS_RULE]);
					list_del(&fs_rule->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(fs_rule);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *eq_list =
		&tracker->slave_list[slave].res_list[RES_EQ];
	struct res_eq *eq;
	struct res_eq *tmp;
	int err;
	int state;
	int eqn;
	struct mlx4_cmd_mailbox *mailbox;

	err = move_all_busy(dev, slave, RES_EQ);
	if (err)
		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (eq->com.owner == slave) {
			eqn = eq->com.res_id;
			state = eq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_EQ_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&eq->com.node,
						 &tracker->res_tree[RES_EQ]);
					list_del(&eq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(eq);
					state = 0;
					break;

				case RES_EQ_HW:
					mailbox = mlx4_alloc_cmd_mailbox(dev);
					if (IS_ERR(mailbox)) {
						cond_resched();
						continue;
					}
					err = mlx4_cmd_box(dev, slave, 0,
							   eqn & 0xff, 0,
							   MLX4_CMD_HW2SW_EQ,
							   MLX4_CMD_TIME_CLASS_A,
							   MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
							 slave, eqn);
					mlx4_free_cmd_mailbox(dev, mailbox);
					atomic_dec(&eq->mtt->ref_count);
					state = RES_EQ_RESERVED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_counters(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *counter_list =
		&tracker->slave_list[slave].res_list[RES_COUNTER];
	struct res_counter *counter;
	struct res_counter *tmp;
	int err;
	int index;

	err = move_all_busy(dev, slave, RES_COUNTER);
	if (err)
		mlx4_warn(dev, "rem_slave_counters: Could not move all counters to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
		if (counter->com.owner == slave) {
			index = counter->com.res_id;
			rb_erase(&counter->com.node,
				 &tracker->res_tree[RES_COUNTER]);
			list_del(&counter->com.list);
			kfree(counter);
			__mlx4_counter_free(dev, index);
			mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *xrcdn_list =
		&tracker->slave_list[slave].res_list[RES_XRCD];
	struct res_xrcdn *xrcd;
	struct res_xrcdn *tmp;
	int err;
	int xrcdn;

	err = move_all_busy(dev, slave, RES_XRCD);
	if (err)
		mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
		if (xrcd->com.owner == slave) {
			xrcdn = xrcd->com.res_id;
			rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
			list_del(&xrcd->com.list);
			kfree(xrcd);
			__mlx4_xrcd_free(dev, xrcdn);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
	rem_slave_vlans(dev, slave);
	rem_slave_macs(dev, slave);
	rem_slave_fs_rule(dev, slave);
	rem_slave_qps(dev, slave);
	rem_slave_srqs(dev, slave);
	rem_slave_cqs(dev, slave);
	rem_slave_mrs(dev, slave);
	rem_slave_eqs(dev, slave);
	rem_slave_mtts(dev, slave);
	rem_slave_counters(dev, slave);
	rem_slave_xrcdns(dev, slave);
	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}
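
/*
 * Worker for immediate VST/VGT changes on a VF: for every RES_QP_HW QP
 * the slave owns on the affected port, issue UPDATE_QP with either the
 * saved VF-provided path parameters (back to VGT) or the new vlan/qos
 * enforcement (VST), then drop the old vlan registration if all updates
 * succeeded.
 */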
void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
{
	struct mlx4_vf_immed_vlan_work *work =
		container_of(_work, struct mlx4_vf_immed_vlan_work, work);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_update_qp_context *upd_context;
	struct mlx4_dev *dev = &work->priv->dev;
	struct mlx4_resource_tracker *tracker =
		&work->priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[work->slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
	u64 qp_path_mask_vlan_ctrl =
		       ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));

	u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));

	int err;
	int port, errors = 0;
	u8 vlan_control;

	if (mlx4_is_slave(dev)) {
		mlx4_warn(dev, "Trying to update-qp in slave %d\n",
			  work->slave);
		goto out;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto out;
	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else if (!work->vlan_id)
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;

	upd_context = mailbox->buf;
	upd_context->qp_mask = cpu_to_be64(MLX4_UPD_QP_MASK_VSD);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == work->slave) {
			if (qp->com.from_state != RES_QP_HW ||
			    !qp->sched_queue ||	/* no INIT2RTR trans yet */
			    mlx4_is_qp_reserved(dev, qp->local_qpn) ||
			    qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
			port = (qp->sched_queue >> 6 & 1) + 1;
			if (port != work->port) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
			if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
				upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
			else
				upd_context->primary_addr_path_mask =
					cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
			if (work->vlan_id == MLX4_VGT) {
				upd_context->qp_context.param3 = qp->param3;
				upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
				upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
				upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
				upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
				upd_context->qp_context.pri_path.feup = qp->feup;
				upd_context->qp_context.pri_path.sched_queue =
					qp->sched_queue;
			} else {
				upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
				upd_context->qp_context.pri_path.vlan_control = vlan_control;
				upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
				upd_context->qp_context.pri_path.fvl_rx =
					qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.fl =
					qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
				upd_context->qp_context.pri_path.feup =
					qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.sched_queue =
					qp->sched_queue & 0xC7;
				upd_context->qp_context.pri_path.sched_queue |=
					((work->qos & 0x7) << 3);
			}

			err = mlx4_cmd(dev, mailbox->dma,
				       qp->local_qpn & 0xffffff,
				       0, MLX4_CMD_UPDATE_QP,
				       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
			if (err) {
				mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
					  work->slave, port, qp->local_qpn,
					  err);
				errors++;
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
	mlx4_free_cmd_mailbox(dev, mailbox);

	if (errors)
		mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
			 errors, work->slave, work->port);

	/* unregister previous vlan_id if needed and we had no errors
	 * while updating the QPs
	 */
	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
	    NO_INDX != work->orig_vlan_ix)
		__mlx4_unregister_vlan(&work->priv->dev, work->port,
				       work->orig_vlan_id);
out:
	kfree(work);
	return;
}