2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
5 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 #include <linux/sched.h>
37 #include <linux/pci.h>
38 #include <linux/errno.h>
39 #include <linux/kernel.h>
41 #include <linux/slab.h>
42 #include <linux/mlx4/cmd.h>
43 #include <linux/mlx4/qp.h>
44 #include <linux/if_ether.h>
45 #include <linux/etherdevice.h>
50 #define MLX4_MAC_VALID (1ull << 63)
53 struct list_head list
;
61 struct list_head list
;
69 struct list_head list
;
84 struct list_head list
;
86 enum mlx4_protocol prot
;
87 enum mlx4_steer_type steer
;
92 RES_QP_BUSY
= RES_ANY_BUSY
,
94 /* QP number was allocated */
97 /* ICM memory for QP context was mapped */
100 /* QP is in hw ownership */
105 struct res_common com
;
110 struct list_head mcg_list
;
115 /* saved qp params before VST enforcement in order to restore on VGT */
125 enum res_mtt_states
{
126 RES_MTT_BUSY
= RES_ANY_BUSY
,
130 static inline const char *mtt_states_str(enum res_mtt_states state
)
133 case RES_MTT_BUSY
: return "RES_MTT_BUSY";
134 case RES_MTT_ALLOCATED
: return "RES_MTT_ALLOCATED";
135 default: return "Unknown";
140 struct res_common com
;
145 enum res_mpt_states
{
146 RES_MPT_BUSY
= RES_ANY_BUSY
,
153 struct res_common com
;
159 RES_EQ_BUSY
= RES_ANY_BUSY
,
165 struct res_common com
;
170 RES_CQ_BUSY
= RES_ANY_BUSY
,
176 struct res_common com
;
181 enum res_srq_states
{
182 RES_SRQ_BUSY
= RES_ANY_BUSY
,
188 struct res_common com
;
194 enum res_counter_states
{
195 RES_COUNTER_BUSY
= RES_ANY_BUSY
,
196 RES_COUNTER_ALLOCATED
,
200 struct res_common com
;
204 enum res_xrcdn_states
{
205 RES_XRCD_BUSY
= RES_ANY_BUSY
,
210 struct res_common com
;
214 enum res_fs_rule_states
{
215 RES_FS_RULE_BUSY
= RES_ANY_BUSY
,
216 RES_FS_RULE_ALLOCATED
,
220 struct res_common com
;
224 static int mlx4_is_eth(struct mlx4_dev
*dev
, int port
)
226 return dev
->caps
.port_mask
[port
] == MLX4_PORT_TYPE_IB
? 0 : 1;
229 static void *res_tracker_lookup(struct rb_root
*root
, u64 res_id
)
231 struct rb_node
*node
= root
->rb_node
;
234 struct res_common
*res
= container_of(node
, struct res_common
,
237 if (res_id
< res
->res_id
)
238 node
= node
->rb_left
;
239 else if (res_id
> res
->res_id
)
240 node
= node
->rb_right
;
247 static int res_tracker_insert(struct rb_root
*root
, struct res_common
*res
)
249 struct rb_node
**new = &(root
->rb_node
), *parent
= NULL
;
251 /* Figure out where to put new node */
253 struct res_common
*this = container_of(*new, struct res_common
,
257 if (res
->res_id
< this->res_id
)
258 new = &((*new)->rb_left
);
259 else if (res
->res_id
> this->res_id
)
260 new = &((*new)->rb_right
);
265 /* Add new node and rebalance tree. */
266 rb_link_node(&res
->node
, parent
, new);
267 rb_insert_color(&res
->node
, root
);
282 static const char *resource_str(enum mlx4_resource rt
)
285 case RES_QP
: return "RES_QP";
286 case RES_CQ
: return "RES_CQ";
287 case RES_SRQ
: return "RES_SRQ";
288 case RES_MPT
: return "RES_MPT";
289 case RES_MTT
: return "RES_MTT";
290 case RES_MAC
: return "RES_MAC";
291 case RES_VLAN
: return "RES_VLAN";
292 case RES_EQ
: return "RES_EQ";
293 case RES_COUNTER
: return "RES_COUNTER";
294 case RES_FS_RULE
: return "RES_FS_RULE";
295 case RES_XRCD
: return "RES_XRCD";
296 default: return "Unknown resource type !!!";
300 static void rem_slave_vlans(struct mlx4_dev
*dev
, int slave
);
301 static inline int mlx4_grant_resource(struct mlx4_dev
*dev
, int slave
,
302 enum mlx4_resource res_type
, int count
,
305 struct mlx4_priv
*priv
= mlx4_priv(dev
);
306 struct resource_allocator
*res_alloc
=
307 &priv
->mfunc
.master
.res_tracker
.res_alloc
[res_type
];
309 int allocated
, free
, reserved
, guaranteed
, from_free
;
312 if (slave
> dev
->persist
->num_vfs
)
315 spin_lock(&res_alloc
->alloc_lock
);
316 allocated
= (port
> 0) ?
317 res_alloc
->allocated
[(port
- 1) *
318 (dev
->persist
->num_vfs
+ 1) + slave
] :
319 res_alloc
->allocated
[slave
];
320 free
= (port
> 0) ? res_alloc
->res_port_free
[port
- 1] :
322 reserved
= (port
> 0) ? res_alloc
->res_port_rsvd
[port
- 1] :
323 res_alloc
->res_reserved
;
324 guaranteed
= res_alloc
->guaranteed
[slave
];
326 if (allocated
+ count
> res_alloc
->quota
[slave
]) {
327 mlx4_warn(dev
, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
328 slave
, port
, resource_str(res_type
), count
,
329 allocated
, res_alloc
->quota
[slave
]);
333 if (allocated
+ count
<= guaranteed
) {
337 /* portion may need to be obtained from free area */
338 if (guaranteed
- allocated
> 0)
339 from_free
= count
- (guaranteed
- allocated
);
343 from_rsvd
= count
- from_free
;
345 if (free
- from_free
>= reserved
)
348 mlx4_warn(dev
, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
349 slave
, port
, resource_str(res_type
), free
,
350 from_free
, reserved
);
354 /* grant the request */
356 res_alloc
->allocated
[(port
- 1) *
357 (dev
->persist
->num_vfs
+ 1) + slave
] += count
;
358 res_alloc
->res_port_free
[port
- 1] -= count
;
359 res_alloc
->res_port_rsvd
[port
- 1] -= from_rsvd
;
361 res_alloc
->allocated
[slave
] += count
;
362 res_alloc
->res_free
-= count
;
363 res_alloc
->res_reserved
-= from_rsvd
;
368 spin_unlock(&res_alloc
->alloc_lock
);
372 static inline void mlx4_release_resource(struct mlx4_dev
*dev
, int slave
,
373 enum mlx4_resource res_type
, int count
,
376 struct mlx4_priv
*priv
= mlx4_priv(dev
);
377 struct resource_allocator
*res_alloc
=
378 &priv
->mfunc
.master
.res_tracker
.res_alloc
[res_type
];
379 int allocated
, guaranteed
, from_rsvd
;
381 if (slave
> dev
->persist
->num_vfs
)
384 spin_lock(&res_alloc
->alloc_lock
);
386 allocated
= (port
> 0) ?
387 res_alloc
->allocated
[(port
- 1) *
388 (dev
->persist
->num_vfs
+ 1) + slave
] :
389 res_alloc
->allocated
[slave
];
390 guaranteed
= res_alloc
->guaranteed
[slave
];
392 if (allocated
- count
>= guaranteed
) {
395 /* portion may need to be returned to reserved area */
396 if (allocated
- guaranteed
> 0)
397 from_rsvd
= count
- (allocated
- guaranteed
);
403 res_alloc
->allocated
[(port
- 1) *
404 (dev
->persist
->num_vfs
+ 1) + slave
] -= count
;
405 res_alloc
->res_port_free
[port
- 1] += count
;
406 res_alloc
->res_port_rsvd
[port
- 1] += from_rsvd
;
408 res_alloc
->allocated
[slave
] -= count
;
409 res_alloc
->res_free
+= count
;
410 res_alloc
->res_reserved
+= from_rsvd
;
413 spin_unlock(&res_alloc
->alloc_lock
);
417 static inline void initialize_res_quotas(struct mlx4_dev
*dev
,
418 struct resource_allocator
*res_alloc
,
419 enum mlx4_resource res_type
,
420 int vf
, int num_instances
)
422 res_alloc
->guaranteed
[vf
] = num_instances
/
423 (2 * (dev
->persist
->num_vfs
+ 1));
424 res_alloc
->quota
[vf
] = (num_instances
/ 2) + res_alloc
->guaranteed
[vf
];
425 if (vf
== mlx4_master_func_num(dev
)) {
426 res_alloc
->res_free
= num_instances
;
427 if (res_type
== RES_MTT
) {
428 /* reserved mtts will be taken out of the PF allocation */
429 res_alloc
->res_free
+= dev
->caps
.reserved_mtts
;
430 res_alloc
->guaranteed
[vf
] += dev
->caps
.reserved_mtts
;
431 res_alloc
->quota
[vf
] += dev
->caps
.reserved_mtts
;
436 void mlx4_init_quotas(struct mlx4_dev
*dev
)
438 struct mlx4_priv
*priv
= mlx4_priv(dev
);
441 /* quotas for VFs are initialized in mlx4_slave_cap */
442 if (mlx4_is_slave(dev
))
445 if (!mlx4_is_mfunc(dev
)) {
446 dev
->quotas
.qp
= dev
->caps
.num_qps
- dev
->caps
.reserved_qps
-
447 mlx4_num_reserved_sqps(dev
);
448 dev
->quotas
.cq
= dev
->caps
.num_cqs
- dev
->caps
.reserved_cqs
;
449 dev
->quotas
.srq
= dev
->caps
.num_srqs
- dev
->caps
.reserved_srqs
;
450 dev
->quotas
.mtt
= dev
->caps
.num_mtts
- dev
->caps
.reserved_mtts
;
451 dev
->quotas
.mpt
= dev
->caps
.num_mpts
- dev
->caps
.reserved_mrws
;
455 pf
= mlx4_master_func_num(dev
);
457 priv
->mfunc
.master
.res_tracker
.res_alloc
[RES_QP
].quota
[pf
];
459 priv
->mfunc
.master
.res_tracker
.res_alloc
[RES_CQ
].quota
[pf
];
461 priv
->mfunc
.master
.res_tracker
.res_alloc
[RES_SRQ
].quota
[pf
];
463 priv
->mfunc
.master
.res_tracker
.res_alloc
[RES_MTT
].quota
[pf
];
465 priv
->mfunc
.master
.res_tracker
.res_alloc
[RES_MPT
].quota
[pf
];
467 int mlx4_init_resource_tracker(struct mlx4_dev
*dev
)
469 struct mlx4_priv
*priv
= mlx4_priv(dev
);
473 priv
->mfunc
.master
.res_tracker
.slave_list
=
474 kzalloc(dev
->num_slaves
* sizeof(struct slave_list
),
476 if (!priv
->mfunc
.master
.res_tracker
.slave_list
)
479 for (i
= 0 ; i
< dev
->num_slaves
; i
++) {
480 for (t
= 0; t
< MLX4_NUM_OF_RESOURCE_TYPE
; ++t
)
481 INIT_LIST_HEAD(&priv
->mfunc
.master
.res_tracker
.
482 slave_list
[i
].res_list
[t
]);
483 mutex_init(&priv
->mfunc
.master
.res_tracker
.slave_list
[i
].mutex
);
486 mlx4_dbg(dev
, "Started init_resource_tracker: %ld slaves\n",
488 for (i
= 0 ; i
< MLX4_NUM_OF_RESOURCE_TYPE
; i
++)
489 priv
->mfunc
.master
.res_tracker
.res_tree
[i
] = RB_ROOT
;
491 for (i
= 0; i
< MLX4_NUM_OF_RESOURCE_TYPE
; i
++) {
492 struct resource_allocator
*res_alloc
=
493 &priv
->mfunc
.master
.res_tracker
.res_alloc
[i
];
494 res_alloc
->quota
= kmalloc((dev
->persist
->num_vfs
+ 1) *
495 sizeof(int), GFP_KERNEL
);
496 res_alloc
->guaranteed
= kmalloc((dev
->persist
->num_vfs
+ 1) *
497 sizeof(int), GFP_KERNEL
);
498 if (i
== RES_MAC
|| i
== RES_VLAN
)
499 res_alloc
->allocated
= kzalloc(MLX4_MAX_PORTS
*
500 (dev
->persist
->num_vfs
502 sizeof(int), GFP_KERNEL
);
504 res_alloc
->allocated
= kzalloc((dev
->persist
->
506 sizeof(int), GFP_KERNEL
);
508 if (!res_alloc
->quota
|| !res_alloc
->guaranteed
||
509 !res_alloc
->allocated
)
512 spin_lock_init(&res_alloc
->alloc_lock
);
513 for (t
= 0; t
< dev
->persist
->num_vfs
+ 1; t
++) {
514 struct mlx4_active_ports actv_ports
=
515 mlx4_get_active_ports(dev
, t
);
518 initialize_res_quotas(dev
, res_alloc
, RES_QP
,
519 t
, dev
->caps
.num_qps
-
520 dev
->caps
.reserved_qps
-
521 mlx4_num_reserved_sqps(dev
));
524 initialize_res_quotas(dev
, res_alloc
, RES_CQ
,
525 t
, dev
->caps
.num_cqs
-
526 dev
->caps
.reserved_cqs
);
529 initialize_res_quotas(dev
, res_alloc
, RES_SRQ
,
530 t
, dev
->caps
.num_srqs
-
531 dev
->caps
.reserved_srqs
);
534 initialize_res_quotas(dev
, res_alloc
, RES_MPT
,
535 t
, dev
->caps
.num_mpts
-
536 dev
->caps
.reserved_mrws
);
539 initialize_res_quotas(dev
, res_alloc
, RES_MTT
,
540 t
, dev
->caps
.num_mtts
-
541 dev
->caps
.reserved_mtts
);
544 if (t
== mlx4_master_func_num(dev
)) {
545 int max_vfs_pport
= 0;
546 /* Calculate the max vfs per port for */
548 for (j
= 0; j
< dev
->caps
.num_ports
;
550 struct mlx4_slaves_pport slaves_pport
=
551 mlx4_phys_to_slaves_pport(dev
, j
+ 1);
552 unsigned current_slaves
=
553 bitmap_weight(slaves_pport
.slaves
,
554 dev
->caps
.num_ports
) - 1;
555 if (max_vfs_pport
< current_slaves
)
559 res_alloc
->quota
[t
] =
562 res_alloc
->guaranteed
[t
] = 2;
563 for (j
= 0; j
< MLX4_MAX_PORTS
; j
++)
564 res_alloc
->res_port_free
[j
] =
567 res_alloc
->quota
[t
] = MLX4_MAX_MAC_NUM
;
568 res_alloc
->guaranteed
[t
] = 2;
572 if (t
== mlx4_master_func_num(dev
)) {
573 res_alloc
->quota
[t
] = MLX4_MAX_VLAN_NUM
;
574 res_alloc
->guaranteed
[t
] = MLX4_MAX_VLAN_NUM
/ 2;
575 for (j
= 0; j
< MLX4_MAX_PORTS
; j
++)
576 res_alloc
->res_port_free
[j
] =
579 res_alloc
->quota
[t
] = MLX4_MAX_VLAN_NUM
/ 2;
580 res_alloc
->guaranteed
[t
] = 0;
584 res_alloc
->quota
[t
] = dev
->caps
.max_counters
;
585 res_alloc
->guaranteed
[t
] = 0;
586 if (t
== mlx4_master_func_num(dev
))
587 res_alloc
->res_free
= res_alloc
->quota
[t
];
592 if (i
== RES_MAC
|| i
== RES_VLAN
) {
593 for (j
= 0; j
< dev
->caps
.num_ports
; j
++)
594 if (test_bit(j
, actv_ports
.ports
))
595 res_alloc
->res_port_rsvd
[j
] +=
596 res_alloc
->guaranteed
[t
];
598 res_alloc
->res_reserved
+= res_alloc
->guaranteed
[t
];
602 spin_lock_init(&priv
->mfunc
.master
.res_tracker
.lock
);
606 for (i
= 0; i
< MLX4_NUM_OF_RESOURCE_TYPE
; i
++) {
607 kfree(priv
->mfunc
.master
.res_tracker
.res_alloc
[i
].allocated
);
608 priv
->mfunc
.master
.res_tracker
.res_alloc
[i
].allocated
= NULL
;
609 kfree(priv
->mfunc
.master
.res_tracker
.res_alloc
[i
].guaranteed
);
610 priv
->mfunc
.master
.res_tracker
.res_alloc
[i
].guaranteed
= NULL
;
611 kfree(priv
->mfunc
.master
.res_tracker
.res_alloc
[i
].quota
);
612 priv
->mfunc
.master
.res_tracker
.res_alloc
[i
].quota
= NULL
;
617 void mlx4_free_resource_tracker(struct mlx4_dev
*dev
,
618 enum mlx4_res_tracker_free_type type
)
620 struct mlx4_priv
*priv
= mlx4_priv(dev
);
623 if (priv
->mfunc
.master
.res_tracker
.slave_list
) {
624 if (type
!= RES_TR_FREE_STRUCTS_ONLY
) {
625 for (i
= 0; i
< dev
->num_slaves
; i
++) {
626 if (type
== RES_TR_FREE_ALL
||
627 dev
->caps
.function
!= i
)
628 mlx4_delete_all_resources_for_slave(dev
, i
);
630 /* free master's vlans */
631 i
= dev
->caps
.function
;
632 mlx4_reset_roce_gids(dev
, i
);
633 mutex_lock(&priv
->mfunc
.master
.res_tracker
.slave_list
[i
].mutex
);
634 rem_slave_vlans(dev
, i
);
635 mutex_unlock(&priv
->mfunc
.master
.res_tracker
.slave_list
[i
].mutex
);
638 if (type
!= RES_TR_FREE_SLAVES_ONLY
) {
639 for (i
= 0; i
< MLX4_NUM_OF_RESOURCE_TYPE
; i
++) {
640 kfree(priv
->mfunc
.master
.res_tracker
.res_alloc
[i
].allocated
);
641 priv
->mfunc
.master
.res_tracker
.res_alloc
[i
].allocated
= NULL
;
642 kfree(priv
->mfunc
.master
.res_tracker
.res_alloc
[i
].guaranteed
);
643 priv
->mfunc
.master
.res_tracker
.res_alloc
[i
].guaranteed
= NULL
;
644 kfree(priv
->mfunc
.master
.res_tracker
.res_alloc
[i
].quota
);
645 priv
->mfunc
.master
.res_tracker
.res_alloc
[i
].quota
= NULL
;
647 kfree(priv
->mfunc
.master
.res_tracker
.slave_list
);
648 priv
->mfunc
.master
.res_tracker
.slave_list
= NULL
;
653 static void update_pkey_index(struct mlx4_dev
*dev
, int slave
,
654 struct mlx4_cmd_mailbox
*inbox
)
656 u8 sched
= *(u8
*)(inbox
->buf
+ 64);
657 u8 orig_index
= *(u8
*)(inbox
->buf
+ 35);
659 struct mlx4_priv
*priv
= mlx4_priv(dev
);
662 port
= (sched
>> 6 & 1) + 1;
664 new_index
= priv
->virt2phys_pkey
[slave
][port
- 1][orig_index
];
665 *(u8
*)(inbox
->buf
+ 35) = new_index
;
668 static void update_gid(struct mlx4_dev
*dev
, struct mlx4_cmd_mailbox
*inbox
,
671 struct mlx4_qp_context
*qp_ctx
= inbox
->buf
+ 8;
672 enum mlx4_qp_optpar optpar
= be32_to_cpu(*(__be32
*) inbox
->buf
);
673 u32 ts
= (be32_to_cpu(qp_ctx
->flags
) >> 16) & 0xff;
676 if (MLX4_QP_ST_UD
== ts
) {
677 port
= (qp_ctx
->pri_path
.sched_queue
>> 6 & 1) + 1;
678 if (mlx4_is_eth(dev
, port
))
679 qp_ctx
->pri_path
.mgid_index
=
680 mlx4_get_base_gid_ix(dev
, slave
, port
) | 0x80;
682 qp_ctx
->pri_path
.mgid_index
= slave
| 0x80;
684 } else if (MLX4_QP_ST_RC
== ts
|| MLX4_QP_ST_XRC
== ts
|| MLX4_QP_ST_UC
== ts
) {
685 if (optpar
& MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH
) {
686 port
= (qp_ctx
->pri_path
.sched_queue
>> 6 & 1) + 1;
687 if (mlx4_is_eth(dev
, port
)) {
688 qp_ctx
->pri_path
.mgid_index
+=
689 mlx4_get_base_gid_ix(dev
, slave
, port
);
690 qp_ctx
->pri_path
.mgid_index
&= 0x7f;
692 qp_ctx
->pri_path
.mgid_index
= slave
& 0x7F;
695 if (optpar
& MLX4_QP_OPTPAR_ALT_ADDR_PATH
) {
696 port
= (qp_ctx
->alt_path
.sched_queue
>> 6 & 1) + 1;
697 if (mlx4_is_eth(dev
, port
)) {
698 qp_ctx
->alt_path
.mgid_index
+=
699 mlx4_get_base_gid_ix(dev
, slave
, port
);
700 qp_ctx
->alt_path
.mgid_index
&= 0x7f;
702 qp_ctx
->alt_path
.mgid_index
= slave
& 0x7F;
708 static int update_vport_qp_param(struct mlx4_dev
*dev
,
709 struct mlx4_cmd_mailbox
*inbox
,
712 struct mlx4_qp_context
*qpc
= inbox
->buf
+ 8;
713 struct mlx4_vport_oper_state
*vp_oper
;
714 struct mlx4_priv
*priv
;
718 port
= (qpc
->pri_path
.sched_queue
& 0x40) ? 2 : 1;
719 priv
= mlx4_priv(dev
);
720 vp_oper
= &priv
->mfunc
.master
.vf_oper
[slave
].vport
[port
];
721 qp_type
= (be32_to_cpu(qpc
->flags
) >> 16) & 0xff;
723 if (MLX4_VGT
!= vp_oper
->state
.default_vlan
) {
724 /* the reserved QPs (special, proxy, tunnel)
725 * do not operate over vlans
727 if (mlx4_is_qp_reserved(dev
, qpn
))
730 /* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */
731 if (qp_type
== MLX4_QP_ST_UD
||
732 (qp_type
== MLX4_QP_ST_MLX
&& mlx4_is_eth(dev
, port
))) {
733 if (dev
->caps
.bmme_flags
& MLX4_BMME_FLAG_VSD_INIT2RTR
) {
734 *(__be32
*)inbox
->buf
=
735 cpu_to_be32(be32_to_cpu(*(__be32
*)inbox
->buf
) |
736 MLX4_QP_OPTPAR_VLAN_STRIPPING
);
737 qpc
->param3
&= ~cpu_to_be32(MLX4_STRIP_VLAN
);
739 struct mlx4_update_qp_params params
= {.flags
= 0};
741 err
= mlx4_update_qp(dev
, qpn
, MLX4_UPDATE_QP_VSD
, ¶ms
);
747 if (vp_oper
->state
.link_state
== IFLA_VF_LINK_STATE_DISABLE
&&
748 dev
->caps
.flags2
& MLX4_DEV_CAP_FLAG2_UPDATE_QP
) {
749 qpc
->pri_path
.vlan_control
=
750 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED
|
751 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED
|
752 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED
|
753 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED
|
754 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED
|
755 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED
;
756 } else if (0 != vp_oper
->state
.default_vlan
) {
757 qpc
->pri_path
.vlan_control
=
758 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED
|
759 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED
|
760 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED
;
761 } else { /* priority tagged */
762 qpc
->pri_path
.vlan_control
=
763 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED
|
764 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED
;
767 qpc
->pri_path
.fvl_rx
|= MLX4_FVL_RX_FORCE_ETH_VLAN
;
768 qpc
->pri_path
.vlan_index
= vp_oper
->vlan_idx
;
769 qpc
->pri_path
.fl
|= MLX4_FL_CV
| MLX4_FL_ETH_HIDE_CQE_VLAN
;
770 qpc
->pri_path
.feup
|= MLX4_FEUP_FORCE_ETH_UP
| MLX4_FVL_FORCE_ETH_VLAN
;
771 qpc
->pri_path
.sched_queue
&= 0xC7;
772 qpc
->pri_path
.sched_queue
|= (vp_oper
->state
.default_qos
) << 3;
774 if (vp_oper
->state
.spoofchk
) {
775 qpc
->pri_path
.feup
|= MLX4_FSM_FORCE_ETH_SRC_MAC
;
776 qpc
->pri_path
.grh_mylmc
= (0x80 & qpc
->pri_path
.grh_mylmc
) + vp_oper
->mac_idx
;
782 static int mpt_mask(struct mlx4_dev
*dev
)
784 return dev
->caps
.num_mpts
- 1;
787 static void *find_res(struct mlx4_dev
*dev
, u64 res_id
,
788 enum mlx4_resource type
)
790 struct mlx4_priv
*priv
= mlx4_priv(dev
);
792 return res_tracker_lookup(&priv
->mfunc
.master
.res_tracker
.res_tree
[type
],
796 static int get_res(struct mlx4_dev
*dev
, int slave
, u64 res_id
,
797 enum mlx4_resource type
,
800 struct res_common
*r
;
803 spin_lock_irq(mlx4_tlock(dev
));
804 r
= find_res(dev
, res_id
, type
);
810 if (r
->state
== RES_ANY_BUSY
) {
815 if (r
->owner
!= slave
) {
820 r
->from_state
= r
->state
;
821 r
->state
= RES_ANY_BUSY
;
824 *((struct res_common
**)res
) = r
;
827 spin_unlock_irq(mlx4_tlock(dev
));
831 int mlx4_get_slave_from_resource_id(struct mlx4_dev
*dev
,
832 enum mlx4_resource type
,
833 u64 res_id
, int *slave
)
836 struct res_common
*r
;
842 spin_lock(mlx4_tlock(dev
));
844 r
= find_res(dev
, id
, type
);
849 spin_unlock(mlx4_tlock(dev
));
854 static void put_res(struct mlx4_dev
*dev
, int slave
, u64 res_id
,
855 enum mlx4_resource type
)
857 struct res_common
*r
;
859 spin_lock_irq(mlx4_tlock(dev
));
860 r
= find_res(dev
, res_id
, type
);
862 r
->state
= r
->from_state
;
863 spin_unlock_irq(mlx4_tlock(dev
));
866 static struct res_common
*alloc_qp_tr(int id
)
870 ret
= kzalloc(sizeof *ret
, GFP_KERNEL
);
874 ret
->com
.res_id
= id
;
875 ret
->com
.state
= RES_QP_RESERVED
;
877 INIT_LIST_HEAD(&ret
->mcg_list
);
878 spin_lock_init(&ret
->mcg_spl
);
879 atomic_set(&ret
->ref_count
, 0);
884 static struct res_common
*alloc_mtt_tr(int id
, int order
)
888 ret
= kzalloc(sizeof *ret
, GFP_KERNEL
);
892 ret
->com
.res_id
= id
;
894 ret
->com
.state
= RES_MTT_ALLOCATED
;
895 atomic_set(&ret
->ref_count
, 0);
900 static struct res_common
*alloc_mpt_tr(int id
, int key
)
904 ret
= kzalloc(sizeof *ret
, GFP_KERNEL
);
908 ret
->com
.res_id
= id
;
909 ret
->com
.state
= RES_MPT_RESERVED
;
915 static struct res_common
*alloc_eq_tr(int id
)
919 ret
= kzalloc(sizeof *ret
, GFP_KERNEL
);
923 ret
->com
.res_id
= id
;
924 ret
->com
.state
= RES_EQ_RESERVED
;
929 static struct res_common
*alloc_cq_tr(int id
)
933 ret
= kzalloc(sizeof *ret
, GFP_KERNEL
);
937 ret
->com
.res_id
= id
;
938 ret
->com
.state
= RES_CQ_ALLOCATED
;
939 atomic_set(&ret
->ref_count
, 0);
944 static struct res_common
*alloc_srq_tr(int id
)
948 ret
= kzalloc(sizeof *ret
, GFP_KERNEL
);
952 ret
->com
.res_id
= id
;
953 ret
->com
.state
= RES_SRQ_ALLOCATED
;
954 atomic_set(&ret
->ref_count
, 0);
959 static struct res_common
*alloc_counter_tr(int id
)
961 struct res_counter
*ret
;
963 ret
= kzalloc(sizeof *ret
, GFP_KERNEL
);
967 ret
->com
.res_id
= id
;
968 ret
->com
.state
= RES_COUNTER_ALLOCATED
;
973 static struct res_common
*alloc_xrcdn_tr(int id
)
975 struct res_xrcdn
*ret
;
977 ret
= kzalloc(sizeof *ret
, GFP_KERNEL
);
981 ret
->com
.res_id
= id
;
982 ret
->com
.state
= RES_XRCD_ALLOCATED
;
987 static struct res_common
*alloc_fs_rule_tr(u64 id
, int qpn
)
989 struct res_fs_rule
*ret
;
991 ret
= kzalloc(sizeof *ret
, GFP_KERNEL
);
995 ret
->com
.res_id
= id
;
996 ret
->com
.state
= RES_FS_RULE_ALLOCATED
;
1001 static struct res_common
*alloc_tr(u64 id
, enum mlx4_resource type
, int slave
,
1004 struct res_common
*ret
;
1008 ret
= alloc_qp_tr(id
);
1011 ret
= alloc_mpt_tr(id
, extra
);
1014 ret
= alloc_mtt_tr(id
, extra
);
1017 ret
= alloc_eq_tr(id
);
1020 ret
= alloc_cq_tr(id
);
1023 ret
= alloc_srq_tr(id
);
1026 pr_err("implementation missing\n");
1029 ret
= alloc_counter_tr(id
);
1032 ret
= alloc_xrcdn_tr(id
);
1035 ret
= alloc_fs_rule_tr(id
, extra
);
1046 static int add_res_range(struct mlx4_dev
*dev
, int slave
, u64 base
, int count
,
1047 enum mlx4_resource type
, int extra
)
1051 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1052 struct res_common
**res_arr
;
1053 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1054 struct rb_root
*root
= &tracker
->res_tree
[type
];
1056 res_arr
= kzalloc(count
* sizeof *res_arr
, GFP_KERNEL
);
1060 for (i
= 0; i
< count
; ++i
) {
1061 res_arr
[i
] = alloc_tr(base
+ i
, type
, slave
, extra
);
1063 for (--i
; i
>= 0; --i
)
1071 spin_lock_irq(mlx4_tlock(dev
));
1072 for (i
= 0; i
< count
; ++i
) {
1073 if (find_res(dev
, base
+ i
, type
)) {
1077 err
= res_tracker_insert(root
, res_arr
[i
]);
1080 list_add_tail(&res_arr
[i
]->list
,
1081 &tracker
->slave_list
[slave
].res_list
[type
]);
1083 spin_unlock_irq(mlx4_tlock(dev
));
1089 for (--i
; i
>= base
; --i
)
1090 rb_erase(&res_arr
[i
]->node
, root
);
1092 spin_unlock_irq(mlx4_tlock(dev
));
1094 for (i
= 0; i
< count
; ++i
)
1102 static int remove_qp_ok(struct res_qp
*res
)
1104 if (res
->com
.state
== RES_QP_BUSY
|| atomic_read(&res
->ref_count
) ||
1105 !list_empty(&res
->mcg_list
)) {
1106 pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
1107 res
->com
.state
, atomic_read(&res
->ref_count
));
1109 } else if (res
->com
.state
!= RES_QP_RESERVED
) {
1116 static int remove_mtt_ok(struct res_mtt
*res
, int order
)
1118 if (res
->com
.state
== RES_MTT_BUSY
||
1119 atomic_read(&res
->ref_count
)) {
1120 pr_devel("%s-%d: state %s, ref_count %d\n",
1122 mtt_states_str(res
->com
.state
),
1123 atomic_read(&res
->ref_count
));
1125 } else if (res
->com
.state
!= RES_MTT_ALLOCATED
)
1127 else if (res
->order
!= order
)
1133 static int remove_mpt_ok(struct res_mpt
*res
)
1135 if (res
->com
.state
== RES_MPT_BUSY
)
1137 else if (res
->com
.state
!= RES_MPT_RESERVED
)
1143 static int remove_eq_ok(struct res_eq
*res
)
1145 if (res
->com
.state
== RES_MPT_BUSY
)
1147 else if (res
->com
.state
!= RES_MPT_RESERVED
)
1153 static int remove_counter_ok(struct res_counter
*res
)
1155 if (res
->com
.state
== RES_COUNTER_BUSY
)
1157 else if (res
->com
.state
!= RES_COUNTER_ALLOCATED
)
1163 static int remove_xrcdn_ok(struct res_xrcdn
*res
)
1165 if (res
->com
.state
== RES_XRCD_BUSY
)
1167 else if (res
->com
.state
!= RES_XRCD_ALLOCATED
)
1173 static int remove_fs_rule_ok(struct res_fs_rule
*res
)
1175 if (res
->com
.state
== RES_FS_RULE_BUSY
)
1177 else if (res
->com
.state
!= RES_FS_RULE_ALLOCATED
)
1183 static int remove_cq_ok(struct res_cq
*res
)
1185 if (res
->com
.state
== RES_CQ_BUSY
)
1187 else if (res
->com
.state
!= RES_CQ_ALLOCATED
)
1193 static int remove_srq_ok(struct res_srq
*res
)
1195 if (res
->com
.state
== RES_SRQ_BUSY
)
1197 else if (res
->com
.state
!= RES_SRQ_ALLOCATED
)
1203 static int remove_ok(struct res_common
*res
, enum mlx4_resource type
, int extra
)
1207 return remove_qp_ok((struct res_qp
*)res
);
1209 return remove_cq_ok((struct res_cq
*)res
);
1211 return remove_srq_ok((struct res_srq
*)res
);
1213 return remove_mpt_ok((struct res_mpt
*)res
);
1215 return remove_mtt_ok((struct res_mtt
*)res
, extra
);
1219 return remove_eq_ok((struct res_eq
*)res
);
1221 return remove_counter_ok((struct res_counter
*)res
);
1223 return remove_xrcdn_ok((struct res_xrcdn
*)res
);
1225 return remove_fs_rule_ok((struct res_fs_rule
*)res
);
1231 static int rem_res_range(struct mlx4_dev
*dev
, int slave
, u64 base
, int count
,
1232 enum mlx4_resource type
, int extra
)
1236 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1237 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1238 struct res_common
*r
;
1240 spin_lock_irq(mlx4_tlock(dev
));
1241 for (i
= base
; i
< base
+ count
; ++i
) {
1242 r
= res_tracker_lookup(&tracker
->res_tree
[type
], i
);
1247 if (r
->owner
!= slave
) {
1251 err
= remove_ok(r
, type
, extra
);
1256 for (i
= base
; i
< base
+ count
; ++i
) {
1257 r
= res_tracker_lookup(&tracker
->res_tree
[type
], i
);
1258 rb_erase(&r
->node
, &tracker
->res_tree
[type
]);
1265 spin_unlock_irq(mlx4_tlock(dev
));
1270 static int qp_res_start_move_to(struct mlx4_dev
*dev
, int slave
, int qpn
,
1271 enum res_qp_states state
, struct res_qp
**qp
,
1274 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1275 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1279 spin_lock_irq(mlx4_tlock(dev
));
1280 r
= res_tracker_lookup(&tracker
->res_tree
[RES_QP
], qpn
);
1283 else if (r
->com
.owner
!= slave
)
1288 mlx4_dbg(dev
, "%s: failed RES_QP, 0x%llx\n",
1289 __func__
, r
->com
.res_id
);
1293 case RES_QP_RESERVED
:
1294 if (r
->com
.state
== RES_QP_MAPPED
&& !alloc
)
1297 mlx4_dbg(dev
, "failed RES_QP, 0x%llx\n", r
->com
.res_id
);
1302 if ((r
->com
.state
== RES_QP_RESERVED
&& alloc
) ||
1303 r
->com
.state
== RES_QP_HW
)
1306 mlx4_dbg(dev
, "failed RES_QP, 0x%llx\n",
1314 if (r
->com
.state
!= RES_QP_MAPPED
)
1322 r
->com
.from_state
= r
->com
.state
;
1323 r
->com
.to_state
= state
;
1324 r
->com
.state
= RES_QP_BUSY
;
1330 spin_unlock_irq(mlx4_tlock(dev
));
1335 static int mr_res_start_move_to(struct mlx4_dev
*dev
, int slave
, int index
,
1336 enum res_mpt_states state
, struct res_mpt
**mpt
)
1338 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1339 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1343 spin_lock_irq(mlx4_tlock(dev
));
1344 r
= res_tracker_lookup(&tracker
->res_tree
[RES_MPT
], index
);
1347 else if (r
->com
.owner
!= slave
)
1355 case RES_MPT_RESERVED
:
1356 if (r
->com
.state
!= RES_MPT_MAPPED
)
1360 case RES_MPT_MAPPED
:
1361 if (r
->com
.state
!= RES_MPT_RESERVED
&&
1362 r
->com
.state
!= RES_MPT_HW
)
1367 if (r
->com
.state
!= RES_MPT_MAPPED
)
1375 r
->com
.from_state
= r
->com
.state
;
1376 r
->com
.to_state
= state
;
1377 r
->com
.state
= RES_MPT_BUSY
;
1383 spin_unlock_irq(mlx4_tlock(dev
));
1388 static int eq_res_start_move_to(struct mlx4_dev
*dev
, int slave
, int index
,
1389 enum res_eq_states state
, struct res_eq
**eq
)
1391 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1392 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1396 spin_lock_irq(mlx4_tlock(dev
));
1397 r
= res_tracker_lookup(&tracker
->res_tree
[RES_EQ
], index
);
1400 else if (r
->com
.owner
!= slave
)
1408 case RES_EQ_RESERVED
:
1409 if (r
->com
.state
!= RES_EQ_HW
)
1414 if (r
->com
.state
!= RES_EQ_RESERVED
)
1423 r
->com
.from_state
= r
->com
.state
;
1424 r
->com
.to_state
= state
;
1425 r
->com
.state
= RES_EQ_BUSY
;
1431 spin_unlock_irq(mlx4_tlock(dev
));
1436 static int cq_res_start_move_to(struct mlx4_dev
*dev
, int slave
, int cqn
,
1437 enum res_cq_states state
, struct res_cq
**cq
)
1439 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1440 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1444 spin_lock_irq(mlx4_tlock(dev
));
1445 r
= res_tracker_lookup(&tracker
->res_tree
[RES_CQ
], cqn
);
1448 } else if (r
->com
.owner
!= slave
) {
1450 } else if (state
== RES_CQ_ALLOCATED
) {
1451 if (r
->com
.state
!= RES_CQ_HW
)
1453 else if (atomic_read(&r
->ref_count
))
1457 } else if (state
!= RES_CQ_HW
|| r
->com
.state
!= RES_CQ_ALLOCATED
) {
1464 r
->com
.from_state
= r
->com
.state
;
1465 r
->com
.to_state
= state
;
1466 r
->com
.state
= RES_CQ_BUSY
;
1471 spin_unlock_irq(mlx4_tlock(dev
));
1476 static int srq_res_start_move_to(struct mlx4_dev
*dev
, int slave
, int index
,
1477 enum res_srq_states state
, struct res_srq
**srq
)
1479 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1480 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1484 spin_lock_irq(mlx4_tlock(dev
));
1485 r
= res_tracker_lookup(&tracker
->res_tree
[RES_SRQ
], index
);
1488 } else if (r
->com
.owner
!= slave
) {
1490 } else if (state
== RES_SRQ_ALLOCATED
) {
1491 if (r
->com
.state
!= RES_SRQ_HW
)
1493 else if (atomic_read(&r
->ref_count
))
1495 } else if (state
!= RES_SRQ_HW
|| r
->com
.state
!= RES_SRQ_ALLOCATED
) {
1500 r
->com
.from_state
= r
->com
.state
;
1501 r
->com
.to_state
= state
;
1502 r
->com
.state
= RES_SRQ_BUSY
;
1507 spin_unlock_irq(mlx4_tlock(dev
));
1512 static void res_abort_move(struct mlx4_dev
*dev
, int slave
,
1513 enum mlx4_resource type
, int id
)
1515 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1516 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1517 struct res_common
*r
;
1519 spin_lock_irq(mlx4_tlock(dev
));
1520 r
= res_tracker_lookup(&tracker
->res_tree
[type
], id
);
1521 if (r
&& (r
->owner
== slave
))
1522 r
->state
= r
->from_state
;
1523 spin_unlock_irq(mlx4_tlock(dev
));
1526 static void res_end_move(struct mlx4_dev
*dev
, int slave
,
1527 enum mlx4_resource type
, int id
)
1529 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1530 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1531 struct res_common
*r
;
1533 spin_lock_irq(mlx4_tlock(dev
));
1534 r
= res_tracker_lookup(&tracker
->res_tree
[type
], id
);
1535 if (r
&& (r
->owner
== slave
))
1536 r
->state
= r
->to_state
;
1537 spin_unlock_irq(mlx4_tlock(dev
));
/*
 * A reserved QP number is valid for @slave when it falls in the
 * reserved-QP region and the caller is either the master or the guest
 * that proxies this QP.
 */
static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
	if (!mlx4_is_qp_reserved(dev, qpn))
		return 0;

	return mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn);
}
1546 static int fw_reserved(struct mlx4_dev
*dev
, int qpn
)
1548 return qpn
< dev
->caps
.reserved_qps_cnt
[MLX4_QP_REGION_FW
];
1551 static int qp_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1552 u64 in_param
, u64
*out_param
)
1562 case RES_OP_RESERVE
:
1563 count
= get_param_l(&in_param
) & 0xffffff;
1564 /* Turn off all unsupported QP allocation flags that the
1565 * slave tries to set.
1567 flags
= (get_param_l(&in_param
) >> 24) & dev
->caps
.alloc_res_qp_mask
;
1568 align
= get_param_h(&in_param
);
1569 err
= mlx4_grant_resource(dev
, slave
, RES_QP
, count
, 0);
1573 err
= __mlx4_qp_reserve_range(dev
, count
, align
, &base
, flags
);
1575 mlx4_release_resource(dev
, slave
, RES_QP
, count
, 0);
1579 err
= add_res_range(dev
, slave
, base
, count
, RES_QP
, 0);
1581 mlx4_release_resource(dev
, slave
, RES_QP
, count
, 0);
1582 __mlx4_qp_release_range(dev
, base
, count
);
1585 set_param_l(out_param
, base
);
1587 case RES_OP_MAP_ICM
:
1588 qpn
= get_param_l(&in_param
) & 0x7fffff;
1589 if (valid_reserved(dev
, slave
, qpn
)) {
1590 err
= add_res_range(dev
, slave
, qpn
, 1, RES_QP
, 0);
1595 err
= qp_res_start_move_to(dev
, slave
, qpn
, RES_QP_MAPPED
,
1600 if (!fw_reserved(dev
, qpn
)) {
1601 err
= __mlx4_qp_alloc_icm(dev
, qpn
, GFP_KERNEL
);
1603 res_abort_move(dev
, slave
, RES_QP
, qpn
);
1608 res_end_move(dev
, slave
, RES_QP
, qpn
);
1618 static int mtt_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1619 u64 in_param
, u64
*out_param
)
1625 if (op
!= RES_OP_RESERVE_AND_MAP
)
1628 order
= get_param_l(&in_param
);
1630 err
= mlx4_grant_resource(dev
, slave
, RES_MTT
, 1 << order
, 0);
1634 base
= __mlx4_alloc_mtt_range(dev
, order
);
1636 mlx4_release_resource(dev
, slave
, RES_MTT
, 1 << order
, 0);
1640 err
= add_res_range(dev
, slave
, base
, 1, RES_MTT
, order
);
1642 mlx4_release_resource(dev
, slave
, RES_MTT
, 1 << order
, 0);
1643 __mlx4_free_mtt_range(dev
, base
, order
);
1645 set_param_l(out_param
, base
);
1651 static int mpt_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1652 u64 in_param
, u64
*out_param
)
1657 struct res_mpt
*mpt
;
1660 case RES_OP_RESERVE
:
1661 err
= mlx4_grant_resource(dev
, slave
, RES_MPT
, 1, 0);
1665 index
= __mlx4_mpt_reserve(dev
);
1667 mlx4_release_resource(dev
, slave
, RES_MPT
, 1, 0);
1670 id
= index
& mpt_mask(dev
);
1672 err
= add_res_range(dev
, slave
, id
, 1, RES_MPT
, index
);
1674 mlx4_release_resource(dev
, slave
, RES_MPT
, 1, 0);
1675 __mlx4_mpt_release(dev
, index
);
1678 set_param_l(out_param
, index
);
1680 case RES_OP_MAP_ICM
:
1681 index
= get_param_l(&in_param
);
1682 id
= index
& mpt_mask(dev
);
1683 err
= mr_res_start_move_to(dev
, slave
, id
,
1684 RES_MPT_MAPPED
, &mpt
);
1688 err
= __mlx4_mpt_alloc_icm(dev
, mpt
->key
, GFP_KERNEL
);
1690 res_abort_move(dev
, slave
, RES_MPT
, id
);
1694 res_end_move(dev
, slave
, RES_MPT
, id
);
1700 static int cq_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1701 u64 in_param
, u64
*out_param
)
1707 case RES_OP_RESERVE_AND_MAP
:
1708 err
= mlx4_grant_resource(dev
, slave
, RES_CQ
, 1, 0);
1712 err
= __mlx4_cq_alloc_icm(dev
, &cqn
);
1714 mlx4_release_resource(dev
, slave
, RES_CQ
, 1, 0);
1718 err
= add_res_range(dev
, slave
, cqn
, 1, RES_CQ
, 0);
1720 mlx4_release_resource(dev
, slave
, RES_CQ
, 1, 0);
1721 __mlx4_cq_free_icm(dev
, cqn
);
1725 set_param_l(out_param
, cqn
);
1735 static int srq_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1736 u64 in_param
, u64
*out_param
)
1742 case RES_OP_RESERVE_AND_MAP
:
1743 err
= mlx4_grant_resource(dev
, slave
, RES_SRQ
, 1, 0);
1747 err
= __mlx4_srq_alloc_icm(dev
, &srqn
);
1749 mlx4_release_resource(dev
, slave
, RES_SRQ
, 1, 0);
1753 err
= add_res_range(dev
, slave
, srqn
, 1, RES_SRQ
, 0);
1755 mlx4_release_resource(dev
, slave
, RES_SRQ
, 1, 0);
1756 __mlx4_srq_free_icm(dev
, srqn
);
1760 set_param_l(out_param
, srqn
);
1770 static int mac_find_smac_ix_in_slave(struct mlx4_dev
*dev
, int slave
, int port
,
1771 u8 smac_index
, u64
*mac
)
1773 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1774 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1775 struct list_head
*mac_list
=
1776 &tracker
->slave_list
[slave
].res_list
[RES_MAC
];
1777 struct mac_res
*res
, *tmp
;
1779 list_for_each_entry_safe(res
, tmp
, mac_list
, list
) {
1780 if (res
->smac_index
== smac_index
&& res
->port
== (u8
) port
) {
1788 static int mac_add_to_slave(struct mlx4_dev
*dev
, int slave
, u64 mac
, int port
, u8 smac_index
)
1790 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1791 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1792 struct list_head
*mac_list
=
1793 &tracker
->slave_list
[slave
].res_list
[RES_MAC
];
1794 struct mac_res
*res
, *tmp
;
1796 list_for_each_entry_safe(res
, tmp
, mac_list
, list
) {
1797 if (res
->mac
== mac
&& res
->port
== (u8
) port
) {
1798 /* mac found. update ref count */
1804 if (mlx4_grant_resource(dev
, slave
, RES_MAC
, 1, port
))
1806 res
= kzalloc(sizeof *res
, GFP_KERNEL
);
1808 mlx4_release_resource(dev
, slave
, RES_MAC
, 1, port
);
1812 res
->port
= (u8
) port
;
1813 res
->smac_index
= smac_index
;
1815 list_add_tail(&res
->list
,
1816 &tracker
->slave_list
[slave
].res_list
[RES_MAC
]);
1820 static void mac_del_from_slave(struct mlx4_dev
*dev
, int slave
, u64 mac
,
1823 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1824 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1825 struct list_head
*mac_list
=
1826 &tracker
->slave_list
[slave
].res_list
[RES_MAC
];
1827 struct mac_res
*res
, *tmp
;
1829 list_for_each_entry_safe(res
, tmp
, mac_list
, list
) {
1830 if (res
->mac
== mac
&& res
->port
== (u8
) port
) {
1831 if (!--res
->ref_count
) {
1832 list_del(&res
->list
);
1833 mlx4_release_resource(dev
, slave
, RES_MAC
, 1, port
);
1841 static void rem_slave_macs(struct mlx4_dev
*dev
, int slave
)
1843 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1844 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1845 struct list_head
*mac_list
=
1846 &tracker
->slave_list
[slave
].res_list
[RES_MAC
];
1847 struct mac_res
*res
, *tmp
;
1850 list_for_each_entry_safe(res
, tmp
, mac_list
, list
) {
1851 list_del(&res
->list
);
1852 /* dereference the mac the num times the slave referenced it */
1853 for (i
= 0; i
< res
->ref_count
; i
++)
1854 __mlx4_unregister_mac(dev
, res
->port
, res
->mac
);
1855 mlx4_release_resource(dev
, slave
, RES_MAC
, 1, res
->port
);
1860 static int mac_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1861 u64 in_param
, u64
*out_param
, int in_port
)
1868 if (op
!= RES_OP_RESERVE_AND_MAP
)
1871 port
= !in_port
? get_param_l(out_param
) : in_port
;
1872 port
= mlx4_slave_convert_port(
1879 err
= __mlx4_register_mac(dev
, port
, mac
);
1882 set_param_l(out_param
, err
);
1887 err
= mac_add_to_slave(dev
, slave
, mac
, port
, smac_index
);
1889 __mlx4_unregister_mac(dev
, port
, mac
);
1894 static int vlan_add_to_slave(struct mlx4_dev
*dev
, int slave
, u16 vlan
,
1895 int port
, int vlan_index
)
1897 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1898 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1899 struct list_head
*vlan_list
=
1900 &tracker
->slave_list
[slave
].res_list
[RES_VLAN
];
1901 struct vlan_res
*res
, *tmp
;
1903 list_for_each_entry_safe(res
, tmp
, vlan_list
, list
) {
1904 if (res
->vlan
== vlan
&& res
->port
== (u8
) port
) {
1905 /* vlan found. update ref count */
1911 if (mlx4_grant_resource(dev
, slave
, RES_VLAN
, 1, port
))
1913 res
= kzalloc(sizeof(*res
), GFP_KERNEL
);
1915 mlx4_release_resource(dev
, slave
, RES_VLAN
, 1, port
);
1919 res
->port
= (u8
) port
;
1920 res
->vlan_index
= vlan_index
;
1922 list_add_tail(&res
->list
,
1923 &tracker
->slave_list
[slave
].res_list
[RES_VLAN
]);
1928 static void vlan_del_from_slave(struct mlx4_dev
*dev
, int slave
, u16 vlan
,
1931 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1932 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1933 struct list_head
*vlan_list
=
1934 &tracker
->slave_list
[slave
].res_list
[RES_VLAN
];
1935 struct vlan_res
*res
, *tmp
;
1937 list_for_each_entry_safe(res
, tmp
, vlan_list
, list
) {
1938 if (res
->vlan
== vlan
&& res
->port
== (u8
) port
) {
1939 if (!--res
->ref_count
) {
1940 list_del(&res
->list
);
1941 mlx4_release_resource(dev
, slave
, RES_VLAN
,
1950 static void rem_slave_vlans(struct mlx4_dev
*dev
, int slave
)
1952 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1953 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
1954 struct list_head
*vlan_list
=
1955 &tracker
->slave_list
[slave
].res_list
[RES_VLAN
];
1956 struct vlan_res
*res
, *tmp
;
1959 list_for_each_entry_safe(res
, tmp
, vlan_list
, list
) {
1960 list_del(&res
->list
);
1961 /* dereference the vlan the num times the slave referenced it */
1962 for (i
= 0; i
< res
->ref_count
; i
++)
1963 __mlx4_unregister_vlan(dev
, res
->port
, res
->vlan
);
1964 mlx4_release_resource(dev
, slave
, RES_VLAN
, 1, res
->port
);
1969 static int vlan_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
1970 u64 in_param
, u64
*out_param
, int in_port
)
1972 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1973 struct mlx4_slave_state
*slave_state
= priv
->mfunc
.master
.slave_state
;
1979 port
= !in_port
? get_param_l(out_param
) : in_port
;
1981 if (!port
|| op
!= RES_OP_RESERVE_AND_MAP
)
1984 port
= mlx4_slave_convert_port(
1989 /* upstream kernels had NOP for reg/unreg vlan. Continue this. */
1990 if (!in_port
&& port
> 0 && port
<= dev
->caps
.num_ports
) {
1991 slave_state
[slave
].old_vlan_api
= true;
1995 vlan
= (u16
) in_param
;
1997 err
= __mlx4_register_vlan(dev
, port
, vlan
, &vlan_index
);
1999 set_param_l(out_param
, (u32
) vlan_index
);
2000 err
= vlan_add_to_slave(dev
, slave
, vlan
, port
, vlan_index
);
2002 __mlx4_unregister_vlan(dev
, port
, vlan
);
2007 static int counter_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2008 u64 in_param
, u64
*out_param
)
2013 if (op
!= RES_OP_RESERVE
)
2016 err
= mlx4_grant_resource(dev
, slave
, RES_COUNTER
, 1, 0);
2020 err
= __mlx4_counter_alloc(dev
, &index
);
2022 mlx4_release_resource(dev
, slave
, RES_COUNTER
, 1, 0);
2026 err
= add_res_range(dev
, slave
, index
, 1, RES_COUNTER
, 0);
2028 __mlx4_counter_free(dev
, index
);
2029 mlx4_release_resource(dev
, slave
, RES_COUNTER
, 1, 0);
2031 set_param_l(out_param
, index
);
2037 static int xrcdn_alloc_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2038 u64 in_param
, u64
*out_param
)
2043 if (op
!= RES_OP_RESERVE
)
2046 err
= __mlx4_xrcd_alloc(dev
, &xrcdn
);
2050 err
= add_res_range(dev
, slave
, xrcdn
, 1, RES_XRCD
, 0);
2052 __mlx4_xrcd_free(dev
, xrcdn
);
2054 set_param_l(out_param
, xrcdn
);
2059 int mlx4_ALLOC_RES_wrapper(struct mlx4_dev
*dev
, int slave
,
2060 struct mlx4_vhcr
*vhcr
,
2061 struct mlx4_cmd_mailbox
*inbox
,
2062 struct mlx4_cmd_mailbox
*outbox
,
2063 struct mlx4_cmd_info
*cmd
)
2066 int alop
= vhcr
->op_modifier
;
2068 switch (vhcr
->in_modifier
& 0xFF) {
2070 err
= qp_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2071 vhcr
->in_param
, &vhcr
->out_param
);
2075 err
= mtt_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2076 vhcr
->in_param
, &vhcr
->out_param
);
2080 err
= mpt_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2081 vhcr
->in_param
, &vhcr
->out_param
);
2085 err
= cq_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2086 vhcr
->in_param
, &vhcr
->out_param
);
2090 err
= srq_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2091 vhcr
->in_param
, &vhcr
->out_param
);
2095 err
= mac_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2096 vhcr
->in_param
, &vhcr
->out_param
,
2097 (vhcr
->in_modifier
>> 8) & 0xFF);
2101 err
= vlan_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2102 vhcr
->in_param
, &vhcr
->out_param
,
2103 (vhcr
->in_modifier
>> 8) & 0xFF);
2107 err
= counter_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2108 vhcr
->in_param
, &vhcr
->out_param
);
2112 err
= xrcdn_alloc_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2113 vhcr
->in_param
, &vhcr
->out_param
);
2124 static int qp_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2133 case RES_OP_RESERVE
:
2134 base
= get_param_l(&in_param
) & 0x7fffff;
2135 count
= get_param_h(&in_param
);
2136 err
= rem_res_range(dev
, slave
, base
, count
, RES_QP
, 0);
2139 mlx4_release_resource(dev
, slave
, RES_QP
, count
, 0);
2140 __mlx4_qp_release_range(dev
, base
, count
);
2142 case RES_OP_MAP_ICM
:
2143 qpn
= get_param_l(&in_param
) & 0x7fffff;
2144 err
= qp_res_start_move_to(dev
, slave
, qpn
, RES_QP_RESERVED
,
2149 if (!fw_reserved(dev
, qpn
))
2150 __mlx4_qp_free_icm(dev
, qpn
);
2152 res_end_move(dev
, slave
, RES_QP
, qpn
);
2154 if (valid_reserved(dev
, slave
, qpn
))
2155 err
= rem_res_range(dev
, slave
, qpn
, 1, RES_QP
, 0);
2164 static int mtt_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2165 u64 in_param
, u64
*out_param
)
2171 if (op
!= RES_OP_RESERVE_AND_MAP
)
2174 base
= get_param_l(&in_param
);
2175 order
= get_param_h(&in_param
);
2176 err
= rem_res_range(dev
, slave
, base
, 1, RES_MTT
, order
);
2178 mlx4_release_resource(dev
, slave
, RES_MTT
, 1 << order
, 0);
2179 __mlx4_free_mtt_range(dev
, base
, order
);
2184 static int mpt_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2190 struct res_mpt
*mpt
;
2193 case RES_OP_RESERVE
:
2194 index
= get_param_l(&in_param
);
2195 id
= index
& mpt_mask(dev
);
2196 err
= get_res(dev
, slave
, id
, RES_MPT
, &mpt
);
2200 put_res(dev
, slave
, id
, RES_MPT
);
2202 err
= rem_res_range(dev
, slave
, id
, 1, RES_MPT
, 0);
2205 mlx4_release_resource(dev
, slave
, RES_MPT
, 1, 0);
2206 __mlx4_mpt_release(dev
, index
);
2208 case RES_OP_MAP_ICM
:
2209 index
= get_param_l(&in_param
);
2210 id
= index
& mpt_mask(dev
);
2211 err
= mr_res_start_move_to(dev
, slave
, id
,
2212 RES_MPT_RESERVED
, &mpt
);
2216 __mlx4_mpt_free_icm(dev
, mpt
->key
);
2217 res_end_move(dev
, slave
, RES_MPT
, id
);
2227 static int cq_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2228 u64 in_param
, u64
*out_param
)
2234 case RES_OP_RESERVE_AND_MAP
:
2235 cqn
= get_param_l(&in_param
);
2236 err
= rem_res_range(dev
, slave
, cqn
, 1, RES_CQ
, 0);
2240 mlx4_release_resource(dev
, slave
, RES_CQ
, 1, 0);
2241 __mlx4_cq_free_icm(dev
, cqn
);
2252 static int srq_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2253 u64 in_param
, u64
*out_param
)
2259 case RES_OP_RESERVE_AND_MAP
:
2260 srqn
= get_param_l(&in_param
);
2261 err
= rem_res_range(dev
, slave
, srqn
, 1, RES_SRQ
, 0);
2265 mlx4_release_resource(dev
, slave
, RES_SRQ
, 1, 0);
2266 __mlx4_srq_free_icm(dev
, srqn
);
2277 static int mac_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2278 u64 in_param
, u64
*out_param
, int in_port
)
2284 case RES_OP_RESERVE_AND_MAP
:
2285 port
= !in_port
? get_param_l(out_param
) : in_port
;
2286 port
= mlx4_slave_convert_port(
2291 mac_del_from_slave(dev
, slave
, in_param
, port
);
2292 __mlx4_unregister_mac(dev
, port
, in_param
);
2303 static int vlan_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2304 u64 in_param
, u64
*out_param
, int port
)
2306 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2307 struct mlx4_slave_state
*slave_state
= priv
->mfunc
.master
.slave_state
;
2310 port
= mlx4_slave_convert_port(
2316 case RES_OP_RESERVE_AND_MAP
:
2317 if (slave_state
[slave
].old_vlan_api
)
2321 vlan_del_from_slave(dev
, slave
, in_param
, port
);
2322 __mlx4_unregister_vlan(dev
, port
, in_param
);
2332 static int counter_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2333 u64 in_param
, u64
*out_param
)
2338 if (op
!= RES_OP_RESERVE
)
2341 index
= get_param_l(&in_param
);
2342 err
= rem_res_range(dev
, slave
, index
, 1, RES_COUNTER
, 0);
2346 __mlx4_counter_free(dev
, index
);
2347 mlx4_release_resource(dev
, slave
, RES_COUNTER
, 1, 0);
2352 static int xrcdn_free_res(struct mlx4_dev
*dev
, int slave
, int op
, int cmd
,
2353 u64 in_param
, u64
*out_param
)
2358 if (op
!= RES_OP_RESERVE
)
2361 xrcdn
= get_param_l(&in_param
);
2362 err
= rem_res_range(dev
, slave
, xrcdn
, 1, RES_XRCD
, 0);
2366 __mlx4_xrcd_free(dev
, xrcdn
);
2371 int mlx4_FREE_RES_wrapper(struct mlx4_dev
*dev
, int slave
,
2372 struct mlx4_vhcr
*vhcr
,
2373 struct mlx4_cmd_mailbox
*inbox
,
2374 struct mlx4_cmd_mailbox
*outbox
,
2375 struct mlx4_cmd_info
*cmd
)
2378 int alop
= vhcr
->op_modifier
;
2380 switch (vhcr
->in_modifier
& 0xFF) {
2382 err
= qp_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2387 err
= mtt_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2388 vhcr
->in_param
, &vhcr
->out_param
);
2392 err
= mpt_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2397 err
= cq_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2398 vhcr
->in_param
, &vhcr
->out_param
);
2402 err
= srq_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2403 vhcr
->in_param
, &vhcr
->out_param
);
2407 err
= mac_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2408 vhcr
->in_param
, &vhcr
->out_param
,
2409 (vhcr
->in_modifier
>> 8) & 0xFF);
2413 err
= vlan_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2414 vhcr
->in_param
, &vhcr
->out_param
,
2415 (vhcr
->in_modifier
>> 8) & 0xFF);
2419 err
= counter_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2420 vhcr
->in_param
, &vhcr
->out_param
);
2424 err
= xrcdn_free_res(dev
, slave
, vhcr
->op_modifier
, alop
,
2425 vhcr
->in_param
, &vhcr
->out_param
);
2433 /* ugly but other choices are uglier */
2434 static int mr_phys_mpt(struct mlx4_mpt_entry
*mpt
)
2436 return (be32_to_cpu(mpt
->flags
) >> 9) & 1;
2439 static int mr_get_mtt_addr(struct mlx4_mpt_entry
*mpt
)
2441 return (int)be64_to_cpu(mpt
->mtt_addr
) & 0xfffffff8;
2444 static int mr_get_mtt_size(struct mlx4_mpt_entry
*mpt
)
2446 return be32_to_cpu(mpt
->mtt_sz
);
2449 static u32
mr_get_pd(struct mlx4_mpt_entry
*mpt
)
2451 return be32_to_cpu(mpt
->pd_flags
) & 0x00ffffff;
2454 static int mr_is_fmr(struct mlx4_mpt_entry
*mpt
)
2456 return be32_to_cpu(mpt
->pd_flags
) & MLX4_MPT_PD_FLAG_FAST_REG
;
2459 static int mr_is_bind_enabled(struct mlx4_mpt_entry
*mpt
)
2461 return be32_to_cpu(mpt
->flags
) & MLX4_MPT_FLAG_BIND_ENABLE
;
2464 static int mr_is_region(struct mlx4_mpt_entry
*mpt
)
2466 return be32_to_cpu(mpt
->flags
) & MLX4_MPT_FLAG_REGION
;
2469 static int qp_get_mtt_addr(struct mlx4_qp_context
*qpc
)
2471 return be32_to_cpu(qpc
->mtt_base_addr_l
) & 0xfffffff8;
2474 static int srq_get_mtt_addr(struct mlx4_srq_context
*srqc
)
2476 return be32_to_cpu(srqc
->mtt_base_addr_l
) & 0xfffffff8;
2479 static int qp_get_mtt_size(struct mlx4_qp_context
*qpc
)
2481 int page_shift
= (qpc
->log_page_size
& 0x3f) + 12;
2482 int log_sq_size
= (qpc
->sq_size_stride
>> 3) & 0xf;
2483 int log_sq_sride
= qpc
->sq_size_stride
& 7;
2484 int log_rq_size
= (qpc
->rq_size_stride
>> 3) & 0xf;
2485 int log_rq_stride
= qpc
->rq_size_stride
& 7;
2486 int srq
= (be32_to_cpu(qpc
->srqn
) >> 24) & 1;
2487 int rss
= (be32_to_cpu(qpc
->flags
) >> 13) & 1;
2488 u32 ts
= (be32_to_cpu(qpc
->flags
) >> 16) & 0xff;
2489 int xrc
= (ts
== MLX4_QP_ST_XRC
) ? 1 : 0;
2494 int page_offset
= (be32_to_cpu(qpc
->params2
) >> 6) & 0x3f;
2496 sq_size
= 1 << (log_sq_size
+ log_sq_sride
+ 4);
2497 rq_size
= (srq
|rss
|xrc
) ? 0 : (1 << (log_rq_size
+ log_rq_stride
+ 4));
2498 total_mem
= sq_size
+ rq_size
;
2500 roundup_pow_of_two((total_mem
+ (page_offset
<< 6)) >>
2506 static int check_mtt_range(struct mlx4_dev
*dev
, int slave
, int start
,
2507 int size
, struct res_mtt
*mtt
)
2509 int res_start
= mtt
->com
.res_id
;
2510 int res_size
= (1 << mtt
->order
);
2512 if (start
< res_start
|| start
+ size
> res_start
+ res_size
)
2517 int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev
*dev
, int slave
,
2518 struct mlx4_vhcr
*vhcr
,
2519 struct mlx4_cmd_mailbox
*inbox
,
2520 struct mlx4_cmd_mailbox
*outbox
,
2521 struct mlx4_cmd_info
*cmd
)
2524 int index
= vhcr
->in_modifier
;
2525 struct res_mtt
*mtt
;
2526 struct res_mpt
*mpt
;
2527 int mtt_base
= mr_get_mtt_addr(inbox
->buf
) / dev
->caps
.mtt_entry_sz
;
2533 id
= index
& mpt_mask(dev
);
2534 err
= mr_res_start_move_to(dev
, slave
, id
, RES_MPT_HW
, &mpt
);
2538 /* Disable memory windows for VFs. */
2539 if (!mr_is_region(inbox
->buf
)) {
2544 /* Make sure that the PD bits related to the slave id are zeros. */
2545 pd
= mr_get_pd(inbox
->buf
);
2546 pd_slave
= (pd
>> 17) & 0x7f;
2547 if (pd_slave
!= 0 && --pd_slave
!= slave
) {
2552 if (mr_is_fmr(inbox
->buf
)) {
2553 /* FMR and Bind Enable are forbidden in slave devices. */
2554 if (mr_is_bind_enabled(inbox
->buf
)) {
2558 /* FMR and Memory Windows are also forbidden. */
2559 if (!mr_is_region(inbox
->buf
)) {
2565 phys
= mr_phys_mpt(inbox
->buf
);
2567 err
= get_res(dev
, slave
, mtt_base
, RES_MTT
, &mtt
);
2571 err
= check_mtt_range(dev
, slave
, mtt_base
,
2572 mr_get_mtt_size(inbox
->buf
), mtt
);
2579 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
2584 atomic_inc(&mtt
->ref_count
);
2585 put_res(dev
, slave
, mtt
->com
.res_id
, RES_MTT
);
2588 res_end_move(dev
, slave
, RES_MPT
, id
);
2593 put_res(dev
, slave
, mtt
->com
.res_id
, RES_MTT
);
2595 res_abort_move(dev
, slave
, RES_MPT
, id
);
2600 int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev
*dev
, int slave
,
2601 struct mlx4_vhcr
*vhcr
,
2602 struct mlx4_cmd_mailbox
*inbox
,
2603 struct mlx4_cmd_mailbox
*outbox
,
2604 struct mlx4_cmd_info
*cmd
)
2607 int index
= vhcr
->in_modifier
;
2608 struct res_mpt
*mpt
;
2611 id
= index
& mpt_mask(dev
);
2612 err
= mr_res_start_move_to(dev
, slave
, id
, RES_MPT_MAPPED
, &mpt
);
2616 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
2621 atomic_dec(&mpt
->mtt
->ref_count
);
2623 res_end_move(dev
, slave
, RES_MPT
, id
);
2627 res_abort_move(dev
, slave
, RES_MPT
, id
);
2632 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev
*dev
, int slave
,
2633 struct mlx4_vhcr
*vhcr
,
2634 struct mlx4_cmd_mailbox
*inbox
,
2635 struct mlx4_cmd_mailbox
*outbox
,
2636 struct mlx4_cmd_info
*cmd
)
2639 int index
= vhcr
->in_modifier
;
2640 struct res_mpt
*mpt
;
2643 id
= index
& mpt_mask(dev
);
2644 err
= get_res(dev
, slave
, id
, RES_MPT
, &mpt
);
2648 if (mpt
->com
.from_state
== RES_MPT_MAPPED
) {
2649 /* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
2650 * that, the VF must read the MPT. But since the MPT entry memory is not
2651 * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
2652 * entry contents. To guarantee that the MPT cannot be changed, the driver
2653 * must perform HW2SW_MPT before this query and return the MPT entry to HW
2654 * ownership fofollowing the change. The change here allows the VF to
2655 * perform QUERY_MPT also when the entry is in SW ownership.
2657 struct mlx4_mpt_entry
*mpt_entry
= mlx4_table_find(
2658 &mlx4_priv(dev
)->mr_table
.dmpt_table
,
2661 if (NULL
== mpt_entry
|| NULL
== outbox
->buf
) {
2666 memcpy(outbox
->buf
, mpt_entry
, sizeof(*mpt_entry
));
2669 } else if (mpt
->com
.from_state
== RES_MPT_HW
) {
2670 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
2678 put_res(dev
, slave
, id
, RES_MPT
);
2682 static int qp_get_rcqn(struct mlx4_qp_context
*qpc
)
2684 return be32_to_cpu(qpc
->cqn_recv
) & 0xffffff;
2687 static int qp_get_scqn(struct mlx4_qp_context
*qpc
)
2689 return be32_to_cpu(qpc
->cqn_send
) & 0xffffff;
2692 static u32
qp_get_srqn(struct mlx4_qp_context
*qpc
)
2694 return be32_to_cpu(qpc
->srqn
) & 0x1ffffff;
2697 static void adjust_proxy_tun_qkey(struct mlx4_dev
*dev
, struct mlx4_vhcr
*vhcr
,
2698 struct mlx4_qp_context
*context
)
2700 u32 qpn
= vhcr
->in_modifier
& 0xffffff;
2703 if (mlx4_get_parav_qkey(dev
, qpn
, &qkey
))
2706 /* adjust qkey in qp context */
2707 context
->qkey
= cpu_to_be32(qkey
);
2710 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev
*dev
, int slave
,
2711 struct mlx4_vhcr
*vhcr
,
2712 struct mlx4_cmd_mailbox
*inbox
,
2713 struct mlx4_cmd_mailbox
*outbox
,
2714 struct mlx4_cmd_info
*cmd
)
2717 int qpn
= vhcr
->in_modifier
& 0x7fffff;
2718 struct res_mtt
*mtt
;
2720 struct mlx4_qp_context
*qpc
= inbox
->buf
+ 8;
2721 int mtt_base
= qp_get_mtt_addr(qpc
) / dev
->caps
.mtt_entry_sz
;
2722 int mtt_size
= qp_get_mtt_size(qpc
);
2725 int rcqn
= qp_get_rcqn(qpc
);
2726 int scqn
= qp_get_scqn(qpc
);
2727 u32 srqn
= qp_get_srqn(qpc
) & 0xffffff;
2728 int use_srq
= (qp_get_srqn(qpc
) >> 24) & 1;
2729 struct res_srq
*srq
;
2730 int local_qpn
= be32_to_cpu(qpc
->local_qpn
) & 0xffffff;
2732 err
= qp_res_start_move_to(dev
, slave
, qpn
, RES_QP_HW
, &qp
, 0);
2735 qp
->local_qpn
= local_qpn
;
2736 qp
->sched_queue
= 0;
2738 qp
->vlan_control
= 0;
2740 qp
->pri_path_fl
= 0;
2743 qp
->qpc_flags
= be32_to_cpu(qpc
->flags
);
2745 err
= get_res(dev
, slave
, mtt_base
, RES_MTT
, &mtt
);
2749 err
= check_mtt_range(dev
, slave
, mtt_base
, mtt_size
, mtt
);
2753 err
= get_res(dev
, slave
, rcqn
, RES_CQ
, &rcq
);
2758 err
= get_res(dev
, slave
, scqn
, RES_CQ
, &scq
);
2765 err
= get_res(dev
, slave
, srqn
, RES_SRQ
, &srq
);
2770 adjust_proxy_tun_qkey(dev
, vhcr
, qpc
);
2771 update_pkey_index(dev
, slave
, inbox
);
2772 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
2775 atomic_inc(&mtt
->ref_count
);
2777 atomic_inc(&rcq
->ref_count
);
2779 atomic_inc(&scq
->ref_count
);
2783 put_res(dev
, slave
, scqn
, RES_CQ
);
2786 atomic_inc(&srq
->ref_count
);
2787 put_res(dev
, slave
, srqn
, RES_SRQ
);
2790 put_res(dev
, slave
, rcqn
, RES_CQ
);
2791 put_res(dev
, slave
, mtt_base
, RES_MTT
);
2792 res_end_move(dev
, slave
, RES_QP
, qpn
);
2798 put_res(dev
, slave
, srqn
, RES_SRQ
);
2801 put_res(dev
, slave
, scqn
, RES_CQ
);
2803 put_res(dev
, slave
, rcqn
, RES_CQ
);
2805 put_res(dev
, slave
, mtt_base
, RES_MTT
);
2807 res_abort_move(dev
, slave
, RES_QP
, qpn
);
2812 static int eq_get_mtt_addr(struct mlx4_eq_context
*eqc
)
2814 return be32_to_cpu(eqc
->mtt_base_addr_l
) & 0xfffffff8;
2817 static int eq_get_mtt_size(struct mlx4_eq_context
*eqc
)
2819 int log_eq_size
= eqc
->log_eq_size
& 0x1f;
2820 int page_shift
= (eqc
->log_page_size
& 0x3f) + 12;
2822 if (log_eq_size
+ 5 < page_shift
)
2825 return 1 << (log_eq_size
+ 5 - page_shift
);
2828 static int cq_get_mtt_addr(struct mlx4_cq_context
*cqc
)
2830 return be32_to_cpu(cqc
->mtt_base_addr_l
) & 0xfffffff8;
2833 static int cq_get_mtt_size(struct mlx4_cq_context
*cqc
)
2835 int log_cq_size
= (be32_to_cpu(cqc
->logsize_usrpage
) >> 24) & 0x1f;
2836 int page_shift
= (cqc
->log_page_size
& 0x3f) + 12;
2838 if (log_cq_size
+ 5 < page_shift
)
2841 return 1 << (log_cq_size
+ 5 - page_shift
);
2844 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev
*dev
, int slave
,
2845 struct mlx4_vhcr
*vhcr
,
2846 struct mlx4_cmd_mailbox
*inbox
,
2847 struct mlx4_cmd_mailbox
*outbox
,
2848 struct mlx4_cmd_info
*cmd
)
2851 int eqn
= vhcr
->in_modifier
;
2852 int res_id
= (slave
<< 8) | eqn
;
2853 struct mlx4_eq_context
*eqc
= inbox
->buf
;
2854 int mtt_base
= eq_get_mtt_addr(eqc
) / dev
->caps
.mtt_entry_sz
;
2855 int mtt_size
= eq_get_mtt_size(eqc
);
2857 struct res_mtt
*mtt
;
2859 err
= add_res_range(dev
, slave
, res_id
, 1, RES_EQ
, 0);
2862 err
= eq_res_start_move_to(dev
, slave
, res_id
, RES_EQ_HW
, &eq
);
2866 err
= get_res(dev
, slave
, mtt_base
, RES_MTT
, &mtt
);
2870 err
= check_mtt_range(dev
, slave
, mtt_base
, mtt_size
, mtt
);
2874 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
2878 atomic_inc(&mtt
->ref_count
);
2880 put_res(dev
, slave
, mtt
->com
.res_id
, RES_MTT
);
2881 res_end_move(dev
, slave
, RES_EQ
, res_id
);
2885 put_res(dev
, slave
, mtt
->com
.res_id
, RES_MTT
);
2887 res_abort_move(dev
, slave
, RES_EQ
, res_id
);
2889 rem_res_range(dev
, slave
, res_id
, 1, RES_EQ
, 0);
2893 int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev
*dev
, int slave
,
2894 struct mlx4_vhcr
*vhcr
,
2895 struct mlx4_cmd_mailbox
*inbox
,
2896 struct mlx4_cmd_mailbox
*outbox
,
2897 struct mlx4_cmd_info
*cmd
)
2900 u8 get
= vhcr
->op_modifier
;
2905 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
2910 static int get_containing_mtt(struct mlx4_dev
*dev
, int slave
, int start
,
2911 int len
, struct res_mtt
**res
)
2913 struct mlx4_priv
*priv
= mlx4_priv(dev
);
2914 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
2915 struct res_mtt
*mtt
;
2918 spin_lock_irq(mlx4_tlock(dev
));
2919 list_for_each_entry(mtt
, &tracker
->slave_list
[slave
].res_list
[RES_MTT
],
2921 if (!check_mtt_range(dev
, slave
, start
, len
, mtt
)) {
2923 mtt
->com
.from_state
= mtt
->com
.state
;
2924 mtt
->com
.state
= RES_MTT_BUSY
;
2929 spin_unlock_irq(mlx4_tlock(dev
));
2934 static int verify_qp_parameters(struct mlx4_dev
*dev
,
2935 struct mlx4_vhcr
*vhcr
,
2936 struct mlx4_cmd_mailbox
*inbox
,
2937 enum qp_transition transition
, u8 slave
)
2941 struct mlx4_qp_context
*qp_ctx
;
2942 enum mlx4_qp_optpar optpar
;
2946 qp_ctx
= inbox
->buf
+ 8;
2947 qp_type
= (be32_to_cpu(qp_ctx
->flags
) >> 16) & 0xff;
2948 optpar
= be32_to_cpu(*(__be32
*) inbox
->buf
);
2950 if (slave
!= mlx4_master_func_num(dev
))
2951 qp_ctx
->params2
&= ~MLX4_QP_BIT_FPP
;
2955 case MLX4_QP_ST_XRC
:
2957 switch (transition
) {
2958 case QP_TRANS_INIT2RTR
:
2959 case QP_TRANS_RTR2RTS
:
2960 case QP_TRANS_RTS2RTS
:
2961 case QP_TRANS_SQD2SQD
:
2962 case QP_TRANS_SQD2RTS
:
2963 if (slave
!= mlx4_master_func_num(dev
))
2964 if (optpar
& MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH
) {
2965 port
= (qp_ctx
->pri_path
.sched_queue
>> 6 & 1) + 1;
2966 if (dev
->caps
.port_mask
[port
] != MLX4_PORT_TYPE_IB
)
2967 num_gids
= mlx4_get_slave_num_gids(dev
, slave
, port
);
2970 if (qp_ctx
->pri_path
.mgid_index
>= num_gids
)
2973 if (optpar
& MLX4_QP_OPTPAR_ALT_ADDR_PATH
) {
2974 port
= (qp_ctx
->alt_path
.sched_queue
>> 6 & 1) + 1;
2975 if (dev
->caps
.port_mask
[port
] != MLX4_PORT_TYPE_IB
)
2976 num_gids
= mlx4_get_slave_num_gids(dev
, slave
, port
);
2979 if (qp_ctx
->alt_path
.mgid_index
>= num_gids
)
2988 case MLX4_QP_ST_MLX
:
2989 qpn
= vhcr
->in_modifier
& 0x7fffff;
2990 port
= (qp_ctx
->pri_path
.sched_queue
>> 6 & 1) + 1;
2991 if (transition
== QP_TRANS_INIT2RTR
&&
2992 slave
!= mlx4_master_func_num(dev
) &&
2993 mlx4_is_qp_reserved(dev
, qpn
) &&
2994 !mlx4_vf_smi_enabled(dev
, slave
, port
)) {
2995 /* only enabled VFs may create MLX proxy QPs */
2996 mlx4_err(dev
, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
2997 __func__
, slave
, port
);
3009 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev
*dev
, int slave
,
3010 struct mlx4_vhcr
*vhcr
,
3011 struct mlx4_cmd_mailbox
*inbox
,
3012 struct mlx4_cmd_mailbox
*outbox
,
3013 struct mlx4_cmd_info
*cmd
)
3015 struct mlx4_mtt mtt
;
3016 __be64
*page_list
= inbox
->buf
;
3017 u64
*pg_list
= (u64
*)page_list
;
3019 struct res_mtt
*rmtt
= NULL
;
3020 int start
= be64_to_cpu(page_list
[0]);
3021 int npages
= vhcr
->in_modifier
;
3024 err
= get_containing_mtt(dev
, slave
, start
, npages
, &rmtt
);
3028 /* Call the SW implementation of write_mtt:
3029 * - Prepare a dummy mtt struct
3030 * - Translate inbox contents to simple addresses in host endianess */
3031 mtt
.offset
= 0; /* TBD this is broken but I don't handle it since
3032 we don't really use it */
3035 for (i
= 0; i
< npages
; ++i
)
3036 pg_list
[i
+ 2] = (be64_to_cpu(page_list
[i
+ 2]) & ~1ULL);
3038 err
= __mlx4_write_mtt(dev
, &mtt
, be64_to_cpu(page_list
[0]), npages
,
3039 ((u64
*)page_list
+ 2));
3042 put_res(dev
, slave
, rmtt
->com
.res_id
, RES_MTT
);
3047 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev
*dev
, int slave
,
3048 struct mlx4_vhcr
*vhcr
,
3049 struct mlx4_cmd_mailbox
*inbox
,
3050 struct mlx4_cmd_mailbox
*outbox
,
3051 struct mlx4_cmd_info
*cmd
)
3053 int eqn
= vhcr
->in_modifier
;
3054 int res_id
= eqn
| (slave
<< 8);
3058 err
= eq_res_start_move_to(dev
, slave
, res_id
, RES_EQ_RESERVED
, &eq
);
3062 err
= get_res(dev
, slave
, eq
->mtt
->com
.res_id
, RES_MTT
, NULL
);
3066 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
3070 atomic_dec(&eq
->mtt
->ref_count
);
3071 put_res(dev
, slave
, eq
->mtt
->com
.res_id
, RES_MTT
);
3072 res_end_move(dev
, slave
, RES_EQ
, res_id
);
3073 rem_res_range(dev
, slave
, res_id
, 1, RES_EQ
, 0);
3078 put_res(dev
, slave
, eq
->mtt
->com
.res_id
, RES_MTT
);
3080 res_abort_move(dev
, slave
, RES_EQ
, res_id
);
3085 int mlx4_GEN_EQE(struct mlx4_dev
*dev
, int slave
, struct mlx4_eqe
*eqe
)
3087 struct mlx4_priv
*priv
= mlx4_priv(dev
);
3088 struct mlx4_slave_event_eq_info
*event_eq
;
3089 struct mlx4_cmd_mailbox
*mailbox
;
3090 u32 in_modifier
= 0;
3095 if (!priv
->mfunc
.master
.slave_state
)
3098 /* check for slave valid, slave not PF, and slave active */
3099 if (slave
< 0 || slave
> dev
->persist
->num_vfs
||
3100 slave
== dev
->caps
.function
||
3101 !priv
->mfunc
.master
.slave_state
[slave
].active
)
3104 event_eq
= &priv
->mfunc
.master
.slave_state
[slave
].event_eq
[eqe
->type
];
3106 /* Create the event only if the slave is registered */
3107 if (event_eq
->eqn
< 0)
3110 mutex_lock(&priv
->mfunc
.master
.gen_eqe_mutex
[slave
]);
3111 res_id
= (slave
<< 8) | event_eq
->eqn
;
3112 err
= get_res(dev
, slave
, res_id
, RES_EQ
, &req
);
3116 if (req
->com
.from_state
!= RES_EQ_HW
) {
3121 mailbox
= mlx4_alloc_cmd_mailbox(dev
);
3122 if (IS_ERR(mailbox
)) {
3123 err
= PTR_ERR(mailbox
);
3127 if (eqe
->type
== MLX4_EVENT_TYPE_CMD
) {
3129 eqe
->event
.cmd
.token
= cpu_to_be16(event_eq
->token
);
3132 memcpy(mailbox
->buf
, (u8
*) eqe
, 28);
3134 in_modifier
= (slave
& 0xff) | ((event_eq
->eqn
& 0xff) << 16);
3136 err
= mlx4_cmd(dev
, mailbox
->dma
, in_modifier
, 0,
3137 MLX4_CMD_GEN_EQE
, MLX4_CMD_TIME_CLASS_B
,
3140 put_res(dev
, slave
, res_id
, RES_EQ
);
3141 mutex_unlock(&priv
->mfunc
.master
.gen_eqe_mutex
[slave
]);
3142 mlx4_free_cmd_mailbox(dev
, mailbox
);
3146 put_res(dev
, slave
, res_id
, RES_EQ
);
3149 mutex_unlock(&priv
->mfunc
.master
.gen_eqe_mutex
[slave
]);
3153 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev
*dev
, int slave
,
3154 struct mlx4_vhcr
*vhcr
,
3155 struct mlx4_cmd_mailbox
*inbox
,
3156 struct mlx4_cmd_mailbox
*outbox
,
3157 struct mlx4_cmd_info
*cmd
)
3159 int eqn
= vhcr
->in_modifier
;
3160 int res_id
= eqn
| (slave
<< 8);
3164 err
= get_res(dev
, slave
, res_id
, RES_EQ
, &eq
);
3168 if (eq
->com
.from_state
!= RES_EQ_HW
) {
3173 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
3176 put_res(dev
, slave
, res_id
, RES_EQ
);
3180 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev
*dev
, int slave
,
3181 struct mlx4_vhcr
*vhcr
,
3182 struct mlx4_cmd_mailbox
*inbox
,
3183 struct mlx4_cmd_mailbox
*outbox
,
3184 struct mlx4_cmd_info
*cmd
)
3187 int cqn
= vhcr
->in_modifier
;
3188 struct mlx4_cq_context
*cqc
= inbox
->buf
;
3189 int mtt_base
= cq_get_mtt_addr(cqc
) / dev
->caps
.mtt_entry_sz
;
3191 struct res_mtt
*mtt
;
3193 err
= cq_res_start_move_to(dev
, slave
, cqn
, RES_CQ_HW
, &cq
);
3196 err
= get_res(dev
, slave
, mtt_base
, RES_MTT
, &mtt
);
3199 err
= check_mtt_range(dev
, slave
, mtt_base
, cq_get_mtt_size(cqc
), mtt
);
3202 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
3205 atomic_inc(&mtt
->ref_count
);
3207 put_res(dev
, slave
, mtt
->com
.res_id
, RES_MTT
);
3208 res_end_move(dev
, slave
, RES_CQ
, cqn
);
3212 put_res(dev
, slave
, mtt
->com
.res_id
, RES_MTT
);
3214 res_abort_move(dev
, slave
, RES_CQ
, cqn
);
3218 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev
*dev
, int slave
,
3219 struct mlx4_vhcr
*vhcr
,
3220 struct mlx4_cmd_mailbox
*inbox
,
3221 struct mlx4_cmd_mailbox
*outbox
,
3222 struct mlx4_cmd_info
*cmd
)
3225 int cqn
= vhcr
->in_modifier
;
3228 err
= cq_res_start_move_to(dev
, slave
, cqn
, RES_CQ_ALLOCATED
, &cq
);
3231 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
3234 atomic_dec(&cq
->mtt
->ref_count
);
3235 res_end_move(dev
, slave
, RES_CQ
, cqn
);
3239 res_abort_move(dev
, slave
, RES_CQ
, cqn
);
3243 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev
*dev
, int slave
,
3244 struct mlx4_vhcr
*vhcr
,
3245 struct mlx4_cmd_mailbox
*inbox
,
3246 struct mlx4_cmd_mailbox
*outbox
,
3247 struct mlx4_cmd_info
*cmd
)
3249 int cqn
= vhcr
->in_modifier
;
3253 err
= get_res(dev
, slave
, cqn
, RES_CQ
, &cq
);
3257 if (cq
->com
.from_state
!= RES_CQ_HW
)
3260 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
3262 put_res(dev
, slave
, cqn
, RES_CQ
);
3267 static int handle_resize(struct mlx4_dev
*dev
, int slave
,
3268 struct mlx4_vhcr
*vhcr
,
3269 struct mlx4_cmd_mailbox
*inbox
,
3270 struct mlx4_cmd_mailbox
*outbox
,
3271 struct mlx4_cmd_info
*cmd
,
3275 struct res_mtt
*orig_mtt
;
3276 struct res_mtt
*mtt
;
3277 struct mlx4_cq_context
*cqc
= inbox
->buf
;
3278 int mtt_base
= cq_get_mtt_addr(cqc
) / dev
->caps
.mtt_entry_sz
;
3280 err
= get_res(dev
, slave
, cq
->mtt
->com
.res_id
, RES_MTT
, &orig_mtt
);
3284 if (orig_mtt
!= cq
->mtt
) {
3289 err
= get_res(dev
, slave
, mtt_base
, RES_MTT
, &mtt
);
3293 err
= check_mtt_range(dev
, slave
, mtt_base
, cq_get_mtt_size(cqc
), mtt
);
3296 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
3299 atomic_dec(&orig_mtt
->ref_count
);
3300 put_res(dev
, slave
, orig_mtt
->com
.res_id
, RES_MTT
);
3301 atomic_inc(&mtt
->ref_count
);
3303 put_res(dev
, slave
, mtt
->com
.res_id
, RES_MTT
);
3307 put_res(dev
, slave
, mtt
->com
.res_id
, RES_MTT
);
3309 put_res(dev
, slave
, orig_mtt
->com
.res_id
, RES_MTT
);
3315 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev
*dev
, int slave
,
3316 struct mlx4_vhcr
*vhcr
,
3317 struct mlx4_cmd_mailbox
*inbox
,
3318 struct mlx4_cmd_mailbox
*outbox
,
3319 struct mlx4_cmd_info
*cmd
)
3321 int cqn
= vhcr
->in_modifier
;
3325 err
= get_res(dev
, slave
, cqn
, RES_CQ
, &cq
);
3329 if (cq
->com
.from_state
!= RES_CQ_HW
)
3332 if (vhcr
->op_modifier
== 0) {
3333 err
= handle_resize(dev
, slave
, vhcr
, inbox
, outbox
, cmd
, cq
);
3337 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
3339 put_res(dev
, slave
, cqn
, RES_CQ
);
3344 static int srq_get_mtt_size(struct mlx4_srq_context
*srqc
)
3346 int log_srq_size
= (be32_to_cpu(srqc
->state_logsize_srqn
) >> 24) & 0xf;
3347 int log_rq_stride
= srqc
->logstride
& 7;
3348 int page_shift
= (srqc
->log_page_size
& 0x3f) + 12;
3350 if (log_srq_size
+ log_rq_stride
+ 4 < page_shift
)
3353 return 1 << (log_srq_size
+ log_rq_stride
+ 4 - page_shift
);
3356 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev
*dev
, int slave
,
3357 struct mlx4_vhcr
*vhcr
,
3358 struct mlx4_cmd_mailbox
*inbox
,
3359 struct mlx4_cmd_mailbox
*outbox
,
3360 struct mlx4_cmd_info
*cmd
)
3363 int srqn
= vhcr
->in_modifier
;
3364 struct res_mtt
*mtt
;
3365 struct res_srq
*srq
;
3366 struct mlx4_srq_context
*srqc
= inbox
->buf
;
3367 int mtt_base
= srq_get_mtt_addr(srqc
) / dev
->caps
.mtt_entry_sz
;
3369 if (srqn
!= (be32_to_cpu(srqc
->state_logsize_srqn
) & 0xffffff))
3372 err
= srq_res_start_move_to(dev
, slave
, srqn
, RES_SRQ_HW
, &srq
);
3375 err
= get_res(dev
, slave
, mtt_base
, RES_MTT
, &mtt
);
3378 err
= check_mtt_range(dev
, slave
, mtt_base
, srq_get_mtt_size(srqc
),
3383 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
3387 atomic_inc(&mtt
->ref_count
);
3389 put_res(dev
, slave
, mtt
->com
.res_id
, RES_MTT
);
3390 res_end_move(dev
, slave
, RES_SRQ
, srqn
);
3394 put_res(dev
, slave
, mtt
->com
.res_id
, RES_MTT
);
3396 res_abort_move(dev
, slave
, RES_SRQ
, srqn
);
3401 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev
*dev
, int slave
,
3402 struct mlx4_vhcr
*vhcr
,
3403 struct mlx4_cmd_mailbox
*inbox
,
3404 struct mlx4_cmd_mailbox
*outbox
,
3405 struct mlx4_cmd_info
*cmd
)
3408 int srqn
= vhcr
->in_modifier
;
3409 struct res_srq
*srq
;
3411 err
= srq_res_start_move_to(dev
, slave
, srqn
, RES_SRQ_ALLOCATED
, &srq
);
3414 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
3417 atomic_dec(&srq
->mtt
->ref_count
);
3419 atomic_dec(&srq
->cq
->ref_count
);
3420 res_end_move(dev
, slave
, RES_SRQ
, srqn
);
3425 res_abort_move(dev
, slave
, RES_SRQ
, srqn
);
3430 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev
*dev
, int slave
,
3431 struct mlx4_vhcr
*vhcr
,
3432 struct mlx4_cmd_mailbox
*inbox
,
3433 struct mlx4_cmd_mailbox
*outbox
,
3434 struct mlx4_cmd_info
*cmd
)
3437 int srqn
= vhcr
->in_modifier
;
3438 struct res_srq
*srq
;
3440 err
= get_res(dev
, slave
, srqn
, RES_SRQ
, &srq
);
3443 if (srq
->com
.from_state
!= RES_SRQ_HW
) {
3447 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
3449 put_res(dev
, slave
, srqn
, RES_SRQ
);
3453 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev
*dev
, int slave
,
3454 struct mlx4_vhcr
*vhcr
,
3455 struct mlx4_cmd_mailbox
*inbox
,
3456 struct mlx4_cmd_mailbox
*outbox
,
3457 struct mlx4_cmd_info
*cmd
)
3460 int srqn
= vhcr
->in_modifier
;
3461 struct res_srq
*srq
;
3463 err
= get_res(dev
, slave
, srqn
, RES_SRQ
, &srq
);
3467 if (srq
->com
.from_state
!= RES_SRQ_HW
) {
3472 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
3474 put_res(dev
, slave
, srqn
, RES_SRQ
);
3478 int mlx4_GEN_QP_wrapper(struct mlx4_dev
*dev
, int slave
,
3479 struct mlx4_vhcr
*vhcr
,
3480 struct mlx4_cmd_mailbox
*inbox
,
3481 struct mlx4_cmd_mailbox
*outbox
,
3482 struct mlx4_cmd_info
*cmd
)
3485 int qpn
= vhcr
->in_modifier
& 0x7fffff;
3488 err
= get_res(dev
, slave
, qpn
, RES_QP
, &qp
);
3491 if (qp
->com
.from_state
!= RES_QP_HW
) {
3496 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
3498 put_res(dev
, slave
, qpn
, RES_QP
);
3502 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev
*dev
, int slave
,
3503 struct mlx4_vhcr
*vhcr
,
3504 struct mlx4_cmd_mailbox
*inbox
,
3505 struct mlx4_cmd_mailbox
*outbox
,
3506 struct mlx4_cmd_info
*cmd
)
3508 struct mlx4_qp_context
*context
= inbox
->buf
+ 8;
3509 adjust_proxy_tun_qkey(dev
, vhcr
, context
);
3510 update_pkey_index(dev
, slave
, inbox
);
3511 return mlx4_GEN_QP_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
3514 static int adjust_qp_sched_queue(struct mlx4_dev
*dev
, int slave
,
3515 struct mlx4_qp_context
*qpc
,
3516 struct mlx4_cmd_mailbox
*inbox
)
3518 enum mlx4_qp_optpar optpar
= be32_to_cpu(*(__be32
*)inbox
->buf
);
3520 int port
= mlx4_slave_convert_port(
3521 dev
, slave
, (qpc
->pri_path
.sched_queue
>> 6 & 1) + 1) - 1;
3526 pri_sched_queue
= (qpc
->pri_path
.sched_queue
& ~(1 << 6)) |
3529 if (optpar
& MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH
||
3530 mlx4_is_eth(dev
, port
+ 1)) {
3531 qpc
->pri_path
.sched_queue
= pri_sched_queue
;
3534 if (optpar
& MLX4_QP_OPTPAR_ALT_ADDR_PATH
) {
3535 port
= mlx4_slave_convert_port(
3536 dev
, slave
, (qpc
->alt_path
.sched_queue
>> 6 & 1)
3540 qpc
->alt_path
.sched_queue
=
3541 (qpc
->alt_path
.sched_queue
& ~(1 << 6)) |
3547 static int roce_verify_mac(struct mlx4_dev
*dev
, int slave
,
3548 struct mlx4_qp_context
*qpc
,
3549 struct mlx4_cmd_mailbox
*inbox
)
3553 u32 ts
= (be32_to_cpu(qpc
->flags
) >> 16) & 0xff;
3554 u8 sched
= *(u8
*)(inbox
->buf
+ 64);
3557 port
= (sched
>> 6 & 1) + 1;
3558 if (mlx4_is_eth(dev
, port
) && (ts
!= MLX4_QP_ST_MLX
)) {
3559 smac_ix
= qpc
->pri_path
.grh_mylmc
& 0x7f;
3560 if (mac_find_smac_ix_in_slave(dev
, slave
, port
, smac_ix
, &mac
))
3566 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev
*dev
, int slave
,
3567 struct mlx4_vhcr
*vhcr
,
3568 struct mlx4_cmd_mailbox
*inbox
,
3569 struct mlx4_cmd_mailbox
*outbox
,
3570 struct mlx4_cmd_info
*cmd
)
3573 struct mlx4_qp_context
*qpc
= inbox
->buf
+ 8;
3574 int qpn
= vhcr
->in_modifier
& 0x7fffff;
3576 u8 orig_sched_queue
;
3577 __be32 orig_param3
= qpc
->param3
;
3578 u8 orig_vlan_control
= qpc
->pri_path
.vlan_control
;
3579 u8 orig_fvl_rx
= qpc
->pri_path
.fvl_rx
;
3580 u8 orig_pri_path_fl
= qpc
->pri_path
.fl
;
3581 u8 orig_vlan_index
= qpc
->pri_path
.vlan_index
;
3582 u8 orig_feup
= qpc
->pri_path
.feup
;
3584 err
= adjust_qp_sched_queue(dev
, slave
, qpc
, inbox
);
3587 err
= verify_qp_parameters(dev
, vhcr
, inbox
, QP_TRANS_INIT2RTR
, slave
);
3591 if (roce_verify_mac(dev
, slave
, qpc
, inbox
))
3594 update_pkey_index(dev
, slave
, inbox
);
3595 update_gid(dev
, inbox
, (u8
)slave
);
3596 adjust_proxy_tun_qkey(dev
, vhcr
, qpc
);
3597 orig_sched_queue
= qpc
->pri_path
.sched_queue
;
3598 err
= update_vport_qp_param(dev
, inbox
, slave
, qpn
);
3602 err
= get_res(dev
, slave
, qpn
, RES_QP
, &qp
);
3605 if (qp
->com
.from_state
!= RES_QP_HW
) {
3610 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
3612 /* if no error, save sched queue value passed in by VF. This is
3613 * essentially the QOS value provided by the VF. This will be useful
3614 * if we allow dynamic changes from VST back to VGT
3617 qp
->sched_queue
= orig_sched_queue
;
3618 qp
->param3
= orig_param3
;
3619 qp
->vlan_control
= orig_vlan_control
;
3620 qp
->fvl_rx
= orig_fvl_rx
;
3621 qp
->pri_path_fl
= orig_pri_path_fl
;
3622 qp
->vlan_index
= orig_vlan_index
;
3623 qp
->feup
= orig_feup
;
3625 put_res(dev
, slave
, qpn
, RES_QP
);
3629 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev
*dev
, int slave
,
3630 struct mlx4_vhcr
*vhcr
,
3631 struct mlx4_cmd_mailbox
*inbox
,
3632 struct mlx4_cmd_mailbox
*outbox
,
3633 struct mlx4_cmd_info
*cmd
)
3636 struct mlx4_qp_context
*context
= inbox
->buf
+ 8;
3638 err
= adjust_qp_sched_queue(dev
, slave
, context
, inbox
);
3641 err
= verify_qp_parameters(dev
, vhcr
, inbox
, QP_TRANS_RTR2RTS
, slave
);
3645 update_pkey_index(dev
, slave
, inbox
);
3646 update_gid(dev
, inbox
, (u8
)slave
);
3647 adjust_proxy_tun_qkey(dev
, vhcr
, context
);
3648 return mlx4_GEN_QP_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
3651 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev
*dev
, int slave
,
3652 struct mlx4_vhcr
*vhcr
,
3653 struct mlx4_cmd_mailbox
*inbox
,
3654 struct mlx4_cmd_mailbox
*outbox
,
3655 struct mlx4_cmd_info
*cmd
)
3658 struct mlx4_qp_context
*context
= inbox
->buf
+ 8;
3660 err
= adjust_qp_sched_queue(dev
, slave
, context
, inbox
);
3663 err
= verify_qp_parameters(dev
, vhcr
, inbox
, QP_TRANS_RTS2RTS
, slave
);
3667 update_pkey_index(dev
, slave
, inbox
);
3668 update_gid(dev
, inbox
, (u8
)slave
);
3669 adjust_proxy_tun_qkey(dev
, vhcr
, context
);
3670 return mlx4_GEN_QP_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
3674 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev
*dev
, int slave
,
3675 struct mlx4_vhcr
*vhcr
,
3676 struct mlx4_cmd_mailbox
*inbox
,
3677 struct mlx4_cmd_mailbox
*outbox
,
3678 struct mlx4_cmd_info
*cmd
)
3680 struct mlx4_qp_context
*context
= inbox
->buf
+ 8;
3681 int err
= adjust_qp_sched_queue(dev
, slave
, context
, inbox
);
3684 adjust_proxy_tun_qkey(dev
, vhcr
, context
);
3685 return mlx4_GEN_QP_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
3688 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev
*dev
, int slave
,
3689 struct mlx4_vhcr
*vhcr
,
3690 struct mlx4_cmd_mailbox
*inbox
,
3691 struct mlx4_cmd_mailbox
*outbox
,
3692 struct mlx4_cmd_info
*cmd
)
3695 struct mlx4_qp_context
*context
= inbox
->buf
+ 8;
3697 err
= adjust_qp_sched_queue(dev
, slave
, context
, inbox
);
3700 err
= verify_qp_parameters(dev
, vhcr
, inbox
, QP_TRANS_SQD2SQD
, slave
);
3704 adjust_proxy_tun_qkey(dev
, vhcr
, context
);
3705 update_gid(dev
, inbox
, (u8
)slave
);
3706 update_pkey_index(dev
, slave
, inbox
);
3707 return mlx4_GEN_QP_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
3710 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev
*dev
, int slave
,
3711 struct mlx4_vhcr
*vhcr
,
3712 struct mlx4_cmd_mailbox
*inbox
,
3713 struct mlx4_cmd_mailbox
*outbox
,
3714 struct mlx4_cmd_info
*cmd
)
3717 struct mlx4_qp_context
*context
= inbox
->buf
+ 8;
3719 err
= adjust_qp_sched_queue(dev
, slave
, context
, inbox
);
3722 err
= verify_qp_parameters(dev
, vhcr
, inbox
, QP_TRANS_SQD2RTS
, slave
);
3726 adjust_proxy_tun_qkey(dev
, vhcr
, context
);
3727 update_gid(dev
, inbox
, (u8
)slave
);
3728 update_pkey_index(dev
, slave
, inbox
);
3729 return mlx4_GEN_QP_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
3732 int mlx4_2RST_QP_wrapper(struct mlx4_dev
*dev
, int slave
,
3733 struct mlx4_vhcr
*vhcr
,
3734 struct mlx4_cmd_mailbox
*inbox
,
3735 struct mlx4_cmd_mailbox
*outbox
,
3736 struct mlx4_cmd_info
*cmd
)
3739 int qpn
= vhcr
->in_modifier
& 0x7fffff;
3742 err
= qp_res_start_move_to(dev
, slave
, qpn
, RES_QP_MAPPED
, &qp
, 0);
3745 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
3749 atomic_dec(&qp
->mtt
->ref_count
);
3750 atomic_dec(&qp
->rcq
->ref_count
);
3751 atomic_dec(&qp
->scq
->ref_count
);
3753 atomic_dec(&qp
->srq
->ref_count
);
3754 res_end_move(dev
, slave
, RES_QP
, qpn
);
3758 res_abort_move(dev
, slave
, RES_QP
, qpn
);
3763 static struct res_gid
*find_gid(struct mlx4_dev
*dev
, int slave
,
3764 struct res_qp
*rqp
, u8
*gid
)
3766 struct res_gid
*res
;
3768 list_for_each_entry(res
, &rqp
->mcg_list
, list
) {
3769 if (!memcmp(res
->gid
, gid
, 16))
3775 static int add_mcg_res(struct mlx4_dev
*dev
, int slave
, struct res_qp
*rqp
,
3776 u8
*gid
, enum mlx4_protocol prot
,
3777 enum mlx4_steer_type steer
, u64 reg_id
)
3779 struct res_gid
*res
;
3782 res
= kzalloc(sizeof *res
, GFP_KERNEL
);
3786 spin_lock_irq(&rqp
->mcg_spl
);
3787 if (find_gid(dev
, slave
, rqp
, gid
)) {
3791 memcpy(res
->gid
, gid
, 16);
3794 res
->reg_id
= reg_id
;
3795 list_add_tail(&res
->list
, &rqp
->mcg_list
);
3798 spin_unlock_irq(&rqp
->mcg_spl
);
3803 static int rem_mcg_res(struct mlx4_dev
*dev
, int slave
, struct res_qp
*rqp
,
3804 u8
*gid
, enum mlx4_protocol prot
,
3805 enum mlx4_steer_type steer
, u64
*reg_id
)
3807 struct res_gid
*res
;
3810 spin_lock_irq(&rqp
->mcg_spl
);
3811 res
= find_gid(dev
, slave
, rqp
, gid
);
3812 if (!res
|| res
->prot
!= prot
|| res
->steer
!= steer
)
3815 *reg_id
= res
->reg_id
;
3816 list_del(&res
->list
);
3820 spin_unlock_irq(&rqp
->mcg_spl
);
3825 static int qp_attach(struct mlx4_dev
*dev
, int slave
, struct mlx4_qp
*qp
,
3826 u8 gid
[16], int block_loopback
, enum mlx4_protocol prot
,
3827 enum mlx4_steer_type type
, u64
*reg_id
)
3829 switch (dev
->caps
.steering_mode
) {
3830 case MLX4_STEERING_MODE_DEVICE_MANAGED
: {
3831 int port
= mlx4_slave_convert_port(dev
, slave
, gid
[5]);
3834 return mlx4_trans_to_dmfs_attach(dev
, qp
, gid
, port
,
3835 block_loopback
, prot
,
3838 case MLX4_STEERING_MODE_B0
:
3839 if (prot
== MLX4_PROT_ETH
) {
3840 int port
= mlx4_slave_convert_port(dev
, slave
, gid
[5]);
3845 return mlx4_qp_attach_common(dev
, qp
, gid
,
3846 block_loopback
, prot
, type
);
3852 static int qp_detach(struct mlx4_dev
*dev
, struct mlx4_qp
*qp
,
3853 u8 gid
[16], enum mlx4_protocol prot
,
3854 enum mlx4_steer_type type
, u64 reg_id
)
3856 switch (dev
->caps
.steering_mode
) {
3857 case MLX4_STEERING_MODE_DEVICE_MANAGED
:
3858 return mlx4_flow_detach(dev
, reg_id
);
3859 case MLX4_STEERING_MODE_B0
:
3860 return mlx4_qp_detach_common(dev
, qp
, gid
, prot
, type
);
3866 static int mlx4_adjust_port(struct mlx4_dev
*dev
, int slave
,
3867 u8
*gid
, enum mlx4_protocol prot
)
3871 if (prot
!= MLX4_PROT_ETH
)
3874 if (dev
->caps
.steering_mode
== MLX4_STEERING_MODE_B0
||
3875 dev
->caps
.steering_mode
== MLX4_STEERING_MODE_DEVICE_MANAGED
) {
3876 real_port
= mlx4_slave_convert_port(dev
, slave
, gid
[5]);
3885 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev
*dev
, int slave
,
3886 struct mlx4_vhcr
*vhcr
,
3887 struct mlx4_cmd_mailbox
*inbox
,
3888 struct mlx4_cmd_mailbox
*outbox
,
3889 struct mlx4_cmd_info
*cmd
)
3891 struct mlx4_qp qp
; /* dummy for calling attach/detach */
3892 u8
*gid
= inbox
->buf
;
3893 enum mlx4_protocol prot
= (vhcr
->in_modifier
>> 28) & 0x7;
3898 int attach
= vhcr
->op_modifier
;
3899 int block_loopback
= vhcr
->in_modifier
>> 31;
3900 u8 steer_type_mask
= 2;
3901 enum mlx4_steer_type type
= (gid
[7] & steer_type_mask
) >> 1;
3903 qpn
= vhcr
->in_modifier
& 0xffffff;
3904 err
= get_res(dev
, slave
, qpn
, RES_QP
, &rqp
);
3910 err
= qp_attach(dev
, slave
, &qp
, gid
, block_loopback
, prot
,
3913 pr_err("Fail to attach rule to qp 0x%x\n", qpn
);
3916 err
= add_mcg_res(dev
, slave
, rqp
, gid
, prot
, type
, reg_id
);
3920 err
= mlx4_adjust_port(dev
, slave
, gid
, prot
);
3924 err
= rem_mcg_res(dev
, slave
, rqp
, gid
, prot
, type
, ®_id
);
3928 err
= qp_detach(dev
, &qp
, gid
, prot
, type
, reg_id
);
3930 pr_err("Fail to detach rule from qp 0x%x reg_id = 0x%llx\n",
3933 put_res(dev
, slave
, qpn
, RES_QP
);
3937 qp_detach(dev
, &qp
, gid
, prot
, type
, reg_id
);
3939 put_res(dev
, slave
, qpn
, RES_QP
);
3944 * MAC validation for Flow Steering rules.
3945 * VF can attach rules only with a mac address which is assigned to it.
3947 static int validate_eth_header_mac(int slave
, struct _rule_hw
*eth_header
,
3948 struct list_head
*rlist
)
3950 struct mac_res
*res
, *tmp
;
3953 /* make sure it isn't multicast or broadcast mac*/
3954 if (!is_multicast_ether_addr(eth_header
->eth
.dst_mac
) &&
3955 !is_broadcast_ether_addr(eth_header
->eth
.dst_mac
)) {
3956 list_for_each_entry_safe(res
, tmp
, rlist
, list
) {
3957 be_mac
= cpu_to_be64(res
->mac
<< 16);
3958 if (ether_addr_equal((u8
*)&be_mac
, eth_header
->eth
.dst_mac
))
3961 pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
3962 eth_header
->eth
.dst_mac
, slave
);
3969 * In case of missing eth header, append eth header with a MAC address
3970 * assigned to the VF.
3972 static int add_eth_header(struct mlx4_dev
*dev
, int slave
,
3973 struct mlx4_cmd_mailbox
*inbox
,
3974 struct list_head
*rlist
, int header_id
)
3976 struct mac_res
*res
, *tmp
;
3978 struct mlx4_net_trans_rule_hw_ctrl
*ctrl
;
3979 struct mlx4_net_trans_rule_hw_eth
*eth_header
;
3980 struct mlx4_net_trans_rule_hw_ipv4
*ip_header
;
3981 struct mlx4_net_trans_rule_hw_tcp_udp
*l4_header
;
3983 __be64 mac_msk
= cpu_to_be64(MLX4_MAC_MASK
<< 16);
3985 ctrl
= (struct mlx4_net_trans_rule_hw_ctrl
*)inbox
->buf
;
3987 eth_header
= (struct mlx4_net_trans_rule_hw_eth
*)(ctrl
+ 1);
3989 /* Clear a space in the inbox for eth header */
3990 switch (header_id
) {
3991 case MLX4_NET_TRANS_RULE_ID_IPV4
:
3993 (struct mlx4_net_trans_rule_hw_ipv4
*)(eth_header
+ 1);
3994 memmove(ip_header
, eth_header
,
3995 sizeof(*ip_header
) + sizeof(*l4_header
));
3997 case MLX4_NET_TRANS_RULE_ID_TCP
:
3998 case MLX4_NET_TRANS_RULE_ID_UDP
:
3999 l4_header
= (struct mlx4_net_trans_rule_hw_tcp_udp
*)
4001 memmove(l4_header
, eth_header
, sizeof(*l4_header
));
4006 list_for_each_entry_safe(res
, tmp
, rlist
, list
) {
4007 if (port
== res
->port
) {
4008 be_mac
= cpu_to_be64(res
->mac
<< 16);
4013 pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n",
4018 memset(eth_header
, 0, sizeof(*eth_header
));
4019 eth_header
->size
= sizeof(*eth_header
) >> 2;
4020 eth_header
->id
= cpu_to_be16(__sw_id_hw
[MLX4_NET_TRANS_RULE_ID_ETH
]);
4021 memcpy(eth_header
->dst_mac
, &be_mac
, ETH_ALEN
);
4022 memcpy(eth_header
->dst_mac_msk
, &mac_msk
, ETH_ALEN
);
4028 #define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)
4029 int mlx4_UPDATE_QP_wrapper(struct mlx4_dev
*dev
, int slave
,
4030 struct mlx4_vhcr
*vhcr
,
4031 struct mlx4_cmd_mailbox
*inbox
,
4032 struct mlx4_cmd_mailbox
*outbox
,
4033 struct mlx4_cmd_info
*cmd_info
)
4036 u32 qpn
= vhcr
->in_modifier
& 0xffffff;
4040 u64 pri_addr_path_mask
;
4041 struct mlx4_update_qp_context
*cmd
;
4044 cmd
= (struct mlx4_update_qp_context
*)inbox
->buf
;
4046 pri_addr_path_mask
= be64_to_cpu(cmd
->primary_addr_path_mask
);
4047 if (cmd
->qp_mask
|| cmd
->secondary_addr_path_mask
||
4048 (pri_addr_path_mask
& ~MLX4_UPD_QP_PATH_MASK_SUPPORTED
))
4051 /* Just change the smac for the QP */
4052 err
= get_res(dev
, slave
, qpn
, RES_QP
, &rqp
);
4054 mlx4_err(dev
, "Updating qpn 0x%x for slave %d rejected\n", qpn
, slave
);
4058 port
= (rqp
->sched_queue
>> 6 & 1) + 1;
4060 if (pri_addr_path_mask
& (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX
)) {
4061 smac_index
= cmd
->qp_context
.pri_path
.grh_mylmc
;
4062 err
= mac_find_smac_ix_in_slave(dev
, slave
, port
,
4066 mlx4_err(dev
, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
4072 err
= mlx4_cmd(dev
, inbox
->dma
,
4073 vhcr
->in_modifier
, 0,
4074 MLX4_CMD_UPDATE_QP
, MLX4_CMD_TIME_CLASS_A
,
4077 mlx4_err(dev
, "Failed to update qpn on qpn 0x%x, command failed\n", qpn
);
4082 put_res(dev
, slave
, qpn
, RES_QP
);
4086 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev
*dev
, int slave
,
4087 struct mlx4_vhcr
*vhcr
,
4088 struct mlx4_cmd_mailbox
*inbox
,
4089 struct mlx4_cmd_mailbox
*outbox
,
4090 struct mlx4_cmd_info
*cmd
)
4093 struct mlx4_priv
*priv
= mlx4_priv(dev
);
4094 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
4095 struct list_head
*rlist
= &tracker
->slave_list
[slave
].res_list
[RES_MAC
];
4099 struct mlx4_net_trans_rule_hw_ctrl
*ctrl
;
4100 struct _rule_hw
*rule_header
;
4103 if (dev
->caps
.steering_mode
!=
4104 MLX4_STEERING_MODE_DEVICE_MANAGED
)
4107 ctrl
= (struct mlx4_net_trans_rule_hw_ctrl
*)inbox
->buf
;
4108 ctrl
->port
= mlx4_slave_convert_port(dev
, slave
, ctrl
->port
);
4109 if (ctrl
->port
<= 0)
4111 qpn
= be32_to_cpu(ctrl
->qpn
) & 0xffffff;
4112 err
= get_res(dev
, slave
, qpn
, RES_QP
, &rqp
);
4114 pr_err("Steering rule with qpn 0x%x rejected\n", qpn
);
4117 rule_header
= (struct _rule_hw
*)(ctrl
+ 1);
4118 header_id
= map_hw_to_sw_id(be16_to_cpu(rule_header
->id
));
4120 switch (header_id
) {
4121 case MLX4_NET_TRANS_RULE_ID_ETH
:
4122 if (validate_eth_header_mac(slave
, rule_header
, rlist
)) {
4127 case MLX4_NET_TRANS_RULE_ID_IB
:
4129 case MLX4_NET_TRANS_RULE_ID_IPV4
:
4130 case MLX4_NET_TRANS_RULE_ID_TCP
:
4131 case MLX4_NET_TRANS_RULE_ID_UDP
:
4132 pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
4133 if (add_eth_header(dev
, slave
, inbox
, rlist
, header_id
)) {
4137 vhcr
->in_modifier
+=
4138 sizeof(struct mlx4_net_trans_rule_hw_eth
) >> 2;
4141 pr_err("Corrupted mailbox\n");
4146 err
= mlx4_cmd_imm(dev
, inbox
->dma
, &vhcr
->out_param
,
4147 vhcr
->in_modifier
, 0,
4148 MLX4_QP_FLOW_STEERING_ATTACH
, MLX4_CMD_TIME_CLASS_A
,
4153 err
= add_res_range(dev
, slave
, vhcr
->out_param
, 1, RES_FS_RULE
, qpn
);
4155 mlx4_err(dev
, "Fail to add flow steering resources\n");
4157 mlx4_cmd(dev
, vhcr
->out_param
, 0, 0,
4158 MLX4_QP_FLOW_STEERING_DETACH
, MLX4_CMD_TIME_CLASS_A
,
4162 atomic_inc(&rqp
->ref_count
);
4164 put_res(dev
, slave
, qpn
, RES_QP
);
4168 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev
*dev
, int slave
,
4169 struct mlx4_vhcr
*vhcr
,
4170 struct mlx4_cmd_mailbox
*inbox
,
4171 struct mlx4_cmd_mailbox
*outbox
,
4172 struct mlx4_cmd_info
*cmd
)
4176 struct res_fs_rule
*rrule
;
4178 if (dev
->caps
.steering_mode
!=
4179 MLX4_STEERING_MODE_DEVICE_MANAGED
)
4182 err
= get_res(dev
, slave
, vhcr
->in_param
, RES_FS_RULE
, &rrule
);
4185 /* Release the rule form busy state before removal */
4186 put_res(dev
, slave
, vhcr
->in_param
, RES_FS_RULE
);
4187 err
= get_res(dev
, slave
, rrule
->qpn
, RES_QP
, &rqp
);
4191 err
= rem_res_range(dev
, slave
, vhcr
->in_param
, 1, RES_FS_RULE
, 0);
4193 mlx4_err(dev
, "Fail to remove flow steering resources\n");
4197 err
= mlx4_cmd(dev
, vhcr
->in_param
, 0, 0,
4198 MLX4_QP_FLOW_STEERING_DETACH
, MLX4_CMD_TIME_CLASS_A
,
4201 atomic_dec(&rqp
->ref_count
);
4203 put_res(dev
, slave
, rrule
->qpn
, RES_QP
);
4208 BUSY_MAX_RETRIES
= 10
4211 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev
*dev
, int slave
,
4212 struct mlx4_vhcr
*vhcr
,
4213 struct mlx4_cmd_mailbox
*inbox
,
4214 struct mlx4_cmd_mailbox
*outbox
,
4215 struct mlx4_cmd_info
*cmd
)
4218 int index
= vhcr
->in_modifier
& 0xffff;
4220 err
= get_res(dev
, slave
, index
, RES_COUNTER
, NULL
);
4224 err
= mlx4_DMA_wrapper(dev
, slave
, vhcr
, inbox
, outbox
, cmd
);
4225 put_res(dev
, slave
, index
, RES_COUNTER
);
4229 static void detach_qp(struct mlx4_dev
*dev
, int slave
, struct res_qp
*rqp
)
4231 struct res_gid
*rgid
;
4232 struct res_gid
*tmp
;
4233 struct mlx4_qp qp
; /* dummy for calling attach/detach */
4235 list_for_each_entry_safe(rgid
, tmp
, &rqp
->mcg_list
, list
) {
4236 switch (dev
->caps
.steering_mode
) {
4237 case MLX4_STEERING_MODE_DEVICE_MANAGED
:
4238 mlx4_flow_detach(dev
, rgid
->reg_id
);
4240 case MLX4_STEERING_MODE_B0
:
4241 qp
.qpn
= rqp
->local_qpn
;
4242 (void) mlx4_qp_detach_common(dev
, &qp
, rgid
->gid
,
4243 rgid
->prot
, rgid
->steer
);
4246 list_del(&rgid
->list
);
4251 static int _move_all_busy(struct mlx4_dev
*dev
, int slave
,
4252 enum mlx4_resource type
, int print
)
4254 struct mlx4_priv
*priv
= mlx4_priv(dev
);
4255 struct mlx4_resource_tracker
*tracker
=
4256 &priv
->mfunc
.master
.res_tracker
;
4257 struct list_head
*rlist
= &tracker
->slave_list
[slave
].res_list
[type
];
4258 struct res_common
*r
;
4259 struct res_common
*tmp
;
4263 spin_lock_irq(mlx4_tlock(dev
));
4264 list_for_each_entry_safe(r
, tmp
, rlist
, list
) {
4265 if (r
->owner
== slave
) {
4267 if (r
->state
== RES_ANY_BUSY
) {
4270 "%s id 0x%llx is busy\n",
4275 r
->from_state
= r
->state
;
4276 r
->state
= RES_ANY_BUSY
;
4282 spin_unlock_irq(mlx4_tlock(dev
));
4287 static int move_all_busy(struct mlx4_dev
*dev
, int slave
,
4288 enum mlx4_resource type
)
4290 unsigned long begin
;
4295 busy
= _move_all_busy(dev
, slave
, type
, 0);
4296 if (time_after(jiffies
, begin
+ 5 * HZ
))
4303 busy
= _move_all_busy(dev
, slave
, type
, 1);
4307 static void rem_slave_qps(struct mlx4_dev
*dev
, int slave
)
4309 struct mlx4_priv
*priv
= mlx4_priv(dev
);
4310 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
4311 struct list_head
*qp_list
=
4312 &tracker
->slave_list
[slave
].res_list
[RES_QP
];
4320 err
= move_all_busy(dev
, slave
, RES_QP
);
4322 mlx4_warn(dev
, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4325 spin_lock_irq(mlx4_tlock(dev
));
4326 list_for_each_entry_safe(qp
, tmp
, qp_list
, com
.list
) {
4327 spin_unlock_irq(mlx4_tlock(dev
));
4328 if (qp
->com
.owner
== slave
) {
4329 qpn
= qp
->com
.res_id
;
4330 detach_qp(dev
, slave
, qp
);
4331 state
= qp
->com
.from_state
;
4332 while (state
!= 0) {
4334 case RES_QP_RESERVED
:
4335 spin_lock_irq(mlx4_tlock(dev
));
4336 rb_erase(&qp
->com
.node
,
4337 &tracker
->res_tree
[RES_QP
]);
4338 list_del(&qp
->com
.list
);
4339 spin_unlock_irq(mlx4_tlock(dev
));
4340 if (!valid_reserved(dev
, slave
, qpn
)) {
4341 __mlx4_qp_release_range(dev
, qpn
, 1);
4342 mlx4_release_resource(dev
, slave
,
4349 if (!valid_reserved(dev
, slave
, qpn
))
4350 __mlx4_qp_free_icm(dev
, qpn
);
4351 state
= RES_QP_RESERVED
;
4355 err
= mlx4_cmd(dev
, in_param
,
4358 MLX4_CMD_TIME_CLASS_A
,
4361 mlx4_dbg(dev
, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4362 slave
, qp
->local_qpn
);
4363 atomic_dec(&qp
->rcq
->ref_count
);
4364 atomic_dec(&qp
->scq
->ref_count
);
4365 atomic_dec(&qp
->mtt
->ref_count
);
4367 atomic_dec(&qp
->srq
->ref_count
);
4368 state
= RES_QP_MAPPED
;
4375 spin_lock_irq(mlx4_tlock(dev
));
4377 spin_unlock_irq(mlx4_tlock(dev
));
4380 static void rem_slave_srqs(struct mlx4_dev
*dev
, int slave
)
4382 struct mlx4_priv
*priv
= mlx4_priv(dev
);
4383 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
4384 struct list_head
*srq_list
=
4385 &tracker
->slave_list
[slave
].res_list
[RES_SRQ
];
4386 struct res_srq
*srq
;
4387 struct res_srq
*tmp
;
4394 err
= move_all_busy(dev
, slave
, RES_SRQ
);
4396 mlx4_warn(dev
, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
4399 spin_lock_irq(mlx4_tlock(dev
));
4400 list_for_each_entry_safe(srq
, tmp
, srq_list
, com
.list
) {
4401 spin_unlock_irq(mlx4_tlock(dev
));
4402 if (srq
->com
.owner
== slave
) {
4403 srqn
= srq
->com
.res_id
;
4404 state
= srq
->com
.from_state
;
4405 while (state
!= 0) {
4407 case RES_SRQ_ALLOCATED
:
4408 __mlx4_srq_free_icm(dev
, srqn
);
4409 spin_lock_irq(mlx4_tlock(dev
));
4410 rb_erase(&srq
->com
.node
,
4411 &tracker
->res_tree
[RES_SRQ
]);
4412 list_del(&srq
->com
.list
);
4413 spin_unlock_irq(mlx4_tlock(dev
));
4414 mlx4_release_resource(dev
, slave
,
4422 err
= mlx4_cmd(dev
, in_param
, srqn
, 1,
4424 MLX4_CMD_TIME_CLASS_A
,
4427 mlx4_dbg(dev
, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
4430 atomic_dec(&srq
->mtt
->ref_count
);
4432 atomic_dec(&srq
->cq
->ref_count
);
4433 state
= RES_SRQ_ALLOCATED
;
4441 spin_lock_irq(mlx4_tlock(dev
));
4443 spin_unlock_irq(mlx4_tlock(dev
));
4446 static void rem_slave_cqs(struct mlx4_dev
*dev
, int slave
)
4448 struct mlx4_priv
*priv
= mlx4_priv(dev
);
4449 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
4450 struct list_head
*cq_list
=
4451 &tracker
->slave_list
[slave
].res_list
[RES_CQ
];
4460 err
= move_all_busy(dev
, slave
, RES_CQ
);
4462 mlx4_warn(dev
, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
4465 spin_lock_irq(mlx4_tlock(dev
));
4466 list_for_each_entry_safe(cq
, tmp
, cq_list
, com
.list
) {
4467 spin_unlock_irq(mlx4_tlock(dev
));
4468 if (cq
->com
.owner
== slave
&& !atomic_read(&cq
->ref_count
)) {
4469 cqn
= cq
->com
.res_id
;
4470 state
= cq
->com
.from_state
;
4471 while (state
!= 0) {
4473 case RES_CQ_ALLOCATED
:
4474 __mlx4_cq_free_icm(dev
, cqn
);
4475 spin_lock_irq(mlx4_tlock(dev
));
4476 rb_erase(&cq
->com
.node
,
4477 &tracker
->res_tree
[RES_CQ
]);
4478 list_del(&cq
->com
.list
);
4479 spin_unlock_irq(mlx4_tlock(dev
));
4480 mlx4_release_resource(dev
, slave
,
4488 err
= mlx4_cmd(dev
, in_param
, cqn
, 1,
4490 MLX4_CMD_TIME_CLASS_A
,
4493 mlx4_dbg(dev
, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
4495 atomic_dec(&cq
->mtt
->ref_count
);
4496 state
= RES_CQ_ALLOCATED
;
4504 spin_lock_irq(mlx4_tlock(dev
));
4506 spin_unlock_irq(mlx4_tlock(dev
));
4509 static void rem_slave_mrs(struct mlx4_dev
*dev
, int slave
)
4511 struct mlx4_priv
*priv
= mlx4_priv(dev
);
4512 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
4513 struct list_head
*mpt_list
=
4514 &tracker
->slave_list
[slave
].res_list
[RES_MPT
];
4515 struct res_mpt
*mpt
;
4516 struct res_mpt
*tmp
;
4523 err
= move_all_busy(dev
, slave
, RES_MPT
);
4525 mlx4_warn(dev
, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
4528 spin_lock_irq(mlx4_tlock(dev
));
4529 list_for_each_entry_safe(mpt
, tmp
, mpt_list
, com
.list
) {
4530 spin_unlock_irq(mlx4_tlock(dev
));
4531 if (mpt
->com
.owner
== slave
) {
4532 mptn
= mpt
->com
.res_id
;
4533 state
= mpt
->com
.from_state
;
4534 while (state
!= 0) {
4536 case RES_MPT_RESERVED
:
4537 __mlx4_mpt_release(dev
, mpt
->key
);
4538 spin_lock_irq(mlx4_tlock(dev
));
4539 rb_erase(&mpt
->com
.node
,
4540 &tracker
->res_tree
[RES_MPT
]);
4541 list_del(&mpt
->com
.list
);
4542 spin_unlock_irq(mlx4_tlock(dev
));
4543 mlx4_release_resource(dev
, slave
,
4549 case RES_MPT_MAPPED
:
4550 __mlx4_mpt_free_icm(dev
, mpt
->key
);
4551 state
= RES_MPT_RESERVED
;
4556 err
= mlx4_cmd(dev
, in_param
, mptn
, 0,
4558 MLX4_CMD_TIME_CLASS_A
,
4561 mlx4_dbg(dev
, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
4564 atomic_dec(&mpt
->mtt
->ref_count
);
4565 state
= RES_MPT_MAPPED
;
4572 spin_lock_irq(mlx4_tlock(dev
));
4574 spin_unlock_irq(mlx4_tlock(dev
));
4577 static void rem_slave_mtts(struct mlx4_dev
*dev
, int slave
)
4579 struct mlx4_priv
*priv
= mlx4_priv(dev
);
4580 struct mlx4_resource_tracker
*tracker
=
4581 &priv
->mfunc
.master
.res_tracker
;
4582 struct list_head
*mtt_list
=
4583 &tracker
->slave_list
[slave
].res_list
[RES_MTT
];
4584 struct res_mtt
*mtt
;
4585 struct res_mtt
*tmp
;
4591 err
= move_all_busy(dev
, slave
, RES_MTT
);
4593 mlx4_warn(dev
, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
4596 spin_lock_irq(mlx4_tlock(dev
));
4597 list_for_each_entry_safe(mtt
, tmp
, mtt_list
, com
.list
) {
4598 spin_unlock_irq(mlx4_tlock(dev
));
4599 if (mtt
->com
.owner
== slave
) {
4600 base
= mtt
->com
.res_id
;
4601 state
= mtt
->com
.from_state
;
4602 while (state
!= 0) {
4604 case RES_MTT_ALLOCATED
:
4605 __mlx4_free_mtt_range(dev
, base
,
4607 spin_lock_irq(mlx4_tlock(dev
));
4608 rb_erase(&mtt
->com
.node
,
4609 &tracker
->res_tree
[RES_MTT
]);
4610 list_del(&mtt
->com
.list
);
4611 spin_unlock_irq(mlx4_tlock(dev
));
4612 mlx4_release_resource(dev
, slave
, RES_MTT
,
4613 1 << mtt
->order
, 0);
4623 spin_lock_irq(mlx4_tlock(dev
));
4625 spin_unlock_irq(mlx4_tlock(dev
));
4628 static void rem_slave_fs_rule(struct mlx4_dev
*dev
, int slave
)
4630 struct mlx4_priv
*priv
= mlx4_priv(dev
);
4631 struct mlx4_resource_tracker
*tracker
=
4632 &priv
->mfunc
.master
.res_tracker
;
4633 struct list_head
*fs_rule_list
=
4634 &tracker
->slave_list
[slave
].res_list
[RES_FS_RULE
];
4635 struct res_fs_rule
*fs_rule
;
4636 struct res_fs_rule
*tmp
;
4641 err
= move_all_busy(dev
, slave
, RES_FS_RULE
);
4643 mlx4_warn(dev
, "rem_slave_fs_rule: Could not move all mtts to busy for slave %d\n",
4646 spin_lock_irq(mlx4_tlock(dev
));
4647 list_for_each_entry_safe(fs_rule
, tmp
, fs_rule_list
, com
.list
) {
4648 spin_unlock_irq(mlx4_tlock(dev
));
4649 if (fs_rule
->com
.owner
== slave
) {
4650 base
= fs_rule
->com
.res_id
;
4651 state
= fs_rule
->com
.from_state
;
4652 while (state
!= 0) {
4654 case RES_FS_RULE_ALLOCATED
:
4656 err
= mlx4_cmd(dev
, base
, 0, 0,
4657 MLX4_QP_FLOW_STEERING_DETACH
,
4658 MLX4_CMD_TIME_CLASS_A
,
4661 spin_lock_irq(mlx4_tlock(dev
));
4662 rb_erase(&fs_rule
->com
.node
,
4663 &tracker
->res_tree
[RES_FS_RULE
]);
4664 list_del(&fs_rule
->com
.list
);
4665 spin_unlock_irq(mlx4_tlock(dev
));
4675 spin_lock_irq(mlx4_tlock(dev
));
4677 spin_unlock_irq(mlx4_tlock(dev
));
4680 static void rem_slave_eqs(struct mlx4_dev
*dev
, int slave
)
4682 struct mlx4_priv
*priv
= mlx4_priv(dev
);
4683 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
4684 struct list_head
*eq_list
=
4685 &tracker
->slave_list
[slave
].res_list
[RES_EQ
];
4693 err
= move_all_busy(dev
, slave
, RES_EQ
);
4695 mlx4_warn(dev
, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
4698 spin_lock_irq(mlx4_tlock(dev
));
4699 list_for_each_entry_safe(eq
, tmp
, eq_list
, com
.list
) {
4700 spin_unlock_irq(mlx4_tlock(dev
));
4701 if (eq
->com
.owner
== slave
) {
4702 eqn
= eq
->com
.res_id
;
4703 state
= eq
->com
.from_state
;
4704 while (state
!= 0) {
4706 case RES_EQ_RESERVED
:
4707 spin_lock_irq(mlx4_tlock(dev
));
4708 rb_erase(&eq
->com
.node
,
4709 &tracker
->res_tree
[RES_EQ
]);
4710 list_del(&eq
->com
.list
);
4711 spin_unlock_irq(mlx4_tlock(dev
));
4717 err
= mlx4_cmd(dev
, slave
, eqn
& 0xff,
4718 1, MLX4_CMD_HW2SW_EQ
,
4719 MLX4_CMD_TIME_CLASS_A
,
4722 mlx4_dbg(dev
, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
4724 atomic_dec(&eq
->mtt
->ref_count
);
4725 state
= RES_EQ_RESERVED
;
4733 spin_lock_irq(mlx4_tlock(dev
));
4735 spin_unlock_irq(mlx4_tlock(dev
));
4738 static void rem_slave_counters(struct mlx4_dev
*dev
, int slave
)
4740 struct mlx4_priv
*priv
= mlx4_priv(dev
);
4741 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
4742 struct list_head
*counter_list
=
4743 &tracker
->slave_list
[slave
].res_list
[RES_COUNTER
];
4744 struct res_counter
*counter
;
4745 struct res_counter
*tmp
;
4749 err
= move_all_busy(dev
, slave
, RES_COUNTER
);
4751 mlx4_warn(dev
, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
4754 spin_lock_irq(mlx4_tlock(dev
));
4755 list_for_each_entry_safe(counter
, tmp
, counter_list
, com
.list
) {
4756 if (counter
->com
.owner
== slave
) {
4757 index
= counter
->com
.res_id
;
4758 rb_erase(&counter
->com
.node
,
4759 &tracker
->res_tree
[RES_COUNTER
]);
4760 list_del(&counter
->com
.list
);
4762 __mlx4_counter_free(dev
, index
);
4763 mlx4_release_resource(dev
, slave
, RES_COUNTER
, 1, 0);
4766 spin_unlock_irq(mlx4_tlock(dev
));
4769 static void rem_slave_xrcdns(struct mlx4_dev
*dev
, int slave
)
4771 struct mlx4_priv
*priv
= mlx4_priv(dev
);
4772 struct mlx4_resource_tracker
*tracker
= &priv
->mfunc
.master
.res_tracker
;
4773 struct list_head
*xrcdn_list
=
4774 &tracker
->slave_list
[slave
].res_list
[RES_XRCD
];
4775 struct res_xrcdn
*xrcd
;
4776 struct res_xrcdn
*tmp
;
4780 err
= move_all_busy(dev
, slave
, RES_XRCD
);
4782 mlx4_warn(dev
, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
4785 spin_lock_irq(mlx4_tlock(dev
));
4786 list_for_each_entry_safe(xrcd
, tmp
, xrcdn_list
, com
.list
) {
4787 if (xrcd
->com
.owner
== slave
) {
4788 xrcdn
= xrcd
->com
.res_id
;
4789 rb_erase(&xrcd
->com
.node
, &tracker
->res_tree
[RES_XRCD
]);
4790 list_del(&xrcd
->com
.list
);
4792 __mlx4_xrcd_free(dev
, xrcdn
);
4795 spin_unlock_irq(mlx4_tlock(dev
));
4798 void mlx4_delete_all_resources_for_slave(struct mlx4_dev
*dev
, int slave
)
4800 struct mlx4_priv
*priv
= mlx4_priv(dev
);
4801 mlx4_reset_roce_gids(dev
, slave
);
4802 mutex_lock(&priv
->mfunc
.master
.res_tracker
.slave_list
[slave
].mutex
);
4803 rem_slave_vlans(dev
, slave
);
4804 rem_slave_macs(dev
, slave
);
4805 rem_slave_fs_rule(dev
, slave
);
4806 rem_slave_qps(dev
, slave
);
4807 rem_slave_srqs(dev
, slave
);
4808 rem_slave_cqs(dev
, slave
);
4809 rem_slave_mrs(dev
, slave
);
4810 rem_slave_eqs(dev
, slave
);
4811 rem_slave_mtts(dev
, slave
);
4812 rem_slave_counters(dev
, slave
);
4813 rem_slave_xrcdns(dev
, slave
);
4814 mutex_unlock(&priv
->mfunc
.master
.res_tracker
.slave_list
[slave
].mutex
);
4817 void mlx4_vf_immed_vlan_work_handler(struct work_struct
*_work
)
4819 struct mlx4_vf_immed_vlan_work
*work
=
4820 container_of(_work
, struct mlx4_vf_immed_vlan_work
, work
);
4821 struct mlx4_cmd_mailbox
*mailbox
;
4822 struct mlx4_update_qp_context
*upd_context
;
4823 struct mlx4_dev
*dev
= &work
->priv
->dev
;
4824 struct mlx4_resource_tracker
*tracker
=
4825 &work
->priv
->mfunc
.master
.res_tracker
;
4826 struct list_head
*qp_list
=
4827 &tracker
->slave_list
[work
->slave
].res_list
[RES_QP
];
4830 u64 qp_path_mask_vlan_ctrl
=
4831 ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED
) |
4832 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P
) |
4833 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED
) |
4834 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED
) |
4835 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P
) |
4836 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED
));
4838 u64 qp_path_mask
= ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX
) |
4839 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL
) |
4840 (1ULL << MLX4_UPD_QP_PATH_MASK_CV
) |
4841 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN
) |
4842 (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP
) |
4843 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX
) |
4844 (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE
));
4847 int port
, errors
= 0;
4850 if (mlx4_is_slave(dev
)) {
4851 mlx4_warn(dev
, "Trying to update-qp in slave %d\n",
4856 mailbox
= mlx4_alloc_cmd_mailbox(dev
);
4857 if (IS_ERR(mailbox
))
4859 if (work
->flags
& MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE
) /* block all */
4860 vlan_control
= MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED
|
4861 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED
|
4862 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED
|
4863 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED
|
4864 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED
|
4865 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED
;
4866 else if (!work
->vlan_id
)
4867 vlan_control
= MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED
|
4868 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED
;
4870 vlan_control
= MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED
|
4871 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED
|
4872 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED
;
4874 upd_context
= mailbox
->buf
;
4875 upd_context
->qp_mask
= cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD
);
4877 spin_lock_irq(mlx4_tlock(dev
));
4878 list_for_each_entry_safe(qp
, tmp
, qp_list
, com
.list
) {
4879 spin_unlock_irq(mlx4_tlock(dev
));
4880 if (qp
->com
.owner
== work
->slave
) {
4881 if (qp
->com
.from_state
!= RES_QP_HW
||
4882 !qp
->sched_queue
|| /* no INIT2RTR trans yet */
4883 mlx4_is_qp_reserved(dev
, qp
->local_qpn
) ||
4884 qp
->qpc_flags
& (1 << MLX4_RSS_QPC_FLAG_OFFSET
)) {
4885 spin_lock_irq(mlx4_tlock(dev
));
4888 port
= (qp
->sched_queue
>> 6 & 1) + 1;
4889 if (port
!= work
->port
) {
4890 spin_lock_irq(mlx4_tlock(dev
));
4893 if (MLX4_QP_ST_RC
== ((qp
->qpc_flags
>> 16) & 0xff))
4894 upd_context
->primary_addr_path_mask
= cpu_to_be64(qp_path_mask
);
4896 upd_context
->primary_addr_path_mask
=
4897 cpu_to_be64(qp_path_mask
| qp_path_mask_vlan_ctrl
);
4898 if (work
->vlan_id
== MLX4_VGT
) {
4899 upd_context
->qp_context
.param3
= qp
->param3
;
4900 upd_context
->qp_context
.pri_path
.vlan_control
= qp
->vlan_control
;
4901 upd_context
->qp_context
.pri_path
.fvl_rx
= qp
->fvl_rx
;
4902 upd_context
->qp_context
.pri_path
.vlan_index
= qp
->vlan_index
;
4903 upd_context
->qp_context
.pri_path
.fl
= qp
->pri_path_fl
;
4904 upd_context
->qp_context
.pri_path
.feup
= qp
->feup
;
4905 upd_context
->qp_context
.pri_path
.sched_queue
=
4908 upd_context
->qp_context
.param3
= qp
->param3
& ~cpu_to_be32(MLX4_STRIP_VLAN
);
4909 upd_context
->qp_context
.pri_path
.vlan_control
= vlan_control
;
4910 upd_context
->qp_context
.pri_path
.vlan_index
= work
->vlan_ix
;
4911 upd_context
->qp_context
.pri_path
.fvl_rx
=
4912 qp
->fvl_rx
| MLX4_FVL_RX_FORCE_ETH_VLAN
;
4913 upd_context
->qp_context
.pri_path
.fl
=
4914 qp
->pri_path_fl
| MLX4_FL_CV
| MLX4_FL_ETH_HIDE_CQE_VLAN
;
4915 upd_context
->qp_context
.pri_path
.feup
=
4916 qp
->feup
| MLX4_FEUP_FORCE_ETH_UP
| MLX4_FVL_FORCE_ETH_VLAN
;
4917 upd_context
->qp_context
.pri_path
.sched_queue
=
4918 qp
->sched_queue
& 0xC7;
4919 upd_context
->qp_context
.pri_path
.sched_queue
|=
4920 ((work
->qos
& 0x7) << 3);
4923 err
= mlx4_cmd(dev
, mailbox
->dma
,
4924 qp
->local_qpn
& 0xffffff,
4925 0, MLX4_CMD_UPDATE_QP
,
4926 MLX4_CMD_TIME_CLASS_C
, MLX4_CMD_NATIVE
);
4928 mlx4_info(dev
, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
4929 work
->slave
, port
, qp
->local_qpn
, err
);
4933 spin_lock_irq(mlx4_tlock(dev
));
4935 spin_unlock_irq(mlx4_tlock(dev
));
4936 mlx4_free_cmd_mailbox(dev
, mailbox
);
4939 mlx4_err(dev
, "%d UPDATE_QP failures for slave %d, port %d\n",
4940 errors
, work
->slave
, work
->port
);
4942 /* unregister previous vlan_id if needed and we had no errors
4943 * while updating the QPs
4945 if (work
->flags
& MLX4_VF_IMMED_VLAN_FLAG_VLAN
&& !errors
&&
4946 NO_INDX
!= work
->orig_vlan_ix
)
4947 __mlx4_unregister_vlan(&work
->priv
->dev
, work
->port
,
4948 work
->orig_vlan_id
);