net/mlx4_core: Fix quota handling in the QUERY_FUNC_CAP wrapper
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
4 * All rights reserved.
5 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
35
36#include <linux/sched.h>
37#include <linux/pci.h>
38#include <linux/errno.h>
39#include <linux/kernel.h>
40#include <linux/io.h>
41#include <linux/slab.h>
42#include <linux/mlx4/cmd.h>
43#include <linux/mlx4/qp.h>
44#include <linux/if_ether.h>
45#include <linux/etherdevice.h>
46
47#include "mlx4.h"
48#include "fw.h"
49
50#define MLX4_MAC_VALID (1ull << 63)
51
52struct mac_res {
53 struct list_head list;
54 u64 mac;
55 u8 port;
56};
57
58struct vlan_res {
59 struct list_head list;
60 u16 vlan;
61 int ref_count;
62 int vlan_index;
63 u8 port;
64};
65
66struct res_common {
67 struct list_head list;
68 struct rb_node node;
69 u64 res_id;
70 int owner;
71 int state;
72 int from_state;
73 int to_state;
74 int removing;
75};
76
77enum {
78 RES_ANY_BUSY = 1
79};
80
81struct res_gid {
82 struct list_head list;
83 u8 gid[16];
84 enum mlx4_protocol prot;
85 enum mlx4_steer_type steer;
86 u64 reg_id;
87};
88
89enum res_qp_states {
90 RES_QP_BUSY = RES_ANY_BUSY,
91
92 /* QP number was allocated */
93 RES_QP_RESERVED,
94
95 /* ICM memory for QP context was mapped */
96 RES_QP_MAPPED,
97
98 /* QP is in hw ownership */
99 RES_QP_HW
100};
101
102struct res_qp {
103 struct res_common com;
104 struct res_mtt *mtt;
105 struct res_cq *rcq;
106 struct res_cq *scq;
107 struct res_srq *srq;
108 struct list_head mcg_list;
109 spinlock_t mcg_spl;
110 int local_qpn;
111 atomic_t ref_count;
112 u32 qpc_flags;
113 u8 sched_queue;
114};
115
116enum res_mtt_states {
117 RES_MTT_BUSY = RES_ANY_BUSY,
118 RES_MTT_ALLOCATED,
119};
120
121static inline const char *mtt_states_str(enum res_mtt_states state)
122{
123 switch (state) {
124 case RES_MTT_BUSY: return "RES_MTT_BUSY";
125 case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
126 default: return "Unknown";
127 }
128}
129
130struct res_mtt {
131 struct res_common com;
132 int order;
133 atomic_t ref_count;
134};
135
136enum res_mpt_states {
137 RES_MPT_BUSY = RES_ANY_BUSY,
138 RES_MPT_RESERVED,
139 RES_MPT_MAPPED,
140 RES_MPT_HW,
141};
142
143struct res_mpt {
144 struct res_common com;
145 struct res_mtt *mtt;
146 int key;
147};
148
149enum res_eq_states {
150 RES_EQ_BUSY = RES_ANY_BUSY,
151 RES_EQ_RESERVED,
152 RES_EQ_HW,
153};
154
155struct res_eq {
156 struct res_common com;
157 struct res_mtt *mtt;
158};
159
160enum res_cq_states {
161 RES_CQ_BUSY = RES_ANY_BUSY,
162 RES_CQ_ALLOCATED,
163 RES_CQ_HW,
164};
165
166struct res_cq {
167 struct res_common com;
168 struct res_mtt *mtt;
169 atomic_t ref_count;
170};
171
172enum res_srq_states {
173 RES_SRQ_BUSY = RES_ANY_BUSY,
174 RES_SRQ_ALLOCATED,
175 RES_SRQ_HW,
176};
177
178struct res_srq {
179 struct res_common com;
180 struct res_mtt *mtt;
181 struct res_cq *cq;
182 atomic_t ref_count;
183};
184
185enum res_counter_states {
186 RES_COUNTER_BUSY = RES_ANY_BUSY,
187 RES_COUNTER_ALLOCATED,
188};
189
190struct res_counter {
191 struct res_common com;
192 int port;
193};
194
195enum res_xrcdn_states {
196 RES_XRCD_BUSY = RES_ANY_BUSY,
197 RES_XRCD_ALLOCATED,
198};
199
200struct res_xrcdn {
201 struct res_common com;
202 int port;
203};
204
205enum res_fs_rule_states {
206 RES_FS_RULE_BUSY = RES_ANY_BUSY,
207 RES_FS_RULE_ALLOCATED,
208};
209
210struct res_fs_rule {
211 struct res_common com;
212 int qpn;
213};
214
215static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
216{
217 struct rb_node *node = root->rb_node;
218
219 while (node) {
220 struct res_common *res = container_of(node, struct res_common,
221 node);
222
223 if (res_id < res->res_id)
224 node = node->rb_left;
225 else if (res_id > res->res_id)
226 node = node->rb_right;
227 else
228 return res;
229 }
230 return NULL;
231}
232
233static int res_tracker_insert(struct rb_root *root, struct res_common *res)
234{
235 struct rb_node **new = &(root->rb_node), *parent = NULL;
236
237 /* Figure out where to put new node */
238 while (*new) {
239 struct res_common *this = container_of(*new, struct res_common,
240 node);
241
242 parent = *new;
243 if (res->res_id < this->res_id)
244 new = &((*new)->rb_left);
245 else if (res->res_id > this->res_id)
246 new = &((*new)->rb_right);
247 else
248 return -EEXIST;
249 }
250
251 /* Add new node and rebalance tree. */
252 rb_link_node(&res->node, parent, new);
253 rb_insert_color(&res->node, root);
254
255 return 0;
256}
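/*
 * Tracked resources live in one red-black tree per resource type, keyed
 * by res_id.  A simplified usage sketch (mirroring add_res_range() and
 * find_res() further down, shown here only for orientation):
 *
 *	spin_lock_irq(mlx4_tlock(dev));
 *	err = res_tracker_insert(&tracker->res_tree[RES_QP], res);
 *	...
 *	res = res_tracker_lookup(&tracker->res_tree[RES_QP], res_id);
 *	spin_unlock_irq(mlx4_tlock(dev));
 */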
257
258enum qp_transition {
259 QP_TRANS_INIT2RTR,
260 QP_TRANS_RTR2RTS,
261 QP_TRANS_RTS2RTS,
262 QP_TRANS_SQERR2RTS,
263 QP_TRANS_SQD2SQD,
264 QP_TRANS_SQD2RTS
265};
266
267/* For Debug uses */
268static const char *ResourceType(enum mlx4_resource rt)
269{
270 switch (rt) {
271 case RES_QP: return "RES_QP";
272 case RES_CQ: return "RES_CQ";
273 case RES_SRQ: return "RES_SRQ";
274 case RES_MPT: return "RES_MPT";
275 case RES_MTT: return "RES_MTT";
276 case RES_MAC: return "RES_MAC";
277 case RES_VLAN: return "RES_VLAN";
278 case RES_EQ: return "RES_EQ";
279 case RES_COUNTER: return "RES_COUNTER";
280 case RES_FS_RULE: return "RES_FS_RULE";
281 case RES_XRCD: return "RES_XRCD";
282 default: return "Unknown resource type !!!";
283 };
284}
285
286static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
287static inline void initialize_res_quotas(struct mlx4_dev *dev,
288 struct resource_allocator *res_alloc,
289 enum mlx4_resource res_type,
290 int vf, int num_instances)
291{
292 res_alloc->guaranteed[vf] = num_instances / (2 * (dev->num_vfs + 1));
293 res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
294 if (vf == mlx4_master_func_num(dev)) {
295 res_alloc->res_free = num_instances;
296 if (res_type == RES_MTT) {
297 /* reserved mtts will be taken out of the PF allocation */
298 res_alloc->res_free += dev->caps.reserved_mtts;
299 res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
300 res_alloc->quota[vf] += dev->caps.reserved_mtts;
301 }
302 }
303}
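/*
 * Illustrative numbers (not taken from the driver): with dev->num_vfs = 7
 * (eight functions including the PF) and num_instances = 65536 QPs, each
 * function gets guaranteed = 65536 / (2 * 8) = 4096 and
 * quota = 65536 / 2 + 4096 = 36864; half the pool is split evenly as
 * guarantees, and any function may also dip into the shared half, up to
 * its quota.
 */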
304
305void mlx4_init_quotas(struct mlx4_dev *dev)
306{
307 struct mlx4_priv *priv = mlx4_priv(dev);
308 int pf;
309
310 /* quotas for VFs are initialized in mlx4_slave_cap */
311 if (mlx4_is_slave(dev))
312 return;
313
314 if (!mlx4_is_mfunc(dev)) {
315 dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
316 mlx4_num_reserved_sqps(dev);
317 dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
318 dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
319 dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
320 dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
321 return;
322 }
323
324 pf = mlx4_master_func_num(dev);
325 dev->quotas.qp =
326 priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
327 dev->quotas.cq =
328 priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
329 dev->quotas.srq =
330 priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
331 dev->quotas.mtt =
332 priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
333 dev->quotas.mpt =
334 priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
335}
336int mlx4_init_resource_tracker(struct mlx4_dev *dev)
337{
338 struct mlx4_priv *priv = mlx4_priv(dev);
339 int i, j;
340 int t;
341
342 priv->mfunc.master.res_tracker.slave_list =
343 kzalloc(dev->num_slaves * sizeof(struct slave_list),
344 GFP_KERNEL);
345 if (!priv->mfunc.master.res_tracker.slave_list)
346 return -ENOMEM;
347
348 for (i = 0 ; i < dev->num_slaves; i++) {
349 for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
350 INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
351 slave_list[i].res_list[t]);
352 mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
353 }
354
355 mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
356 dev->num_slaves);
357 for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
358 priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
359
360 for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
361 struct resource_allocator *res_alloc =
362 &priv->mfunc.master.res_tracker.res_alloc[i];
363 res_alloc->quota = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
364 res_alloc->guaranteed = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
365 if (i == RES_MAC || i == RES_VLAN)
366 res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
367 (dev->num_vfs + 1) * sizeof(int),
368 GFP_KERNEL);
369 else
370 res_alloc->allocated = kzalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
371
372 if (!res_alloc->quota || !res_alloc->guaranteed ||
373 !res_alloc->allocated)
374 goto no_mem_err;
375
376 for (t = 0; t < dev->num_vfs + 1; t++) {
377 switch (i) {
378 case RES_QP:
379 initialize_res_quotas(dev, res_alloc, RES_QP,
380 t, dev->caps.num_qps -
381 dev->caps.reserved_qps -
382 mlx4_num_reserved_sqps(dev));
383 break;
384 case RES_CQ:
385 initialize_res_quotas(dev, res_alloc, RES_CQ,
386 t, dev->caps.num_cqs -
387 dev->caps.reserved_cqs);
388 break;
389 case RES_SRQ:
390 initialize_res_quotas(dev, res_alloc, RES_SRQ,
391 t, dev->caps.num_srqs -
392 dev->caps.reserved_srqs);
393 break;
394 case RES_MPT:
395 initialize_res_quotas(dev, res_alloc, RES_MPT,
396 t, dev->caps.num_mpts -
397 dev->caps.reserved_mrws);
398 break;
399 case RES_MTT:
400 initialize_res_quotas(dev, res_alloc, RES_MTT,
401 t, dev->caps.num_mtts -
402 dev->caps.reserved_mtts);
403 break;
404 case RES_MAC:
405 if (t == mlx4_master_func_num(dev)) {
406 res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
407 res_alloc->guaranteed[t] = 2;
408 for (j = 0; j < MLX4_MAX_PORTS; j++)
409 res_alloc->res_port_free[j] = MLX4_MAX_MAC_NUM;
410 } else {
411 res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
412 res_alloc->guaranteed[t] = 2;
413 }
414 break;
415 case RES_VLAN:
416 if (t == mlx4_master_func_num(dev)) {
417 res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
418 res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
419 for (j = 0; j < MLX4_MAX_PORTS; j++)
420 res_alloc->res_port_free[j] =
421 res_alloc->quota[t];
422 } else {
423 res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
424 res_alloc->guaranteed[t] = 0;
425 }
426 break;
427 case RES_COUNTER:
428 res_alloc->quota[t] = dev->caps.max_counters;
429 res_alloc->guaranteed[t] = 0;
430 if (t == mlx4_master_func_num(dev))
431 res_alloc->res_free = res_alloc->quota[t];
432 break;
433 default:
434 break;
435 }
436 if (i == RES_MAC || i == RES_VLAN) {
437 for (j = 0; j < MLX4_MAX_PORTS; j++)
438 res_alloc->res_port_rsvd[j] +=
439 res_alloc->guaranteed[t];
440 } else {
441 res_alloc->res_reserved += res_alloc->guaranteed[t];
442 }
443 }
444 }
445 spin_lock_init(&priv->mfunc.master.res_tracker.lock);
446 return 0;
447
448no_mem_err:
449 for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
450 kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
451 priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
452 kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
453 priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
454 kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
455 priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
456 }
457 return -ENOMEM;
458}
459
460void mlx4_free_resource_tracker(struct mlx4_dev *dev,
461 enum mlx4_res_tracker_free_type type)
462{
463 struct mlx4_priv *priv = mlx4_priv(dev);
464 int i;
465
466 if (priv->mfunc.master.res_tracker.slave_list) {
467 if (type != RES_TR_FREE_STRUCTS_ONLY) {
468 for (i = 0; i < dev->num_slaves; i++) {
469 if (type == RES_TR_FREE_ALL ||
470 dev->caps.function != i)
471 mlx4_delete_all_resources_for_slave(dev, i);
472 }
473 /* free master's vlans */
474 i = dev->caps.function;
475 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
476 rem_slave_vlans(dev, i);
477 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
478 }
479
480 if (type != RES_TR_FREE_SLAVES_ONLY) {
481 for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
482 kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
483 priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
484 kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
485 priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
486 kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
487 priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
488 }
489 kfree(priv->mfunc.master.res_tracker.slave_list);
490 priv->mfunc.master.res_tracker.slave_list = NULL;
491 }
492 }
493}
494
495static void update_pkey_index(struct mlx4_dev *dev, int slave,
496 struct mlx4_cmd_mailbox *inbox)
497{
498 u8 sched = *(u8 *)(inbox->buf + 64);
499 u8 orig_index = *(u8 *)(inbox->buf + 35);
500 u8 new_index;
501 struct mlx4_priv *priv = mlx4_priv(dev);
502 int port;
503
504 port = (sched >> 6 & 1) + 1;
505
506 new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
507 *(u8 *)(inbox->buf + 35) = new_index;
508}
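/*
 * update_pkey_index() above rewrites the P_Key index a guest wrote into
 * its QP context: the port comes from bit 6 of the sched_queue byte at
 * mailbox offset 64, and the guest's virtual index at offset 35 is
 * replaced with the physical index from the per-slave virt2phys_pkey
 * table.
 */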
509
510static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
511 u8 slave)
512{
513 struct mlx4_qp_context *qp_ctx = inbox->buf + 8;
514 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf);
515 u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
516
517 if (MLX4_QP_ST_UD == ts)
518 qp_ctx->pri_path.mgid_index = 0x80 | slave;
519
520 if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) {
521 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
522 qp_ctx->pri_path.mgid_index = slave & 0x7F;
523 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
524 qp_ctx->alt_path.mgid_index = slave & 0x7F;
525 }
526}
527
528static int update_vport_qp_param(struct mlx4_dev *dev,
529 struct mlx4_cmd_mailbox *inbox,
530 u8 slave, u32 qpn)
531{
532 struct mlx4_qp_context *qpc = inbox->buf + 8;
533 struct mlx4_vport_oper_state *vp_oper;
534 struct mlx4_priv *priv;
535 u32 qp_type;
536 int port;
537
538 port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
539 priv = mlx4_priv(dev);
540 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
541
542 if (MLX4_VGT != vp_oper->state.default_vlan) {
543 qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
544 if (MLX4_QP_ST_RC == qp_type ||
545 (MLX4_QP_ST_UD == qp_type &&
546 !mlx4_is_qp_reserved(dev, qpn)))
547 return -EINVAL;
548
549 /* the reserved QPs (special, proxy, tunnel)
550 * do not operate over vlans
551 */
552 if (mlx4_is_qp_reserved(dev, qpn))
553 return 0;
554
555 /* force strip vlan by clear vsd */
556 qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
557
558 if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
559 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
560 qpc->pri_path.vlan_control =
561 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
562 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
563 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
564 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
565 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
566 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
567 } else if (0 != vp_oper->state.default_vlan) {
568 qpc->pri_path.vlan_control =
569 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
570 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
571 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
572 } else { /* priority tagged */
573 qpc->pri_path.vlan_control =
574 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
575 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
576 }
577
578 qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
579 qpc->pri_path.vlan_index = vp_oper->vlan_idx;
580 qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
581 qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
582 qpc->pri_path.sched_queue &= 0xC7;
583 qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
584 }
585 if (vp_oper->state.spoofchk) {
586 qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
587 qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
588 }
589 return 0;
590}
591
592static int mpt_mask(struct mlx4_dev *dev)
593{
594 return dev->caps.num_mpts - 1;
595}
596
597static void *find_res(struct mlx4_dev *dev, u64 res_id,
598 enum mlx4_resource type)
599{
600 struct mlx4_priv *priv = mlx4_priv(dev);
601
602 return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
603 res_id);
604}
605
606static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
607 enum mlx4_resource type,
608 void *res)
609{
610 struct res_common *r;
611 int err = 0;
612
613 spin_lock_irq(mlx4_tlock(dev));
614 r = find_res(dev, res_id, type);
615 if (!r) {
616 err = -ENONET;
617 goto exit;
618 }
619
620 if (r->state == RES_ANY_BUSY) {
621 err = -EBUSY;
622 goto exit;
623 }
624
625 if (r->owner != slave) {
626 err = -EPERM;
627 goto exit;
628 }
629
630 r->from_state = r->state;
631 r->state = RES_ANY_BUSY;
632
633 if (res)
634 *((struct res_common **)res) = r;
635
636exit:
637 spin_unlock_irq(mlx4_tlock(dev));
638 return err;
639}
640
641int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
642 enum mlx4_resource type,
643 u64 res_id, int *slave)
644{
645
646 struct res_common *r;
647 int err = -ENOENT;
648 int id = res_id;
649
650 if (type == RES_QP)
651 id &= 0x7fffff;
652 spin_lock(mlx4_tlock(dev));
653
654 r = find_res(dev, id, type);
655 if (r) {
656 *slave = r->owner;
657 err = 0;
658 }
659 spin_unlock(mlx4_tlock(dev));
660
661 return err;
662}
663
664static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
665 enum mlx4_resource type)
666{
667 struct res_common *r;
668
669 spin_lock_irq(mlx4_tlock(dev));
670 r = find_res(dev, res_id, type);
671 if (r)
672 r->state = r->from_state;
673 spin_unlock_irq(mlx4_tlock(dev));
674}
675
676static struct res_common *alloc_qp_tr(int id)
677{
678 struct res_qp *ret;
679
680 ret = kzalloc(sizeof *ret, GFP_KERNEL);
681 if (!ret)
682 return NULL;
683
684 ret->com.res_id = id;
685 ret->com.state = RES_QP_RESERVED;
686 ret->local_qpn = id;
687 INIT_LIST_HEAD(&ret->mcg_list);
688 spin_lock_init(&ret->mcg_spl);
689 atomic_set(&ret->ref_count, 0);
690
691 return &ret->com;
692}
693
694static struct res_common *alloc_mtt_tr(int id, int order)
695{
696 struct res_mtt *ret;
697
698 ret = kzalloc(sizeof *ret, GFP_KERNEL);
699 if (!ret)
700 return NULL;
701
702 ret->com.res_id = id;
703 ret->order = order;
704 ret->com.state = RES_MTT_ALLOCATED;
705 atomic_set(&ret->ref_count, 0);
706
707 return &ret->com;
708}
709
710static struct res_common *alloc_mpt_tr(int id, int key)
711{
712 struct res_mpt *ret;
713
714 ret = kzalloc(sizeof *ret, GFP_KERNEL);
715 if (!ret)
716 return NULL;
717
718 ret->com.res_id = id;
719 ret->com.state = RES_MPT_RESERVED;
720 ret->key = key;
721
722 return &ret->com;
723}
724
725static struct res_common *alloc_eq_tr(int id)
726{
727 struct res_eq *ret;
728
729 ret = kzalloc(sizeof *ret, GFP_KERNEL);
730 if (!ret)
731 return NULL;
732
733 ret->com.res_id = id;
734 ret->com.state = RES_EQ_RESERVED;
735
736 return &ret->com;
737}
738
739static struct res_common *alloc_cq_tr(int id)
740{
741 struct res_cq *ret;
742
743 ret = kzalloc(sizeof *ret, GFP_KERNEL);
744 if (!ret)
745 return NULL;
746
747 ret->com.res_id = id;
748 ret->com.state = RES_CQ_ALLOCATED;
749 atomic_set(&ret->ref_count, 0);
750
751 return &ret->com;
752}
753
754static struct res_common *alloc_srq_tr(int id)
755{
756 struct res_srq *ret;
757
758 ret = kzalloc(sizeof *ret, GFP_KERNEL);
759 if (!ret)
760 return NULL;
761
762 ret->com.res_id = id;
763 ret->com.state = RES_SRQ_ALLOCATED;
764 atomic_set(&ret->ref_count, 0);
765
766 return &ret->com;
767}
768
769static struct res_common *alloc_counter_tr(int id)
770{
771 struct res_counter *ret;
772
773 ret = kzalloc(sizeof *ret, GFP_KERNEL);
774 if (!ret)
775 return NULL;
776
777 ret->com.res_id = id;
778 ret->com.state = RES_COUNTER_ALLOCATED;
779
780 return &ret->com;
781}
782
783static struct res_common *alloc_xrcdn_tr(int id)
784{
785 struct res_xrcdn *ret;
786
787 ret = kzalloc(sizeof *ret, GFP_KERNEL);
788 if (!ret)
789 return NULL;
790
791 ret->com.res_id = id;
792 ret->com.state = RES_XRCD_ALLOCATED;
793
794 return &ret->com;
795}
796
797static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
798{
799 struct res_fs_rule *ret;
800
801 ret = kzalloc(sizeof *ret, GFP_KERNEL);
802 if (!ret)
803 return NULL;
804
805 ret->com.res_id = id;
806 ret->com.state = RES_FS_RULE_ALLOCATED;
807 ret->qpn = qpn;
808 return &ret->com;
809}
810
811static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
812 int extra)
813{
814 struct res_common *ret;
815
816 switch (type) {
817 case RES_QP:
818 ret = alloc_qp_tr(id);
819 break;
820 case RES_MPT:
821 ret = alloc_mpt_tr(id, extra);
822 break;
823 case RES_MTT:
824 ret = alloc_mtt_tr(id, extra);
825 break;
826 case RES_EQ:
827 ret = alloc_eq_tr(id);
828 break;
829 case RES_CQ:
830 ret = alloc_cq_tr(id);
831 break;
832 case RES_SRQ:
833 ret = alloc_srq_tr(id);
834 break;
835 case RES_MAC:
836 printk(KERN_ERR "implementation missing\n");
837 return NULL;
838 case RES_COUNTER:
839 ret = alloc_counter_tr(id);
840 break;
841 case RES_XRCD:
842 ret = alloc_xrcdn_tr(id);
843 break;
844 case RES_FS_RULE:
845 ret = alloc_fs_rule_tr(id, extra);
846 break;
847 default:
848 return NULL;
849 }
850 if (ret)
851 ret->owner = slave;
852
853 return ret;
854}
855
856static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
857 enum mlx4_resource type, int extra)
858{
859 int i;
860 int err;
861 struct mlx4_priv *priv = mlx4_priv(dev);
862 struct res_common **res_arr;
863 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
864 struct rb_root *root = &tracker->res_tree[type];
865
866 res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
867 if (!res_arr)
868 return -ENOMEM;
869
870 for (i = 0; i < count; ++i) {
871 res_arr[i] = alloc_tr(base + i, type, slave, extra);
872 if (!res_arr[i]) {
873 for (--i; i >= 0; --i)
874 kfree(res_arr[i]);
875
876 kfree(res_arr);
877 return -ENOMEM;
878 }
879 }
880
881 spin_lock_irq(mlx4_tlock(dev));
882 for (i = 0; i < count; ++i) {
883 if (find_res(dev, base + i, type)) {
884 err = -EEXIST;
885 goto undo;
886 }
887 err = res_tracker_insert(root, res_arr[i]);
888 if (err)
889 goto undo;
890 list_add_tail(&res_arr[i]->list,
891 &tracker->slave_list[slave].res_list[type]);
892 }
893 spin_unlock_irq(mlx4_tlock(dev));
894 kfree(res_arr);
895
896 return 0;
897
898undo:
899 for (--i; i >= 0; --i)
900 rb_erase(&res_arr[i]->node, root);
901
902 spin_unlock_irq(mlx4_tlock(dev));
903
904 for (i = 0; i < count; ++i)
905 kfree(res_arr[i]);
906
907 kfree(res_arr);
908
909 return err;
910}
911
912static int remove_qp_ok(struct res_qp *res)
913{
914 if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
915 !list_empty(&res->mcg_list)) {
916 pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
917 res->com.state, atomic_read(&res->ref_count));
918 return -EBUSY;
919 } else if (res->com.state != RES_QP_RESERVED) {
920 return -EPERM;
921 }
922
923 return 0;
924}
925
926static int remove_mtt_ok(struct res_mtt *res, int order)
927{
928 if (res->com.state == RES_MTT_BUSY ||
929 atomic_read(&res->ref_count)) {
930 printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
931 __func__, __LINE__,
932 mtt_states_str(res->com.state),
933 atomic_read(&res->ref_count));
934 return -EBUSY;
935 } else if (res->com.state != RES_MTT_ALLOCATED)
936 return -EPERM;
937 else if (res->order != order)
938 return -EINVAL;
939
940 return 0;
941}
942
943static int remove_mpt_ok(struct res_mpt *res)
944{
945 if (res->com.state == RES_MPT_BUSY)
946 return -EBUSY;
947 else if (res->com.state != RES_MPT_RESERVED)
948 return -EPERM;
949
950 return 0;
951}
952
953static int remove_eq_ok(struct res_eq *res)
954{
955 if (res->com.state == RES_EQ_BUSY)
956 return -EBUSY;
957 else if (res->com.state != RES_EQ_RESERVED)
958 return -EPERM;
959
960 return 0;
961}
962
963static int remove_counter_ok(struct res_counter *res)
964{
965 if (res->com.state == RES_COUNTER_BUSY)
966 return -EBUSY;
967 else if (res->com.state != RES_COUNTER_ALLOCATED)
968 return -EPERM;
969
970 return 0;
971}
972
973static int remove_xrcdn_ok(struct res_xrcdn *res)
974{
975 if (res->com.state == RES_XRCD_BUSY)
976 return -EBUSY;
977 else if (res->com.state != RES_XRCD_ALLOCATED)
978 return -EPERM;
979
980 return 0;
981}
982
983static int remove_fs_rule_ok(struct res_fs_rule *res)
984{
985 if (res->com.state == RES_FS_RULE_BUSY)
986 return -EBUSY;
987 else if (res->com.state != RES_FS_RULE_ALLOCATED)
988 return -EPERM;
989
990 return 0;
991}
992
993static int remove_cq_ok(struct res_cq *res)
994{
995 if (res->com.state == RES_CQ_BUSY)
996 return -EBUSY;
997 else if (res->com.state != RES_CQ_ALLOCATED)
998 return -EPERM;
999
1000 return 0;
1001}
1002
1003static int remove_srq_ok(struct res_srq *res)
1004{
1005 if (res->com.state == RES_SRQ_BUSY)
1006 return -EBUSY;
1007 else if (res->com.state != RES_SRQ_ALLOCATED)
1008 return -EPERM;
1009
1010 return 0;
1011}
1012
1013static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
1014{
1015 switch (type) {
1016 case RES_QP:
1017 return remove_qp_ok((struct res_qp *)res);
1018 case RES_CQ:
1019 return remove_cq_ok((struct res_cq *)res);
1020 case RES_SRQ:
1021 return remove_srq_ok((struct res_srq *)res);
1022 case RES_MPT:
1023 return remove_mpt_ok((struct res_mpt *)res);
1024 case RES_MTT:
1025 return remove_mtt_ok((struct res_mtt *)res, extra);
1026 case RES_MAC:
1027 return -ENOSYS;
1028 case RES_EQ:
1029 return remove_eq_ok((struct res_eq *)res);
1030 case RES_COUNTER:
1031 return remove_counter_ok((struct res_counter *)res);
1032 case RES_XRCD:
1033 return remove_xrcdn_ok((struct res_xrcdn *)res);
1034 case RES_FS_RULE:
1035 return remove_fs_rule_ok((struct res_fs_rule *)res);
1036 default:
1037 return -EINVAL;
1038 }
1039}
1040
1041static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1042 enum mlx4_resource type, int extra)
1043{
1044 u64 i;
1045 int err;
1046 struct mlx4_priv *priv = mlx4_priv(dev);
1047 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1048 struct res_common *r;
1049
1050 spin_lock_irq(mlx4_tlock(dev));
1051 for (i = base; i < base + count; ++i) {
1052 r = res_tracker_lookup(&tracker->res_tree[type], i);
1053 if (!r) {
1054 err = -ENOENT;
1055 goto out;
1056 }
1057 if (r->owner != slave) {
1058 err = -EPERM;
1059 goto out;
1060 }
1061 err = remove_ok(r, type, extra);
1062 if (err)
1063 goto out;
1064 }
1065
1066 for (i = base; i < base + count; ++i) {
1067 r = res_tracker_lookup(&tracker->res_tree[type], i);
1068 rb_erase(&r->node, &tracker->res_tree[type]);
1069 list_del(&r->list);
1070 kfree(r);
1071 }
1072 err = 0;
1073
1074out:
1075 spin_unlock_irq(mlx4_tlock(dev));
1076
1077 return err;
1078}
1079
1080static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
1081 enum res_qp_states state, struct res_qp **qp,
1082 int alloc)
1083{
1084 struct mlx4_priv *priv = mlx4_priv(dev);
1085 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1086 struct res_qp *r;
1087 int err = 0;
1088
1089 spin_lock_irq(mlx4_tlock(dev));
1090 r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
1091 if (!r)
1092 err = -ENOENT;
1093 else if (r->com.owner != slave)
1094 err = -EPERM;
1095 else {
1096 switch (state) {
1097 case RES_QP_BUSY:
1098 mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
1099 __func__, r->com.res_id);
1100 err = -EBUSY;
1101 break;
1102
1103 case RES_QP_RESERVED:
1104 if (r->com.state == RES_QP_MAPPED && !alloc)
1105 break;
1106
1107 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
1108 err = -EINVAL;
1109 break;
1110
1111 case RES_QP_MAPPED:
1112 if ((r->com.state == RES_QP_RESERVED && alloc) ||
1113 r->com.state == RES_QP_HW)
1114 break;
1115 else {
1116 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
1117 r->com.res_id);
1118 err = -EINVAL;
1119 }
1120
1121 break;
1122
1123 case RES_QP_HW:
1124 if (r->com.state != RES_QP_MAPPED)
1125 err = -EINVAL;
1126 break;
1127 default:
1128 err = -EINVAL;
1129 }
1130
1131 if (!err) {
1132 r->com.from_state = r->com.state;
1133 r->com.to_state = state;
1134 r->com.state = RES_QP_BUSY;
1135 if (qp)
1136 *qp = r;
1137 }
1138 }
1139
1140 spin_unlock_irq(mlx4_tlock(dev));
1141
1142 return err;
1143}
1144
1145static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1146 enum res_mpt_states state, struct res_mpt **mpt)
1147{
1148 struct mlx4_priv *priv = mlx4_priv(dev);
1149 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1150 struct res_mpt *r;
1151 int err = 0;
1152
1153 spin_lock_irq(mlx4_tlock(dev));
1154 r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
1155 if (!r)
1156 err = -ENOENT;
1157 else if (r->com.owner != slave)
1158 err = -EPERM;
1159 else {
1160 switch (state) {
1161 case RES_MPT_BUSY:
1162 err = -EINVAL;
1163 break;
1164
1165 case RES_MPT_RESERVED:
1166 if (r->com.state != RES_MPT_MAPPED)
1167 err = -EINVAL;
1168 break;
1169
1170 case RES_MPT_MAPPED:
1171 if (r->com.state != RES_MPT_RESERVED &&
1172 r->com.state != RES_MPT_HW)
1173 err = -EINVAL;
1174 break;
1175
1176 case RES_MPT_HW:
1177 if (r->com.state != RES_MPT_MAPPED)
1178 err = -EINVAL;
1179 break;
1180 default:
1181 err = -EINVAL;
1182 }
1183
1184 if (!err) {
1185 r->com.from_state = r->com.state;
1186 r->com.to_state = state;
1187 r->com.state = RES_MPT_BUSY;
1188 if (mpt)
1189 *mpt = r;
1190 }
1191 }
1192
1193 spin_unlock_irq(mlx4_tlock(dev));
1194
1195 return err;
1196}
1197
1198static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1199 enum res_eq_states state, struct res_eq **eq)
1200{
1201 struct mlx4_priv *priv = mlx4_priv(dev);
1202 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1203 struct res_eq *r;
1204 int err = 0;
1205
1206 spin_lock_irq(mlx4_tlock(dev));
1207 r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
1208 if (!r)
1209 err = -ENOENT;
1210 else if (r->com.owner != slave)
1211 err = -EPERM;
1212 else {
1213 switch (state) {
1214 case RES_EQ_BUSY:
1215 err = -EINVAL;
1216 break;
1217
1218 case RES_EQ_RESERVED:
1219 if (r->com.state != RES_EQ_HW)
1220 err = -EINVAL;
1221 break;
1222
1223 case RES_EQ_HW:
1224 if (r->com.state != RES_EQ_RESERVED)
1225 err = -EINVAL;
1226 break;
1227
1228 default:
1229 err = -EINVAL;
1230 }
1231
1232 if (!err) {
1233 r->com.from_state = r->com.state;
1234 r->com.to_state = state;
1235 r->com.state = RES_EQ_BUSY;
1236 if (eq)
1237 *eq = r;
1238 }
1239 }
1240
1241 spin_unlock_irq(mlx4_tlock(dev));
1242
1243 return err;
1244}
1245
1246static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
1247 enum res_cq_states state, struct res_cq **cq)
1248{
1249 struct mlx4_priv *priv = mlx4_priv(dev);
1250 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1251 struct res_cq *r;
1252 int err;
1253
1254 spin_lock_irq(mlx4_tlock(dev));
1255 r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
1256 if (!r)
1257 err = -ENOENT;
1258 else if (r->com.owner != slave)
1259 err = -EPERM;
1260 else {
1261 switch (state) {
1262 case RES_CQ_BUSY:
1263 err = -EBUSY;
1264 break;
1265
1266 case RES_CQ_ALLOCATED:
1267 if (r->com.state != RES_CQ_HW)
1268 err = -EINVAL;
1269 else if (atomic_read(&r->ref_count))
1270 err = -EBUSY;
1271 else
1272 err = 0;
1273 break;
1274
1275 case RES_CQ_HW:
1276 if (r->com.state != RES_CQ_ALLOCATED)
1277 err = -EINVAL;
1278 else
1279 err = 0;
1280 break;
1281
1282 default:
1283 err = -EINVAL;
1284 }
1285
1286 if (!err) {
1287 r->com.from_state = r->com.state;
1288 r->com.to_state = state;
1289 r->com.state = RES_CQ_BUSY;
1290 if (cq)
1291 *cq = r;
1292 }
1293 }
1294
1295 spin_unlock_irq(mlx4_tlock(dev));
1296
1297 return err;
1298}
1299
1300static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1301 enum res_cq_states state, struct res_srq **srq)
1302{
1303 struct mlx4_priv *priv = mlx4_priv(dev);
1304 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1305 struct res_srq *r;
1306 int err = 0;
1307
1308 spin_lock_irq(mlx4_tlock(dev));
1309 r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
1310 if (!r)
1311 err = -ENOENT;
1312 else if (r->com.owner != slave)
1313 err = -EPERM;
1314 else {
1315 switch (state) {
1316 case RES_SRQ_BUSY:
1317 err = -EINVAL;
1318 break;
1319
1320 case RES_SRQ_ALLOCATED:
1321 if (r->com.state != RES_SRQ_HW)
1322 err = -EINVAL;
1323 else if (atomic_read(&r->ref_count))
1324 err = -EBUSY;
1325 break;
1326
1327 case RES_SRQ_HW:
1328 if (r->com.state != RES_SRQ_ALLOCATED)
1329 err = -EINVAL;
1330 break;
1331
1332 default:
1333 err = -EINVAL;
1334 }
1335
1336 if (!err) {
1337 r->com.from_state = r->com.state;
1338 r->com.to_state = state;
1339 r->com.state = RES_SRQ_BUSY;
1340 if (srq)
1341 *srq = r;
1342 }
1343 }
1344
1345 spin_unlock_irq(mlx4_tlock(dev));
1346
1347 return err;
1348}
1349
1350static void res_abort_move(struct mlx4_dev *dev, int slave,
1351 enum mlx4_resource type, int id)
1352{
1353 struct mlx4_priv *priv = mlx4_priv(dev);
1354 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1355 struct res_common *r;
1356
1357 spin_lock_irq(mlx4_tlock(dev));
1358 r = res_tracker_lookup(&tracker->res_tree[type], id);
1359 if (r && (r->owner == slave))
1360 r->state = r->from_state;
1361 spin_unlock_irq(mlx4_tlock(dev));
1362}
1363
1364static void res_end_move(struct mlx4_dev *dev, int slave,
1365 enum mlx4_resource type, int id)
1366{
1367 struct mlx4_priv *priv = mlx4_priv(dev);
1368 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1369 struct res_common *r;
1370
1371 spin_lock_irq(mlx4_tlock(dev));
1372 r = res_tracker_lookup(&tracker->res_tree[type], id);
1373 if (r && (r->owner == slave))
1374 r->state = r->to_state;
1375 spin_unlock_irq(mlx4_tlock(dev));
1376}
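/*
 * State-transition pattern used by the *_res_start_move_to() helpers
 * above: each helper validates the requested transition, records
 * from_state/to_state and parks the resource in its BUSY state; the
 * caller then issues the FW command and finishes with res_end_move()
 * (commit to to_state) or res_abort_move() (roll back to from_state).
 */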
1377
1378static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1379{
1380 return mlx4_is_qp_reserved(dev, qpn) &&
1381 (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
1382}
1383
1384static int fw_reserved(struct mlx4_dev *dev, int qpn)
1385{
1386 return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
1387}
1388
1389static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1390 u64 in_param, u64 *out_param)
1391{
1392 int err;
1393 int count;
1394 int align;
1395 int base;
1396 int qpn;
1397
1398 switch (op) {
1399 case RES_OP_RESERVE:
1400 count = get_param_l(&in_param);
1401 align = get_param_h(&in_param);
1402 err = __mlx4_qp_reserve_range(dev, count, align, &base);
1403 if (err)
1404 return err;
1405
1406 err = add_res_range(dev, slave, base, count, RES_QP, 0);
1407 if (err) {
1408 __mlx4_qp_release_range(dev, base, count);
1409 return err;
1410 }
1411 set_param_l(out_param, base);
1412 break;
1413 case RES_OP_MAP_ICM:
1414 qpn = get_param_l(&in_param) & 0x7fffff;
1415 if (valid_reserved(dev, slave, qpn)) {
1416 err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1417 if (err)
1418 return err;
1419 }
1420
1421 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1422 NULL, 1);
1423 if (err)
1424 return err;
1425
1426 if (!fw_reserved(dev, qpn)) {
1427 err = __mlx4_qp_alloc_icm(dev, qpn);
1428 if (err) {
1429 res_abort_move(dev, slave, RES_QP, qpn);
1430 return err;
1431 }
1432 }
1433
1434 res_end_move(dev, slave, RES_QP, qpn);
1435 break;
1436
1437 default:
1438 err = -EINVAL;
1439 break;
1440 }
1441 return err;
1442}
1443
1444static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1445 u64 in_param, u64 *out_param)
1446{
1447 int err = -EINVAL;
1448 int base;
1449 int order;
1450
1451 if (op != RES_OP_RESERVE_AND_MAP)
1452 return err;
1453
1454 order = get_param_l(&in_param);
1455 base = __mlx4_alloc_mtt_range(dev, order);
1456 if (base == -1)
1457 return -ENOMEM;
1458
1459 err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1460 if (err)
1461 __mlx4_free_mtt_range(dev, base, order);
1462 else
1463 set_param_l(out_param, base);
1464
1465 return err;
1466}
1467
1468static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1469 u64 in_param, u64 *out_param)
1470{
1471 int err = -EINVAL;
1472 int index;
1473 int id;
1474 struct res_mpt *mpt;
1475
1476 switch (op) {
1477 case RES_OP_RESERVE:
1478 index = __mlx4_mpt_reserve(dev);
1479 if (index == -1)
1480 break;
1481 id = index & mpt_mask(dev);
1482
1483 err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1484 if (err) {
1485 __mlx4_mpt_release(dev, index);
1486 break;
1487 }
1488 set_param_l(out_param, index);
1489 break;
1490 case RES_OP_MAP_ICM:
1491 index = get_param_l(&in_param);
1492 id = index & mpt_mask(dev);
1493 err = mr_res_start_move_to(dev, slave, id,
1494 RES_MPT_MAPPED, &mpt);
1495 if (err)
1496 return err;
1497
1498 err = __mlx4_mpt_alloc_icm(dev, mpt->key);
1499 if (err) {
1500 res_abort_move(dev, slave, RES_MPT, id);
1501 return err;
1502 }
1503
1504 res_end_move(dev, slave, RES_MPT, id);
1505 break;
1506 }
1507 return err;
1508}
1509
1510static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1511 u64 in_param, u64 *out_param)
1512{
1513 int cqn;
1514 int err;
1515
1516 switch (op) {
1517 case RES_OP_RESERVE_AND_MAP:
1518 err = __mlx4_cq_alloc_icm(dev, &cqn);
1519 if (err)
1520 break;
1521
1522 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1523 if (err) {
1524 __mlx4_cq_free_icm(dev, cqn);
1525 break;
1526 }
1527
1528 set_param_l(out_param, cqn);
1529 break;
1530
1531 default:
1532 err = -EINVAL;
1533 }
1534
1535 return err;
1536}
1537
1538static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1539 u64 in_param, u64 *out_param)
1540{
1541 int srqn;
1542 int err;
1543
1544 switch (op) {
1545 case RES_OP_RESERVE_AND_MAP:
1546 err = __mlx4_srq_alloc_icm(dev, &srqn);
1547 if (err)
1548 break;
1549
1550 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1551 if (err) {
1552 __mlx4_srq_free_icm(dev, srqn);
1553 break;
1554 }
1555
1556 set_param_l(out_param, srqn);
1557 break;
1558
1559 default:
1560 err = -EINVAL;
1561 }
1562
1563 return err;
1564}
1565
1566static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
1567{
1568 struct mlx4_priv *priv = mlx4_priv(dev);
1569 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1570 struct mac_res *res;
1571
1572 res = kzalloc(sizeof *res, GFP_KERNEL);
1573 if (!res)
1574 return -ENOMEM;
1575 res->mac = mac;
1576 res->port = (u8) port;
1577 list_add_tail(&res->list,
1578 &tracker->slave_list[slave].res_list[RES_MAC]);
1579 return 0;
1580}
1581
1582static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1583 int port)
1584{
1585 struct mlx4_priv *priv = mlx4_priv(dev);
1586 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1587 struct list_head *mac_list =
1588 &tracker->slave_list[slave].res_list[RES_MAC];
1589 struct mac_res *res, *tmp;
1590
1591 list_for_each_entry_safe(res, tmp, mac_list, list) {
1592 if (res->mac == mac && res->port == (u8) port) {
1593 list_del(&res->list);
1594 kfree(res);
1595 break;
1596 }
1597 }
1598}
1599
1600static void rem_slave_macs(struct mlx4_dev *dev, int slave)
1601{
1602 struct mlx4_priv *priv = mlx4_priv(dev);
1603 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1604 struct list_head *mac_list =
1605 &tracker->slave_list[slave].res_list[RES_MAC];
1606 struct mac_res *res, *tmp;
1607
1608 list_for_each_entry_safe(res, tmp, mac_list, list) {
1609 list_del(&res->list);
1610 __mlx4_unregister_mac(dev, res->port, res->mac);
1611 kfree(res);
1612 }
1613}
1614
1615static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1616 u64 in_param, u64 *out_param, int in_port)
1617{
1618 int err = -EINVAL;
1619 int port;
1620 u64 mac;
1621
1622 if (op != RES_OP_RESERVE_AND_MAP)
1623 return err;
1624
1625 port = !in_port ? get_param_l(out_param) : in_port;
1626 mac = in_param;
1627
1628 err = __mlx4_register_mac(dev, port, mac);
1629 if (err >= 0) {
1630 set_param_l(out_param, err);
1631 err = 0;
1632 }
1633
1634 if (!err) {
1635 err = mac_add_to_slave(dev, slave, mac, port);
1636 if (err)
1637 __mlx4_unregister_mac(dev, port, mac);
1638 }
1639 return err;
1640}
1641
1642static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
1643 int port, int vlan_index)
1644{
1645 struct mlx4_priv *priv = mlx4_priv(dev);
1646 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1647 struct list_head *vlan_list =
1648 &tracker->slave_list[slave].res_list[RES_VLAN];
1649 struct vlan_res *res, *tmp;
1650
1651 list_for_each_entry_safe(res, tmp, vlan_list, list) {
1652 if (res->vlan == vlan && res->port == (u8) port) {
1653 /* vlan found. update ref count */
1654 ++res->ref_count;
1655 return 0;
1656 }
1657 }
1658
1659 res = kzalloc(sizeof(*res), GFP_KERNEL);
1660 if (!res)
1661 return -ENOMEM;
1662 res->vlan = vlan;
1663 res->port = (u8) port;
1664 res->vlan_index = vlan_index;
1665 res->ref_count = 1;
1666 list_add_tail(&res->list,
1667 &tracker->slave_list[slave].res_list[RES_VLAN]);
1668 return 0;
1669}
1670
1671
1672static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
1673 int port)
1674{
1675 struct mlx4_priv *priv = mlx4_priv(dev);
1676 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1677 struct list_head *vlan_list =
1678 &tracker->slave_list[slave].res_list[RES_VLAN];
1679 struct vlan_res *res, *tmp;
1680
1681 list_for_each_entry_safe(res, tmp, vlan_list, list) {
1682 if (res->vlan == vlan && res->port == (u8) port) {
1683 if (!--res->ref_count) {
1684 list_del(&res->list);
1685 kfree(res);
1686 }
1687 break;
1688 }
1689 }
1690}
1691
1692static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
1693{
1694 struct mlx4_priv *priv = mlx4_priv(dev);
1695 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1696 struct list_head *vlan_list =
1697 &tracker->slave_list[slave].res_list[RES_VLAN];
1698 struct vlan_res *res, *tmp;
1699 int i;
1700
1701 list_for_each_entry_safe(res, tmp, vlan_list, list) {
1702 list_del(&res->list);
1703 /* dereference the vlan the num times the slave referenced it */
1704 for (i = 0; i < res->ref_count; i++)
1705 __mlx4_unregister_vlan(dev, res->port, res->vlan);
1706 kfree(res);
1707 }
1708}
1709
1710static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1711 u64 in_param, u64 *out_param, int in_port)
1712{
1713 struct mlx4_priv *priv = mlx4_priv(dev);
1714 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
1715 int err;
1716 u16 vlan;
1717 int vlan_index;
1718 int port;
1719
1720 port = !in_port ? get_param_l(out_param) : in_port;
1721
1722 if (!port || op != RES_OP_RESERVE_AND_MAP)
1723 return -EINVAL;
1724
1725 /* upstream kernels had NOP for reg/unreg vlan. Continue this. */
1726 if (!in_port && port > 0 && port <= dev->caps.num_ports) {
1727 slave_state[slave].old_vlan_api = true;
1728 return 0;
1729 }
1730
1731 vlan = (u16) in_param;
1732
1733 err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
1734 if (!err) {
1735 set_param_l(out_param, (u32) vlan_index);
1736 err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
1737 if (err)
1738 __mlx4_unregister_vlan(dev, port, vlan);
1739 }
1740 return err;
1741}
1742
1743static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1744 u64 in_param, u64 *out_param)
1745{
1746 u32 index;
1747 int err;
1748
1749 if (op != RES_OP_RESERVE)
1750 return -EINVAL;
1751
1752 err = __mlx4_counter_alloc(dev, &index);
1753 if (err)
1754 return err;
1755
1756 err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
1757 if (err)
1758 __mlx4_counter_free(dev, index);
1759 else
1760 set_param_l(out_param, index);
1761
1762 return err;
1763}
1764
1765static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1766 u64 in_param, u64 *out_param)
1767{
1768 u32 xrcdn;
1769 int err;
1770
1771 if (op != RES_OP_RESERVE)
1772 return -EINVAL;
1773
1774 err = __mlx4_xrcd_alloc(dev, &xrcdn);
1775 if (err)
1776 return err;
1777
1778 err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
1779 if (err)
1780 __mlx4_xrcd_free(dev, xrcdn);
1781 else
1782 set_param_l(out_param, xrcdn);
1783
1784 return err;
1785}
1786
1787int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
1788 struct mlx4_vhcr *vhcr,
1789 struct mlx4_cmd_mailbox *inbox,
1790 struct mlx4_cmd_mailbox *outbox,
1791 struct mlx4_cmd_info *cmd)
1792{
1793 int err;
1794 int alop = vhcr->op_modifier;
1795
1796 switch (vhcr->in_modifier & 0xFF) {
1797 case RES_QP:
1798 err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
1799 vhcr->in_param, &vhcr->out_param);
1800 break;
1801
1802 case RES_MTT:
1803 err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
1804 vhcr->in_param, &vhcr->out_param);
1805 break;
1806
1807 case RES_MPT:
1808 err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
1809 vhcr->in_param, &vhcr->out_param);
1810 break;
1811
1812 case RES_CQ:
1813 err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
1814 vhcr->in_param, &vhcr->out_param);
1815 break;
1816
1817 case RES_SRQ:
1818 err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
1819 vhcr->in_param, &vhcr->out_param);
1820 break;
1821
1822 case RES_MAC:
1823 err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
1824 vhcr->in_param, &vhcr->out_param,
1825 (vhcr->in_modifier >> 8) & 0xFF);
1826 break;
1827
1828 case RES_VLAN:
1829 err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
1830 vhcr->in_param, &vhcr->out_param,
1831 (vhcr->in_modifier >> 8) & 0xFF);
1832 break;
1833
1834 case RES_COUNTER:
1835 err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
1836 vhcr->in_param, &vhcr->out_param);
1837 break;
1838
1839 case RES_XRCD:
1840 err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
1841 vhcr->in_param, &vhcr->out_param);
1842 break;
1843
1844 default:
1845 err = -EINVAL;
1846 break;
1847 }
1848
1849 return err;
1850}
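/*
 * Note on the dispatch above: the low byte of vhcr->in_modifier selects
 * the resource type (RES_QP, RES_MAC, ...) and, for MAC and VLAN
 * requests, the next byte carries the port number, i.e. roughly
 * in_modifier = (port << 8) | resource_type.
 */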
1851
1852static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1853 u64 in_param)
1854{
1855 int err;
1856 int count;
1857 int base;
1858 int qpn;
1859
1860 switch (op) {
1861 case RES_OP_RESERVE:
1862 base = get_param_l(&in_param) & 0x7fffff;
1863 count = get_param_h(&in_param);
1864 err = rem_res_range(dev, slave, base, count, RES_QP, 0);
1865 if (err)
1866 break;
1867 __mlx4_qp_release_range(dev, base, count);
1868 break;
1869 case RES_OP_MAP_ICM:
1870 qpn = get_param_l(&in_param) & 0x7fffff;
1871 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
1872 NULL, 0);
1873 if (err)
1874 return err;
1875
1876 if (!fw_reserved(dev, qpn))
1877 __mlx4_qp_free_icm(dev, qpn);
1878
1879 res_end_move(dev, slave, RES_QP, qpn);
1880
1881 if (valid_reserved(dev, slave, qpn))
1882 err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
1883 break;
1884 default:
1885 err = -EINVAL;
1886 break;
1887 }
1888 return err;
1889}
1890
1891static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1892 u64 in_param, u64 *out_param)
1893{
1894 int err = -EINVAL;
1895 int base;
1896 int order;
1897
1898 if (op != RES_OP_RESERVE_AND_MAP)
1899 return err;
1900
1901 base = get_param_l(&in_param);
1902 order = get_param_h(&in_param);
1903 err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
1904 if (!err)
1905 __mlx4_free_mtt_range(dev, base, order);
1906 return err;
1907}
1908
1909static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1910 u64 in_param)
1911{
1912 int err = -EINVAL;
1913 int index;
1914 int id;
1915 struct res_mpt *mpt;
1916
1917 switch (op) {
1918 case RES_OP_RESERVE:
1919 index = get_param_l(&in_param);
1920 id = index & mpt_mask(dev);
1921 err = get_res(dev, slave, id, RES_MPT, &mpt);
1922 if (err)
1923 break;
1924 index = mpt->key;
1925 put_res(dev, slave, id, RES_MPT);
1926
1927 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
1928 if (err)
1929 break;
1930 __mlx4_mpt_release(dev, index);
1931 break;
1932 case RES_OP_MAP_ICM:
1933 index = get_param_l(&in_param);
1934 id = index & mpt_mask(dev);
1935 err = mr_res_start_move_to(dev, slave, id,
1936 RES_MPT_RESERVED, &mpt);
1937 if (err)
1938 return err;
1939
1940 __mlx4_mpt_free_icm(dev, mpt->key);
1941 res_end_move(dev, slave, RES_MPT, id);
1942 return err;
1943 break;
1944 default:
1945 err = -EINVAL;
1946 break;
1947 }
1948 return err;
1949}
1950
1951static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1952 u64 in_param, u64 *out_param)
1953{
1954 int cqn;
1955 int err;
1956
1957 switch (op) {
1958 case RES_OP_RESERVE_AND_MAP:
1959 cqn = get_param_l(&in_param);
1960 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1961 if (err)
1962 break;
1963
1964 __mlx4_cq_free_icm(dev, cqn);
1965 break;
1966
1967 default:
1968 err = -EINVAL;
1969 break;
1970 }
1971
1972 return err;
1973}
1974
1975static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1976 u64 in_param, u64 *out_param)
1977{
1978 int srqn;
1979 int err;
1980
1981 switch (op) {
1982 case RES_OP_RESERVE_AND_MAP:
1983 srqn = get_param_l(&in_param);
1984 err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1985 if (err)
1986 break;
1987
1988 __mlx4_srq_free_icm(dev, srqn);
1989 break;
1990
1991 default:
1992 err = -EINVAL;
1993 break;
1994 }
1995
1996 return err;
1997}
1998
1999static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2000 u64 in_param, u64 *out_param, int in_port)
2001{
2002 int port;
2003 int err = 0;
2004
2005 switch (op) {
2006 case RES_OP_RESERVE_AND_MAP:
2007 port = !in_port ? get_param_l(out_param) : in_port;
2008 mac_del_from_slave(dev, slave, in_param, port);
2009 __mlx4_unregister_mac(dev, port, in_param);
2010 break;
2011 default:
2012 err = -EINVAL;
2013 break;
2014 }
2015
2016 return err;
2017
2018}
2019
2020static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2021 u64 in_param, u64 *out_param, int port)
2022{
2023 struct mlx4_priv *priv = mlx4_priv(dev);
2024 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2025 int err = 0;
2026
2027 switch (op) {
2028 case RES_OP_RESERVE_AND_MAP:
2029 if (slave_state[slave].old_vlan_api)
2030 return 0;
2031 if (!port)
2032 return -EINVAL;
2033 vlan_del_from_slave(dev, slave, in_param, port);
2034 __mlx4_unregister_vlan(dev, port, in_param);
2035 break;
2036 default:
2037 err = -EINVAL;
2038 break;
2039 }
2040
2041 return err;
2042}
2043
2044static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2045 u64 in_param, u64 *out_param)
2046{
2047 int index;
2048 int err;
2049
2050 if (op != RES_OP_RESERVE)
2051 return -EINVAL;
2052
2053 index = get_param_l(&in_param);
2054 err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2055 if (err)
2056 return err;
2057
2058 __mlx4_counter_free(dev, index);
2059
2060 return err;
2061}
2062
2063static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2064 u64 in_param, u64 *out_param)
2065{
2066 int xrcdn;
2067 int err;
2068
2069 if (op != RES_OP_RESERVE)
2070 return -EINVAL;
2071
2072 xrcdn = get_param_l(&in_param);
2073 err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2074 if (err)
2075 return err;
2076
2077 __mlx4_xrcd_free(dev, xrcdn);
2078
2079 return err;
2080}
2081
2082int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
2083 struct mlx4_vhcr *vhcr,
2084 struct mlx4_cmd_mailbox *inbox,
2085 struct mlx4_cmd_mailbox *outbox,
2086 struct mlx4_cmd_info *cmd)
2087{
2088 int err = -EINVAL;
2089 int alop = vhcr->op_modifier;
2090
2091 switch (vhcr->in_modifier & 0xFF) {
2092 case RES_QP:
2093 err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
2094 vhcr->in_param);
2095 break;
2096
2097 case RES_MTT:
2098 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
2099 vhcr->in_param, &vhcr->out_param);
2100 break;
2101
2102 case RES_MPT:
2103 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
2104 vhcr->in_param);
2105 break;
2106
2107 case RES_CQ:
2108 err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
2109 vhcr->in_param, &vhcr->out_param);
2110 break;
2111
2112 case RES_SRQ:
2113 err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
2114 vhcr->in_param, &vhcr->out_param);
2115 break;
2116
2117 case RES_MAC:
2118 err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
2119 vhcr->in_param, &vhcr->out_param,
2120 (vhcr->in_modifier >> 8) & 0xFF);
2121 break;
2122
2123 case RES_VLAN:
2124 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
2125 vhcr->in_param, &vhcr->out_param,
2126 (vhcr->in_modifier >> 8) & 0xFF);
2127 break;
2128
2129 case RES_COUNTER:
2130 err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
2131 vhcr->in_param, &vhcr->out_param);
2132 break;
2133
2134 case RES_XRCD:
2135 err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
2136 vhcr->in_param, &vhcr->out_param);
2137
2138 default:
2139 break;
2140 }
2141 return err;
2142}
2143
2144/* ugly but other choices are uglier */
2145static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
2146{
2147 return (be32_to_cpu(mpt->flags) >> 9) & 1;
2148}
2149
2b8fb286 2150static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
c82e9aa0 2151{
2b8fb286 2152 return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
2153}
2154
2155static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
2156{
2157 return be32_to_cpu(mpt->mtt_sz);
2158}
2159
2160static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
2161{
2162 return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
2163}
2164
2165static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
2166{
2167 return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
2168}
2169
2170static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
2171{
2172 return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
2173}
2174
2175static int mr_is_region(struct mlx4_mpt_entry *mpt)
2176{
2177 return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
2178}
2179
2b8fb286 2180static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
2181{
2182 return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
2183}
2184
2b8fb286 2185static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
2186{
2187 return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
2188}
2189
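/* Number of MTT pages a QP context needs: derive the SQ and RQ sizes from
 * their log size/stride fields (the RQ is skipped for SRQ, RSS and XRC QPs),
 * add the page offset and round up to a power-of-two page count.
 */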
2190static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
2191{
2192 int page_shift = (qpc->log_page_size & 0x3f) + 12;
2193 int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
2194 int log_sq_stride = qpc->sq_size_stride & 7;
2195 int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
2196 int log_rq_stride = qpc->rq_size_stride & 7;
2197 int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
2198 int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
2199 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
2200 int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
2201 int sq_size;
2202 int rq_size;
2203 int total_pages;
2204 int total_mem;
2205 int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
2206
2207 sq_size = 1 << (log_sq_size + log_sq_stride + 4);
2208 rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
2209 total_mem = sq_size + rq_size;
2210 total_pages =
2211 roundup_pow_of_two((total_mem + (page_offset << 6)) >>
2212 page_shift);
2213
2214 return total_pages;
2215}
2216
2217static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
2218 int size, struct res_mtt *mtt)
2219{
2220 int res_start = mtt->com.res_id;
2221 int res_size = (1 << mtt->order);
2222
2223 if (start < res_start || start + size > res_start + res_size)
2224 return -EPERM;
2225 return 0;
2226}
2227
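/* SW2HW_MPT for a slave: memory windows are refused, the PD must not encode
 * another function, FMRs may not have bind enabled, and for non-physical
 * MPTs the referenced MTT range must belong to the slave.  Only then is the
 * command passed to firmware and the MTT reference recorded.
 */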
2228int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2229 struct mlx4_vhcr *vhcr,
2230 struct mlx4_cmd_mailbox *inbox,
2231 struct mlx4_cmd_mailbox *outbox,
2232 struct mlx4_cmd_info *cmd)
2233{
2234 int err;
2235 int index = vhcr->in_modifier;
2236 struct res_mtt *mtt;
2237 struct res_mpt *mpt;
2b8fb286 2238 int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
2239 int phys;
2240 int id;
2241 u32 pd;
2242 int pd_slave;
2243
2244 id = index & mpt_mask(dev);
2245 err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
2246 if (err)
2247 return err;
2248
2249 /* Disable memory windows for VFs. */
2250 if (!mr_is_region(inbox->buf)) {
2251 err = -EPERM;
2252 goto ex_abort;
2253 }
2254
2255 /* Make sure that the PD bits related to the slave id are zeros. */
2256 pd = mr_get_pd(inbox->buf);
2257 pd_slave = (pd >> 17) & 0x7f;
2258 if (pd_slave != 0 && pd_slave != slave) {
2259 err = -EPERM;
2260 goto ex_abort;
2261 }
2262
2263 if (mr_is_fmr(inbox->buf)) {
2264 /* FMR and Bind Enable are forbidden in slave devices. */
2265 if (mr_is_bind_enabled(inbox->buf)) {
2266 err = -EPERM;
2267 goto ex_abort;
2268 }
2269 /* FMR and Memory Windows are also forbidden. */
2270 if (!mr_is_region(inbox->buf)) {
2271 err = -EPERM;
2272 goto ex_abort;
2273 }
2274 }
2275
2276 phys = mr_phys_mpt(inbox->buf);
2277 if (!phys) {
2b8fb286 2278 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2279 if (err)
2280 goto ex_abort;
2281
2282 err = check_mtt_range(dev, slave, mtt_base,
2283 mr_get_mtt_size(inbox->buf), mtt);
2284 if (err)
2285 goto ex_put;
2286
2287 mpt->mtt = mtt;
2288 }
2289
2290 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2291 if (err)
2292 goto ex_put;
2293
2294 if (!phys) {
2295 atomic_inc(&mtt->ref_count);
2296 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2297 }
2298
2299 res_end_move(dev, slave, RES_MPT, id);
2300 return 0;
2301
2302ex_put:
2303 if (!phys)
2304 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2305ex_abort:
2306 res_abort_move(dev, slave, RES_MPT, id);
2307
2308 return err;
2309}
2310
2311int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2312 struct mlx4_vhcr *vhcr,
2313 struct mlx4_cmd_mailbox *inbox,
2314 struct mlx4_cmd_mailbox *outbox,
2315 struct mlx4_cmd_info *cmd)
2316{
2317 int err;
2318 int index = vhcr->in_modifier;
2319 struct res_mpt *mpt;
2320 int id;
2321
2322 id = index & mpt_mask(dev);
2323 err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2324 if (err)
2325 return err;
2326
2327 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2328 if (err)
2329 goto ex_abort;
2330
2331 if (mpt->mtt)
2332 atomic_dec(&mpt->mtt->ref_count);
2333
2334 res_end_move(dev, slave, RES_MPT, id);
2335 return 0;
2336
2337ex_abort:
2338 res_abort_move(dev, slave, RES_MPT, id);
2339
2340 return err;
2341}
2342
2343int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2344 struct mlx4_vhcr *vhcr,
2345 struct mlx4_cmd_mailbox *inbox,
2346 struct mlx4_cmd_mailbox *outbox,
2347 struct mlx4_cmd_info *cmd)
2348{
2349 int err;
2350 int index = vhcr->in_modifier;
2351 struct res_mpt *mpt;
2352 int id;
2353
2354 id = index & mpt_mask(dev);
2355 err = get_res(dev, slave, id, RES_MPT, &mpt);
2356 if (err)
2357 return err;
2358
2359 if (mpt->com.from_state != RES_MPT_HW) {
2360 err = -EBUSY;
2361 goto out;
2362 }
2363
2364 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2365
2366out:
2367 put_res(dev, slave, id, RES_MPT);
2368 return err;
2369}
2370
2371static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2372{
2373 return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2374}
2375
2376static int qp_get_scqn(struct mlx4_qp_context *qpc)
2377{
2378 return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2379}
2380
2381static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2382{
2383 return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2384}
2385
2386static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2387 struct mlx4_qp_context *context)
2388{
2389 u32 qpn = vhcr->in_modifier & 0xffffff;
2390 u32 qkey = 0;
2391
2392 if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2393 return;
2394
2395 /* adjust qkey in qp context */
2396 context->qkey = cpu_to_be32(qkey);
2397}
2398
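/* RST2INIT: move the QP to hardware ownership.  The MTT range and the CQs
 * (and SRQ, if used) named in the context are looked up and validated, and
 * references on them are taken only after firmware accepts the command.
 */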
2399int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2400 struct mlx4_vhcr *vhcr,
2401 struct mlx4_cmd_mailbox *inbox,
2402 struct mlx4_cmd_mailbox *outbox,
2403 struct mlx4_cmd_info *cmd)
2404{
2405 int err;
2406 int qpn = vhcr->in_modifier & 0x7fffff;
2407 struct res_mtt *mtt;
2408 struct res_qp *qp;
2409 struct mlx4_qp_context *qpc = inbox->buf + 8;
2b8fb286 2410 int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2411 int mtt_size = qp_get_mtt_size(qpc);
2412 struct res_cq *rcq;
2413 struct res_cq *scq;
2414 int rcqn = qp_get_rcqn(qpc);
2415 int scqn = qp_get_scqn(qpc);
2416 u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2417 int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2418 struct res_srq *srq;
2419 int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2420
2421 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2422 if (err)
2423 return err;
2424 qp->local_qpn = local_qpn;
2425 qp->sched_queue = 0;
2426 qp->qpc_flags = be32_to_cpu(qpc->flags);
c82e9aa0 2427
2b8fb286 2428 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2429 if (err)
2430 goto ex_abort;
2431
2432 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2433 if (err)
2434 goto ex_put_mtt;
2435
2436 err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2437 if (err)
2438 goto ex_put_mtt;
2439
2440 if (scqn != rcqn) {
2441 err = get_res(dev, slave, scqn, RES_CQ, &scq);
2442 if (err)
2443 goto ex_put_rcq;
2444 } else
2445 scq = rcq;
2446
2447 if (use_srq) {
2448 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2449 if (err)
2450 goto ex_put_scq;
2451 }
2452
2453 adjust_proxy_tun_qkey(dev, vhcr, qpc);
2454 update_pkey_index(dev, slave, inbox);
2455 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2456 if (err)
2457 goto ex_put_srq;
2458 atomic_inc(&mtt->ref_count);
2459 qp->mtt = mtt;
2460 atomic_inc(&rcq->ref_count);
2461 qp->rcq = rcq;
2462 atomic_inc(&scq->ref_count);
2463 qp->scq = scq;
2464
2465 if (scqn != rcqn)
2466 put_res(dev, slave, scqn, RES_CQ);
2467
2468 if (use_srq) {
2469 atomic_inc(&srq->ref_count);
2470 put_res(dev, slave, srqn, RES_SRQ);
2471 qp->srq = srq;
2472 }
2473 put_res(dev, slave, rcqn, RES_CQ);
2b8fb286 2474 put_res(dev, slave, mtt_base, RES_MTT);
2475 res_end_move(dev, slave, RES_QP, qpn);
2476
2477 return 0;
2478
2479ex_put_srq:
2480 if (use_srq)
2481 put_res(dev, slave, srqn, RES_SRQ);
2482ex_put_scq:
2483 if (scqn != rcqn)
2484 put_res(dev, slave, scqn, RES_CQ);
2485ex_put_rcq:
2486 put_res(dev, slave, rcqn, RES_CQ);
2487ex_put_mtt:
2b8fb286 2488 put_res(dev, slave, mtt_base, RES_MTT);
2489ex_abort:
2490 res_abort_move(dev, slave, RES_QP, qpn);
2491
2492 return err;
2493}
2494
2b8fb286 2495static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
2496{
2497 return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
2498}
2499
2500static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
2501{
2502 int log_eq_size = eqc->log_eq_size & 0x1f;
2503 int page_shift = (eqc->log_page_size & 0x3f) + 12;
2504
2505 if (log_eq_size + 5 < page_shift)
2506 return 1;
2507
2508 return 1 << (log_eq_size + 5 - page_shift);
2509}
2510
2b8fb286 2511static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
2512{
2513 return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
2514}
2515
2516static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
2517{
2518 int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
2519 int page_shift = (cqc->log_page_size & 0x3f) + 12;
2520
2521 if (log_cq_size + 5 < page_shift)
2522 return 1;
2523
2524 return 1 << (log_cq_size + 5 - page_shift);
2525}
2526
2527int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2528 struct mlx4_vhcr *vhcr,
2529 struct mlx4_cmd_mailbox *inbox,
2530 struct mlx4_cmd_mailbox *outbox,
2531 struct mlx4_cmd_info *cmd)
2532{
2533 int err;
2534 int eqn = vhcr->in_modifier;
2535 int res_id = (slave << 8) | eqn;
2536 struct mlx4_eq_context *eqc = inbox->buf;
2b8fb286 2537 int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
2538 int mtt_size = eq_get_mtt_size(eqc);
2539 struct res_eq *eq;
2540 struct res_mtt *mtt;
2541
2542 err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2543 if (err)
2544 return err;
2545 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
2546 if (err)
2547 goto out_add;
2548
2b8fb286 2549 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2550 if (err)
2551 goto out_move;
2552
2553 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2554 if (err)
2555 goto out_put;
2556
2557 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2558 if (err)
2559 goto out_put;
2560
2561 atomic_inc(&mtt->ref_count);
2562 eq->mtt = mtt;
2563 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2564 res_end_move(dev, slave, RES_EQ, res_id);
2565 return 0;
2566
2567out_put:
2568 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2569out_move:
2570 res_abort_move(dev, slave, RES_EQ, res_id);
2571out_add:
2572 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2573 return err;
2574}
2575
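/* Find the slave-owned MTT resource that contains [start, start + len) and
 * mark it busy; the caller drops the claim with put_res().
 */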
2576static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
2577 int len, struct res_mtt **res)
2578{
2579 struct mlx4_priv *priv = mlx4_priv(dev);
2580 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2581 struct res_mtt *mtt;
2582 int err = -EINVAL;
2583
2584 spin_lock_irq(mlx4_tlock(dev));
2585 list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
2586 com.list) {
2587 if (!check_mtt_range(dev, slave, start, len, mtt)) {
2588 *res = mtt;
2589 mtt->com.from_state = mtt->com.state;
2590 mtt->com.state = RES_MTT_BUSY;
2591 err = 0;
2592 break;
2593 }
2594 }
2595 spin_unlock_irq(mlx4_tlock(dev));
2596
2597 return err;
2598}
2599
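/* Slaves are only allowed GID index 0: reject RC/UC path-modifying
 * transitions that set a non-zero mgid_index on the primary or alternate
 * path.
 */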
2600static int verify_qp_parameters(struct mlx4_dev *dev,
2601 struct mlx4_cmd_mailbox *inbox,
2602 enum qp_transition transition, u8 slave)
2603{
2604 u32 qp_type;
2605 struct mlx4_qp_context *qp_ctx;
2606 enum mlx4_qp_optpar optpar;
2607
2608 qp_ctx = inbox->buf + 8;
2609 qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
2610 optpar = be32_to_cpu(*(__be32 *) inbox->buf);
2611
2612 switch (qp_type) {
2613 case MLX4_QP_ST_RC:
2614 case MLX4_QP_ST_UC:
2615 switch (transition) {
2616 case QP_TRANS_INIT2RTR:
2617 case QP_TRANS_RTR2RTS:
2618 case QP_TRANS_RTS2RTS:
2619 case QP_TRANS_SQD2SQD:
2620 case QP_TRANS_SQD2RTS:
2621 if (slave != mlx4_master_func_num(dev))
2622 /* slaves have only gid index 0 */
2623 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
2624 if (qp_ctx->pri_path.mgid_index)
2625 return -EINVAL;
2626 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
2627 if (qp_ctx->alt_path.mgid_index)
2628 return -EINVAL;
2629 break;
2630 default:
2631 break;
2632 }
2633
2634 break;
2635 default:
2636 break;
2637 }
2638
2639 return 0;
2640}
2641
2642int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
2643 struct mlx4_vhcr *vhcr,
2644 struct mlx4_cmd_mailbox *inbox,
2645 struct mlx4_cmd_mailbox *outbox,
2646 struct mlx4_cmd_info *cmd)
2647{
2648 struct mlx4_mtt mtt;
2649 __be64 *page_list = inbox->buf;
2650 u64 *pg_list = (u64 *)page_list;
2651 int i;
2652 struct res_mtt *rmtt = NULL;
2653 int start = be64_to_cpu(page_list[0]);
2654 int npages = vhcr->in_modifier;
2655 int err;
2656
2657 err = get_containing_mtt(dev, slave, start, npages, &rmtt);
2658 if (err)
2659 return err;
2660
2661 /* Call the SW implementation of write_mtt:
2662 * - Prepare a dummy mtt struct
2663 * - Translate inbox contents to simple addresses in host endianness */
2664 mtt.offset = 0; /* TBD this is broken but I don't handle it since
2665 we don't really use it */
2666 mtt.order = 0;
2667 mtt.page_shift = 0;
2668 for (i = 0; i < npages; ++i)
2669 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
2670
2671 err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
2672 ((u64 *)page_list + 2));
2673
2674 if (rmtt)
2675 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
2676
2677 return err;
2678}
2679
2680int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2681 struct mlx4_vhcr *vhcr,
2682 struct mlx4_cmd_mailbox *inbox,
2683 struct mlx4_cmd_mailbox *outbox,
2684 struct mlx4_cmd_info *cmd)
2685{
2686 int eqn = vhcr->in_modifier;
2687 int res_id = eqn | (slave << 8);
2688 struct res_eq *eq;
2689 int err;
2690
2691 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
2692 if (err)
2693 return err;
2694
2695 err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
2696 if (err)
2697 goto ex_abort;
2698
2699 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2700 if (err)
2701 goto ex_put;
2702
2703 atomic_dec(&eq->mtt->ref_count);
2704 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2705 res_end_move(dev, slave, RES_EQ, res_id);
2706 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2707
2708 return 0;
2709
2710ex_put:
2711 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2712ex_abort:
2713 res_abort_move(dev, slave, RES_EQ, res_id);
2714
2715 return err;
2716}
2717
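/* Forward an event to a slave by writing the EQE into the event EQ the
 * slave registered for this event type, via the GEN_EQE firmware command.
 */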
2718int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
2719{
2720 struct mlx4_priv *priv = mlx4_priv(dev);
2721 struct mlx4_slave_event_eq_info *event_eq;
2722 struct mlx4_cmd_mailbox *mailbox;
2723 u32 in_modifier = 0;
2724 int err;
2725 int res_id;
2726 struct res_eq *req;
2727
2728 if (!priv->mfunc.master.slave_state)
2729 return -EINVAL;
2730
803143fb 2731 event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
2732
2733 /* Create the event only if the slave is registered */
803143fb 2734 if (event_eq->eqn < 0)
2735 return 0;
2736
2737 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2738 res_id = (slave << 8) | event_eq->eqn;
2739 err = get_res(dev, slave, res_id, RES_EQ, &req);
2740 if (err)
2741 goto unlock;
2742
2743 if (req->com.from_state != RES_EQ_HW) {
2744 err = -EINVAL;
2745 goto put;
2746 }
2747
2748 mailbox = mlx4_alloc_cmd_mailbox(dev);
2749 if (IS_ERR(mailbox)) {
2750 err = PTR_ERR(mailbox);
2751 goto put;
2752 }
2753
2754 if (eqe->type == MLX4_EVENT_TYPE_CMD) {
2755 ++event_eq->token;
2756 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
2757 }
2758
2759 memcpy(mailbox->buf, (u8 *) eqe, 28);
2760
2761 in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
2762
2763 err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
2764 MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
2765 MLX4_CMD_NATIVE);
2766
2767 put_res(dev, slave, res_id, RES_EQ);
2768 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2769 mlx4_free_cmd_mailbox(dev, mailbox);
2770 return err;
2771
2772put:
2773 put_res(dev, slave, res_id, RES_EQ);
2774
2775unlock:
2776 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2777 return err;
2778}
2779
2780int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
2781 struct mlx4_vhcr *vhcr,
2782 struct mlx4_cmd_mailbox *inbox,
2783 struct mlx4_cmd_mailbox *outbox,
2784 struct mlx4_cmd_info *cmd)
2785{
2786 int eqn = vhcr->in_modifier;
2787 int res_id = eqn | (slave << 8);
2788 struct res_eq *eq;
2789 int err;
2790
2791 err = get_res(dev, slave, res_id, RES_EQ, &eq);
2792 if (err)
2793 return err;
2794
2795 if (eq->com.from_state != RES_EQ_HW) {
2796 err = -EINVAL;
2797 goto ex_put;
2798 }
2799
2800 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2801
2802ex_put:
2803 put_res(dev, slave, res_id, RES_EQ);
2804 return err;
2805}
2806
2807int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2808 struct mlx4_vhcr *vhcr,
2809 struct mlx4_cmd_mailbox *inbox,
2810 struct mlx4_cmd_mailbox *outbox,
2811 struct mlx4_cmd_info *cmd)
2812{
2813 int err;
2814 int cqn = vhcr->in_modifier;
2815 struct mlx4_cq_context *cqc = inbox->buf;
2b8fb286 2816 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2817 struct res_cq *cq;
2818 struct res_mtt *mtt;
2819
2820 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
2821 if (err)
2822 return err;
2b8fb286 2823 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2824 if (err)
2825 goto out_move;
2826 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2827 if (err)
2828 goto out_put;
2829 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2830 if (err)
2831 goto out_put;
2832 atomic_inc(&mtt->ref_count);
2833 cq->mtt = mtt;
2834 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2835 res_end_move(dev, slave, RES_CQ, cqn);
2836 return 0;
2837
2838out_put:
2839 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2840out_move:
2841 res_abort_move(dev, slave, RES_CQ, cqn);
2842 return err;
2843}
2844
2845int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2846 struct mlx4_vhcr *vhcr,
2847 struct mlx4_cmd_mailbox *inbox,
2848 struct mlx4_cmd_mailbox *outbox,
2849 struct mlx4_cmd_info *cmd)
2850{
2851 int err;
2852 int cqn = vhcr->in_modifier;
2853 struct res_cq *cq;
2854
2855 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
2856 if (err)
2857 return err;
2858 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2859 if (err)
2860 goto out_move;
2861 atomic_dec(&cq->mtt->ref_count);
2862 res_end_move(dev, slave, RES_CQ, cqn);
2863 return 0;
2864
2865out_move:
2866 res_abort_move(dev, slave, RES_CQ, cqn);
2867 return err;
2868}
2869
2870int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2871 struct mlx4_vhcr *vhcr,
2872 struct mlx4_cmd_mailbox *inbox,
2873 struct mlx4_cmd_mailbox *outbox,
2874 struct mlx4_cmd_info *cmd)
2875{
2876 int cqn = vhcr->in_modifier;
2877 struct res_cq *cq;
2878 int err;
2879
2880 err = get_res(dev, slave, cqn, RES_CQ, &cq);
2881 if (err)
2882 return err;
2883
2884 if (cq->com.from_state != RES_CQ_HW)
2885 goto ex_put;
2886
2887 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2888ex_put:
2889 put_res(dev, slave, cqn, RES_CQ);
2890
2891 return err;
2892}
2893
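/* CQ resize path of MODIFY_CQ: verify the CQ still references the MTT we
 * track, validate the new MTT range, and on success move the reference from
 * the old MTT to the new one.
 */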
2894static int handle_resize(struct mlx4_dev *dev, int slave,
2895 struct mlx4_vhcr *vhcr,
2896 struct mlx4_cmd_mailbox *inbox,
2897 struct mlx4_cmd_mailbox *outbox,
2898 struct mlx4_cmd_info *cmd,
2899 struct res_cq *cq)
2900{
2901 int err;
2902 struct res_mtt *orig_mtt;
2903 struct res_mtt *mtt;
2904 struct mlx4_cq_context *cqc = inbox->buf;
2b8fb286 2905 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2906
2907 err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
2908 if (err)
2909 return err;
2910
2911 if (orig_mtt != cq->mtt) {
2912 err = -EINVAL;
2913 goto ex_put;
2914 }
2915
2b8fb286 2916 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2917 if (err)
2918 goto ex_put;
2919
2920 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2921 if (err)
2922 goto ex_put1;
2923 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2924 if (err)
2925 goto ex_put1;
2926 atomic_dec(&orig_mtt->ref_count);
2927 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2928 atomic_inc(&mtt->ref_count);
2929 cq->mtt = mtt;
2930 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2931 return 0;
2932
2933ex_put1:
2934 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2935ex_put:
2936 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2937
2938 return err;
2939
2940}
2941
2942int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2943 struct mlx4_vhcr *vhcr,
2944 struct mlx4_cmd_mailbox *inbox,
2945 struct mlx4_cmd_mailbox *outbox,
2946 struct mlx4_cmd_info *cmd)
2947{
2948 int cqn = vhcr->in_modifier;
2949 struct res_cq *cq;
2950 int err;
2951
2952 err = get_res(dev, slave, cqn, RES_CQ, &cq);
2953 if (err)
2954 return err;
2955
2956 if (cq->com.from_state != RES_CQ_HW)
2957 goto ex_put;
2958
2959 if (vhcr->op_modifier == 0) {
2960 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
dcf353b1 2961 goto ex_put;
2962 }
2963
2964 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2965ex_put:
2966 put_res(dev, slave, cqn, RES_CQ);
2967
2968 return err;
2969}
2970
2971static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
2972{
2973 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
2974 int log_rq_stride = srqc->logstride & 7;
2975 int page_shift = (srqc->log_page_size & 0x3f) + 12;
2976
2977 if (log_srq_size + log_rq_stride + 4 < page_shift)
2978 return 1;
2979
2980 return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
2981}
2982
2983int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2984 struct mlx4_vhcr *vhcr,
2985 struct mlx4_cmd_mailbox *inbox,
2986 struct mlx4_cmd_mailbox *outbox,
2987 struct mlx4_cmd_info *cmd)
2988{
2989 int err;
2990 int srqn = vhcr->in_modifier;
2991 struct res_mtt *mtt;
2992 struct res_srq *srq;
2993 struct mlx4_srq_context *srqc = inbox->buf;
2b8fb286 2994 int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
2995
2996 if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
2997 return -EINVAL;
2998
2999 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3000 if (err)
3001 return err;
2b8fb286 3002 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3003 if (err)
3004 goto ex_abort;
3005 err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3006 mtt);
3007 if (err)
3008 goto ex_put_mtt;
3009
3010 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3011 if (err)
3012 goto ex_put_mtt;
3013
3014 atomic_inc(&mtt->ref_count);
3015 srq->mtt = mtt;
3016 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3017 res_end_move(dev, slave, RES_SRQ, srqn);
3018 return 0;
3019
3020ex_put_mtt:
3021 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3022ex_abort:
3023 res_abort_move(dev, slave, RES_SRQ, srqn);
3024
3025 return err;
3026}
3027
3028int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3029 struct mlx4_vhcr *vhcr,
3030 struct mlx4_cmd_mailbox *inbox,
3031 struct mlx4_cmd_mailbox *outbox,
3032 struct mlx4_cmd_info *cmd)
3033{
3034 int err;
3035 int srqn = vhcr->in_modifier;
3036 struct res_srq *srq;
3037
3038 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3039 if (err)
3040 return err;
3041 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3042 if (err)
3043 goto ex_abort;
3044 atomic_dec(&srq->mtt->ref_count);
3045 if (srq->cq)
3046 atomic_dec(&srq->cq->ref_count);
3047 res_end_move(dev, slave, RES_SRQ, srqn);
3048
3049 return 0;
3050
3051ex_abort:
3052 res_abort_move(dev, slave, RES_SRQ, srqn);
3053
3054 return err;
3055}
3056
3057int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3058 struct mlx4_vhcr *vhcr,
3059 struct mlx4_cmd_mailbox *inbox,
3060 struct mlx4_cmd_mailbox *outbox,
3061 struct mlx4_cmd_info *cmd)
3062{
3063 int err;
3064 int srqn = vhcr->in_modifier;
3065 struct res_srq *srq;
3066
3067 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3068 if (err)
3069 return err;
3070 if (srq->com.from_state != RES_SRQ_HW) {
3071 err = -EBUSY;
3072 goto out;
3073 }
3074 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3075out:
3076 put_res(dev, slave, srqn, RES_SRQ);
3077 return err;
3078}
3079
3080int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3081 struct mlx4_vhcr *vhcr,
3082 struct mlx4_cmd_mailbox *inbox,
3083 struct mlx4_cmd_mailbox *outbox,
3084 struct mlx4_cmd_info *cmd)
3085{
3086 int err;
3087 int srqn = vhcr->in_modifier;
3088 struct res_srq *srq;
3089
3090 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3091 if (err)
3092 return err;
3093
3094 if (srq->com.from_state != RES_SRQ_HW) {
3095 err = -EBUSY;
3096 goto out;
3097 }
3098
3099 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3100out:
3101 put_res(dev, slave, srqn, RES_SRQ);
3102 return err;
3103}
3104
3105int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3106 struct mlx4_vhcr *vhcr,
3107 struct mlx4_cmd_mailbox *inbox,
3108 struct mlx4_cmd_mailbox *outbox,
3109 struct mlx4_cmd_info *cmd)
3110{
3111 int err;
3112 int qpn = vhcr->in_modifier & 0x7fffff;
3113 struct res_qp *qp;
3114
3115 err = get_res(dev, slave, qpn, RES_QP, &qp);
3116 if (err)
3117 return err;
3118 if (qp->com.from_state != RES_QP_HW) {
3119 err = -EBUSY;
3120 goto out;
3121 }
3122
3123 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3124out:
3125 put_res(dev, slave, qpn, RES_QP);
3126 return err;
3127}
3128
3129int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3130 struct mlx4_vhcr *vhcr,
3131 struct mlx4_cmd_mailbox *inbox,
3132 struct mlx4_cmd_mailbox *outbox,
3133 struct mlx4_cmd_info *cmd)
3134{
3135 struct mlx4_qp_context *context = inbox->buf + 8;
3136 adjust_proxy_tun_qkey(dev, vhcr, context);
3137 update_pkey_index(dev, slave, inbox);
3138 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3139}
3140
3141int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3142 struct mlx4_vhcr *vhcr,
3143 struct mlx4_cmd_mailbox *inbox,
3144 struct mlx4_cmd_mailbox *outbox,
3145 struct mlx4_cmd_info *cmd)
3146{
54679e14 3147 int err;
c82e9aa0 3148 struct mlx4_qp_context *qpc = inbox->buf + 8;
3149 int qpn = vhcr->in_modifier & 0x7fffff;
3150 struct res_qp *qp;
3151 u8 orig_sched_queue;
c82e9aa0 3152
3153 err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
3154 if (err)
3155 return err;
3156
3157 update_pkey_index(dev, slave, inbox);
3158 update_gid(dev, inbox, (u8)slave);
3159 adjust_proxy_tun_qkey(dev, vhcr, qpc);
3160 orig_sched_queue = qpc->pri_path.sched_queue;
3161 err = update_vport_qp_param(dev, inbox, slave, qpn);
3162 if (err)
3163 return err;
54679e14 3164
3165 err = get_res(dev, slave, qpn, RES_QP, &qp);
3166 if (err)
3167 return err;
3168 if (qp->com.from_state != RES_QP_HW) {
3169 err = -EBUSY;
3170 goto out;
3171 }
3172
3173 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3174out:
3175 /* if no error, save sched queue value passed in by VF. This is
3176 * essentially the QOS value provided by the VF. This will be useful
3177 * if we allow dynamic changes from VST back to VGT
3178 */
3179 if (!err)
3180 qp->sched_queue = orig_sched_queue;
3181
3182 put_res(dev, slave, qpn, RES_QP);
3183 return err;
3184}
3185
3186int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3187 struct mlx4_vhcr *vhcr,
3188 struct mlx4_cmd_mailbox *inbox,
3189 struct mlx4_cmd_mailbox *outbox,
3190 struct mlx4_cmd_info *cmd)
3191{
3192 int err;
3193 struct mlx4_qp_context *context = inbox->buf + 8;
3194
3195 err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
3196 if (err)
3197 return err;
3198
3199 update_pkey_index(dev, slave, inbox);
3200 update_gid(dev, inbox, (u8)slave);
3201 adjust_proxy_tun_qkey(dev, vhcr, context);
3202 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3203}
3204
3205int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3206 struct mlx4_vhcr *vhcr,
3207 struct mlx4_cmd_mailbox *inbox,
3208 struct mlx4_cmd_mailbox *outbox,
3209 struct mlx4_cmd_info *cmd)
3210{
3211 int err;
3212 struct mlx4_qp_context *context = inbox->buf + 8;
3213
3214 err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
3215 if (err)
3216 return err;
3217
3218 update_pkey_index(dev, slave, inbox);
3219 update_gid(dev, inbox, (u8)slave);
3220 adjust_proxy_tun_qkey(dev, vhcr, context);
3221 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3222}
3223
3224
3225int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3226 struct mlx4_vhcr *vhcr,
3227 struct mlx4_cmd_mailbox *inbox,
3228 struct mlx4_cmd_mailbox *outbox,
3229 struct mlx4_cmd_info *cmd)
3230{
3231 struct mlx4_qp_context *context = inbox->buf + 8;
3232 adjust_proxy_tun_qkey(dev, vhcr, context);
3233 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3234}
3235
3236int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3237 struct mlx4_vhcr *vhcr,
3238 struct mlx4_cmd_mailbox *inbox,
3239 struct mlx4_cmd_mailbox *outbox,
3240 struct mlx4_cmd_info *cmd)
3241{
3242 int err;
3243 struct mlx4_qp_context *context = inbox->buf + 8;
3244
3245 err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
3246 if (err)
3247 return err;
3248
3249 adjust_proxy_tun_qkey(dev, vhcr, context);
3250 update_gid(dev, inbox, (u8)slave);
3251 update_pkey_index(dev, slave, inbox);
3252 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3253}
3254
3255int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3256 struct mlx4_vhcr *vhcr,
3257 struct mlx4_cmd_mailbox *inbox,
3258 struct mlx4_cmd_mailbox *outbox,
3259 struct mlx4_cmd_info *cmd)
3260{
3261 int err;
3262 struct mlx4_qp_context *context = inbox->buf + 8;
3263
3264 err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
3265 if (err)
3266 return err;
c82e9aa0 3267
3268 adjust_proxy_tun_qkey(dev, vhcr, context);
3269 update_gid(dev, inbox, (u8)slave);
3270 update_pkey_index(dev, slave, inbox);
3271 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3272}
3273
3274int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3275 struct mlx4_vhcr *vhcr,
3276 struct mlx4_cmd_mailbox *inbox,
3277 struct mlx4_cmd_mailbox *outbox,
3278 struct mlx4_cmd_info *cmd)
3279{
3280 int err;
3281 int qpn = vhcr->in_modifier & 0x7fffff;
3282 struct res_qp *qp;
3283
3284 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3285 if (err)
3286 return err;
3287 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3288 if (err)
3289 goto ex_abort;
3290
3291 atomic_dec(&qp->mtt->ref_count);
3292 atomic_dec(&qp->rcq->ref_count);
3293 atomic_dec(&qp->scq->ref_count);
3294 if (qp->srq)
3295 atomic_dec(&qp->srq->ref_count);
3296 res_end_move(dev, slave, RES_QP, qpn);
3297 return 0;
3298
3299ex_abort:
3300 res_abort_move(dev, slave, RES_QP, qpn);
3301
3302 return err;
3303}
3304
3305static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3306 struct res_qp *rqp, u8 *gid)
3307{
3308 struct res_gid *res;
3309
3310 list_for_each_entry(res, &rqp->mcg_list, list) {
3311 if (!memcmp(res->gid, gid, 16))
3312 return res;
3313 }
3314 return NULL;
3315}
3316
3317static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
9f5b6c63 3318 u8 *gid, enum mlx4_protocol prot,
fab1e24a 3319 enum mlx4_steer_type steer, u64 reg_id)
3320{
3321 struct res_gid *res;
3322 int err;
3323
3324 res = kzalloc(sizeof *res, GFP_KERNEL);
3325 if (!res)
3326 return -ENOMEM;
3327
3328 spin_lock_irq(&rqp->mcg_spl);
3329 if (find_gid(dev, slave, rqp, gid)) {
3330 kfree(res);
3331 err = -EEXIST;
3332 } else {
3333 memcpy(res->gid, gid, 16);
3334 res->prot = prot;
9f5b6c63 3335 res->steer = steer;
fab1e24a 3336 res->reg_id = reg_id;
3337 list_add_tail(&res->list, &rqp->mcg_list);
3338 err = 0;
3339 }
3340 spin_unlock_irq(&rqp->mcg_spl);
3341
3342 return err;
3343}
3344
3345static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
9f5b6c63 3346 u8 *gid, enum mlx4_protocol prot,
fab1e24a 3347 enum mlx4_steer_type steer, u64 *reg_id)
3348{
3349 struct res_gid *res;
3350 int err;
3351
3352 spin_lock_irq(&rqp->mcg_spl);
3353 res = find_gid(dev, slave, rqp, gid);
9f5b6c63 3354 if (!res || res->prot != prot || res->steer != steer)
3355 err = -EINVAL;
3356 else {
fab1e24a 3357 *reg_id = res->reg_id;
3358 list_del(&res->list);
3359 kfree(res);
3360 err = 0;
3361 }
3362 spin_unlock_irq(&rqp->mcg_spl);
3363
3364 return err;
3365}
3366
3367static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
3368 int block_loopback, enum mlx4_protocol prot,
3369 enum mlx4_steer_type type, u64 *reg_id)
3370{
3371 switch (dev->caps.steering_mode) {
3372 case MLX4_STEERING_MODE_DEVICE_MANAGED:
3373 return mlx4_trans_to_dmfs_attach(dev, qp, gid, gid[5],
3374 block_loopback, prot,
3375 reg_id);
3376 case MLX4_STEERING_MODE_B0:
3377 return mlx4_qp_attach_common(dev, qp, gid,
3378 block_loopback, prot, type);
3379 default:
3380 return -EINVAL;
3381 }
3382}
3383
3384static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
3385 enum mlx4_protocol prot, enum mlx4_steer_type type,
3386 u64 reg_id)
3387{
3388 switch (dev->caps.steering_mode) {
3389 case MLX4_STEERING_MODE_DEVICE_MANAGED:
3390 return mlx4_flow_detach(dev, reg_id);
3391 case MLX4_STEERING_MODE_B0:
3392 return mlx4_qp_detach_common(dev, qp, gid, prot, type);
3393 default:
3394 return -EINVAL;
3395 }
3396}
3397
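/* Multicast attach/detach for a slave QP.  On attach the steering rule is
 * registered first and then recorded in the QP's mcg_list so it can be
 * cleaned up if the slave goes away; on detach the recorded entry is looked
 * up by GID before the rule is removed.
 */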
3398int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3399 struct mlx4_vhcr *vhcr,
3400 struct mlx4_cmd_mailbox *inbox,
3401 struct mlx4_cmd_mailbox *outbox,
3402 struct mlx4_cmd_info *cmd)
3403{
3404 struct mlx4_qp qp; /* dummy for calling attach/detach */
3405 u8 *gid = inbox->buf;
3406 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
162344ed 3407 int err;
3408 int qpn;
3409 struct res_qp *rqp;
fab1e24a 3410 u64 reg_id = 0;
3411 int attach = vhcr->op_modifier;
3412 int block_loopback = vhcr->in_modifier >> 31;
3413 u8 steer_type_mask = 2;
75c6062c 3414 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
3415
3416 qpn = vhcr->in_modifier & 0xffffff;
3417 err = get_res(dev, slave, qpn, RES_QP, &rqp);
3418 if (err)
3419 return err;
3420
3421 qp.qpn = qpn;
3422 if (attach) {
3423 err = qp_attach(dev, &qp, gid, block_loopback, prot,
3424 type, &reg_id);
3425 if (err) {
3426 pr_err("Failed to attach rule to qp 0x%x\n", qpn);
c82e9aa0 3427 goto ex_put;
3428 }
3429 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
c82e9aa0 3430 if (err)
fab1e24a 3431 goto ex_detach;
c82e9aa0 3432 } else {
fab1e24a 3433 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
3434 if (err)
3435 goto ex_put;
c82e9aa0 3436
3437 err = qp_detach(dev, &qp, gid, prot, type, reg_id);
3438 if (err)
3439 pr_err("Failed to detach rule from qp 0x%x reg_id = 0x%llx\n",
3440 qpn, reg_id);
3441 }
c82e9aa0 3442 put_res(dev, slave, qpn, RES_QP);
fab1e24a 3443 return err;
c82e9aa0 3444
3445ex_detach:
3446 qp_detach(dev, &qp, gid, prot, type, reg_id);
3447ex_put:
3448 put_res(dev, slave, qpn, RES_QP);
3449 return err;
3450}
3451
3452/*
3453 * MAC validation for Flow Steering rules.
3454 * A VF may attach rules only with a MAC address that is assigned to it.
3455 */
3456static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
3457 struct list_head *rlist)
3458{
3459 struct mac_res *res, *tmp;
3460 __be64 be_mac;
3461
3462 /* make sure it isn't a multicast or broadcast MAC */
3463 if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
3464 !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
3465 list_for_each_entry_safe(res, tmp, rlist, list) {
3466 be_mac = cpu_to_be64(res->mac << 16);
3467 if (!memcmp(&be_mac, eth_header->eth.dst_mac, ETH_ALEN))
3468 return 0;
3469 }
3470 pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
3471 eth_header->eth.dst_mac, slave);
3472 return -EINVAL;
3473 }
3474 return 0;
3475}
3476
3477/*
3478 * In case the eth header is missing, insert an eth header with a MAC address
3479 * assigned to the VF.
3480 */
3481static int add_eth_header(struct mlx4_dev *dev, int slave,
3482 struct mlx4_cmd_mailbox *inbox,
3483 struct list_head *rlist, int header_id)
3484{
3485 struct mac_res *res, *tmp;
3486 u8 port;
3487 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3488 struct mlx4_net_trans_rule_hw_eth *eth_header;
3489 struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
3490 struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
3491 __be64 be_mac = 0;
3492 __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
3493
3494 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
015465f8 3495 port = ctrl->port;
3496 eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
3497
3498 /* Clear a space in the inbox for eth header */
3499 switch (header_id) {
3500 case MLX4_NET_TRANS_RULE_ID_IPV4:
3501 ip_header =
3502 (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
3503 memmove(ip_header, eth_header,
3504 sizeof(*ip_header) + sizeof(*l4_header));
3505 break;
3506 case MLX4_NET_TRANS_RULE_ID_TCP:
3507 case MLX4_NET_TRANS_RULE_ID_UDP:
3508 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
3509 (eth_header + 1);
3510 memmove(l4_header, eth_header, sizeof(*l4_header));
3511 break;
3512 default:
3513 return -EINVAL;
3514 }
3515 list_for_each_entry_safe(res, tmp, rlist, list) {
3516 if (port == res->port) {
3517 be_mac = cpu_to_be64(res->mac << 16);
3518 break;
3519 }
3520 }
3521 if (!be_mac) {
3522 pr_err("Failed adding eth header to FS rule, can't find matching MAC for port %d\n",
3523 port);
3524 return -EINVAL;
3525 }
3526
3527 memset(eth_header, 0, sizeof(*eth_header));
3528 eth_header->size = sizeof(*eth_header) >> 2;
3529 eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
3530 memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
3531 memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
3532
3533 return 0;
3534
3535}
3536
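/* Flow steering attach on behalf of a slave (device-managed steering only):
 * the L2 header is validated, or synthesized when missing, so a VF can only
 * steer traffic destined to one of its own MACs; the rule is then attached
 * in firmware and the returned rule id is tracked against the QP.
 */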
3537int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3538 struct mlx4_vhcr *vhcr,
3539 struct mlx4_cmd_mailbox *inbox,
3540 struct mlx4_cmd_mailbox *outbox,
3541 struct mlx4_cmd_info *cmd)
3542{
3543
3544 struct mlx4_priv *priv = mlx4_priv(dev);
3545 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3546 struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
1b9c6b06 3547 int err;
a9c01e7a 3548 int qpn;
2c473ae7 3549 struct res_qp *rqp;
3550 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3551 struct _rule_hw *rule_header;
3552 int header_id;
1b9c6b06 3553
3554 if (dev->caps.steering_mode !=
3555 MLX4_STEERING_MODE_DEVICE_MANAGED)
3556 return -EOPNOTSUPP;
1b9c6b06 3557
7fb40f87 3558 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
a9c01e7a 3559 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
2c473ae7 3560 err = get_res(dev, slave, qpn, RES_QP, &rqp);
3561 if (err) {
3562 pr_err("Steering rule with qpn 0x%x rejected.\n", qpn);
3563 return err;
3564 }
3565 rule_header = (struct _rule_hw *)(ctrl + 1);
3566 header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
3567
3568 switch (header_id) {
3569 case MLX4_NET_TRANS_RULE_ID_ETH:
3570 if (validate_eth_header_mac(slave, rule_header, rlist)) {
3571 err = -EINVAL;
3572 goto err_put;
3573 }
7fb40f87 3574 break;
3575 case MLX4_NET_TRANS_RULE_ID_IB:
3576 break;
3577 case MLX4_NET_TRANS_RULE_ID_IPV4:
3578 case MLX4_NET_TRANS_RULE_ID_TCP:
3579 case MLX4_NET_TRANS_RULE_ID_UDP:
3580 pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
3581 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
3582 err = -EINVAL;
3583 goto err_put;
3584 }
3585 vhcr->in_modifier +=
3586 sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
3587 break;
3588 default:
3589 pr_err("Corrupted mailbox.\n");
3590 err = -EINVAL;
3591 goto err_put;
3592 }
3593
3594 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
3595 vhcr->in_modifier, 0,
3596 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
3597 MLX4_CMD_NATIVE);
3598 if (err)
a9c01e7a 3599 goto err_put;
1b9c6b06 3600
2c473ae7 3601 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
3602 if (err) {
3603 mlx4_err(dev, "Failed to add flow steering resources\n");
3604 /* detach rule*/
3605 mlx4_cmd(dev, vhcr->out_param, 0, 0,
2065b38b 3606 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
1b9c6b06 3607 MLX4_CMD_NATIVE);
2c473ae7 3608 goto err_put;
1b9c6b06 3609 }
2c473ae7 3610 atomic_inc(&rqp->ref_count);
3611err_put:
3612 put_res(dev, slave, qpn, RES_QP);
1b9c6b06 3613 return err;
3614}
3615
3616int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
3617 struct mlx4_vhcr *vhcr,
3618 struct mlx4_cmd_mailbox *inbox,
3619 struct mlx4_cmd_mailbox *outbox,
3620 struct mlx4_cmd_info *cmd)
3621{
1b9c6b06 3622 int err;
3623 struct res_qp *rqp;
3624 struct res_fs_rule *rrule;
1b9c6b06 3625
3626 if (dev->caps.steering_mode !=
3627 MLX4_STEERING_MODE_DEVICE_MANAGED)
3628 return -EOPNOTSUPP;
1b9c6b06 3629
3630 err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
3631 if (err)
3632 return err;
3633 /* Release the rule from busy state before removal */
3634 put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
3635 err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
3636 if (err)
3637 return err;
3638
3639 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
3640 if (err) {
3641 mlx4_err(dev, "Failed to remove flow steering resources\n");
2c473ae7 3642 goto out;
3643 }
3644
3645 err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
3646 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3647 MLX4_CMD_NATIVE);
3648 if (!err)
3649 atomic_dec(&rqp->ref_count);
3650out:
3651 put_res(dev, slave, rrule->qpn, RES_QP);
1b9c6b06 3652 return err;
3653}
3654
3655enum {
3656 BUSY_MAX_RETRIES = 10
3657};
3658
3659int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
3660 struct mlx4_vhcr *vhcr,
3661 struct mlx4_cmd_mailbox *inbox,
3662 struct mlx4_cmd_mailbox *outbox,
3663 struct mlx4_cmd_info *cmd)
3664{
3665 int err;
3666 int index = vhcr->in_modifier & 0xffff;
3667
3668 err = get_res(dev, slave, index, RES_COUNTER, NULL);
3669 if (err)
3670 return err;
3671
3672 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3673 put_res(dev, slave, index, RES_COUNTER);
3674 return err;
3675}
3676
3677static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
3678{
3679 struct res_gid *rgid;
3680 struct res_gid *tmp;
3681 struct mlx4_qp qp; /* dummy for calling attach/detach */
3682
3683 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
3684 switch (dev->caps.steering_mode) {
3685 case MLX4_STEERING_MODE_DEVICE_MANAGED:
3686 mlx4_flow_detach(dev, rgid->reg_id);
3687 break;
3688 case MLX4_STEERING_MODE_B0:
3689 qp.qpn = rqp->local_qpn;
3690 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
3691 rgid->prot, rgid->steer);
3692 break;
3693 }
3694 list_del(&rgid->list);
3695 kfree(rgid);
3696 }
3697}
3698
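/* Mark every resource of @type owned by @slave as busy and about to be
 * removed, so no new command can claim it.  Returns the number of entries
 * that were already busy and could not be taken over yet.
 */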
3699static int _move_all_busy(struct mlx4_dev *dev, int slave,
3700 enum mlx4_resource type, int print)
3701{
3702 struct mlx4_priv *priv = mlx4_priv(dev);
3703 struct mlx4_resource_tracker *tracker =
3704 &priv->mfunc.master.res_tracker;
3705 struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
3706 struct res_common *r;
3707 struct res_common *tmp;
3708 int busy;
3709
3710 busy = 0;
3711 spin_lock_irq(mlx4_tlock(dev));
3712 list_for_each_entry_safe(r, tmp, rlist, list) {
3713 if (r->owner == slave) {
3714 if (!r->removing) {
3715 if (r->state == RES_ANY_BUSY) {
3716 if (print)
3717 mlx4_dbg(dev,
aa1ec3dd 3718 "%s id 0x%llx is busy\n",
3719 ResourceType(type),
3720 r->res_id);
3721 ++busy;
3722 } else {
3723 r->from_state = r->state;
3724 r->state = RES_ANY_BUSY;
3725 r->removing = 1;
3726 }
3727 }
3728 }
3729 }
3730 spin_unlock_irq(mlx4_tlock(dev));
3731
3732 return busy;
3733}
3734
3735static int move_all_busy(struct mlx4_dev *dev, int slave,
3736 enum mlx4_resource type)
3737{
3738 unsigned long begin;
3739 int busy;
3740
3741 begin = jiffies;
3742 do {
3743 busy = _move_all_busy(dev, slave, type, 0);
3744 if (time_after(jiffies, begin + 5 * HZ))
3745 break;
3746 if (busy)
3747 cond_resched();
3748 } while (busy);
3749
3750 if (busy)
3751 busy = _move_all_busy(dev, slave, type, 1);
3752
3753 return busy;
3754}
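/* Reclaim all QPs of a departing slave: detach any remaining multicast
 * attachments, then walk each QP back from HW to MAPPED to RESERVED,
 * dropping references and freeing the tracker entry.
 */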
3755static void rem_slave_qps(struct mlx4_dev *dev, int slave)
3756{
3757 struct mlx4_priv *priv = mlx4_priv(dev);
3758 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3759 struct list_head *qp_list =
3760 &tracker->slave_list[slave].res_list[RES_QP];
3761 struct res_qp *qp;
3762 struct res_qp *tmp;
3763 int state;
3764 u64 in_param;
3765 int qpn;
3766 int err;
3767
3768 err = move_all_busy(dev, slave, RES_QP);
3769 if (err)
3770 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy "
3771 "for slave %d\n", slave);
3772
3773 spin_lock_irq(mlx4_tlock(dev));
3774 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
3775 spin_unlock_irq(mlx4_tlock(dev));
3776 if (qp->com.owner == slave) {
3777 qpn = qp->com.res_id;
3778 detach_qp(dev, slave, qp);
3779 state = qp->com.from_state;
3780 while (state != 0) {
3781 switch (state) {
3782 case RES_QP_RESERVED:
3783 spin_lock_irq(mlx4_tlock(dev));
3784 rb_erase(&qp->com.node,
3785 &tracker->res_tree[RES_QP]);
3786 list_del(&qp->com.list);
3787 spin_unlock_irq(mlx4_tlock(dev));
3788 kfree(qp);
3789 state = 0;
3790 break;
3791 case RES_QP_MAPPED:
3792 if (!valid_reserved(dev, slave, qpn))
3793 __mlx4_qp_free_icm(dev, qpn);
3794 state = RES_QP_RESERVED;
3795 break;
3796 case RES_QP_HW:
3797 in_param = slave;
3798 err = mlx4_cmd(dev, in_param,
3799 qp->local_qpn, 2,
3800 MLX4_CMD_2RST_QP,
3801 MLX4_CMD_TIME_CLASS_A,
3802 MLX4_CMD_NATIVE);
3803 if (err)
3804 mlx4_dbg(dev, "rem_slave_qps: failed"
3805 " to move slave %d qpn %d to"
3806 " reset\n", slave,
3807 qp->local_qpn);
3808 atomic_dec(&qp->rcq->ref_count);
3809 atomic_dec(&qp->scq->ref_count);
3810 atomic_dec(&qp->mtt->ref_count);
3811 if (qp->srq)
3812 atomic_dec(&qp->srq->ref_count);
3813 state = RES_QP_MAPPED;
3814 break;
3815 default:
3816 state = 0;
3817 }
3818 }
3819 }
3820 spin_lock_irq(mlx4_tlock(dev));
3821 }
3822 spin_unlock_irq(mlx4_tlock(dev));
3823}
3824
3825static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
3826{
3827 struct mlx4_priv *priv = mlx4_priv(dev);
3828 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3829 struct list_head *srq_list =
3830 &tracker->slave_list[slave].res_list[RES_SRQ];
3831 struct res_srq *srq;
3832 struct res_srq *tmp;
3833 int state;
3834 u64 in_param;
3835 LIST_HEAD(tlist);
3836 int srqn;
3837 int err;
3838
3839 err = move_all_busy(dev, slave, RES_SRQ);
3840 if (err)
3841 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
3842 "busy for slave %d\n", slave);
3843
3844 spin_lock_irq(mlx4_tlock(dev));
3845 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
3846 spin_unlock_irq(mlx4_tlock(dev));
3847 if (srq->com.owner == slave) {
3848 srqn = srq->com.res_id;
3849 state = srq->com.from_state;
3850 while (state != 0) {
3851 switch (state) {
3852 case RES_SRQ_ALLOCATED:
3853 __mlx4_srq_free_icm(dev, srqn);
3854 spin_lock_irq(mlx4_tlock(dev));
3855 rb_erase(&srq->com.node,
3856 &tracker->res_tree[RES_SRQ]);
3857 list_del(&srq->com.list);
3858 spin_unlock_irq(mlx4_tlock(dev));
3859 kfree(srq);
3860 state = 0;
3861 break;
3862
3863 case RES_SRQ_HW:
3864 in_param = slave;
3865 err = mlx4_cmd(dev, in_param, srqn, 1,
3866 MLX4_CMD_HW2SW_SRQ,
3867 MLX4_CMD_TIME_CLASS_A,
3868 MLX4_CMD_NATIVE);
3869 if (err)
3870 mlx4_dbg(dev, "rem_slave_srqs: failed"
3871 " to move slave %d srq %d to"
3872 " SW ownership\n",
3873 slave, srqn);
3874
3875 atomic_dec(&srq->mtt->ref_count);
3876 if (srq->cq)
3877 atomic_dec(&srq->cq->ref_count);
3878 state = RES_SRQ_ALLOCATED;
3879 break;
3880
3881 default:
3882 state = 0;
3883 }
3884 }
3885 }
3886 spin_lock_irq(mlx4_tlock(dev));
3887 }
3888 spin_unlock_irq(mlx4_tlock(dev));
3889}
3890
3891static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
3892{
3893 struct mlx4_priv *priv = mlx4_priv(dev);
3894 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3895 struct list_head *cq_list =
3896 &tracker->slave_list[slave].res_list[RES_CQ];
3897 struct res_cq *cq;
3898 struct res_cq *tmp;
3899 int state;
3900 u64 in_param;
3901 LIST_HEAD(tlist);
3902 int cqn;
3903 int err;
3904
3905 err = move_all_busy(dev, slave, RES_CQ);
3906 if (err)
3907 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
3908 "busy for slave %d\n", slave);
3909
3910 spin_lock_irq(mlx4_tlock(dev));
3911 list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
3912 spin_unlock_irq(mlx4_tlock(dev));
3913 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
3914 cqn = cq->com.res_id;
3915 state = cq->com.from_state;
3916 while (state != 0) {
3917 switch (state) {
3918 case RES_CQ_ALLOCATED:
3919 __mlx4_cq_free_icm(dev, cqn);
3920 spin_lock_irq(mlx4_tlock(dev));
3921 rb_erase(&cq->com.node,
3922 &tracker->res_tree[RES_CQ]);
3923 list_del(&cq->com.list);
3924 spin_unlock_irq(mlx4_tlock(dev));
3925 kfree(cq);
3926 state = 0;
3927 break;
3928
3929 case RES_CQ_HW:
3930 in_param = slave;
3931 err = mlx4_cmd(dev, in_param, cqn, 1,
3932 MLX4_CMD_HW2SW_CQ,
3933 MLX4_CMD_TIME_CLASS_A,
3934 MLX4_CMD_NATIVE);
3935 if (err)
3936 mlx4_dbg(dev, "rem_slave_cqs: failed"
3937 " to move slave %d cq %d to"
3938 " SW ownership\n",
3939 slave, cqn);
3940 atomic_dec(&cq->mtt->ref_count);
3941 state = RES_CQ_ALLOCATED;
3942 break;
3943
3944 default:
3945 state = 0;
3946 }
3947 }
3948 }
3949 spin_lock_irq(mlx4_tlock(dev));
3950 }
3951 spin_unlock_irq(mlx4_tlock(dev));
3952}
3953
3954static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
3955{
3956 struct mlx4_priv *priv = mlx4_priv(dev);
3957 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3958 struct list_head *mpt_list =
3959 &tracker->slave_list[slave].res_list[RES_MPT];
3960 struct res_mpt *mpt;
3961 struct res_mpt *tmp;
3962 int state;
3963 u64 in_param;
3964 LIST_HEAD(tlist);
3965 int mptn;
3966 int err;
3967
3968 err = move_all_busy(dev, slave, RES_MPT);
3969 if (err)
3970 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
3971 "busy for slave %d\n", slave);
3972
3973 spin_lock_irq(mlx4_tlock(dev));
3974 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
3975 spin_unlock_irq(mlx4_tlock(dev));
3976 if (mpt->com.owner == slave) {
3977 mptn = mpt->com.res_id;
3978 state = mpt->com.from_state;
3979 while (state != 0) {
3980 switch (state) {
3981 case RES_MPT_RESERVED:
b20e519a 3982 __mlx4_mpt_release(dev, mpt->key);
c82e9aa0 3983 spin_lock_irq(mlx4_tlock(dev));
3984 rb_erase(&mpt->com.node,
3985 &tracker->res_tree[RES_MPT]);
3986 list_del(&mpt->com.list);
3987 spin_unlock_irq(mlx4_tlock(dev));
3988 kfree(mpt);
3989 state = 0;
3990 break;
3991
3992 case RES_MPT_MAPPED:
b20e519a 3993 __mlx4_mpt_free_icm(dev, mpt->key);
3994 state = RES_MPT_RESERVED;
3995 break;
3996
3997 case RES_MPT_HW:
3998 in_param = slave;
3999 err = mlx4_cmd(dev, in_param, mptn, 0,
4000 MLX4_CMD_HW2SW_MPT,
4001 MLX4_CMD_TIME_CLASS_A,
4002 MLX4_CMD_NATIVE);
4003 if (err)
4004 mlx4_dbg(dev, "rem_slave_mrs: failed"
4005 " to move slave %d mpt %d to"
4006 " SW ownership\n",
4007 slave, mptn);
4008 if (mpt->mtt)
4009 atomic_dec(&mpt->mtt->ref_count);
4010 state = RES_MPT_MAPPED;
4011 break;
4012 default:
4013 state = 0;
4014 }
4015 }
4016 }
4017 spin_lock_irq(mlx4_tlock(dev));
4018 }
4019 spin_unlock_irq(mlx4_tlock(dev));
4020}
4021
4022static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4023{
4024 struct mlx4_priv *priv = mlx4_priv(dev);
4025 struct mlx4_resource_tracker *tracker =
4026 &priv->mfunc.master.res_tracker;
4027 struct list_head *mtt_list =
4028 &tracker->slave_list[slave].res_list[RES_MTT];
4029 struct res_mtt *mtt;
4030 struct res_mtt *tmp;
4031 int state;
4032 LIST_HEAD(tlist);
4033 int base;
4034 int err;
4035
4036 err = move_all_busy(dev, slave, RES_MTT);
4037 if (err)
4038 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
4039 "busy for slave %d\n", slave);
4040
4041 spin_lock_irq(mlx4_tlock(dev));
4042 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
4043 spin_unlock_irq(mlx4_tlock(dev));
4044 if (mtt->com.owner == slave) {
4045 base = mtt->com.res_id;
4046 state = mtt->com.from_state;
4047 while (state != 0) {
4048 switch (state) {
4049 case RES_MTT_ALLOCATED:
4050 __mlx4_free_mtt_range(dev, base,
4051 mtt->order);
4052 spin_lock_irq(mlx4_tlock(dev));
4053 rb_erase(&mtt->com.node,
4054 &tracker->res_tree[RES_MTT]);
4055 list_del(&mtt->com.list);
4056 spin_unlock_irq(mlx4_tlock(dev));
4057 kfree(mtt);
4058 state = 0;
4059 break;
4060
4061 default:
4062 state = 0;
4063 }
4064 }
4065 }
4066 spin_lock_irq(mlx4_tlock(dev));
4067 }
4068 spin_unlock_irq(mlx4_tlock(dev));
4069}
4070
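/* Detach and free any flow steering rules still registered by the slave. */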
4071static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
4072{
4073 struct mlx4_priv *priv = mlx4_priv(dev);
4074 struct mlx4_resource_tracker *tracker =
4075 &priv->mfunc.master.res_tracker;
4076 struct list_head *fs_rule_list =
4077 &tracker->slave_list[slave].res_list[RES_FS_RULE];
4078 struct res_fs_rule *fs_rule;
4079 struct res_fs_rule *tmp;
4080 int state;
4081 u64 base;
4082 int err;
4083
4084 err = move_all_busy(dev, slave, RES_FS_RULE);
4085 if (err)
4086 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
4087 slave);
4088
4089 spin_lock_irq(mlx4_tlock(dev));
4090 list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
4091 spin_unlock_irq(mlx4_tlock(dev));
4092 if (fs_rule->com.owner == slave) {
4093 base = fs_rule->com.res_id;
4094 state = fs_rule->com.from_state;
4095 while (state != 0) {
4096 switch (state) {
4097 case RES_FS_RULE_ALLOCATED:
4098 /* detach rule */
4099 err = mlx4_cmd(dev, base, 0, 0,
4100 MLX4_QP_FLOW_STEERING_DETACH,
4101 MLX4_CMD_TIME_CLASS_A,
4102 MLX4_CMD_NATIVE);
4103
4104 spin_lock_irq(mlx4_tlock(dev));
4105 rb_erase(&fs_rule->com.node,
4106 &tracker->res_tree[RES_FS_RULE]);
4107 list_del(&fs_rule->com.list);
4108 spin_unlock_irq(mlx4_tlock(dev));
4109 kfree(fs_rule);
4110 state = 0;
4111 break;
4112
4113 default:
4114 state = 0;
4115 }
4116 }
4117 }
4118 spin_lock_irq(mlx4_tlock(dev));
4119 }
4120 spin_unlock_irq(mlx4_tlock(dev));
4121}
4122
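/* Return any EQs owned by @slave to SW ownership (HW2SW_EQ), drop their MTT
 * references and remove them from the resource tracker.
 */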
4123static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
4124{
4125 struct mlx4_priv *priv = mlx4_priv(dev);
4126 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4127 struct list_head *eq_list =
4128 &tracker->slave_list[slave].res_list[RES_EQ];
4129 struct res_eq *eq;
4130 struct res_eq *tmp;
4131 int err;
4132 int state;
4133 LIST_HEAD(tlist);
4134 int eqn;
4135 struct mlx4_cmd_mailbox *mailbox;
4136
4137 err = move_all_busy(dev, slave, RES_EQ);
4138 if (err)
4139 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
4140 "busy for slave %d\n", slave);
4141
4142 spin_lock_irq(mlx4_tlock(dev));
4143 list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
4144 spin_unlock_irq(mlx4_tlock(dev));
4145 if (eq->com.owner == slave) {
4146 eqn = eq->com.res_id;
4147 state = eq->com.from_state;
4148 while (state != 0) {
4149 switch (state) {
4150 case RES_EQ_RESERVED:
4151 spin_lock_irq(mlx4_tlock(dev));
4152 rb_erase(&eq->com.node,
4153 &tracker->res_tree[RES_EQ]);
4154 list_del(&eq->com.list);
4155 spin_unlock_irq(mlx4_tlock(dev));
4156 kfree(eq);
4157 state = 0;
4158 break;
4159
4160 case RES_EQ_HW:
4161 mailbox = mlx4_alloc_cmd_mailbox(dev);
4162 if (IS_ERR(mailbox)) {
4163 cond_resched();
4164 continue;
4165 }
4166 err = mlx4_cmd_box(dev, slave, 0,
4167 eqn & 0xff, 0,
4168 MLX4_CMD_HW2SW_EQ,
4169 MLX4_CMD_TIME_CLASS_A,
4170 MLX4_CMD_NATIVE);
4171 if (err)
4172 mlx4_dbg(dev, "rem_slave_eqs: failed"
4173 " to move slave %d eq %d to"
4174 " SW ownership\n", slave, eqn);
4175 mlx4_free_cmd_mailbox(dev, mailbox);
4176 atomic_dec(&eq->mtt->ref_count);
4177 state = RES_EQ_RESERVED;
4178 break;
4179
4180 default:
4181 state = 0;
4182 }
4183 }
4184 }
4185 spin_lock_irq(mlx4_tlock(dev));
4186 }
4187 spin_unlock_irq(mlx4_tlock(dev));
4188}
4189
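/* Free all counters still allocated to @slave and remove them from the
 * resource tracker.
 */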
4190static void rem_slave_counters(struct mlx4_dev *dev, int slave)
4191{
4192 struct mlx4_priv *priv = mlx4_priv(dev);
4193 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4194 struct list_head *counter_list =
4195 &tracker->slave_list[slave].res_list[RES_COUNTER];
4196 struct res_counter *counter;
4197 struct res_counter *tmp;
4198 int err;
4199 int index;
4200
4201 err = move_all_busy(dev, slave, RES_COUNTER);
4202 if (err)
4203 mlx4_warn(dev, "rem_slave_counters: Could not move all counters to "
4204 "busy for slave %d\n", slave);
4205
4206 spin_lock_irq(mlx4_tlock(dev));
4207 list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
4208 if (counter->com.owner == slave) {
4209 index = counter->com.res_id;
4210 rb_erase(&counter->com.node,
4211 &tracker->res_tree[RES_COUNTER]);
4212 list_del(&counter->com.list);
4213 kfree(counter);
4214 __mlx4_counter_free(dev, index);
4215 }
4216 }
4217 spin_unlock_irq(mlx4_tlock(dev));
4218}
4219
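/* Free all XRC domains still allocated to @slave and remove them from the
 * resource tracker.
 */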
4220static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
4221{
4222 struct mlx4_priv *priv = mlx4_priv(dev);
4223 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4224 struct list_head *xrcdn_list =
4225 &tracker->slave_list[slave].res_list[RES_XRCD];
4226 struct res_xrcdn *xrcd;
4227 struct res_xrcdn *tmp;
4228 int err;
4229 int xrcdn;
4230
4231 err = move_all_busy(dev, slave, RES_XRCD);
4232 if (err)
4233 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to "
4234 "busy for slave %d\n", slave);
4235
4236 spin_lock_irq(mlx4_tlock(dev));
4237 list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
4238 if (xrcd->com.owner == slave) {
4239 xrcdn = xrcd->com.res_id;
4240 rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
4241 list_del(&xrcd->com.list);
4242 kfree(xrcd);
4243 __mlx4_xrcd_free(dev, xrcdn);
4244 }
4245 }
4246 spin_unlock_irq(mlx4_tlock(dev));
4247}
4248
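/* Release every resource type still held by @slave, under the slave's
 * resource-list mutex, in an order that respects inter-resource dependencies
 * (e.g. QPs, SRQs and CQs are destroyed before the EQs and MTTs they may
 * reference).
 */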
4249void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
4250{
4251 struct mlx4_priv *priv = mlx4_priv(dev);
4252
4253 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
4254 rem_slave_vlans(dev, slave);
4255 rem_slave_macs(dev, slave);
4256 rem_slave_fs_rule(dev, slave);
4257 rem_slave_qps(dev, slave);
4258 rem_slave_srqs(dev, slave);
4259 rem_slave_cqs(dev, slave);
4260 rem_slave_mrs(dev, slave);
4261 rem_slave_eqs(dev, slave);
4262 rem_slave_mtts(dev, slave);
4263 rem_slave_counters(dev, slave);
4264 rem_slave_xrcdns(dev, slave);
4265 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
4266}
4267
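/* Work handler for an administrator-initiated VF VLAN/QoS change: issue
 * UPDATE_QP on every eligible QP owned by the slave on the given port, then
 * unregister the previous VLAN index if no update failed.
 */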
4268void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
4269{
4270 struct mlx4_vf_immed_vlan_work *work =
4271 container_of(_work, struct mlx4_vf_immed_vlan_work, work);
4272 struct mlx4_cmd_mailbox *mailbox;
4273 struct mlx4_update_qp_context *upd_context;
4274 struct mlx4_dev *dev = &work->priv->dev;
4275 struct mlx4_resource_tracker *tracker =
4276 &work->priv->mfunc.master.res_tracker;
4277 struct list_head *qp_list =
4278 &tracker->slave_list[work->slave].res_list[RES_QP];
4279 struct res_qp *qp;
4280 struct res_qp *tmp;
4281 u64 qp_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
4282 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
4283 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
4284 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
4285 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
4286 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED) |
4287 (1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
4288 (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
4289
4290 int err;
4291 int port, errors = 0;
4292 u8 vlan_control;
4293
4294 if (mlx4_is_slave(dev)) {
4295 mlx4_warn(dev, "Trying to update-qp in slave %d\n",
4296 work->slave);
4297 goto out;
4298 }
4299
4300 mailbox = mlx4_alloc_cmd_mailbox(dev);
4301 if (IS_ERR(mailbox))
4302 goto out;
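/* Select vlan_control for the QP primary path: block all traffic when the
 * link is administratively disabled, block only tagged traffic when no VLAN
 * is configured (vlan_id 0), otherwise block guest-tagged TX and
 * untagged/prio-tagged RX so only the administrative VLAN is used.
 */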
4303 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
4304 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4305 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
4306 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
4307 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4308 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
4309 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
4310 else if (!work->vlan_id)
4311 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4312 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
4313 else
4314 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4315 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4316 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
4317
4318 upd_context = mailbox->buf;
4319 upd_context->primary_addr_path_mask = cpu_to_be64(qp_mask);
4320 upd_context->qp_context.pri_path.vlan_control = vlan_control;
4321 upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
4322
4323 spin_lock_irq(mlx4_tlock(dev));
4324 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4325 spin_unlock_irq(mlx4_tlock(dev));
4326 if (qp->com.owner == work->slave) {
4327 if (qp->com.from_state != RES_QP_HW ||
4328 !qp->sched_queue || /* no INIT2RTR trans yet */
4329 mlx4_is_qp_reserved(dev, qp->local_qpn) ||
4330 qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
4331 spin_lock_irq(mlx4_tlock(dev));
4332 continue;
4333 }
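/* bit 6 of sched_queue encodes the port this QP is attached to */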
4334 port = (qp->sched_queue >> 6 & 1) + 1;
4335 if (port != work->port) {
4336 spin_lock_irq(mlx4_tlock(dev));
4337 continue;
4338 }
4339 upd_context->qp_context.pri_path.sched_queue =
4340 qp->sched_queue & 0xC7;
4341 upd_context->qp_context.pri_path.sched_queue |=
4342 ((work->qos & 0x7) << 3);
4343
4344 err = mlx4_cmd(dev, mailbox->dma,
4345 qp->local_qpn & 0xffffff,
4346 0, MLX4_CMD_UPDATE_QP,
4347 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
4348 if (err) {
4349 mlx4_info(dev, "UPDATE_QP failed for slave %d, "
4350 "port %d, qpn %d (%d)\n",
4351 work->slave, port, qp->local_qpn,
4352 err);
4353 errors++;
4354 }
4355 }
4356 spin_lock_irq(mlx4_tlock(dev));
4357 }
4358 spin_unlock_irq(mlx4_tlock(dev));
4359 mlx4_free_cmd_mailbox(dev, mailbox);
4360
4361 if (errors)
4362 mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
4363 errors, work->slave, work->port);
4364
4365 /* Unregister the previous vlan_id, if one was set, provided no
4366 * errors occurred while updating the QPs.
4367 */
4368 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
4369 NO_INDX != work->orig_vlan_ix)
4370 __mlx4_unregister_vlan(&work->priv->dev, work->port,
4371 work->orig_vlan_id);
4372out:
4373 kfree(work);
4374 return;
4375}