net/mlx4: Add set VF mac address support
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
4 * All rights reserved.
5 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
35
36#include <linux/sched.h>
37#include <linux/pci.h>
38#include <linux/errno.h>
39#include <linux/kernel.h>
40#include <linux/io.h>
 41#include <linux/slab.h>
42#include <linux/mlx4/cmd.h>
43#include <linux/mlx4/qp.h>
 44#include <linux/if_ether.h>
 45#include <linux/etherdevice.h>
46
47#include "mlx4.h"
48#include "fw.h"
49
50#define MLX4_MAC_VALID (1ull << 63)
51
52struct mac_res {
53 struct list_head list;
54 u64 mac;
55 u8 port;
56};
57
58struct res_common {
59 struct list_head list;
 60 struct rb_node node;
 61 u64 res_id;
62 int owner;
63 int state;
64 int from_state;
65 int to_state;
66 int removing;
67};
68
69enum {
70 RES_ANY_BUSY = 1
71};
72
73struct res_gid {
74 struct list_head list;
75 u8 gid[16];
76 enum mlx4_protocol prot;
 77 enum mlx4_steer_type steer;
 78 u64 reg_id;
79};
80
81enum res_qp_states {
82 RES_QP_BUSY = RES_ANY_BUSY,
83
84 /* QP number was allocated */
85 RES_QP_RESERVED,
86
87 /* ICM memory for QP context was mapped */
88 RES_QP_MAPPED,
89
90 /* QP is in hw ownership */
91 RES_QP_HW
92};
93
94struct res_qp {
95 struct res_common com;
96 struct res_mtt *mtt;
97 struct res_cq *rcq;
98 struct res_cq *scq;
99 struct res_srq *srq;
100 struct list_head mcg_list;
101 spinlock_t mcg_spl;
102 int local_qpn;
 103 atomic_t ref_count;
104};
105
106enum res_mtt_states {
107 RES_MTT_BUSY = RES_ANY_BUSY,
108 RES_MTT_ALLOCATED,
109};
110
111static inline const char *mtt_states_str(enum res_mtt_states state)
112{
113 switch (state) {
114 case RES_MTT_BUSY: return "RES_MTT_BUSY";
115 case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
116 default: return "Unknown";
117 }
118}
119
120struct res_mtt {
121 struct res_common com;
122 int order;
123 atomic_t ref_count;
124};
125
126enum res_mpt_states {
127 RES_MPT_BUSY = RES_ANY_BUSY,
128 RES_MPT_RESERVED,
129 RES_MPT_MAPPED,
130 RES_MPT_HW,
131};
132
133struct res_mpt {
134 struct res_common com;
135 struct res_mtt *mtt;
136 int key;
137};
138
139enum res_eq_states {
140 RES_EQ_BUSY = RES_ANY_BUSY,
141 RES_EQ_RESERVED,
142 RES_EQ_HW,
143};
144
145struct res_eq {
146 struct res_common com;
147 struct res_mtt *mtt;
148};
149
150enum res_cq_states {
151 RES_CQ_BUSY = RES_ANY_BUSY,
152 RES_CQ_ALLOCATED,
153 RES_CQ_HW,
154};
155
156struct res_cq {
157 struct res_common com;
158 struct res_mtt *mtt;
159 atomic_t ref_count;
160};
161
162enum res_srq_states {
163 RES_SRQ_BUSY = RES_ANY_BUSY,
164 RES_SRQ_ALLOCATED,
165 RES_SRQ_HW,
166};
167
168struct res_srq {
169 struct res_common com;
170 struct res_mtt *mtt;
171 struct res_cq *cq;
172 atomic_t ref_count;
173};
174
175enum res_counter_states {
176 RES_COUNTER_BUSY = RES_ANY_BUSY,
177 RES_COUNTER_ALLOCATED,
178};
179
180struct res_counter {
181 struct res_common com;
182 int port;
183};
184
185enum res_xrcdn_states {
186 RES_XRCD_BUSY = RES_ANY_BUSY,
187 RES_XRCD_ALLOCATED,
188};
189
190struct res_xrcdn {
191 struct res_common com;
192 int port;
193};
194
195enum res_fs_rule_states {
196 RES_FS_RULE_BUSY = RES_ANY_BUSY,
197 RES_FS_RULE_ALLOCATED,
198};
199
200struct res_fs_rule {
201 struct res_common com;
 202 int qpn;
203};
204
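/* Tracked resources are kept in one rb-tree per resource type, keyed by
 * res_common.res_id, so the master can map a resource id back to the slave
 * that owns it.  Lookup is a plain binary search over that tree; callers
 * take mlx4_tlock() around it.
 */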
205static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
206{
207 struct rb_node *node = root->rb_node;
208
209 while (node) {
210 struct res_common *res = container_of(node, struct res_common,
211 node);
212
213 if (res_id < res->res_id)
214 node = node->rb_left;
215 else if (res_id > res->res_id)
216 node = node->rb_right;
217 else
218 return res;
219 }
220 return NULL;
221}
222
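/* Link a new tracker entry into the per-type rb-tree; returns -EEXIST if an
 * entry with the same res_id is already present.  Like the lookup above,
 * this runs under mlx4_tlock().
 */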
223static int res_tracker_insert(struct rb_root *root, struct res_common *res)
224{
225 struct rb_node **new = &(root->rb_node), *parent = NULL;
226
227 /* Figure out where to put new node */
228 while (*new) {
229 struct res_common *this = container_of(*new, struct res_common,
230 node);
231
232 parent = *new;
233 if (res->res_id < this->res_id)
234 new = &((*new)->rb_left);
235 else if (res->res_id > this->res_id)
236 new = &((*new)->rb_right);
237 else
238 return -EEXIST;
239 }
240
241 /* Add new node and rebalance tree. */
242 rb_link_node(&res->node, parent, new);
243 rb_insert_color(&res->node, root);
244
245 return 0;
246}
247
248enum qp_transition {
249 QP_TRANS_INIT2RTR,
250 QP_TRANS_RTR2RTS,
251 QP_TRANS_RTS2RTS,
252 QP_TRANS_SQERR2RTS,
253 QP_TRANS_SQD2SQD,
254 QP_TRANS_SQD2RTS
255};
256
257/* For Debug uses */
258static const char *ResourceType(enum mlx4_resource rt)
259{
260 switch (rt) {
261 case RES_QP: return "RES_QP";
262 case RES_CQ: return "RES_CQ";
263 case RES_SRQ: return "RES_SRQ";
264 case RES_MPT: return "RES_MPT";
265 case RES_MTT: return "RES_MTT";
266 case RES_MAC: return "RES_MAC";
267 case RES_EQ: return "RES_EQ";
268 case RES_COUNTER: return "RES_COUNTER";
 269 case RES_FS_RULE: return "RES_FS_RULE";
 270 case RES_XRCD: return "RES_XRCD";
271 default: return "Unknown resource type !!!";
272 };
273}
274
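/* Set up the tracker itself: one resource list and mutex per slave plus an
 * empty rb-tree per resource type.  This runs on the master side when
 * multi-function (SR-IOV) mode is brought up.
 */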
275int mlx4_init_resource_tracker(struct mlx4_dev *dev)
276{
277 struct mlx4_priv *priv = mlx4_priv(dev);
278 int i;
279 int t;
280
281 priv->mfunc.master.res_tracker.slave_list =
282 kzalloc(dev->num_slaves * sizeof(struct slave_list),
283 GFP_KERNEL);
284 if (!priv->mfunc.master.res_tracker.slave_list)
285 return -ENOMEM;
286
287 for (i = 0 ; i < dev->num_slaves; i++) {
288 for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
289 INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
290 slave_list[i].res_list[t]);
291 mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
292 }
293
294 mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
295 dev->num_slaves);
296 for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
 297 priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
298
299 spin_lock_init(&priv->mfunc.master.res_tracker.lock);
300 return 0 ;
301}
302
303void mlx4_free_resource_tracker(struct mlx4_dev *dev,
304 enum mlx4_res_tracker_free_type type)
305{
306 struct mlx4_priv *priv = mlx4_priv(dev);
307 int i;
308
309 if (priv->mfunc.master.res_tracker.slave_list) {
310 if (type != RES_TR_FREE_STRUCTS_ONLY)
311 for (i = 0 ; i < dev->num_slaves; i++)
312 if (type == RES_TR_FREE_ALL ||
313 dev->caps.function != i)
314 mlx4_delete_all_resources_for_slave(dev, i);
315
316 if (type != RES_TR_FREE_SLAVES_ONLY) {
317 kfree(priv->mfunc.master.res_tracker.slave_list);
318 priv->mfunc.master.res_tracker.slave_list = NULL;
319 }
320 }
321}
322
323static void update_pkey_index(struct mlx4_dev *dev, int slave,
324 struct mlx4_cmd_mailbox *inbox)
 325{
326 u8 sched = *(u8 *)(inbox->buf + 64);
327 u8 orig_index = *(u8 *)(inbox->buf + 35);
328 u8 new_index;
329 struct mlx4_priv *priv = mlx4_priv(dev);
330 int port;
331
332 port = (sched >> 6 & 1) + 1;
333
334 new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
335 *(u8 *)(inbox->buf + 35) = new_index;
336}
337
338static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
339 u8 slave)
340{
341 struct mlx4_qp_context *qp_ctx = inbox->buf + 8;
342 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf);
343 u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
344
345 if (MLX4_QP_ST_UD == ts)
346 qp_ctx->pri_path.mgid_index = 0x80 | slave;
347
348 if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) {
349 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
350 qp_ctx->pri_path.mgid_index = slave & 0x7F;
351 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
352 qp_ctx->alt_path.mgid_index = slave & 0x7F;
353 }
354}
355
356static int mpt_mask(struct mlx4_dev *dev)
357{
358 return dev->caps.num_mpts - 1;
359}
360
 361static void *find_res(struct mlx4_dev *dev, u64 res_id,
362 enum mlx4_resource type)
363{
364 struct mlx4_priv *priv = mlx4_priv(dev);
365
366 return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
367 res_id);
368}
369
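/* Look up a resource, check that @slave owns it and mark it busy so no other
 * flow can move or delete it until put_res() is called.  A minimal usage
 * sketch, following the callers later in this file:
 *
 *	err = get_res(dev, slave, id, RES_MPT, &mpt);
 *	if (!err) {
 *		... use mpt while it is held busy ...
 *		put_res(dev, slave, id, RES_MPT);
 *	}
 */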
 370static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
371 enum mlx4_resource type,
372 void *res)
373{
374 struct res_common *r;
375 int err = 0;
376
377 spin_lock_irq(mlx4_tlock(dev));
378 r = find_res(dev, res_id, type);
379 if (!r) {
380 err = -ENONET;
381 goto exit;
382 }
383
384 if (r->state == RES_ANY_BUSY) {
385 err = -EBUSY;
386 goto exit;
387 }
388
389 if (r->owner != slave) {
390 err = -EPERM;
391 goto exit;
392 }
393
394 r->from_state = r->state;
395 r->state = RES_ANY_BUSY;
396
397 if (res)
398 *((struct res_common **)res) = r;
399
400exit:
401 spin_unlock_irq(mlx4_tlock(dev));
402 return err;
403}
404
405int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
406 enum mlx4_resource type,
 407 u64 res_id, int *slave)
408{
409
410 struct res_common *r;
411 int err = -ENOENT;
412 int id = res_id;
413
414 if (type == RES_QP)
415 id &= 0x7fffff;
 416 spin_lock(mlx4_tlock(dev));
417
418 r = find_res(dev, id, type);
419 if (r) {
420 *slave = r->owner;
421 err = 0;
422 }
 423 spin_unlock(mlx4_tlock(dev));
424
425 return err;
426}
427
 428static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
429 enum mlx4_resource type)
430{
431 struct res_common *r;
432
433 spin_lock_irq(mlx4_tlock(dev));
434 r = find_res(dev, res_id, type);
435 if (r)
436 r->state = r->from_state;
437 spin_unlock_irq(mlx4_tlock(dev));
438}
439
440static struct res_common *alloc_qp_tr(int id)
441{
442 struct res_qp *ret;
443
444 ret = kzalloc(sizeof *ret, GFP_KERNEL);
445 if (!ret)
446 return NULL;
447
448 ret->com.res_id = id;
449 ret->com.state = RES_QP_RESERVED;
 450 ret->local_qpn = id;
451 INIT_LIST_HEAD(&ret->mcg_list);
452 spin_lock_init(&ret->mcg_spl);
 453 atomic_set(&ret->ref_count, 0);
454
455 return &ret->com;
456}
457
458static struct res_common *alloc_mtt_tr(int id, int order)
459{
460 struct res_mtt *ret;
461
462 ret = kzalloc(sizeof *ret, GFP_KERNEL);
463 if (!ret)
464 return NULL;
465
466 ret->com.res_id = id;
467 ret->order = order;
468 ret->com.state = RES_MTT_ALLOCATED;
469 atomic_set(&ret->ref_count, 0);
470
471 return &ret->com;
472}
473
474static struct res_common *alloc_mpt_tr(int id, int key)
475{
476 struct res_mpt *ret;
477
478 ret = kzalloc(sizeof *ret, GFP_KERNEL);
479 if (!ret)
480 return NULL;
481
482 ret->com.res_id = id;
483 ret->com.state = RES_MPT_RESERVED;
484 ret->key = key;
485
486 return &ret->com;
487}
488
489static struct res_common *alloc_eq_tr(int id)
490{
491 struct res_eq *ret;
492
493 ret = kzalloc(sizeof *ret, GFP_KERNEL);
494 if (!ret)
495 return NULL;
496
497 ret->com.res_id = id;
498 ret->com.state = RES_EQ_RESERVED;
499
500 return &ret->com;
501}
502
503static struct res_common *alloc_cq_tr(int id)
504{
505 struct res_cq *ret;
506
507 ret = kzalloc(sizeof *ret, GFP_KERNEL);
508 if (!ret)
509 return NULL;
510
511 ret->com.res_id = id;
512 ret->com.state = RES_CQ_ALLOCATED;
513 atomic_set(&ret->ref_count, 0);
514
515 return &ret->com;
516}
517
518static struct res_common *alloc_srq_tr(int id)
519{
520 struct res_srq *ret;
521
522 ret = kzalloc(sizeof *ret, GFP_KERNEL);
523 if (!ret)
524 return NULL;
525
526 ret->com.res_id = id;
527 ret->com.state = RES_SRQ_ALLOCATED;
528 atomic_set(&ret->ref_count, 0);
529
530 return &ret->com;
531}
532
533static struct res_common *alloc_counter_tr(int id)
534{
535 struct res_counter *ret;
536
537 ret = kzalloc(sizeof *ret, GFP_KERNEL);
538 if (!ret)
539 return NULL;
540
541 ret->com.res_id = id;
542 ret->com.state = RES_COUNTER_ALLOCATED;
543
544 return &ret->com;
545}
546
547static struct res_common *alloc_xrcdn_tr(int id)
548{
549 struct res_xrcdn *ret;
550
551 ret = kzalloc(sizeof *ret, GFP_KERNEL);
552 if (!ret)
553 return NULL;
554
555 ret->com.res_id = id;
556 ret->com.state = RES_XRCD_ALLOCATED;
557
558 return &ret->com;
559}
560
 561static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
562{
563 struct res_fs_rule *ret;
564
565 ret = kzalloc(sizeof *ret, GFP_KERNEL);
566 if (!ret)
567 return NULL;
568
569 ret->com.res_id = id;
570 ret->com.state = RES_FS_RULE_ALLOCATED;
 571 ret->qpn = qpn;
572 return &ret->com;
573}
574
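/* Allocate a tracker entry of the requested type.  @extra is type specific:
 * the MPT key for RES_MPT, the MTT order for RES_MTT and the QP number for
 * RES_FS_RULE; the remaining types ignore it.
 */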
 575static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
576 int extra)
577{
578 struct res_common *ret;
579
580 switch (type) {
581 case RES_QP:
582 ret = alloc_qp_tr(id);
583 break;
584 case RES_MPT:
585 ret = alloc_mpt_tr(id, extra);
586 break;
587 case RES_MTT:
588 ret = alloc_mtt_tr(id, extra);
589 break;
590 case RES_EQ:
591 ret = alloc_eq_tr(id);
592 break;
593 case RES_CQ:
594 ret = alloc_cq_tr(id);
595 break;
596 case RES_SRQ:
597 ret = alloc_srq_tr(id);
598 break;
599 case RES_MAC:
600 printk(KERN_ERR "implementation missing\n");
601 return NULL;
602 case RES_COUNTER:
603 ret = alloc_counter_tr(id);
604 break;
605 case RES_XRCD:
606 ret = alloc_xrcdn_tr(id);
607 break;
 608 case RES_FS_RULE:
 609 ret = alloc_fs_rule_tr(id, extra);
 610 break;
611 default:
612 return NULL;
613 }
614 if (ret)
615 ret->owner = slave;
616
617 return ret;
618}
619
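/* Record @count consecutive resource ids starting at @base as owned by
 * @slave: allocate one tracker entry per id, insert each into the per-type
 * rb-tree and link it on the slave's list.  If any id is already tracked
 * the whole range is rolled back.
 */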
 620static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
621 enum mlx4_resource type, int extra)
622{
623 int i;
624 int err;
625 struct mlx4_priv *priv = mlx4_priv(dev);
626 struct res_common **res_arr;
627 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
 628 struct rb_root *root = &tracker->res_tree[type];
629
630 res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
631 if (!res_arr)
632 return -ENOMEM;
633
634 for (i = 0; i < count; ++i) {
635 res_arr[i] = alloc_tr(base + i, type, slave, extra);
636 if (!res_arr[i]) {
637 for (--i; i >= 0; --i)
638 kfree(res_arr[i]);
639
640 kfree(res_arr);
641 return -ENOMEM;
642 }
643 }
644
645 spin_lock_irq(mlx4_tlock(dev));
646 for (i = 0; i < count; ++i) {
647 if (find_res(dev, base + i, type)) {
648 err = -EEXIST;
649 goto undo;
650 }
 651 err = res_tracker_insert(root, res_arr[i]);
652 if (err)
653 goto undo;
654 list_add_tail(&res_arr[i]->list,
655 &tracker->slave_list[slave].res_list[type]);
656 }
657 spin_unlock_irq(mlx4_tlock(dev));
658 kfree(res_arr);
659
660 return 0;
661
662undo:
663 for (--i; i >= base; --i)
 664 rb_erase(&res_arr[i]->node, root);
665
666 spin_unlock_irq(mlx4_tlock(dev));
667
668 for (i = 0; i < count; ++i)
669 kfree(res_arr[i]);
670
671 kfree(res_arr);
672
673 return err;
674}
675
676static int remove_qp_ok(struct res_qp *res)
677{
678 if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
679 !list_empty(&res->mcg_list)) {
680 pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
681 res->com.state, atomic_read(&res->ref_count));
 682 return -EBUSY;
 683 } else if (res->com.state != RES_QP_RESERVED) {
 684 return -EPERM;
 685 }
686
687 return 0;
688}
689
690static int remove_mtt_ok(struct res_mtt *res, int order)
691{
692 if (res->com.state == RES_MTT_BUSY ||
693 atomic_read(&res->ref_count)) {
694 printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
695 __func__, __LINE__,
696 mtt_states_str(res->com.state),
697 atomic_read(&res->ref_count));
698 return -EBUSY;
699 } else if (res->com.state != RES_MTT_ALLOCATED)
700 return -EPERM;
701 else if (res->order != order)
702 return -EINVAL;
703
704 return 0;
705}
706
707static int remove_mpt_ok(struct res_mpt *res)
708{
709 if (res->com.state == RES_MPT_BUSY)
710 return -EBUSY;
711 else if (res->com.state != RES_MPT_RESERVED)
712 return -EPERM;
713
714 return 0;
715}
716
717static int remove_eq_ok(struct res_eq *res)
718{
719 if (res->com.state == RES_MPT_BUSY)
720 return -EBUSY;
721 else if (res->com.state != RES_MPT_RESERVED)
722 return -EPERM;
723
724 return 0;
725}
726
727static int remove_counter_ok(struct res_counter *res)
728{
729 if (res->com.state == RES_COUNTER_BUSY)
730 return -EBUSY;
731 else if (res->com.state != RES_COUNTER_ALLOCATED)
732 return -EPERM;
733
734 return 0;
735}
736
737static int remove_xrcdn_ok(struct res_xrcdn *res)
738{
739 if (res->com.state == RES_XRCD_BUSY)
740 return -EBUSY;
741 else if (res->com.state != RES_XRCD_ALLOCATED)
742 return -EPERM;
743
744 return 0;
745}
746
747static int remove_fs_rule_ok(struct res_fs_rule *res)
748{
749 if (res->com.state == RES_FS_RULE_BUSY)
750 return -EBUSY;
751 else if (res->com.state != RES_FS_RULE_ALLOCATED)
752 return -EPERM;
753
754 return 0;
755}
756
757static int remove_cq_ok(struct res_cq *res)
758{
759 if (res->com.state == RES_CQ_BUSY)
760 return -EBUSY;
761 else if (res->com.state != RES_CQ_ALLOCATED)
762 return -EPERM;
763
764 return 0;
765}
766
767static int remove_srq_ok(struct res_srq *res)
768{
769 if (res->com.state == RES_SRQ_BUSY)
770 return -EBUSY;
771 else if (res->com.state != RES_SRQ_ALLOCATED)
772 return -EPERM;
773
774 return 0;
775}
776
777static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
778{
779 switch (type) {
780 case RES_QP:
781 return remove_qp_ok((struct res_qp *)res);
782 case RES_CQ:
783 return remove_cq_ok((struct res_cq *)res);
784 case RES_SRQ:
785 return remove_srq_ok((struct res_srq *)res);
786 case RES_MPT:
787 return remove_mpt_ok((struct res_mpt *)res);
788 case RES_MTT:
789 return remove_mtt_ok((struct res_mtt *)res, extra);
790 case RES_MAC:
791 return -ENOSYS;
792 case RES_EQ:
793 return remove_eq_ok((struct res_eq *)res);
794 case RES_COUNTER:
795 return remove_counter_ok((struct res_counter *)res);
796 case RES_XRCD:
797 return remove_xrcdn_ok((struct res_xrcdn *)res);
798 case RES_FS_RULE:
799 return remove_fs_rule_ok((struct res_fs_rule *)res);
800 default:
801 return -EINVAL;
802 }
803}
804
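/* Reverse of add_res_range(): verify that every id in the range is tracked,
 * owned by @slave and in a state that allows removal, then erase the whole
 * range from the rb-tree and the slave's list.
 */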
 805static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
806 enum mlx4_resource type, int extra)
807{
 808 u64 i;
809 int err;
810 struct mlx4_priv *priv = mlx4_priv(dev);
811 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
812 struct res_common *r;
813
814 spin_lock_irq(mlx4_tlock(dev));
815 for (i = base; i < base + count; ++i) {
 816 r = res_tracker_lookup(&tracker->res_tree[type], i);
817 if (!r) {
818 err = -ENOENT;
819 goto out;
820 }
821 if (r->owner != slave) {
822 err = -EPERM;
823 goto out;
824 }
825 err = remove_ok(r, type, extra);
826 if (err)
827 goto out;
828 }
829
830 for (i = base; i < base + count; ++i) {
831 r = res_tracker_lookup(&tracker->res_tree[type], i);
832 rb_erase(&r->node, &tracker->res_tree[type]);
833 list_del(&r->list);
834 kfree(r);
835 }
836 err = 0;
837
838out:
839 spin_unlock_irq(mlx4_tlock(dev));
840
841 return err;
842}
843
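/* The *_res_start_move_to() helpers below implement a small per-type state
 * machine: they validate the requested transition, remember it in
 * from_state/to_state and park the resource in the BUSY state.  The move is
 * later committed with res_end_move() or rolled back with res_abort_move().
 */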
844static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
845 enum res_qp_states state, struct res_qp **qp,
846 int alloc)
847{
848 struct mlx4_priv *priv = mlx4_priv(dev);
849 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
850 struct res_qp *r;
851 int err = 0;
852
853 spin_lock_irq(mlx4_tlock(dev));
 854 r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
855 if (!r)
856 err = -ENOENT;
857 else if (r->com.owner != slave)
858 err = -EPERM;
859 else {
860 switch (state) {
861 case RES_QP_BUSY:
 862 mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
863 __func__, r->com.res_id);
864 err = -EBUSY;
865 break;
866
867 case RES_QP_RESERVED:
868 if (r->com.state == RES_QP_MAPPED && !alloc)
869 break;
870
 871 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
872 err = -EINVAL;
873 break;
874
875 case RES_QP_MAPPED:
876 if ((r->com.state == RES_QP_RESERVED && alloc) ||
877 r->com.state == RES_QP_HW)
878 break;
879 else {
 880 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
881 r->com.res_id);
882 err = -EINVAL;
883 }
884
885 break;
886
887 case RES_QP_HW:
888 if (r->com.state != RES_QP_MAPPED)
889 err = -EINVAL;
890 break;
891 default:
892 err = -EINVAL;
893 }
894
895 if (!err) {
896 r->com.from_state = r->com.state;
897 r->com.to_state = state;
898 r->com.state = RES_QP_BUSY;
899 if (qp)
 900 *qp = r;
901 }
902 }
903
904 spin_unlock_irq(mlx4_tlock(dev));
905
906 return err;
907}
908
909static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
910 enum res_mpt_states state, struct res_mpt **mpt)
911{
912 struct mlx4_priv *priv = mlx4_priv(dev);
913 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
914 struct res_mpt *r;
915 int err = 0;
916
917 spin_lock_irq(mlx4_tlock(dev));
 918 r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
919 if (!r)
920 err = -ENOENT;
921 else if (r->com.owner != slave)
922 err = -EPERM;
923 else {
924 switch (state) {
925 case RES_MPT_BUSY:
926 err = -EINVAL;
927 break;
928
929 case RES_MPT_RESERVED:
930 if (r->com.state != RES_MPT_MAPPED)
931 err = -EINVAL;
932 break;
933
934 case RES_MPT_MAPPED:
935 if (r->com.state != RES_MPT_RESERVED &&
936 r->com.state != RES_MPT_HW)
937 err = -EINVAL;
938 break;
939
940 case RES_MPT_HW:
941 if (r->com.state != RES_MPT_MAPPED)
942 err = -EINVAL;
943 break;
944 default:
945 err = -EINVAL;
946 }
947
948 if (!err) {
949 r->com.from_state = r->com.state;
950 r->com.to_state = state;
951 r->com.state = RES_MPT_BUSY;
952 if (mpt)
 953 *mpt = r;
954 }
955 }
956
957 spin_unlock_irq(mlx4_tlock(dev));
958
959 return err;
960}
961
962static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
963 enum res_eq_states state, struct res_eq **eq)
964{
965 struct mlx4_priv *priv = mlx4_priv(dev);
966 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
967 struct res_eq *r;
968 int err = 0;
969
970 spin_lock_irq(mlx4_tlock(dev));
 971 r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
972 if (!r)
973 err = -ENOENT;
974 else if (r->com.owner != slave)
975 err = -EPERM;
976 else {
977 switch (state) {
978 case RES_EQ_BUSY:
979 err = -EINVAL;
980 break;
981
982 case RES_EQ_RESERVED:
983 if (r->com.state != RES_EQ_HW)
984 err = -EINVAL;
985 break;
986
987 case RES_EQ_HW:
988 if (r->com.state != RES_EQ_RESERVED)
989 err = -EINVAL;
990 break;
991
992 default:
993 err = -EINVAL;
994 }
995
996 if (!err) {
997 r->com.from_state = r->com.state;
998 r->com.to_state = state;
999 r->com.state = RES_EQ_BUSY;
1000 if (eq)
1001 *eq = r;
1002 }
1003 }
1004
1005 spin_unlock_irq(mlx4_tlock(dev));
1006
1007 return err;
1008}
1009
1010static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
1011 enum res_cq_states state, struct res_cq **cq)
1012{
1013 struct mlx4_priv *priv = mlx4_priv(dev);
1014 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1015 struct res_cq *r;
1016 int err;
1017
1018 spin_lock_irq(mlx4_tlock(dev));
 1019 r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
1020 if (!r)
1021 err = -ENOENT;
1022 else if (r->com.owner != slave)
1023 err = -EPERM;
1024 else {
1025 switch (state) {
1026 case RES_CQ_BUSY:
1027 err = -EBUSY;
1028 break;
1029
1030 case RES_CQ_ALLOCATED:
1031 if (r->com.state != RES_CQ_HW)
1032 err = -EINVAL;
1033 else if (atomic_read(&r->ref_count))
1034 err = -EBUSY;
1035 else
1036 err = 0;
1037 break;
1038
1039 case RES_CQ_HW:
1040 if (r->com.state != RES_CQ_ALLOCATED)
1041 err = -EINVAL;
1042 else
1043 err = 0;
1044 break;
1045
1046 default:
1047 err = -EINVAL;
1048 }
1049
1050 if (!err) {
1051 r->com.from_state = r->com.state;
1052 r->com.to_state = state;
1053 r->com.state = RES_CQ_BUSY;
1054 if (cq)
1055 *cq = r;
1056 }
1057 }
1058
1059 spin_unlock_irq(mlx4_tlock(dev));
1060
1061 return err;
1062}
1063
1064static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1065 enum res_cq_states state, struct res_srq **srq)
1066{
1067 struct mlx4_priv *priv = mlx4_priv(dev);
1068 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1069 struct res_srq *r;
1070 int err = 0;
1071
1072 spin_lock_irq(mlx4_tlock(dev));
 1073 r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
1074 if (!r)
1075 err = -ENOENT;
1076 else if (r->com.owner != slave)
1077 err = -EPERM;
1078 else {
1079 switch (state) {
1080 case RES_SRQ_BUSY:
1081 err = -EINVAL;
1082 break;
1083
1084 case RES_SRQ_ALLOCATED:
1085 if (r->com.state != RES_SRQ_HW)
1086 err = -EINVAL;
1087 else if (atomic_read(&r->ref_count))
1088 err = -EBUSY;
1089 break;
1090
1091 case RES_SRQ_HW:
1092 if (r->com.state != RES_SRQ_ALLOCATED)
1093 err = -EINVAL;
1094 break;
1095
1096 default:
1097 err = -EINVAL;
1098 }
1099
1100 if (!err) {
1101 r->com.from_state = r->com.state;
1102 r->com.to_state = state;
1103 r->com.state = RES_SRQ_BUSY;
1104 if (srq)
1105 *srq = r;
1106 }
1107 }
1108
1109 spin_unlock_irq(mlx4_tlock(dev));
1110
1111 return err;
1112}
1113
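/* Roll back (res_abort_move) or commit (res_end_move) a transition that was
 * started by one of the *_res_start_move_to() helpers above.
 */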
1114static void res_abort_move(struct mlx4_dev *dev, int slave,
1115 enum mlx4_resource type, int id)
1116{
1117 struct mlx4_priv *priv = mlx4_priv(dev);
1118 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1119 struct res_common *r;
1120
1121 spin_lock_irq(mlx4_tlock(dev));
 1122 r = res_tracker_lookup(&tracker->res_tree[type], id);
1123 if (r && (r->owner == slave))
1124 r->state = r->from_state;
1125 spin_unlock_irq(mlx4_tlock(dev));
1126}
1127
1128static void res_end_move(struct mlx4_dev *dev, int slave,
1129 enum mlx4_resource type, int id)
1130{
1131 struct mlx4_priv *priv = mlx4_priv(dev);
1132 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1133 struct res_common *r;
1134
1135 spin_lock_irq(mlx4_tlock(dev));
 1136 r = res_tracker_lookup(&tracker->res_tree[type], id);
1137 if (r && (r->owner == slave))
1138 r->state = r->to_state;
1139 spin_unlock_irq(mlx4_tlock(dev));
1140}
1141
1142static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1143{
1144 return mlx4_is_qp_reserved(dev, qpn) &&
1145 (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
1146}
1147
1148static int fw_reserved(struct mlx4_dev *dev, int qpn)
1149{
1150 return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
1151}
1152
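/* ALLOC_RES handler for RES_QP.  RES_OP_RESERVE reserves a QP number range
 * and records it in the tracker; RES_OP_MAP_ICM maps ICM for one QP and
 * moves it to RES_QP_MAPPED.  QPs in the FW-reserved range are tracked but
 * their ICM is left alone.
 */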
1153static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1154 u64 in_param, u64 *out_param)
1155{
1156 int err;
1157 int count;
1158 int align;
1159 int base;
1160 int qpn;
1161
1162 switch (op) {
1163 case RES_OP_RESERVE:
1164 count = get_param_l(&in_param);
1165 align = get_param_h(&in_param);
1166 err = __mlx4_qp_reserve_range(dev, count, align, &base);
1167 if (err)
1168 return err;
1169
1170 err = add_res_range(dev, slave, base, count, RES_QP, 0);
1171 if (err) {
1172 __mlx4_qp_release_range(dev, base, count);
1173 return err;
1174 }
1175 set_param_l(out_param, base);
1176 break;
1177 case RES_OP_MAP_ICM:
1178 qpn = get_param_l(&in_param) & 0x7fffff;
1179 if (valid_reserved(dev, slave, qpn)) {
1180 err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1181 if (err)
1182 return err;
1183 }
1184
1185 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1186 NULL, 1);
1187 if (err)
1188 return err;
1189
 1190 if (!fw_reserved(dev, qpn)) {
1191 err = __mlx4_qp_alloc_icm(dev, qpn);
1192 if (err) {
1193 res_abort_move(dev, slave, RES_QP, qpn);
1194 return err;
1195 }
1196 }
1197
1198 res_end_move(dev, slave, RES_QP, qpn);
1199 break;
1200
1201 default:
1202 err = -EINVAL;
1203 break;
1204 }
1205 return err;
1206}
1207
1208static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1209 u64 in_param, u64 *out_param)
1210{
1211 int err = -EINVAL;
1212 int base;
1213 int order;
1214
1215 if (op != RES_OP_RESERVE_AND_MAP)
1216 return err;
1217
1218 order = get_param_l(&in_param);
1219 base = __mlx4_alloc_mtt_range(dev, order);
1220 if (base == -1)
1221 return -ENOMEM;
1222
1223 err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1224 if (err)
1225 __mlx4_free_mtt_range(dev, base, order);
1226 else
1227 set_param_l(out_param, base);
1228
1229 return err;
1230}
1231
1232static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1233 u64 in_param, u64 *out_param)
1234{
1235 int err = -EINVAL;
1236 int index;
1237 int id;
1238 struct res_mpt *mpt;
1239
1240 switch (op) {
1241 case RES_OP_RESERVE:
 1242 index = __mlx4_mpt_reserve(dev);
1243 if (index == -1)
1244 break;
1245 id = index & mpt_mask(dev);
1246
1247 err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1248 if (err) {
 1249 __mlx4_mpt_release(dev, index);
1250 break;
1251 }
1252 set_param_l(out_param, index);
1253 break;
1254 case RES_OP_MAP_ICM:
1255 index = get_param_l(&in_param);
1256 id = index & mpt_mask(dev);
1257 err = mr_res_start_move_to(dev, slave, id,
1258 RES_MPT_MAPPED, &mpt);
1259 if (err)
1260 return err;
1261
 1262 err = __mlx4_mpt_alloc_icm(dev, mpt->key);
1263 if (err) {
1264 res_abort_move(dev, slave, RES_MPT, id);
1265 return err;
1266 }
1267
1268 res_end_move(dev, slave, RES_MPT, id);
1269 break;
1270 }
1271 return err;
1272}
1273
1274static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1275 u64 in_param, u64 *out_param)
1276{
1277 int cqn;
1278 int err;
1279
1280 switch (op) {
1281 case RES_OP_RESERVE_AND_MAP:
1282 err = __mlx4_cq_alloc_icm(dev, &cqn);
1283 if (err)
1284 break;
1285
1286 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1287 if (err) {
1288 __mlx4_cq_free_icm(dev, cqn);
1289 break;
1290 }
1291
1292 set_param_l(out_param, cqn);
1293 break;
1294
1295 default:
1296 err = -EINVAL;
1297 }
1298
1299 return err;
1300}
1301
1302static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1303 u64 in_param, u64 *out_param)
1304{
1305 int srqn;
1306 int err;
1307
1308 switch (op) {
1309 case RES_OP_RESERVE_AND_MAP:
1310 err = __mlx4_srq_alloc_icm(dev, &srqn);
1311 if (err)
1312 break;
1313
1314 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1315 if (err) {
1316 __mlx4_srq_free_icm(dev, srqn);
1317 break;
1318 }
1319
1320 set_param_l(out_param, srqn);
1321 break;
1322
1323 default:
1324 err = -EINVAL;
1325 }
1326
1327 return err;
1328}
1329
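/* Per-slave MAC bookkeeping: every MAC a slave registers is kept on a plain
 * list so that rem_slave_macs() can unregister whatever the slave leaves
 * behind.
 */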
1330static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
1331{
1332 struct mlx4_priv *priv = mlx4_priv(dev);
1333 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1334 struct mac_res *res;
1335
1336 res = kzalloc(sizeof *res, GFP_KERNEL);
1337 if (!res)
1338 return -ENOMEM;
1339 res->mac = mac;
1340 res->port = (u8) port;
1341 list_add_tail(&res->list,
1342 &tracker->slave_list[slave].res_list[RES_MAC]);
1343 return 0;
1344}
1345
1346static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1347 int port)
1348{
1349 struct mlx4_priv *priv = mlx4_priv(dev);
1350 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1351 struct list_head *mac_list =
1352 &tracker->slave_list[slave].res_list[RES_MAC];
1353 struct mac_res *res, *tmp;
1354
1355 list_for_each_entry_safe(res, tmp, mac_list, list) {
1356 if (res->mac == mac && res->port == (u8) port) {
1357 list_del(&res->list);
1358 kfree(res);
1359 break;
1360 }
1361 }
1362}
1363
1364static void rem_slave_macs(struct mlx4_dev *dev, int slave)
1365{
1366 struct mlx4_priv *priv = mlx4_priv(dev);
1367 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1368 struct list_head *mac_list =
1369 &tracker->slave_list[slave].res_list[RES_MAC];
1370 struct mac_res *res, *tmp;
1371
1372 list_for_each_entry_safe(res, tmp, mac_list, list) {
1373 list_del(&res->list);
1374 __mlx4_unregister_mac(dev, res->port, res->mac);
1375 kfree(res);
1376 }
1377}
1378
1379static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1380 u64 in_param, u64 *out_param)
1381{
1382 int err = -EINVAL;
1383 int port;
1384 u64 mac;
1385
1386 if (op != RES_OP_RESERVE_AND_MAP)
1387 return err;
1388
1389 port = get_param_l(out_param);
1390 mac = in_param;
1391
1392 err = __mlx4_register_mac(dev, port, mac);
1393 if (err >= 0) {
1394 set_param_l(out_param, err);
1395 err = 0;
1396 }
1397
1398 if (!err) {
1399 err = mac_add_to_slave(dev, slave, mac, port);
1400 if (err)
1401 __mlx4_unregister_mac(dev, port, mac);
1402 }
1403 return err;
1404}
1405
1406static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1407 u64 in_param, u64 *out_param)
1408{
1409 return 0;
1410}
1411
1412static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1413 u64 in_param, u64 *out_param)
1414{
1415 u32 index;
1416 int err;
1417
1418 if (op != RES_OP_RESERVE)
1419 return -EINVAL;
1420
1421 err = __mlx4_counter_alloc(dev, &index);
1422 if (err)
1423 return err;
1424
1425 err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
1426 if (err)
1427 __mlx4_counter_free(dev, index);
1428 else
1429 set_param_l(out_param, index);
1430
1431 return err;
1432}
1433
1434static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1435 u64 in_param, u64 *out_param)
1436{
1437 u32 xrcdn;
1438 int err;
1439
1440 if (op != RES_OP_RESERVE)
1441 return -EINVAL;
1442
1443 err = __mlx4_xrcd_alloc(dev, &xrcdn);
1444 if (err)
1445 return err;
1446
1447 err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
1448 if (err)
1449 __mlx4_xrcd_free(dev, xrcdn);
1450 else
1451 set_param_l(out_param, xrcdn);
1452
1453 return err;
1454}
1455
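/* Dispatcher for the paravirtualized ALLOC_RES command: the resource type
 * arrives in vhcr->in_modifier and selects the type-specific allocation
 * routine, while vhcr->op_modifier carries the RES_OP_* sub-operation.
 */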
1456int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
1457 struct mlx4_vhcr *vhcr,
1458 struct mlx4_cmd_mailbox *inbox,
1459 struct mlx4_cmd_mailbox *outbox,
1460 struct mlx4_cmd_info *cmd)
1461{
1462 int err;
1463 int alop = vhcr->op_modifier;
1464
1465 switch (vhcr->in_modifier) {
1466 case RES_QP:
1467 err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
1468 vhcr->in_param, &vhcr->out_param);
1469 break;
1470
1471 case RES_MTT:
1472 err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
1473 vhcr->in_param, &vhcr->out_param);
1474 break;
1475
1476 case RES_MPT:
1477 err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
1478 vhcr->in_param, &vhcr->out_param);
1479 break;
1480
1481 case RES_CQ:
1482 err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
1483 vhcr->in_param, &vhcr->out_param);
1484 break;
1485
1486 case RES_SRQ:
1487 err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
1488 vhcr->in_param, &vhcr->out_param);
1489 break;
1490
1491 case RES_MAC:
1492 err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
1493 vhcr->in_param, &vhcr->out_param);
1494 break;
1495
1496 case RES_VLAN:
1497 err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
1498 vhcr->in_param, &vhcr->out_param);
1499 break;
1500
1501 case RES_COUNTER:
1502 err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
1503 vhcr->in_param, &vhcr->out_param);
1504 break;
1505
1506 case RES_XRCD:
1507 err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
1508 vhcr->in_param, &vhcr->out_param);
1509 break;
1510
1511 default:
1512 err = -EINVAL;
1513 break;
1514 }
1515
1516 return err;
1517}
1518
1519static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1520 u64 in_param)
1521{
1522 int err;
1523 int count;
1524 int base;
1525 int qpn;
1526
1527 switch (op) {
1528 case RES_OP_RESERVE:
1529 base = get_param_l(&in_param) & 0x7fffff;
1530 count = get_param_h(&in_param);
1531 err = rem_res_range(dev, slave, base, count, RES_QP, 0);
1532 if (err)
1533 break;
1534 __mlx4_qp_release_range(dev, base, count);
1535 break;
1536 case RES_OP_MAP_ICM:
1537 qpn = get_param_l(&in_param) & 0x7fffff;
1538 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
1539 NULL, 0);
1540 if (err)
1541 return err;
1542
 1543 if (!fw_reserved(dev, qpn))
1544 __mlx4_qp_free_icm(dev, qpn);
1545
1546 res_end_move(dev, slave, RES_QP, qpn);
1547
1548 if (valid_reserved(dev, slave, qpn))
1549 err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
1550 break;
1551 default:
1552 err = -EINVAL;
1553 break;
1554 }
1555 return err;
1556}
1557
1558static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1559 u64 in_param, u64 *out_param)
1560{
1561 int err = -EINVAL;
1562 int base;
1563 int order;
1564
1565 if (op != RES_OP_RESERVE_AND_MAP)
1566 return err;
1567
1568 base = get_param_l(&in_param);
1569 order = get_param_h(&in_param);
1570 err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
1571 if (!err)
1572 __mlx4_free_mtt_range(dev, base, order);
1573 return err;
1574}
1575
1576static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1577 u64 in_param)
1578{
1579 int err = -EINVAL;
1580 int index;
1581 int id;
1582 struct res_mpt *mpt;
1583
1584 switch (op) {
1585 case RES_OP_RESERVE:
1586 index = get_param_l(&in_param);
1587 id = index & mpt_mask(dev);
1588 err = get_res(dev, slave, id, RES_MPT, &mpt);
1589 if (err)
1590 break;
1591 index = mpt->key;
1592 put_res(dev, slave, id, RES_MPT);
1593
1594 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
1595 if (err)
1596 break;
 1597 __mlx4_mpt_release(dev, index);
1598 break;
1599 case RES_OP_MAP_ICM:
1600 index = get_param_l(&in_param);
1601 id = index & mpt_mask(dev);
1602 err = mr_res_start_move_to(dev, slave, id,
1603 RES_MPT_RESERVED, &mpt);
1604 if (err)
1605 return err;
1606
 1607 __mlx4_mpt_free_icm(dev, mpt->key);
1608 res_end_move(dev, slave, RES_MPT, id);
1609 return err;
1610 break;
1611 default:
1612 err = -EINVAL;
1613 break;
1614 }
1615 return err;
1616}
1617
1618static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1619 u64 in_param, u64 *out_param)
1620{
1621 int cqn;
1622 int err;
1623
1624 switch (op) {
1625 case RES_OP_RESERVE_AND_MAP:
1626 cqn = get_param_l(&in_param);
1627 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1628 if (err)
1629 break;
1630
1631 __mlx4_cq_free_icm(dev, cqn);
1632 break;
1633
1634 default:
1635 err = -EINVAL;
1636 break;
1637 }
1638
1639 return err;
1640}
1641
1642static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1643 u64 in_param, u64 *out_param)
1644{
1645 int srqn;
1646 int err;
1647
1648 switch (op) {
1649 case RES_OP_RESERVE_AND_MAP:
1650 srqn = get_param_l(&in_param);
1651 err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1652 if (err)
1653 break;
1654
1655 __mlx4_srq_free_icm(dev, srqn);
1656 break;
1657
1658 default:
1659 err = -EINVAL;
1660 break;
1661 }
1662
1663 return err;
1664}
1665
1666static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1667 u64 in_param, u64 *out_param)
1668{
1669 int port;
1670 int err = 0;
1671
1672 switch (op) {
1673 case RES_OP_RESERVE_AND_MAP:
1674 port = get_param_l(out_param);
1675 mac_del_from_slave(dev, slave, in_param, port);
1676 __mlx4_unregister_mac(dev, port, in_param);
1677 break;
1678 default:
1679 err = -EINVAL;
1680 break;
1681 }
1682
1683 return err;
1684
1685}
1686
1687static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1688 u64 in_param, u64 *out_param)
1689{
1690 return 0;
1691}
1692
1693static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1694 u64 in_param, u64 *out_param)
1695{
1696 int index;
1697 int err;
1698
1699 if (op != RES_OP_RESERVE)
1700 return -EINVAL;
1701
1702 index = get_param_l(&in_param);
1703 err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
1704 if (err)
1705 return err;
1706
1707 __mlx4_counter_free(dev, index);
1708
1709 return err;
1710}
1711
1712static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1713 u64 in_param, u64 *out_param)
1714{
1715 int xrcdn;
1716 int err;
1717
1718 if (op != RES_OP_RESERVE)
1719 return -EINVAL;
1720
1721 xrcdn = get_param_l(&in_param);
1722 err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
1723 if (err)
1724 return err;
1725
1726 __mlx4_xrcd_free(dev, xrcdn);
1727
1728 return err;
1729}
1730
1731int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
1732 struct mlx4_vhcr *vhcr,
1733 struct mlx4_cmd_mailbox *inbox,
1734 struct mlx4_cmd_mailbox *outbox,
1735 struct mlx4_cmd_info *cmd)
1736{
1737 int err = -EINVAL;
1738 int alop = vhcr->op_modifier;
1739
1740 switch (vhcr->in_modifier) {
1741 case RES_QP:
1742 err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
1743 vhcr->in_param);
1744 break;
1745
1746 case RES_MTT:
1747 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
1748 vhcr->in_param, &vhcr->out_param);
1749 break;
1750
1751 case RES_MPT:
1752 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
1753 vhcr->in_param);
1754 break;
1755
1756 case RES_CQ:
1757 err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
1758 vhcr->in_param, &vhcr->out_param);
1759 break;
1760
1761 case RES_SRQ:
1762 err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
1763 vhcr->in_param, &vhcr->out_param);
1764 break;
1765
1766 case RES_MAC:
1767 err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
1768 vhcr->in_param, &vhcr->out_param);
1769 break;
1770
1771 case RES_VLAN:
1772 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
1773 vhcr->in_param, &vhcr->out_param);
1774 break;
1775
1776 case RES_COUNTER:
1777 err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
1778 vhcr->in_param, &vhcr->out_param);
1779 break;
1780
1781 case RES_XRCD:
1782 err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
1783 vhcr->in_param, &vhcr->out_param);
1784
1785 default:
1786 break;
1787 }
1788 return err;
1789}
1790
1791/* ugly but other choices are uglier */
1792static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
1793{
1794 return (be32_to_cpu(mpt->flags) >> 9) & 1;
1795}
1796
 1797static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
 1798{
 1799 return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
1800}
1801
1802static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
1803{
1804 return be32_to_cpu(mpt->mtt_sz);
1805}
1806
1807static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
1808{
1809 return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
1810}
1811
1812static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
1813{
1814 return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
1815}
1816
1817static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
1818{
1819 return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
1820}
1821
1822static int mr_is_region(struct mlx4_mpt_entry *mpt)
1823{
1824 return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
1825}
1826
 1827static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
1828{
1829 return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
1830}
1831
 1832static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
1833{
1834 return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
1835}
1836
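/* Number of MTT pages a QP context needs: the SQ takes
 * 2^(log_sq_size + log_sq_stride + 4) bytes, the RQ (absent for SRQ, RSS and
 * XRC QPs) takes 2^(log_rq_size + log_rq_stride + 4) bytes, and the sum,
 * adjusted by the page offset, is rounded up to a power-of-two page count.
 */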
1837static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
1838{
1839 int page_shift = (qpc->log_page_size & 0x3f) + 12;
1840 int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
1841 int log_sq_sride = qpc->sq_size_stride & 7;
1842 int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
1843 int log_rq_stride = qpc->rq_size_stride & 7;
1844 int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
1845 int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
1846 int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
1847 int sq_size;
1848 int rq_size;
1849 int total_pages;
1850 int total_mem;
1851 int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
1852
1853 sq_size = 1 << (log_sq_size + log_sq_sride + 4);
1854 rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
1855 total_mem = sq_size + rq_size;
1856 total_pages =
1857 roundup_pow_of_two((total_mem + (page_offset << 6)) >>
1858 page_shift);
1859
1860 return total_pages;
1861}
1862
1863static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
1864 int size, struct res_mtt *mtt)
1865{
1866 int res_start = mtt->com.res_id;
1867 int res_size = (1 << mtt->order);
1868
1869 if (start < res_start || start + size > res_start + res_size)
1870 return -EPERM;
1871 return 0;
1872}
1873
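/* SW2HW_MPT on behalf of a slave: move the MPT to RES_MPT_HW, refuse memory
 * windows, bind-enabled FMRs and PDs that encode another function's number,
 * and verify that the referenced MTT range really belongs to this slave
 * before handing the command to the firmware.
 */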
1874int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
1875 struct mlx4_vhcr *vhcr,
1876 struct mlx4_cmd_mailbox *inbox,
1877 struct mlx4_cmd_mailbox *outbox,
1878 struct mlx4_cmd_info *cmd)
1879{
1880 int err;
1881 int index = vhcr->in_modifier;
1882 struct res_mtt *mtt;
1883 struct res_mpt *mpt;
 1884 int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
1885 int phys;
1886 int id;
1887 u32 pd;
1888 int pd_slave;
1889
1890 id = index & mpt_mask(dev);
1891 err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
1892 if (err)
1893 return err;
1894
1895 /* Disable memory windows for VFs. */
1896 if (!mr_is_region(inbox->buf)) {
1897 err = -EPERM;
1898 goto ex_abort;
1899 }
1900
1901 /* Make sure that the PD bits related to the slave id are zeros. */
1902 pd = mr_get_pd(inbox->buf);
1903 pd_slave = (pd >> 17) & 0x7f;
1904 if (pd_slave != 0 && pd_slave != slave) {
1905 err = -EPERM;
1906 goto ex_abort;
1907 }
1908
1909 if (mr_is_fmr(inbox->buf)) {
1910 /* FMR and Bind Enable are forbidden in slave devices. */
1911 if (mr_is_bind_enabled(inbox->buf)) {
1912 err = -EPERM;
1913 goto ex_abort;
1914 }
1915 /* FMR and Memory Windows are also forbidden. */
1916 if (!mr_is_region(inbox->buf)) {
1917 err = -EPERM;
1918 goto ex_abort;
1919 }
1920 }
1921
1922 phys = mr_phys_mpt(inbox->buf);
1923 if (!phys) {
 1924 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
1925 if (err)
1926 goto ex_abort;
1927
1928 err = check_mtt_range(dev, slave, mtt_base,
1929 mr_get_mtt_size(inbox->buf), mtt);
1930 if (err)
1931 goto ex_put;
1932
1933 mpt->mtt = mtt;
1934 }
1935
1936 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1937 if (err)
1938 goto ex_put;
1939
1940 if (!phys) {
1941 atomic_inc(&mtt->ref_count);
1942 put_res(dev, slave, mtt->com.res_id, RES_MTT);
1943 }
1944
1945 res_end_move(dev, slave, RES_MPT, id);
1946 return 0;
1947
1948ex_put:
1949 if (!phys)
1950 put_res(dev, slave, mtt->com.res_id, RES_MTT);
1951ex_abort:
1952 res_abort_move(dev, slave, RES_MPT, id);
1953
1954 return err;
1955}
1956
1957int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
1958 struct mlx4_vhcr *vhcr,
1959 struct mlx4_cmd_mailbox *inbox,
1960 struct mlx4_cmd_mailbox *outbox,
1961 struct mlx4_cmd_info *cmd)
1962{
1963 int err;
1964 int index = vhcr->in_modifier;
1965 struct res_mpt *mpt;
1966 int id;
1967
1968 id = index & mpt_mask(dev);
1969 err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
1970 if (err)
1971 return err;
1972
1973 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1974 if (err)
1975 goto ex_abort;
1976
1977 if (mpt->mtt)
1978 atomic_dec(&mpt->mtt->ref_count);
1979
1980 res_end_move(dev, slave, RES_MPT, id);
1981 return 0;
1982
1983ex_abort:
1984 res_abort_move(dev, slave, RES_MPT, id);
1985
1986 return err;
1987}
1988
1989int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
1990 struct mlx4_vhcr *vhcr,
1991 struct mlx4_cmd_mailbox *inbox,
1992 struct mlx4_cmd_mailbox *outbox,
1993 struct mlx4_cmd_info *cmd)
1994{
1995 int err;
1996 int index = vhcr->in_modifier;
1997 struct res_mpt *mpt;
1998 int id;
1999
2000 id = index & mpt_mask(dev);
2001 err = get_res(dev, slave, id, RES_MPT, &mpt);
2002 if (err)
2003 return err;
2004
2005 if (mpt->com.from_state != RES_MPT_HW) {
2006 err = -EBUSY;
2007 goto out;
2008 }
2009
2010 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2011
2012out:
2013 put_res(dev, slave, id, RES_MPT);
2014 return err;
2015}
2016
2017static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2018{
2019 return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2020}
2021
2022static int qp_get_scqn(struct mlx4_qp_context *qpc)
2023{
2024 return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2025}
2026
2027static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2028{
2029 return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2030}
2031
2032static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2033 struct mlx4_qp_context *context)
2034{
2035 u32 qpn = vhcr->in_modifier & 0xffffff;
2036 u32 qkey = 0;
2037
2038 if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2039 return;
2040
2041 /* adjust qkey in qp context */
2042 context->qkey = cpu_to_be32(qkey);
2043}
2044
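/* RST2INIT on behalf of a slave: look up and hold every object the QP
 * context references (MTT range, receive and send CQs, optionally the SRQ),
 * fix up the pkey index and qkey for paravirtualization, execute the command
 * and only then take the reference counts; any failure unwinds in reverse
 * order.
 */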
2045int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2046 struct mlx4_vhcr *vhcr,
2047 struct mlx4_cmd_mailbox *inbox,
2048 struct mlx4_cmd_mailbox *outbox,
2049 struct mlx4_cmd_info *cmd)
2050{
2051 int err;
2052 int qpn = vhcr->in_modifier & 0x7fffff;
2053 struct res_mtt *mtt;
2054 struct res_qp *qp;
2055 struct mlx4_qp_context *qpc = inbox->buf + 8;
 2056 int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2057 int mtt_size = qp_get_mtt_size(qpc);
2058 struct res_cq *rcq;
2059 struct res_cq *scq;
2060 int rcqn = qp_get_rcqn(qpc);
2061 int scqn = qp_get_scqn(qpc);
2062 u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2063 int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2064 struct res_srq *srq;
2065 int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2066
2067 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2068 if (err)
2069 return err;
2070 qp->local_qpn = local_qpn;
2071
 2072 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2073 if (err)
2074 goto ex_abort;
2075
2076 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2077 if (err)
2078 goto ex_put_mtt;
2079
2080 err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2081 if (err)
2082 goto ex_put_mtt;
2083
2084 if (scqn != rcqn) {
2085 err = get_res(dev, slave, scqn, RES_CQ, &scq);
2086 if (err)
2087 goto ex_put_rcq;
2088 } else
2089 scq = rcq;
2090
2091 if (use_srq) {
2092 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2093 if (err)
2094 goto ex_put_scq;
2095 }
2096
2097 adjust_proxy_tun_qkey(dev, vhcr, qpc);
2098 update_pkey_index(dev, slave, inbox);
2099 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2100 if (err)
2101 goto ex_put_srq;
2102 atomic_inc(&mtt->ref_count);
2103 qp->mtt = mtt;
2104 atomic_inc(&rcq->ref_count);
2105 qp->rcq = rcq;
2106 atomic_inc(&scq->ref_count);
2107 qp->scq = scq;
2108
2109 if (scqn != rcqn)
2110 put_res(dev, slave, scqn, RES_CQ);
2111
2112 if (use_srq) {
2113 atomic_inc(&srq->ref_count);
2114 put_res(dev, slave, srqn, RES_SRQ);
2115 qp->srq = srq;
2116 }
2117 put_res(dev, slave, rcqn, RES_CQ);
 2118 put_res(dev, slave, mtt_base, RES_MTT);
2119 res_end_move(dev, slave, RES_QP, qpn);
2120
2121 return 0;
2122
2123ex_put_srq:
2124 if (use_srq)
2125 put_res(dev, slave, srqn, RES_SRQ);
2126ex_put_scq:
2127 if (scqn != rcqn)
2128 put_res(dev, slave, scqn, RES_CQ);
2129ex_put_rcq:
2130 put_res(dev, slave, rcqn, RES_CQ);
2131ex_put_mtt:
 2132 put_res(dev, slave, mtt_base, RES_MTT);
2133ex_abort:
2134 res_abort_move(dev, slave, RES_QP, qpn);
2135
2136 return err;
2137}
2138
 2139static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
2140{
2141 return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
2142}
2143
2144static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
2145{
2146 int log_eq_size = eqc->log_eq_size & 0x1f;
2147 int page_shift = (eqc->log_page_size & 0x3f) + 12;
2148
2149 if (log_eq_size + 5 < page_shift)
2150 return 1;
2151
2152 return 1 << (log_eq_size + 5 - page_shift);
2153}
2154
 2155static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
2156{
2157 return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
2158}
2159
2160static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
2161{
2162 int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
2163 int page_shift = (cqc->log_page_size & 0x3f) + 12;
2164
2165 if (log_cq_size + 5 < page_shift)
2166 return 1;
2167
2168 return 1 << (log_cq_size + 5 - page_shift);
2169}
2170
2171int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2172 struct mlx4_vhcr *vhcr,
2173 struct mlx4_cmd_mailbox *inbox,
2174 struct mlx4_cmd_mailbox *outbox,
2175 struct mlx4_cmd_info *cmd)
2176{
2177 int err;
2178 int eqn = vhcr->in_modifier;
2179 int res_id = (slave << 8) | eqn;
2180 struct mlx4_eq_context *eqc = inbox->buf;
 2181 int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
2182 int mtt_size = eq_get_mtt_size(eqc);
2183 struct res_eq *eq;
2184 struct res_mtt *mtt;
2185
2186 err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2187 if (err)
2188 return err;
2189 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
2190 if (err)
2191 goto out_add;
2192
 2193 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2194 if (err)
2195 goto out_move;
2196
2197 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2198 if (err)
2199 goto out_put;
2200
2201 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2202 if (err)
2203 goto out_put;
2204
2205 atomic_inc(&mtt->ref_count);
2206 eq->mtt = mtt;
2207 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2208 res_end_move(dev, slave, RES_EQ, res_id);
2209 return 0;
2210
2211out_put:
2212 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2213out_move:
2214 res_abort_move(dev, slave, RES_EQ, res_id);
2215out_add:
2216 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2217 return err;
2218}
2219
2220static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
2221 int len, struct res_mtt **res)
2222{
2223 struct mlx4_priv *priv = mlx4_priv(dev);
2224 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2225 struct res_mtt *mtt;
2226 int err = -EINVAL;
2227
2228 spin_lock_irq(mlx4_tlock(dev));
2229 list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
2230 com.list) {
2231 if (!check_mtt_range(dev, slave, start, len, mtt)) {
2232 *res = mtt;
2233 mtt->com.from_state = mtt->com.state;
2234 mtt->com.state = RES_MTT_BUSY;
2235 err = 0;
2236 break;
2237 }
2238 }
2239 spin_unlock_irq(mlx4_tlock(dev));
2240
2241 return err;
2242}
2243
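/* For RC/UC transitions requested by a slave, make sure the address path GID
 * indexes are left at zero; slaves have only GID index 0 in this scheme.
 */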
2244static int verify_qp_parameters(struct mlx4_dev *dev,
2245 struct mlx4_cmd_mailbox *inbox,
2246 enum qp_transition transition, u8 slave)
2247{
2248 u32 qp_type;
2249 struct mlx4_qp_context *qp_ctx;
2250 enum mlx4_qp_optpar optpar;
2251
2252 qp_ctx = inbox->buf + 8;
2253 qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
2254 optpar = be32_to_cpu(*(__be32 *) inbox->buf);
2255
2256 switch (qp_type) {
2257 case MLX4_QP_ST_RC:
2258 case MLX4_QP_ST_UC:
2259 switch (transition) {
2260 case QP_TRANS_INIT2RTR:
2261 case QP_TRANS_RTR2RTS:
2262 case QP_TRANS_RTS2RTS:
2263 case QP_TRANS_SQD2SQD:
2264 case QP_TRANS_SQD2RTS:
2265 if (slave != mlx4_master_func_num(dev))
2266 /* slaves have only gid index 0 */
2267 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
2268 if (qp_ctx->pri_path.mgid_index)
2269 return -EINVAL;
2270 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
2271 if (qp_ctx->alt_path.mgid_index)
2272 return -EINVAL;
2273 break;
2274 default:
2275 break;
2276 }
2277
2278 break;
2279 default:
2280 break;
2281 }
2282
2283 return 0;
2284}
2285
c82e9aa0
EC
2286int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
2287 struct mlx4_vhcr *vhcr,
2288 struct mlx4_cmd_mailbox *inbox,
2289 struct mlx4_cmd_mailbox *outbox,
2290 struct mlx4_cmd_info *cmd)
2291{
2292 struct mlx4_mtt mtt;
2293 __be64 *page_list = inbox->buf;
2294 u64 *pg_list = (u64 *)page_list;
2295 int i;
2296 struct res_mtt *rmtt = NULL;
2297 int start = be64_to_cpu(page_list[0]);
2298 int npages = vhcr->in_modifier;
2299 int err;
2300
2301 err = get_containing_mtt(dev, slave, start, npages, &rmtt);
2302 if (err)
2303 return err;
2304
2305 /* Call the SW implementation of write_mtt:
2306 * - Prepare a dummy mtt struct
2307 * - Translate inbox contents to simple addresses in host endianness */
2b8fb286
MA
2308 mtt.offset = 0; /* TBD: this is broken, but it is left unhandled since
2309 the offset is not actually used here */
c82e9aa0
EC
2310 mtt.order = 0;
2311 mtt.page_shift = 0;
2312 for (i = 0; i < npages; ++i)
2313 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
2314
2315 err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
2316 ((u64 *)page_list + 2));
2317
2318 if (rmtt)
2319 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
2320
2321 return err;
2322}
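
The WRITE_MTT inbox handled above keeps the starting MTT offset in page_list[0] and the page addresses from index 2 onward, each big-endian on the wire with a present flag in bit 0 that is cleared before __mlx4_write_mtt() is called. Here is a user-space sketch of that fixup, assuming glibc's htobe64()/be64toh() in place of the kernel byte-order helpers.

#include <stdio.h>
#include <stdint.h>
#include <endian.h>

/* Illustrative rework of the inbox fixup above: entries start at index 2
 * of the mailbox, are big-endian, and carry a "present" flag in bit 0
 * that is stripped before use.  This is a sketch, not driver code. */
static void fixup_page_list(uint64_t *pg, int npages)
{
	int i;

	for (i = 0; i < npages; ++i)
		pg[i + 2] = be64toh(pg[i + 2]) & ~1ULL;
}

int main(void)
{
	uint64_t mbox[4] = { 0, 0, htobe64(0x100001ULL), htobe64(0x101001ULL) };

	fixup_page_list(mbox, 2);
	printf("0x%llx 0x%llx\n",
	       (unsigned long long)mbox[2], (unsigned long long)mbox[3]);
	/* prints 0x100000 0x101000 */
	return 0;
}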
2323
2324int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2325 struct mlx4_vhcr *vhcr,
2326 struct mlx4_cmd_mailbox *inbox,
2327 struct mlx4_cmd_mailbox *outbox,
2328 struct mlx4_cmd_info *cmd)
2329{
2330 int eqn = vhcr->in_modifier;
2331 int res_id = eqn | (slave << 8);
2332 struct res_eq *eq;
2333 int err;
2334
2335 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
2336 if (err)
2337 return err;
2338
2339 err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
2340 if (err)
2341 goto ex_abort;
2342
2343 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2344 if (err)
2345 goto ex_put;
2346
2347 atomic_dec(&eq->mtt->ref_count);
2348 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2349 res_end_move(dev, slave, RES_EQ, res_id);
2350 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2351
2352 return 0;
2353
2354ex_put:
2355 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2356ex_abort:
2357 res_abort_move(dev, slave, RES_EQ, res_id);
2358
2359 return err;
2360}
2361
2362int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
2363{
2364 struct mlx4_priv *priv = mlx4_priv(dev);
2365 struct mlx4_slave_event_eq_info *event_eq;
2366 struct mlx4_cmd_mailbox *mailbox;
2367 u32 in_modifier = 0;
2368 int err;
2369 int res_id;
2370 struct res_eq *req;
2371
2372 if (!priv->mfunc.master.slave_state)
2373 return -EINVAL;
2374
803143fb 2375 event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
c82e9aa0
EC
2376
2377 /* Create the event only if the slave is registered */
803143fb 2378 if (event_eq->eqn < 0)
c82e9aa0
EC
2379 return 0;
2380
2381 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2382 res_id = (slave << 8) | event_eq->eqn;
2383 err = get_res(dev, slave, res_id, RES_EQ, &req);
2384 if (err)
2385 goto unlock;
2386
2387 if (req->com.from_state != RES_EQ_HW) {
2388 err = -EINVAL;
2389 goto put;
2390 }
2391
2392 mailbox = mlx4_alloc_cmd_mailbox(dev);
2393 if (IS_ERR(mailbox)) {
2394 err = PTR_ERR(mailbox);
2395 goto put;
2396 }
2397
2398 if (eqe->type == MLX4_EVENT_TYPE_CMD) {
2399 ++event_eq->token;
2400 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
2401 }
2402
2403 memcpy(mailbox->buf, (u8 *) eqe, 28);
2404
2405 in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
2406
2407 err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
2408 MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
2409 MLX4_CMD_NATIVE);
2410
2411 put_res(dev, slave, res_id, RES_EQ);
2412 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2413 mlx4_free_cmd_mailbox(dev, mailbox);
2414 return err;
2415
2416put:
2417 put_res(dev, slave, res_id, RES_EQ);
2418
2419unlock:
2420 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2421 return err;
2422}
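
mlx4_GEN_EQE() forwards a 28-byte event queue entry to the slave's registered EQ; the in_modifier it builds carries the slave number in bits 0-7 and the EQ number in bits 16-23. A tiny sketch of that packing (the helper name is the editor's):

#include <stdio.h>

/* Sketch of the GEN_EQE in_modifier layout used above: target slave in
 * bits 0-7, target EQ number in bits 16-23. */
static unsigned int gen_eqe_in_modifier(int slave, int eqn)
{
	return (slave & 0xff) | ((eqn & 0xff) << 16);
}

int main(void)
{
	printf("0x%x\n", gen_eqe_in_modifier(5, 0x1c));	/* prints 0x1c0005 */
	return 0;
}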
2423
2424int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
2425 struct mlx4_vhcr *vhcr,
2426 struct mlx4_cmd_mailbox *inbox,
2427 struct mlx4_cmd_mailbox *outbox,
2428 struct mlx4_cmd_info *cmd)
2429{
2430 int eqn = vhcr->in_modifier;
2431 int res_id = eqn | (slave << 8);
2432 struct res_eq *eq;
2433 int err;
2434
2435 err = get_res(dev, slave, res_id, RES_EQ, &eq);
2436 if (err)
2437 return err;
2438
2439 if (eq->com.from_state != RES_EQ_HW) {
2440 err = -EINVAL;
2441 goto ex_put;
2442 }
2443
2444 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2445
2446ex_put:
2447 put_res(dev, slave, res_id, RES_EQ);
2448 return err;
2449}
2450
2451int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2452 struct mlx4_vhcr *vhcr,
2453 struct mlx4_cmd_mailbox *inbox,
2454 struct mlx4_cmd_mailbox *outbox,
2455 struct mlx4_cmd_info *cmd)
2456{
2457 int err;
2458 int cqn = vhcr->in_modifier;
2459 struct mlx4_cq_context *cqc = inbox->buf;
2b8fb286 2460 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
c82e9aa0
EC
2461 struct res_cq *cq;
2462 struct res_mtt *mtt;
2463
2464 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
2465 if (err)
2466 return err;
2b8fb286 2467 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
c82e9aa0
EC
2468 if (err)
2469 goto out_move;
2470 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2471 if (err)
2472 goto out_put;
2473 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2474 if (err)
2475 goto out_put;
2476 atomic_inc(&mtt->ref_count);
2477 cq->mtt = mtt;
2478 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2479 res_end_move(dev, slave, RES_CQ, cqn);
2480 return 0;
2481
2482out_put:
2483 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2484out_move:
2485 res_abort_move(dev, slave, RES_CQ, cqn);
2486 return err;
2487}
2488
2489int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2490 struct mlx4_vhcr *vhcr,
2491 struct mlx4_cmd_mailbox *inbox,
2492 struct mlx4_cmd_mailbox *outbox,
2493 struct mlx4_cmd_info *cmd)
2494{
2495 int err;
2496 int cqn = vhcr->in_modifier;
2497 struct res_cq *cq;
2498
2499 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
2500 if (err)
2501 return err;
2502 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2503 if (err)
2504 goto out_move;
2505 atomic_dec(&cq->mtt->ref_count);
2506 res_end_move(dev, slave, RES_CQ, cqn);
2507 return 0;
2508
2509out_move:
2510 res_abort_move(dev, slave, RES_CQ, cqn);
2511 return err;
2512}
2513
2514int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2515 struct mlx4_vhcr *vhcr,
2516 struct mlx4_cmd_mailbox *inbox,
2517 struct mlx4_cmd_mailbox *outbox,
2518 struct mlx4_cmd_info *cmd)
2519{
2520 int cqn = vhcr->in_modifier;
2521 struct res_cq *cq;
2522 int err;
2523
2524 err = get_res(dev, slave, cqn, RES_CQ, &cq);
2525 if (err)
2526 return err;
2527
2528 if (cq->com.from_state != RES_CQ_HW)
2529 goto ex_put;
2530
2531 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2532ex_put:
2533 put_res(dev, slave, cqn, RES_CQ);
2534
2535 return err;
2536}
2537
2538static int handle_resize(struct mlx4_dev *dev, int slave,
2539 struct mlx4_vhcr *vhcr,
2540 struct mlx4_cmd_mailbox *inbox,
2541 struct mlx4_cmd_mailbox *outbox,
2542 struct mlx4_cmd_info *cmd,
2543 struct res_cq *cq)
2544{
2545 int err;
2546 struct res_mtt *orig_mtt;
2547 struct res_mtt *mtt;
2548 struct mlx4_cq_context *cqc = inbox->buf;
2b8fb286 2549 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
c82e9aa0
EC
2550
2551 err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
2552 if (err)
2553 return err;
2554
2555 if (orig_mtt != cq->mtt) {
2556 err = -EINVAL;
2557 goto ex_put;
2558 }
2559
2b8fb286 2560 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
c82e9aa0
EC
2561 if (err)
2562 goto ex_put;
2563
2564 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2565 if (err)
2566 goto ex_put1;
2567 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2568 if (err)
2569 goto ex_put1;
2570 atomic_dec(&orig_mtt->ref_count);
2571 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2572 atomic_inc(&mtt->ref_count);
2573 cq->mtt = mtt;
2574 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2575 return 0;
2576
2577ex_put1:
2578 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2579ex_put:
2580 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2581
2582 return err;
2583
2584}
2585
2586int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2587 struct mlx4_vhcr *vhcr,
2588 struct mlx4_cmd_mailbox *inbox,
2589 struct mlx4_cmd_mailbox *outbox,
2590 struct mlx4_cmd_info *cmd)
2591{
2592 int cqn = vhcr->in_modifier;
2593 struct res_cq *cq;
2594 int err;
2595
2596 err = get_res(dev, slave, cqn, RES_CQ, &cq);
2597 if (err)
2598 return err;
2599
2600 if (cq->com.from_state != RES_CQ_HW)
2601 goto ex_put;
2602
2603 if (vhcr->op_modifier == 0) {
2604 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
dcf353b1 2605 goto ex_put;
c82e9aa0
EC
2606 }
2607
2608 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2609ex_put:
2610 put_res(dev, slave, cqn, RES_CQ);
2611
2612 return err;
2613}
2614
c82e9aa0
EC
2615static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
2616{
2617 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
2618 int log_rq_stride = srqc->logstride & 7;
2619 int page_shift = (srqc->log_page_size & 0x3f) + 12;
2620
2621 if (log_srq_size + log_rq_stride + 4 < page_shift)
2622 return 1;
2623
2624 return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
2625}
2626
2627int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2628 struct mlx4_vhcr *vhcr,
2629 struct mlx4_cmd_mailbox *inbox,
2630 struct mlx4_cmd_mailbox *outbox,
2631 struct mlx4_cmd_info *cmd)
2632{
2633 int err;
2634 int srqn = vhcr->in_modifier;
2635 struct res_mtt *mtt;
2636 struct res_srq *srq;
2637 struct mlx4_srq_context *srqc = inbox->buf;
2b8fb286 2638 int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
c82e9aa0
EC
2639
2640 if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
2641 return -EINVAL;
2642
2643 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
2644 if (err)
2645 return err;
2b8fb286 2646 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
c82e9aa0
EC
2647 if (err)
2648 goto ex_abort;
2649 err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
2650 mtt);
2651 if (err)
2652 goto ex_put_mtt;
2653
c82e9aa0
EC
2654 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2655 if (err)
2656 goto ex_put_mtt;
2657
2658 atomic_inc(&mtt->ref_count);
2659 srq->mtt = mtt;
2660 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2661 res_end_move(dev, slave, RES_SRQ, srqn);
2662 return 0;
2663
2664ex_put_mtt:
2665 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2666ex_abort:
2667 res_abort_move(dev, slave, RES_SRQ, srqn);
2668
2669 return err;
2670}
2671
2672int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2673 struct mlx4_vhcr *vhcr,
2674 struct mlx4_cmd_mailbox *inbox,
2675 struct mlx4_cmd_mailbox *outbox,
2676 struct mlx4_cmd_info *cmd)
2677{
2678 int err;
2679 int srqn = vhcr->in_modifier;
2680 struct res_srq *srq;
2681
2682 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
2683 if (err)
2684 return err;
2685 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2686 if (err)
2687 goto ex_abort;
2688 atomic_dec(&srq->mtt->ref_count);
2689 if (srq->cq)
2690 atomic_dec(&srq->cq->ref_count);
2691 res_end_move(dev, slave, RES_SRQ, srqn);
2692
2693 return 0;
2694
2695ex_abort:
2696 res_abort_move(dev, slave, RES_SRQ, srqn);
2697
2698 return err;
2699}
2700
2701int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2702 struct mlx4_vhcr *vhcr,
2703 struct mlx4_cmd_mailbox *inbox,
2704 struct mlx4_cmd_mailbox *outbox,
2705 struct mlx4_cmd_info *cmd)
2706{
2707 int err;
2708 int srqn = vhcr->in_modifier;
2709 struct res_srq *srq;
2710
2711 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2712 if (err)
2713 return err;
2714 if (srq->com.from_state != RES_SRQ_HW) {
2715 err = -EBUSY;
2716 goto out;
2717 }
2718 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2719out:
2720 put_res(dev, slave, srqn, RES_SRQ);
2721 return err;
2722}
2723
2724int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2725 struct mlx4_vhcr *vhcr,
2726 struct mlx4_cmd_mailbox *inbox,
2727 struct mlx4_cmd_mailbox *outbox,
2728 struct mlx4_cmd_info *cmd)
2729{
2730 int err;
2731 int srqn = vhcr->in_modifier;
2732 struct res_srq *srq;
2733
2734 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2735 if (err)
2736 return err;
2737
2738 if (srq->com.from_state != RES_SRQ_HW) {
2739 err = -EBUSY;
2740 goto out;
2741 }
2742
2743 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2744out:
2745 put_res(dev, slave, srqn, RES_SRQ);
2746 return err;
2747}
2748
2749int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
2750 struct mlx4_vhcr *vhcr,
2751 struct mlx4_cmd_mailbox *inbox,
2752 struct mlx4_cmd_mailbox *outbox,
2753 struct mlx4_cmd_info *cmd)
2754{
2755 int err;
2756 int qpn = vhcr->in_modifier & 0x7fffff;
2757 struct res_qp *qp;
2758
2759 err = get_res(dev, slave, qpn, RES_QP, &qp);
2760 if (err)
2761 return err;
2762 if (qp->com.from_state != RES_QP_HW) {
2763 err = -EBUSY;
2764 goto out;
2765 }
2766
2767 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2768out:
2769 put_res(dev, slave, qpn, RES_QP);
2770 return err;
2771}
2772
54679e14
JM
2773int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2774 struct mlx4_vhcr *vhcr,
2775 struct mlx4_cmd_mailbox *inbox,
2776 struct mlx4_cmd_mailbox *outbox,
2777 struct mlx4_cmd_info *cmd)
2778{
2779 struct mlx4_qp_context *context = inbox->buf + 8;
2780 adjust_proxy_tun_qkey(dev, vhcr, context);
2781 update_pkey_index(dev, slave, inbox);
2782 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2783}
2784
c82e9aa0
EC
2785int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
2786 struct mlx4_vhcr *vhcr,
2787 struct mlx4_cmd_mailbox *inbox,
2788 struct mlx4_cmd_mailbox *outbox,
2789 struct mlx4_cmd_info *cmd)
2790{
54679e14 2791 int err;
c82e9aa0
EC
2792 struct mlx4_qp_context *qpc = inbox->buf + 8;
2793
54679e14
JM
2794 err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
2795 if (err)
2796 return err;
2797
2798 update_pkey_index(dev, slave, inbox);
2799 update_gid(dev, inbox, (u8)slave);
2800 adjust_proxy_tun_qkey(dev, vhcr, qpc);
2801
2802 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2803}
2804
2805int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2806 struct mlx4_vhcr *vhcr,
2807 struct mlx4_cmd_mailbox *inbox,
2808 struct mlx4_cmd_mailbox *outbox,
2809 struct mlx4_cmd_info *cmd)
2810{
2811 int err;
2812 struct mlx4_qp_context *context = inbox->buf + 8;
2813
2814 err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
2815 if (err)
2816 return err;
2817
2818 update_pkey_index(dev, slave, inbox);
2819 update_gid(dev, inbox, (u8)slave);
2820 adjust_proxy_tun_qkey(dev, vhcr, context);
2821 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2822}
2823
2824int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2825 struct mlx4_vhcr *vhcr,
2826 struct mlx4_cmd_mailbox *inbox,
2827 struct mlx4_cmd_mailbox *outbox,
2828 struct mlx4_cmd_info *cmd)
2829{
2830 int err;
2831 struct mlx4_qp_context *context = inbox->buf + 8;
2832
2833 err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
2834 if (err)
2835 return err;
2836
2837 update_pkey_index(dev, slave, inbox);
2838 update_gid(dev, inbox, (u8)slave);
2839 adjust_proxy_tun_qkey(dev, vhcr, context);
2840 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2841}
2842
2843
2844int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2845 struct mlx4_vhcr *vhcr,
2846 struct mlx4_cmd_mailbox *inbox,
2847 struct mlx4_cmd_mailbox *outbox,
2848 struct mlx4_cmd_info *cmd)
2849{
2850 struct mlx4_qp_context *context = inbox->buf + 8;
2851 adjust_proxy_tun_qkey(dev, vhcr, context);
2852 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2853}
2854
2855int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
2856 struct mlx4_vhcr *vhcr,
2857 struct mlx4_cmd_mailbox *inbox,
2858 struct mlx4_cmd_mailbox *outbox,
2859 struct mlx4_cmd_info *cmd)
2860{
2861 int err;
2862 struct mlx4_qp_context *context = inbox->buf + 8;
2863
2864 err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
2865 if (err)
2866 return err;
2867
2868 adjust_proxy_tun_qkey(dev, vhcr, context);
2869 update_gid(dev, inbox, (u8)slave);
2870 update_pkey_index(dev, slave, inbox);
2871 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2872}
2873
2874int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2875 struct mlx4_vhcr *vhcr,
2876 struct mlx4_cmd_mailbox *inbox,
2877 struct mlx4_cmd_mailbox *outbox,
2878 struct mlx4_cmd_info *cmd)
2879{
2880 int err;
2881 struct mlx4_qp_context *context = inbox->buf + 8;
2882
2883 err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
2884 if (err)
2885 return err;
c82e9aa0 2886
54679e14
JM
2887 adjust_proxy_tun_qkey(dev, vhcr, context);
2888 update_gid(dev, inbox, (u8)slave);
2889 update_pkey_index(dev, slave, inbox);
c82e9aa0
EC
2890 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2891}
2892
2893int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
2894 struct mlx4_vhcr *vhcr,
2895 struct mlx4_cmd_mailbox *inbox,
2896 struct mlx4_cmd_mailbox *outbox,
2897 struct mlx4_cmd_info *cmd)
2898{
2899 int err;
2900 int qpn = vhcr->in_modifier & 0x7fffff;
2901 struct res_qp *qp;
2902
2903 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
2904 if (err)
2905 return err;
2906 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2907 if (err)
2908 goto ex_abort;
2909
2910 atomic_dec(&qp->mtt->ref_count);
2911 atomic_dec(&qp->rcq->ref_count);
2912 atomic_dec(&qp->scq->ref_count);
2913 if (qp->srq)
2914 atomic_dec(&qp->srq->ref_count);
2915 res_end_move(dev, slave, RES_QP, qpn);
2916 return 0;
2917
2918ex_abort:
2919 res_abort_move(dev, slave, RES_QP, qpn);
2920
2921 return err;
2922}
2923
2924static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
2925 struct res_qp *rqp, u8 *gid)
2926{
2927 struct res_gid *res;
2928
2929 list_for_each_entry(res, &rqp->mcg_list, list) {
2930 if (!memcmp(res->gid, gid, 16))
2931 return res;
2932 }
2933 return NULL;
2934}
2935
2936static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
9f5b6c63 2937 u8 *gid, enum mlx4_protocol prot,
fab1e24a 2938 enum mlx4_steer_type steer, u64 reg_id)
c82e9aa0
EC
2939{
2940 struct res_gid *res;
2941 int err;
2942
2943 res = kzalloc(sizeof *res, GFP_KERNEL);
2944 if (!res)
2945 return -ENOMEM;
2946
2947 spin_lock_irq(&rqp->mcg_spl);
2948 if (find_gid(dev, slave, rqp, gid)) {
2949 kfree(res);
2950 err = -EEXIST;
2951 } else {
2952 memcpy(res->gid, gid, 16);
2953 res->prot = prot;
9f5b6c63 2954 res->steer = steer;
fab1e24a 2955 res->reg_id = reg_id;
c82e9aa0
EC
2956 list_add_tail(&res->list, &rqp->mcg_list);
2957 err = 0;
2958 }
2959 spin_unlock_irq(&rqp->mcg_spl);
2960
2961 return err;
2962}
2963
2964static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
9f5b6c63 2965 u8 *gid, enum mlx4_protocol prot,
fab1e24a 2966 enum mlx4_steer_type steer, u64 *reg_id)
c82e9aa0
EC
2967{
2968 struct res_gid *res;
2969 int err;
2970
2971 spin_lock_irq(&rqp->mcg_spl);
2972 res = find_gid(dev, slave, rqp, gid);
9f5b6c63 2973 if (!res || res->prot != prot || res->steer != steer)
c82e9aa0
EC
2974 err = -EINVAL;
2975 else {
fab1e24a 2976 *reg_id = res->reg_id;
c82e9aa0
EC
2977 list_del(&res->list);
2978 kfree(res);
2979 err = 0;
2980 }
2981 spin_unlock_irq(&rqp->mcg_spl);
2982
2983 return err;
2984}
2985
fab1e24a
HHZ
2986static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
2987 int block_loopback, enum mlx4_protocol prot,
2988 enum mlx4_steer_type type, u64 *reg_id)
2989{
2990 switch (dev->caps.steering_mode) {
2991 case MLX4_STEERING_MODE_DEVICE_MANAGED:
2992 return mlx4_trans_to_dmfs_attach(dev, qp, gid, gid[5],
2993 block_loopback, prot,
2994 reg_id);
2995 case MLX4_STEERING_MODE_B0:
2996 return mlx4_qp_attach_common(dev, qp, gid,
2997 block_loopback, prot, type);
2998 default:
2999 return -EINVAL;
3000 }
3001}
3002
3003static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
3004 enum mlx4_protocol prot, enum mlx4_steer_type type,
3005 u64 reg_id)
3006{
3007 switch (dev->caps.steering_mode) {
3008 case MLX4_STEERING_MODE_DEVICE_MANAGED:
3009 return mlx4_flow_detach(dev, reg_id);
3010 case MLX4_STEERING_MODE_B0:
3011 return mlx4_qp_detach_common(dev, qp, gid, prot, type);
3012 default:
3013 return -EINVAL;
3014 }
3015}
3016
c82e9aa0
EC
3017int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3018 struct mlx4_vhcr *vhcr,
3019 struct mlx4_cmd_mailbox *inbox,
3020 struct mlx4_cmd_mailbox *outbox,
3021 struct mlx4_cmd_info *cmd)
3022{
3023 struct mlx4_qp qp; /* dummy for calling attach/detach */
3024 u8 *gid = inbox->buf;
3025 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
162344ed 3026 int err;
c82e9aa0
EC
3027 int qpn;
3028 struct res_qp *rqp;
fab1e24a 3029 u64 reg_id = 0;
c82e9aa0
EC
3030 int attach = vhcr->op_modifier;
3031 int block_loopback = vhcr->in_modifier >> 31;
3032 u8 steer_type_mask = 2;
75c6062c 3033 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
c82e9aa0
EC
3034
3035 qpn = vhcr->in_modifier & 0xffffff;
3036 err = get_res(dev, slave, qpn, RES_QP, &rqp);
3037 if (err)
3038 return err;
3039
3040 qp.qpn = qpn;
3041 if (attach) {
fab1e24a
HHZ
3042 err = qp_attach(dev, &qp, gid, block_loopback, prot,
3043 type, &reg_id);
3044 if (err) {
3045 pr_err("Failed to attach rule to qp 0x%x\n", qpn);
c82e9aa0 3046 goto ex_put;
fab1e24a
HHZ
3047 }
3048 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
c82e9aa0 3049 if (err)
fab1e24a 3050 goto ex_detach;
c82e9aa0 3051 } else {
fab1e24a 3052 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
c82e9aa0
EC
3053 if (err)
3054 goto ex_put;
c82e9aa0 3055
fab1e24a
HHZ
3056 err = qp_detach(dev, &qp, gid, prot, type, reg_id);
3057 if (err)
3058 pr_err("Failed to detach rule from qp 0x%x reg_id = 0x%llx\n",
3059 qpn, reg_id);
3060 }
c82e9aa0 3061 put_res(dev, slave, qpn, RES_QP);
fab1e24a 3062 return err;
c82e9aa0 3063
fab1e24a
HHZ
3064ex_detach:
3065 qp_detach(dev, &qp, gid, prot, type, reg_id);
c82e9aa0
EC
3066ex_put:
3067 put_res(dev, slave, qpn, RES_QP);
c82e9aa0
EC
3068 return err;
3069}
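
The attach/detach wrapper above unpacks everything it needs from the VHCR: the QP number from the low 24 bits of in_modifier, the protocol from bits 28-30, the block-loopback flag from bit 31, and the steering type from bit 1 of gid[7]. A decoding sketch under those assumptions; the struct and helper names are the editor's, not the driver's.

#include <stdio.h>
#include <stdint.h>

/* Decoding sketch for the ATTACH/DETACH in_modifier handled above. */
struct attach_req {
	unsigned int qpn;
	unsigned int prot;
	unsigned int block_loopback;
	unsigned int steer_type;
};

static struct attach_req decode_attach(uint32_t in_modifier, const uint8_t gid[16])
{
	struct attach_req r;

	r.qpn            = in_modifier & 0xffffff;
	r.prot           = (in_modifier >> 28) & 0x7;
	r.block_loopback = in_modifier >> 31;
	r.steer_type     = (gid[7] & 2) >> 1;
	return r;
}

int main(void)
{
	uint8_t gid[16] = { [7] = 2 };
	struct attach_req r = decode_attach(0x80000000u | (1u << 28) | 0x41, gid);

	printf("qpn=0x%x prot=%u block=%u steer=%u\n",
	       r.qpn, r.prot, r.block_loopback, r.steer_type);
	/* qpn=0x41 prot=1 block=1 steer=1 */
	return 0;
}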
3070
7fb40f87
HHZ
3071/*
3072 * MAC validation for Flow Steering rules.
3073 * A VF can attach rules only with a MAC address that is assigned to it.
3074 */
3075static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
3076 struct list_head *rlist)
3077{
3078 struct mac_res *res, *tmp;
3079 __be64 be_mac;
3080
3081 /* make sure it isn't a multicast or broadcast mac */
3082 if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
3083 !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
3084 list_for_each_entry_safe(res, tmp, rlist, list) {
3085 be_mac = cpu_to_be64(res->mac << 16);
3086 if (!memcmp(&be_mac, eth_header->eth.dst_mac, ETH_ALEN))
3087 return 0;
3088 }
3089 pr_err("MAC %pM doesn't belong to VF %d, steering rule rejected\n",
3090 eth_header->eth.dst_mac, slave);
3091 return -EINVAL;
3092 }
3093 return 0;
3094}
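
The comparison above relies on how tracked MACs are stored: a 48-bit address kept in the low bits of a u64, shifted left by 16 and converted to big endian so its six bytes land at the start of the buffer and can be memcmp'd against the rule's dst_mac. A standalone sketch of the same trick, using glibc's htobe64() in place of cpu_to_be64():

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <endian.h>

/* Sketch of the MAC comparison above: the 48-bit MAC held in the low bits
 * of a u64 is shifted left by 16 and stored big-endian, which places its
 * six bytes at the start of the buffer. */
static int mac_matches(uint64_t mac, const uint8_t dst_mac[6])
{
	uint64_t be_mac = htobe64(mac << 16);

	return !memcmp(&be_mac, dst_mac, 6);
}

int main(void)
{
	const uint8_t dst[6] = { 0x00, 0x02, 0xc9, 0x12, 0x34, 0x56 };

	printf("%d\n", mac_matches(0x0002c9123456ULL, dst));	/* prints 1 */
	return 0;
}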
3095
3096/*
3097 * If the eth header is missing, prepend an eth header carrying a MAC address
3098 * assigned to the VF.
3099 */
3100static int add_eth_header(struct mlx4_dev *dev, int slave,
3101 struct mlx4_cmd_mailbox *inbox,
3102 struct list_head *rlist, int header_id)
3103{
3104 struct mac_res *res, *tmp;
3105 u8 port;
3106 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3107 struct mlx4_net_trans_rule_hw_eth *eth_header;
3108 struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
3109 struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
3110 __be64 be_mac = 0;
3111 __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
3112
3113 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
015465f8 3114 port = ctrl->port;
7fb40f87
HHZ
3115 eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
3116
3117 /* Clear a space in the inbox for eth header */
3118 switch (header_id) {
3119 case MLX4_NET_TRANS_RULE_ID_IPV4:
3120 ip_header =
3121 (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
3122 memmove(ip_header, eth_header,
3123 sizeof(*ip_header) + sizeof(*l4_header));
3124 break;
3125 case MLX4_NET_TRANS_RULE_ID_TCP:
3126 case MLX4_NET_TRANS_RULE_ID_UDP:
3127 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
3128 (eth_header + 1);
3129 memmove(l4_header, eth_header, sizeof(*l4_header));
3130 break;
3131 default:
3132 return -EINVAL;
3133 }
3134 list_for_each_entry_safe(res, tmp, rlist, list) {
3135 if (port == res->port) {
3136 be_mac = cpu_to_be64(res->mac << 16);
3137 break;
3138 }
3139 }
3140 if (!be_mac) {
3141 pr_err("Failed adding eth header to FS rule: can't find a matching MAC for port %d\n",
3142 port);
3143 return -EINVAL;
3144 }
3145
3146 memset(eth_header, 0, sizeof(*eth_header));
3147 eth_header->size = sizeof(*eth_header) >> 2;
3148 eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
3149 memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
3150 memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
3151
3152 return 0;
3153
3154}
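
add_eth_header() makes room for the L2 header by shifting the existing L3/L4 headers down with memmove() and then writing the new eth header into the gap at the front of the rule. A minimal sketch of that shift-and-prepend pattern; the buffer contents and sizes below are made up for illustration only.

#include <stdio.h>
#include <string.h>

/* Minimal sketch of the shift-and-prepend done in add_eth_header():
 * memmove the existing headers down by the size of the new header, then
 * write the new header into the hole. */
int main(void)
{
	char rule[32] = "IPV4HDR|L4HDR";
	const char eth[8] = "ETHHDR|";
	size_t used = strlen(rule) + 1;

	memmove(rule + sizeof(eth), rule, used);	/* make room */
	memcpy(rule, eth, sizeof(eth));			/* prepend eth header */
	printf("%s%s\n", rule, rule + sizeof(eth));	/* ETHHDR|IPV4HDR|L4HDR */
	return 0;
}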
3155
8fcfb4db
HHZ
3156int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3157 struct mlx4_vhcr *vhcr,
3158 struct mlx4_cmd_mailbox *inbox,
3159 struct mlx4_cmd_mailbox *outbox,
3160 struct mlx4_cmd_info *cmd)
3161{
7fb40f87
HHZ
3162
3163 struct mlx4_priv *priv = mlx4_priv(dev);
3164 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3165 struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
1b9c6b06 3166 int err;
a9c01e7a 3167 int qpn;
2c473ae7 3168 struct res_qp *rqp;
7fb40f87
HHZ
3169 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3170 struct _rule_hw *rule_header;
3171 int header_id;
1b9c6b06 3172
0ff1fb65
HHZ
3173 if (dev->caps.steering_mode !=
3174 MLX4_STEERING_MODE_DEVICE_MANAGED)
3175 return -EOPNOTSUPP;
1b9c6b06 3176
7fb40f87 3177 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
a9c01e7a 3178 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
2c473ae7 3179 err = get_res(dev, slave, qpn, RES_QP, &rqp);
a9c01e7a
HHZ
3180 if (err) {
3181 pr_err("Steering rule with qpn 0x%x rejected.\n", qpn);
3182 return err;
3183 }
7fb40f87
HHZ
3184 rule_header = (struct _rule_hw *)(ctrl + 1);
3185 header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
3186
3187 switch (header_id) {
3188 case MLX4_NET_TRANS_RULE_ID_ETH:
a9c01e7a
HHZ
3189 if (validate_eth_header_mac(slave, rule_header, rlist)) {
3190 err = -EINVAL;
3191 goto err_put;
3192 }
7fb40f87 3193 break;
60396683
JM
3194 case MLX4_NET_TRANS_RULE_ID_IB:
3195 break;
7fb40f87
HHZ
3196 case MLX4_NET_TRANS_RULE_ID_IPV4:
3197 case MLX4_NET_TRANS_RULE_ID_TCP:
3198 case MLX4_NET_TRANS_RULE_ID_UDP:
3199 pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
a9c01e7a
HHZ
3200 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
3201 err = -EINVAL;
3202 goto err_put;
3203 }
7fb40f87
HHZ
3204 vhcr->in_modifier +=
3205 sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
3206 break;
3207 default:
3208 pr_err("Corrupted mailbox.\n");
a9c01e7a
HHZ
3209 err = -EINVAL;
3210 goto err_put;
7fb40f87
HHZ
3211 }
3212
1b9c6b06
HHZ
3213 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
3214 vhcr->in_modifier, 0,
3215 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
3216 MLX4_CMD_NATIVE);
3217 if (err)
a9c01e7a 3218 goto err_put;
1b9c6b06 3219
2c473ae7 3220 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
1b9c6b06
HHZ
3221 if (err) {
3222 mlx4_err(dev, "Failed to add flow steering resources\n");
3223 /* detach rule */
3224 mlx4_cmd(dev, vhcr->out_param, 0, 0,
2065b38b 3225 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
1b9c6b06 3226 MLX4_CMD_NATIVE);
2c473ae7 3227 goto err_put;
1b9c6b06 3228 }
2c473ae7 3229 atomic_inc(&rqp->ref_count);
a9c01e7a
HHZ
3230err_put:
3231 put_res(dev, slave, qpn, RES_QP);
1b9c6b06 3232 return err;
8fcfb4db
HHZ
3233}
3234
3235int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
3236 struct mlx4_vhcr *vhcr,
3237 struct mlx4_cmd_mailbox *inbox,
3238 struct mlx4_cmd_mailbox *outbox,
3239 struct mlx4_cmd_info *cmd)
3240{
1b9c6b06 3241 int err;
2c473ae7
HHZ
3242 struct res_qp *rqp;
3243 struct res_fs_rule *rrule;
1b9c6b06 3244
0ff1fb65
HHZ
3245 if (dev->caps.steering_mode !=
3246 MLX4_STEERING_MODE_DEVICE_MANAGED)
3247 return -EOPNOTSUPP;
1b9c6b06 3248
2c473ae7
HHZ
3249 err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
3250 if (err)
3251 return err;
3252 /* Release the rule from busy state before removal */
3253 put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
3254 err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
3255 if (err)
3256 return err;
3257
1b9c6b06
HHZ
3258 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
3259 if (err) {
3260 mlx4_err(dev, "Failed to remove flow steering resources\n");
2c473ae7 3261 goto out;
1b9c6b06
HHZ
3262 }
3263
3264 err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
3265 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3266 MLX4_CMD_NATIVE);
2c473ae7
HHZ
3267 if (!err)
3268 atomic_dec(&rqp->ref_count);
3269out:
3270 put_res(dev, slave, rrule->qpn, RES_QP);
1b9c6b06 3271 return err;
8fcfb4db
HHZ
3272}
3273
c82e9aa0
EC
3274enum {
3275 BUSY_MAX_RETRIES = 10
3276};
3277
3278int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
3279 struct mlx4_vhcr *vhcr,
3280 struct mlx4_cmd_mailbox *inbox,
3281 struct mlx4_cmd_mailbox *outbox,
3282 struct mlx4_cmd_info *cmd)
3283{
3284 int err;
3285 int index = vhcr->in_modifier & 0xffff;
3286
3287 err = get_res(dev, slave, index, RES_COUNTER, NULL);
3288 if (err)
3289 return err;
3290
3291 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3292 put_res(dev, slave, index, RES_COUNTER);
3293 return err;
3294}
3295
3296static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
3297{
3298 struct res_gid *rgid;
3299 struct res_gid *tmp;
c82e9aa0
EC
3300 struct mlx4_qp qp; /* dummy for calling attach/detach */
3301
3302 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
fab1e24a
HHZ
3303 switch (dev->caps.steering_mode) {
3304 case MLX4_STEERING_MODE_DEVICE_MANAGED:
3305 mlx4_flow_detach(dev, rgid->reg_id);
3306 break;
3307 case MLX4_STEERING_MODE_B0:
3308 qp.qpn = rqp->local_qpn;
3309 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
3310 rgid->prot, rgid->steer);
3311 break;
3312 }
c82e9aa0
EC
3313 list_del(&rgid->list);
3314 kfree(rgid);
3315 }
3316}
3317
3318static int _move_all_busy(struct mlx4_dev *dev, int slave,
3319 enum mlx4_resource type, int print)
3320{
3321 struct mlx4_priv *priv = mlx4_priv(dev);
3322 struct mlx4_resource_tracker *tracker =
3323 &priv->mfunc.master.res_tracker;
3324 struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
3325 struct res_common *r;
3326 struct res_common *tmp;
3327 int busy;
3328
3329 busy = 0;
3330 spin_lock_irq(mlx4_tlock(dev));
3331 list_for_each_entry_safe(r, tmp, rlist, list) {
3332 if (r->owner == slave) {
3333 if (!r->removing) {
3334 if (r->state == RES_ANY_BUSY) {
3335 if (print)
3336 mlx4_dbg(dev,
aa1ec3dd 3337 "%s id 0x%llx is busy\n",
c82e9aa0
EC
3338 ResourceType(type),
3339 r->res_id);
3340 ++busy;
3341 } else {
3342 r->from_state = r->state;
3343 r->state = RES_ANY_BUSY;
3344 r->removing = 1;
3345 }
3346 }
3347 }
3348 }
3349 spin_unlock_irq(mlx4_tlock(dev));
3350
3351 return busy;
3352}
3353
3354static int move_all_busy(struct mlx4_dev *dev, int slave,
3355 enum mlx4_resource type)
3356{
3357 unsigned long begin;
3358 int busy;
3359
3360 begin = jiffies;
3361 do {
3362 busy = _move_all_busy(dev, slave, type, 0);
3363 if (time_after(jiffies, begin + 5 * HZ))
3364 break;
3365 if (busy)
3366 cond_resched();
3367 } while (busy);
3368
3369 if (busy)
3370 busy = _move_all_busy(dev, slave, type, 1);
3371
3372 return busy;
3373}
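
move_all_busy() keeps retrying _move_all_busy() until no resource is busy or roughly five seconds have elapsed, yielding the CPU between attempts. Below is a user-space analogue of that bounded retry loop, with time() standing in for jiffies, sched_yield() for cond_resched(), and a made-up condition being polled.

#include <stdio.h>
#include <time.h>
#include <sched.h>

/* try_claim_all() is a stand-in for _move_all_busy(): it pretends that
 * busy resources free up after a few attempts. */
static int try_claim_all(void)
{
	static int busy = 3;

	return busy ? busy-- : 0;
}

int main(void)
{
	time_t begin = time(NULL);
	int busy;

	do {
		busy = try_claim_all();
		if (time(NULL) > begin + 5)
			break;
		if (busy)
			sched_yield();	/* cond_resched() analogue */
	} while (busy);

	printf("still busy: %d\n", busy);	/* prints 0 */
	return 0;
}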
3374static void rem_slave_qps(struct mlx4_dev *dev, int slave)
3375{
3376 struct mlx4_priv *priv = mlx4_priv(dev);
3377 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3378 struct list_head *qp_list =
3379 &tracker->slave_list[slave].res_list[RES_QP];
3380 struct res_qp *qp;
3381 struct res_qp *tmp;
3382 int state;
3383 u64 in_param;
3384 int qpn;
3385 int err;
3386
3387 err = move_all_busy(dev, slave, RES_QP);
3388 if (err)
3389 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy "
3390 "for slave %d\n", slave);
3391
3392 spin_lock_irq(mlx4_tlock(dev));
3393 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
3394 spin_unlock_irq(mlx4_tlock(dev));
3395 if (qp->com.owner == slave) {
3396 qpn = qp->com.res_id;
3397 detach_qp(dev, slave, qp);
3398 state = qp->com.from_state;
3399 while (state != 0) {
3400 switch (state) {
3401 case RES_QP_RESERVED:
3402 spin_lock_irq(mlx4_tlock(dev));
4af1c048
HHZ
3403 rb_erase(&qp->com.node,
3404 &tracker->res_tree[RES_QP]);
c82e9aa0
EC
3405 list_del(&qp->com.list);
3406 spin_unlock_irq(mlx4_tlock(dev));
3407 kfree(qp);
3408 state = 0;
3409 break;
3410 case RES_QP_MAPPED:
3411 if (!valid_reserved(dev, slave, qpn))
3412 __mlx4_qp_free_icm(dev, qpn);
3413 state = RES_QP_RESERVED;
3414 break;
3415 case RES_QP_HW:
3416 in_param = slave;
3417 err = mlx4_cmd(dev, in_param,
3418 qp->local_qpn, 2,
3419 MLX4_CMD_2RST_QP,
3420 MLX4_CMD_TIME_CLASS_A,
3421 MLX4_CMD_NATIVE);
3422 if (err)
3423 mlx4_dbg(dev, "rem_slave_qps: failed"
3424 " to move slave %d qpn %d to"
3425 " reset\n", slave,
3426 qp->local_qpn);
3427 atomic_dec(&qp->rcq->ref_count);
3428 atomic_dec(&qp->scq->ref_count);
3429 atomic_dec(&qp->mtt->ref_count);
3430 if (qp->srq)
3431 atomic_dec(&qp->srq->ref_count);
3432 state = RES_QP_MAPPED;
3433 break;
3434 default:
3435 state = 0;
3436 }
3437 }
3438 }
3439 spin_lock_irq(mlx4_tlock(dev));
3440 }
3441 spin_unlock_irq(mlx4_tlock(dev));
3442}
3443
3444static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
3445{
3446 struct mlx4_priv *priv = mlx4_priv(dev);
3447 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3448 struct list_head *srq_list =
3449 &tracker->slave_list[slave].res_list[RES_SRQ];
3450 struct res_srq *srq;
3451 struct res_srq *tmp;
3452 int state;
3453 u64 in_param;
3454 LIST_HEAD(tlist);
3455 int srqn;
3456 int err;
3457
3458 err = move_all_busy(dev, slave, RES_SRQ);
3459 if (err)
3460 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
3461 "busy for slave %d\n", slave);
3462
3463 spin_lock_irq(mlx4_tlock(dev));
3464 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
3465 spin_unlock_irq(mlx4_tlock(dev));
3466 if (srq->com.owner == slave) {
3467 srqn = srq->com.res_id;
3468 state = srq->com.from_state;
3469 while (state != 0) {
3470 switch (state) {
3471 case RES_SRQ_ALLOCATED:
3472 __mlx4_srq_free_icm(dev, srqn);
3473 spin_lock_irq(mlx4_tlock(dev));
4af1c048
HHZ
3474 rb_erase(&srq->com.node,
3475 &tracker->res_tree[RES_SRQ]);
c82e9aa0
EC
3476 list_del(&srq->com.list);
3477 spin_unlock_irq(mlx4_tlock(dev));
3478 kfree(srq);
3479 state = 0;
3480 break;
3481
3482 case RES_SRQ_HW:
3483 in_param = slave;
3484 err = mlx4_cmd(dev, in_param, srqn, 1,
3485 MLX4_CMD_HW2SW_SRQ,
3486 MLX4_CMD_TIME_CLASS_A,
3487 MLX4_CMD_NATIVE);
3488 if (err)
3489 mlx4_dbg(dev, "rem_slave_srqs: failed"
3490 " to move slave %d srq %d to"
3491 " SW ownership\n",
3492 slave, srqn);
3493
3494 atomic_dec(&srq->mtt->ref_count);
3495 if (srq->cq)
3496 atomic_dec(&srq->cq->ref_count);
3497 state = RES_SRQ_ALLOCATED;
3498 break;
3499
3500 default:
3501 state = 0;
3502 }
3503 }
3504 }
3505 spin_lock_irq(mlx4_tlock(dev));
3506 }
3507 spin_unlock_irq(mlx4_tlock(dev));
3508}
3509
3510static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
3511{
3512 struct mlx4_priv *priv = mlx4_priv(dev);
3513 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3514 struct list_head *cq_list =
3515 &tracker->slave_list[slave].res_list[RES_CQ];
3516 struct res_cq *cq;
3517 struct res_cq *tmp;
3518 int state;
3519 u64 in_param;
3520 LIST_HEAD(tlist);
3521 int cqn;
3522 int err;
3523
3524 err = move_all_busy(dev, slave, RES_CQ);
3525 if (err)
3526 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
3527 "busy for slave %d\n", slave);
3528
3529 spin_lock_irq(mlx4_tlock(dev));
3530 list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
3531 spin_unlock_irq(mlx4_tlock(dev));
3532 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
3533 cqn = cq->com.res_id;
3534 state = cq->com.from_state;
3535 while (state != 0) {
3536 switch (state) {
3537 case RES_CQ_ALLOCATED:
3538 __mlx4_cq_free_icm(dev, cqn);
3539 spin_lock_irq(mlx4_tlock(dev));
4af1c048
HHZ
3540 rb_erase(&cq->com.node,
3541 &tracker->res_tree[RES_CQ]);
c82e9aa0
EC
3542 list_del(&cq->com.list);
3543 spin_unlock_irq(mlx4_tlock(dev));
3544 kfree(cq);
3545 state = 0;
3546 break;
3547
3548 case RES_CQ_HW:
3549 in_param = slave;
3550 err = mlx4_cmd(dev, in_param, cqn, 1,
3551 MLX4_CMD_HW2SW_CQ,
3552 MLX4_CMD_TIME_CLASS_A,
3553 MLX4_CMD_NATIVE);
3554 if (err)
3555 mlx4_dbg(dev, "rem_slave_cqs: failed"
3556 " to move slave %d cq %d to"
3557 " SW ownership\n",
3558 slave, cqn);
3559 atomic_dec(&cq->mtt->ref_count);
3560 state = RES_CQ_ALLOCATED;
3561 break;
3562
3563 default:
3564 state = 0;
3565 }
3566 }
3567 }
3568 spin_lock_irq(mlx4_tlock(dev));
3569 }
3570 spin_unlock_irq(mlx4_tlock(dev));
3571}
3572
3573static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
3574{
3575 struct mlx4_priv *priv = mlx4_priv(dev);
3576 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3577 struct list_head *mpt_list =
3578 &tracker->slave_list[slave].res_list[RES_MPT];
3579 struct res_mpt *mpt;
3580 struct res_mpt *tmp;
3581 int state;
3582 u64 in_param;
3583 LIST_HEAD(tlist);
3584 int mptn;
3585 int err;
3586
3587 err = move_all_busy(dev, slave, RES_MPT);
3588 if (err)
3589 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
3590 "busy for slave %d\n", slave);
3591
3592 spin_lock_irq(mlx4_tlock(dev));
3593 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
3594 spin_unlock_irq(mlx4_tlock(dev));
3595 if (mpt->com.owner == slave) {
3596 mptn = mpt->com.res_id;
3597 state = mpt->com.from_state;
3598 while (state != 0) {
3599 switch (state) {
3600 case RES_MPT_RESERVED:
b20e519a 3601 __mlx4_mpt_release(dev, mpt->key);
c82e9aa0 3602 spin_lock_irq(mlx4_tlock(dev));
4af1c048
HHZ
3603 rb_erase(&mpt->com.node,
3604 &tracker->res_tree[RES_MPT]);
c82e9aa0
EC
3605 list_del(&mpt->com.list);
3606 spin_unlock_irq(mlx4_tlock(dev));
3607 kfree(mpt);
3608 state = 0;
3609 break;
3610
3611 case RES_MPT_MAPPED:
b20e519a 3612 __mlx4_mpt_free_icm(dev, mpt->key);
c82e9aa0
EC
3613 state = RES_MPT_RESERVED;
3614 break;
3615
3616 case RES_MPT_HW:
3617 in_param = slave;
3618 err = mlx4_cmd(dev, in_param, mptn, 0,
3619 MLX4_CMD_HW2SW_MPT,
3620 MLX4_CMD_TIME_CLASS_A,
3621 MLX4_CMD_NATIVE);
3622 if (err)
3623 mlx4_dbg(dev, "rem_slave_mrs: failed"
3624 " to move slave %d mpt %d to"
3625 " SW ownership\n",
3626 slave, mptn);
3627 if (mpt->mtt)
3628 atomic_dec(&mpt->mtt->ref_count);
3629 state = RES_MPT_MAPPED;
3630 break;
3631 default:
3632 state = 0;
3633 }
3634 }
3635 }
3636 spin_lock_irq(mlx4_tlock(dev));
3637 }
3638 spin_unlock_irq(mlx4_tlock(dev));
3639}
3640
3641static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
3642{
3643 struct mlx4_priv *priv = mlx4_priv(dev);
3644 struct mlx4_resource_tracker *tracker =
3645 &priv->mfunc.master.res_tracker;
3646 struct list_head *mtt_list =
3647 &tracker->slave_list[slave].res_list[RES_MTT];
3648 struct res_mtt *mtt;
3649 struct res_mtt *tmp;
3650 int state;
3651 LIST_HEAD(tlist);
3652 int base;
3653 int err;
3654
3655 err = move_all_busy(dev, slave, RES_MTT);
3656 if (err)
3657 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
3658 "busy for slave %d\n", slave);
3659
3660 spin_lock_irq(mlx4_tlock(dev));
3661 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
3662 spin_unlock_irq(mlx4_tlock(dev));
3663 if (mtt->com.owner == slave) {
3664 base = mtt->com.res_id;
3665 state = mtt->com.from_state;
3666 while (state != 0) {
3667 switch (state) {
3668 case RES_MTT_ALLOCATED:
3669 __mlx4_free_mtt_range(dev, base,
3670 mtt->order);
3671 spin_lock_irq(mlx4_tlock(dev));
4af1c048
HHZ
3672 rb_erase(&mtt->com.node,
3673 &tracker->res_tree[RES_MTT]);
c82e9aa0
EC
3674 list_del(&mtt->com.list);
3675 spin_unlock_irq(mlx4_tlock(dev));
3676 kfree(mtt);
3677 state = 0;
3678 break;
3679
3680 default:
3681 state = 0;
3682 }
3683 }
3684 }
3685 spin_lock_irq(mlx4_tlock(dev));
3686 }
3687 spin_unlock_irq(mlx4_tlock(dev));
3688}
3689
1b9c6b06
HHZ
3690static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
3691{
3692 struct mlx4_priv *priv = mlx4_priv(dev);
3693 struct mlx4_resource_tracker *tracker =
3694 &priv->mfunc.master.res_tracker;
3695 struct list_head *fs_rule_list =
3696 &tracker->slave_list[slave].res_list[RES_FS_RULE];
3697 struct res_fs_rule *fs_rule;
3698 struct res_fs_rule *tmp;
3699 int state;
3700 u64 base;
3701 int err;
3702
3703 err = move_all_busy(dev, slave, RES_FS_RULE);
3704 if (err)
3705 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
3706 slave);
3707
3708 spin_lock_irq(mlx4_tlock(dev));
3709 list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
3710 spin_unlock_irq(mlx4_tlock(dev));
3711 if (fs_rule->com.owner == slave) {
3712 base = fs_rule->com.res_id;
3713 state = fs_rule->com.from_state;
3714 while (state != 0) {
3715 switch (state) {
3716 case RES_FS_RULE_ALLOCATED:
3717 /* detach rule */
3718 err = mlx4_cmd(dev, base, 0, 0,
3719 MLX4_QP_FLOW_STEERING_DETACH,
3720 MLX4_CMD_TIME_CLASS_A,
3721 MLX4_CMD_NATIVE);
3722
3723 spin_lock_irq(mlx4_tlock(dev));
3724 rb_erase(&fs_rule->com.node,
3725 &tracker->res_tree[RES_FS_RULE]);
3726 list_del(&fs_rule->com.list);
3727 spin_unlock_irq(mlx4_tlock(dev));
3728 kfree(fs_rule);
3729 state = 0;
3730 break;
3731
3732 default:
3733 state = 0;
3734 }
3735 }
3736 }
3737 spin_lock_irq(mlx4_tlock(dev));
3738 }
3739 spin_unlock_irq(mlx4_tlock(dev));
3740}
3741
c82e9aa0
EC
3742static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
3743{
3744 struct mlx4_priv *priv = mlx4_priv(dev);
3745 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3746 struct list_head *eq_list =
3747 &tracker->slave_list[slave].res_list[RES_EQ];
3748 struct res_eq *eq;
3749 struct res_eq *tmp;
3750 int err;
3751 int state;
3752 LIST_HEAD(tlist);
3753 int eqn;
3754 struct mlx4_cmd_mailbox *mailbox;
3755
3756 err = move_all_busy(dev, slave, RES_EQ);
3757 if (err)
3758 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
3759 "busy for slave %d\n", slave);
3760
3761 spin_lock_irq(mlx4_tlock(dev));
3762 list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
3763 spin_unlock_irq(mlx4_tlock(dev));
3764 if (eq->com.owner == slave) {
3765 eqn = eq->com.res_id;
3766 state = eq->com.from_state;
3767 while (state != 0) {
3768 switch (state) {
3769 case RES_EQ_RESERVED:
3770 spin_lock_irq(mlx4_tlock(dev));
4af1c048
HHZ
3771 rb_erase(&eq->com.node,
3772 &tracker->res_tree[RES_EQ]);
c82e9aa0
EC
3773 list_del(&eq->com.list);
3774 spin_unlock_irq(mlx4_tlock(dev));
3775 kfree(eq);
3776 state = 0;
3777 break;
3778
3779 case RES_EQ_HW:
3780 mailbox = mlx4_alloc_cmd_mailbox(dev);
3781 if (IS_ERR(mailbox)) {
3782 cond_resched();
3783 continue;
3784 }
3785 err = mlx4_cmd_box(dev, slave, 0,
3786 eqn & 0xff, 0,
3787 MLX4_CMD_HW2SW_EQ,
3788 MLX4_CMD_TIME_CLASS_A,
3789 MLX4_CMD_NATIVE);
eb71d0d6
JM
3790 if (err)
3791 mlx4_dbg(dev, "rem_slave_eqs: failed"
3792 " to move slave %d eqs %d to"
3793 " SW ownership\n", slave, eqn);
c82e9aa0 3794 mlx4_free_cmd_mailbox(dev, mailbox);
eb71d0d6
JM
3795 atomic_dec(&eq->mtt->ref_count);
3796 state = RES_EQ_RESERVED;
c82e9aa0
EC
3797 break;
3798
3799 default:
3800 state = 0;
3801 }
3802 }
3803 }
3804 spin_lock_irq(mlx4_tlock(dev));
3805 }
3806 spin_unlock_irq(mlx4_tlock(dev));
3807}
3808
ba062d52
JM
3809static void rem_slave_counters(struct mlx4_dev *dev, int slave)
3810{
3811 struct mlx4_priv *priv = mlx4_priv(dev);
3812 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3813 struct list_head *counter_list =
3814 &tracker->slave_list[slave].res_list[RES_COUNTER];
3815 struct res_counter *counter;
3816 struct res_counter *tmp;
3817 int err;
3818 int index;
3819
3820 err = move_all_busy(dev, slave, RES_COUNTER);
3821 if (err)
3822 mlx4_warn(dev, "rem_slave_counters: Could not move all counters to "
3823 "busy for slave %d\n", slave);
3824
3825 spin_lock_irq(mlx4_tlock(dev));
3826 list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
3827 if (counter->com.owner == slave) {
3828 index = counter->com.res_id;
4af1c048
HHZ
3829 rb_erase(&counter->com.node,
3830 &tracker->res_tree[RES_COUNTER]);
ba062d52
JM
3831 list_del(&counter->com.list);
3832 kfree(counter);
3833 __mlx4_counter_free(dev, index);
3834 }
3835 }
3836 spin_unlock_irq(mlx4_tlock(dev));
3837}
3838
3839static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
3840{
3841 struct mlx4_priv *priv = mlx4_priv(dev);
3842 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3843 struct list_head *xrcdn_list =
3844 &tracker->slave_list[slave].res_list[RES_XRCD];
3845 struct res_xrcdn *xrcd;
3846 struct res_xrcdn *tmp;
3847 int err;
3848 int xrcdn;
3849
3850 err = move_all_busy(dev, slave, RES_XRCD);
3851 if (err)
3852 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to "
3853 "busy for slave %d\n", slave);
3854
3855 spin_lock_irq(mlx4_tlock(dev));
3856 list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
3857 if (xrcd->com.owner == slave) {
3858 xrcdn = xrcd->com.res_id;
4af1c048 3859 rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
ba062d52
JM
3860 list_del(&xrcd->com.list);
3861 kfree(xrcd);
3862 __mlx4_xrcd_free(dev, xrcdn);
3863 }
3864 }
3865 spin_unlock_irq(mlx4_tlock(dev));
3866}
3867
c82e9aa0
EC
3868void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
3869{
3870 struct mlx4_priv *priv = mlx4_priv(dev);
3871
3872 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3873 /*VLAN*/
3874 rem_slave_macs(dev, slave);
80cb0021 3875 rem_slave_fs_rule(dev, slave);
c82e9aa0
EC
3876 rem_slave_qps(dev, slave);
3877 rem_slave_srqs(dev, slave);
3878 rem_slave_cqs(dev, slave);
3879 rem_slave_mrs(dev, slave);
3880 rem_slave_eqs(dev, slave);
3881 rem_slave_mtts(dev, slave);
ba062d52
JM
3882 rem_slave_counters(dev, slave);
3883 rem_slave_xrcdns(dev, slave);
c82e9aa0
EC
3884 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3885}