IB/mlx4: SR-IOV IB context objects and proxy/tunnel SQP support
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"

#define MLX4_MAC_VALID		(1ull << 63)

struct mac_res {
	struct list_head list;
	u64 mac;
	u8 port;
};

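/*
 * Common header for every tracked resource.  Each entry lives both on
 * the owning slave's per-type list and in the per-type rb-tree, keyed
 * by res_id.  'owner' is the slave (VF) that holds the resource, and
 * 'state'/'from_state'/'to_state' implement the per-resource state
 * machine used while a command touching the resource is in flight.
 */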
struct res_common {
	struct list_head list;
	struct rb_node node;
	u64 res_id;
	int owner;
	int state;
	int from_state;
	int to_state;
	int removing;
};

enum {
	RES_ANY_BUSY = 1
};

struct res_gid {
	struct list_head list;
	u8 gid[16];
	enum mlx4_protocol prot;
	enum mlx4_steer_type steer;
};

enum res_qp_states {
	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */
	RES_QP_RESERVED,

	/* ICM memory for QP context was mapped */
	RES_QP_MAPPED,

	/* QP is in hw ownership */
	RES_QP_HW
};

struct res_qp {
	struct res_common com;
	struct res_mtt *mtt;
	struct res_cq *rcq;
	struct res_cq *scq;
	struct res_srq *srq;
	struct list_head mcg_list;
	spinlock_t mcg_spl;
	int local_qpn;
};

enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,
	RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
	switch (state) {
	case RES_MTT_BUSY: return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
	default: return "Unknown";
	}
}

struct res_mtt {
	struct res_common com;
	int order;
	atomic_t ref_count;
};

enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,
	RES_MPT_RESERVED,
	RES_MPT_MAPPED,
	RES_MPT_HW,
};

struct res_mpt {
	struct res_common com;
	struct res_mtt *mtt;
	int key;
};

enum res_eq_states {
	RES_EQ_BUSY = RES_ANY_BUSY,
	RES_EQ_RESERVED,
	RES_EQ_HW,
};

struct res_eq {
	struct res_common com;
	struct res_mtt *mtt;
};

enum res_cq_states {
	RES_CQ_BUSY = RES_ANY_BUSY,
	RES_CQ_ALLOCATED,
	RES_CQ_HW,
};

struct res_cq {
	struct res_common com;
	struct res_mtt *mtt;
	atomic_t ref_count;
};

enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,
	RES_SRQ_ALLOCATED,
	RES_SRQ_HW,
};

struct res_srq {
	struct res_common com;
	struct res_mtt *mtt;
	struct res_cq *cq;
	atomic_t ref_count;
};

enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,
};

struct res_counter {
	struct res_common com;
	int port;
};

enum res_xrcdn_states {
	RES_XRCD_BUSY = RES_ANY_BUSY,
	RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
	struct res_common com;
	int port;
};

enum res_fs_rule_states {
	RES_FS_RULE_BUSY = RES_ANY_BUSY,
	RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
	struct res_common com;
};

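/*
 * Every resource type has its own rb-tree in the master's resource
 * tracker; the lookup and insert helpers below are keyed by res_id.
 */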
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct res_common *res = container_of(node, struct res_common,
						      node);

		if (res_id < res->res_id)
			node = node->rb_left;
		else if (res_id > res->res_id)
			node = node->rb_right;
		else
			return res;
	}
	return NULL;
}

static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct res_common *this = container_of(*new, struct res_common,
						       node);

		parent = *new;
		if (res->res_id < this->res_id)
			new = &((*new)->rb_left);
		else if (res->res_id > this->res_id)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&res->node, parent, new);
	rb_insert_color(&res->node, root);

	return 0;
}

/* For Debug uses */
static const char *ResourceType(enum mlx4_resource rt)
{
	switch (rt) {
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	case RES_FS_RULE: return "RES_FS_RULE";
	case RES_XRCD: return "RES_XRCD";
	default: return "Unknown resource type !!!";
	};
}

int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;
	int t;

	priv->mfunc.master.res_tracker.slave_list =
		kzalloc(dev->num_slaves * sizeof(struct slave_list),
			GFP_KERNEL);
	if (!priv->mfunc.master.res_tracker.slave_list)
		return -ENOMEM;

	for (i = 0; i < dev->num_slaves; i++) {
		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
	}

	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
		 dev->num_slaves);
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
	return 0;
}

void mlx4_free_resource_tracker(struct mlx4_dev *dev,
				enum mlx4_res_tracker_free_type type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (priv->mfunc.master.res_tracker.slave_list) {
		if (type != RES_TR_FREE_STRUCTS_ONLY)
			for (i = 0; i < dev->num_slaves; i++)
				if (type == RES_TR_FREE_ALL ||
				    dev->caps.function != i)
					mlx4_delete_all_resources_for_slave(dev, i);

		if (type != RES_TR_FREE_SLAVES_ONLY) {
			kfree(priv->mfunc.master.res_tracker.slave_list);
			priv->mfunc.master.res_tracker.slave_list = NULL;
		}
	}
}

static void update_ud_gid(struct mlx4_dev *dev,
			  struct mlx4_qp_context *qp_ctx, u8 slave)
{
	u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;

	if (MLX4_QP_ST_UD == ts)
		qp_ctx->pri_path.mgid_index = 0x80 | slave;

	mlx4_dbg(dev, "slave %d, new gid index: 0x%x ",
		 slave, qp_ctx->pri_path.mgid_index);
}

static int mpt_mask(struct mlx4_dev *dev)
{
	return dev->caps.num_mpts - 1;
}

static void *find_res(struct mlx4_dev *dev, int res_id,
		      enum mlx4_resource type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
				  res_id);
}

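/*
 * get_res() claims a tracked resource for the caller: it verifies that
 * the resource exists and is owned by 'slave', then marks it busy so
 * concurrent commands cannot move it until put_res() restores the
 * previous state.
 */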
static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
		   enum mlx4_resource type,
		   void *res)
{
	struct res_common *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (!r) {
		err = -ENONET;
		goto exit;
	}

	if (r->state == RES_ANY_BUSY) {
		err = -EBUSY;
		goto exit;
	}

	if (r->owner != slave) {
		err = -EPERM;
		goto exit;
	}

	r->from_state = r->state;
	r->state = RES_ANY_BUSY;
	mlx4_dbg(dev, "res %s id 0x%llx to busy\n",
		 ResourceType(type), r->res_id);

	if (res)
		*((struct res_common **)res) = r;

exit:
	spin_unlock_irq(mlx4_tlock(dev));
	return err;
}

int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource type,
				    u64 res_id, int *slave)
{

	struct res_common *r;
	int err = -ENOENT;
	int id = res_id;

	if (type == RES_QP)
		id &= 0x7fffff;
	spin_lock(mlx4_tlock(dev));

	r = find_res(dev, id, type);
	if (r) {
		*slave = r->owner;
		err = 0;
	}
	spin_unlock(mlx4_tlock(dev));

	return err;
}

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
		    enum mlx4_resource type)
{
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (r)
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static struct res_common *alloc_qp_tr(int id)
{
	struct res_qp *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_QP_RESERVED;
	ret->local_qpn = id;
	INIT_LIST_HEAD(&ret->mcg_list);
	spin_lock_init(&ret->mcg_spl);

	return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
	struct res_mtt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->order = order;
	ret->com.state = RES_MTT_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
	struct res_mpt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_MPT_RESERVED;
	ret->key = key;

	return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
	struct res_eq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_EQ_RESERVED;

	return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
	struct res_cq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_CQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
	struct res_srq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_SRQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_counter_tr(int id)
{
	struct res_counter *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_COUNTER_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
	struct res_xrcdn *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_XRCD_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id)
{
	struct res_fs_rule *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_FS_RULE_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
				   int extra)
{
	struct res_common *ret;

	switch (type) {
	case RES_QP:
		ret = alloc_qp_tr(id);
		break;
	case RES_MPT:
		ret = alloc_mpt_tr(id, extra);
		break;
	case RES_MTT:
		ret = alloc_mtt_tr(id, extra);
		break;
	case RES_EQ:
		ret = alloc_eq_tr(id);
		break;
	case RES_CQ:
		ret = alloc_cq_tr(id);
		break;
	case RES_SRQ:
		ret = alloc_srq_tr(id);
		break;
	case RES_MAC:
		printk(KERN_ERR "implementation missing\n");
		return NULL;
	case RES_COUNTER:
		ret = alloc_counter_tr(id);
		break;
	case RES_XRCD:
		ret = alloc_xrcdn_tr(id);
		break;
	case RES_FS_RULE:
		ret = alloc_fs_rule_tr(id);
		break;
	default:
		return NULL;
	}
	if (ret)
		ret->owner = slave;

	return ret;
}

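/*
 * add_res_range() allocates tracker entries for 'count' consecutive
 * resource ids starting at 'base', inserts each one into the per-type
 * rb-tree and onto the owning slave's list, and unwinds on failure.
 */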
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct res_common **res_arr;
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct rb_root *root = &tracker->res_tree[type];

	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
	if (!res_arr)
		return -ENOMEM;

	for (i = 0; i < count; ++i) {
		res_arr[i] = alloc_tr(base + i, type, slave, extra);
		if (!res_arr[i]) {
			for (--i; i >= 0; --i)
				kfree(res_arr[i]);

			kfree(res_arr);
			return -ENOMEM;
		}
	}

	spin_lock_irq(mlx4_tlock(dev));
	for (i = 0; i < count; ++i) {
		if (find_res(dev, base + i, type)) {
			err = -EEXIST;
			goto undo;
		}
		err = res_tracker_insert(root, res_arr[i]);
		if (err)
			goto undo;
		list_add_tail(&res_arr[i]->list,
			      &tracker->slave_list[slave].res_list[type]);
	}
	spin_unlock_irq(mlx4_tlock(dev));
	kfree(res_arr);

	return 0;

undo:
	for (--i; i >= 0; --i) {
		rb_erase(&res_arr[i]->node, root);
		list_del(&res_arr[i]->list);
	}

	spin_unlock_irq(mlx4_tlock(dev));

	for (i = 0; i < count; ++i)
		kfree(res_arr[i]);

	kfree(res_arr);

	return err;
}

static int remove_qp_ok(struct res_qp *res)
{
	if (res->com.state == RES_QP_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_QP_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
	if (res->com.state == RES_MTT_BUSY ||
	    atomic_read(&res->ref_count)) {
		printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
		       __func__, __LINE__,
		       mtt_states_str(res->com.state),
		       atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_MTT_ALLOCATED)
		return -EPERM;
	else if (res->order != order)
		return -EINVAL;

	return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
	if (res->com.state == RES_MPT_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_MPT_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
	if (res->com.state == RES_EQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_EQ_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
	if (res->com.state == RES_COUNTER_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_COUNTER_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
	if (res->com.state == RES_XRCD_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_XRCD_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_fs_rule_ok(struct res_fs_rule *res)
{
	if (res->com.state == RES_FS_RULE_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_FS_RULE_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
	if (res->com.state == RES_CQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_CQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
	if (res->com.state == RES_SRQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_SRQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
	switch (type) {
	case RES_QP:
		return remove_qp_ok((struct res_qp *)res);
	case RES_CQ:
		return remove_cq_ok((struct res_cq *)res);
	case RES_SRQ:
		return remove_srq_ok((struct res_srq *)res);
	case RES_MPT:
		return remove_mpt_ok((struct res_mpt *)res);
	case RES_MTT:
		return remove_mtt_ok((struct res_mtt *)res, extra);
	case RES_MAC:
		return -ENOSYS;
	case RES_EQ:
		return remove_eq_ok((struct res_eq *)res);
	case RES_COUNTER:
		return remove_counter_ok((struct res_counter *)res);
	case RES_XRCD:
		return remove_xrcdn_ok((struct res_xrcdn *)res);
	case RES_FS_RULE:
		return remove_fs_rule_ok((struct res_fs_rule *)res);
	default:
		return -EINVAL;
	}
}

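/*
 * rem_res_range() is the inverse of add_res_range(): it first checks
 * that every id in the range exists, is owned by 'slave' and is in a
 * removable state, and only then unlinks and frees the entries.
 */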
static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	u64 i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		if (!r) {
			err = -ENOENT;
			goto out;
		}
		if (r->owner != slave) {
			err = -EPERM;
			goto out;
		}
		err = remove_ok(r, type, extra);
		if (err)
			goto out;
	}

	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		rb_erase(&r->node, &tracker->res_tree[type]);
		list_del(&r->list);
		kfree(r);
	}
	err = 0;

out:
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

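/*
 * The *_res_start_move_to() helpers below validate a state transition
 * for a resource, remember the source and target states and mark the
 * entry busy.  The caller completes the transition with res_end_move()
 * or rolls it back with res_abort_move().
 */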
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
				enum res_qp_states state, struct res_qp **qp,
				int alloc)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_qp *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_QP_BUSY:
			mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
				 __func__, r->com.res_id);
			err = -EBUSY;
			break;

		case RES_QP_RESERVED:
			if (r->com.state == RES_QP_MAPPED && !alloc)
				break;

			mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
			err = -EINVAL;
			break;

		case RES_QP_MAPPED:
			if ((r->com.state == RES_QP_RESERVED && alloc) ||
			    r->com.state == RES_QP_HW)
				break;
			else {
				mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
					 r->com.res_id);
				err = -EINVAL;
			}

			break;

		case RES_QP_HW:
			if (r->com.state != RES_QP_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_QP_BUSY;
			if (qp)
				*qp = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_mpt_states state, struct res_mpt **mpt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mpt *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_MPT_BUSY:
			err = -EINVAL;
			break;

		case RES_MPT_RESERVED:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;

		case RES_MPT_MAPPED:
			if (r->com.state != RES_MPT_RESERVED &&
			    r->com.state != RES_MPT_HW)
				err = -EINVAL;
			break;

		case RES_MPT_HW:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_MPT_BUSY;
			if (mpt)
				*mpt = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_eq_states state, struct res_eq **eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_eq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_EQ_BUSY:
			err = -EINVAL;
			break;

		case RES_EQ_RESERVED:
			if (r->com.state != RES_EQ_HW)
				err = -EINVAL;
			break;

		case RES_EQ_HW:
			if (r->com.state != RES_EQ_RESERVED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_EQ_BUSY;
			if (eq)
				*eq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
				enum res_cq_states state, struct res_cq **cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_cq *r;
	int err;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_CQ_BUSY:
			err = -EBUSY;
			break;

		case RES_CQ_ALLOCATED:
			if (r->com.state != RES_CQ_HW)
				err = -EINVAL;
			else if (atomic_read(&r->ref_count))
				err = -EBUSY;
			else
				err = 0;
			break;

		case RES_CQ_HW:
			if (r->com.state != RES_CQ_ALLOCATED)
				err = -EINVAL;
			else
				err = 0;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_CQ_BUSY;
			if (cq)
				*cq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				 enum res_cq_states state, struct res_srq **srq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_srq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_SRQ_BUSY:
			err = -EINVAL;
			break;

		case RES_SRQ_ALLOCATED:
			if (r->com.state != RES_SRQ_HW)
				err = -EINVAL;
			else if (atomic_read(&r->ref_count))
				err = -EBUSY;
			break;

		case RES_SRQ_HW:
			if (r->com.state != RES_SRQ_ALLOCATED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_SRQ_BUSY;
			if (srq)
				*srq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static void res_abort_move(struct mlx4_dev *dev, int slave,
			   enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static void res_end_move(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->to_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
	return mlx4_is_qp_reserved(dev, qpn);
}

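/*
 * Resource allocation on behalf of a slave.  Each *_alloc_res() helper
 * handles one resource type of the ALLOC_RES command: it calls the
 * master's low-level allocator (__mlx4_*) and records the result in
 * the tracker so the master can reclaim it later, e.g. when the slave
 * is torn down.
 */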
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err;
	int count;
	int align;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		count = get_param_l(&in_param);
		align = get_param_h(&in_param);
		err = __mlx4_qp_reserve_range(dev, count, align, &base);
		if (err)
			return err;

		err = add_res_range(dev, slave, base, count, RES_QP, 0);
		if (err) {
			__mlx4_qp_release_range(dev, base, count);
			return err;
		}
		set_param_l(out_param, base);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		if (valid_reserved(dev, slave, qpn)) {
			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
			if (err)
				return err;
		}

		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
					   NULL, 1);
		if (err)
			return err;

		if (!valid_reserved(dev, slave, qpn)) {
			err = __mlx4_qp_alloc_icm(dev, qpn);
			if (err) {
				res_abort_move(dev, slave, RES_QP, qpn);
				return err;
			}
		}

		res_end_move(dev, slave, RES_QP, qpn);
		break;

	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	order = get_param_l(&in_param);
	base = __mlx4_alloc_mtt_range(dev, order);
	if (base == -1)
		return -ENOMEM;

	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
	if (err)
		__mlx4_free_mtt_range(dev, base, order);
	else
		set_param_l(out_param, base);

	return err;
}

static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = __mlx4_mr_reserve(dev);
		if (index == -1)
			break;
		id = index & mpt_mask(dev);

		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
		if (err) {
			__mlx4_mr_release(dev, index);
			break;
		}
		set_param_l(out_param, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_MAPPED, &mpt);
		if (err)
			return err;

		err = __mlx4_mr_alloc_icm(dev, mpt->key);
		if (err) {
			res_abort_move(dev, slave, RES_MPT, id);
			return err;
		}

		res_end_move(dev, slave, RES_MPT, id);
		break;
	}
	return err;
}

static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = __mlx4_cq_alloc_icm(dev, &cqn);
		if (err)
			break;

		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err) {
			__mlx4_cq_free_icm(dev, cqn);
			break;
		}

		set_param_l(out_param, cqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}

static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = __mlx4_srq_alloc_icm(dev, &srqn);
		if (err)
			break;

		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err) {
			__mlx4_srq_free_icm(dev, srqn);
			break;
		}

		set_param_l(out_param, srqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}

static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct mac_res *res;

	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res)
		return -ENOMEM;
	res->mac = mac;
	res->port = (u8) port;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_MAC]);
	return 0;
}

static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
			       int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			list_del(&res->list);
			kfree(res);
			break;
		}
	}
}

static void rem_slave_macs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		list_del(&res->list);
		__mlx4_unregister_mac(dev, res->port, res->mac);
		kfree(res);
	}
}

static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int port;
	u64 mac;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	port = get_param_l(out_param);
	mac = in_param;

	err = __mlx4_register_mac(dev, port, mac);
	if (err >= 0) {
		set_param_l(out_param, err);
		err = 0;
	}

	if (!err) {
		err = mac_add_to_slave(dev, slave, mac, port);
		if (err)
			__mlx4_unregister_mac(dev, port, mac);
	}
	return err;
}

static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)
{
	return 0;
}

static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			     u64 in_param, u64 *out_param)
{
	u32 index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = __mlx4_counter_alloc(dev, &index);
	if (err)
		return err;

	err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
	if (err)
		__mlx4_counter_free(dev, index);
	else
		set_param_l(out_param, index);

	return err;
}

static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			   u64 in_param, u64 *out_param)
{
	u32 xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = __mlx4_xrcd_alloc(dev, &xrcdn);
	if (err)
		return err;

	err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		__mlx4_xrcd_free(dev, xrcdn);
	else
		set_param_l(out_param, xrcdn);

	return err;
}

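/*
 * mlx4_ALLOC_RES_wrapper() runs on the master when a slave issues the
 * ALLOC_RES command; vhcr->in_modifier selects the resource type and
 * vhcr->op_modifier the operation (reserve, map ICM, or both).
 */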
int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier) {
	case RES_QP:
		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MTT:
		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_CQ:
		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_VLAN:
		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);
		break;

	case RES_COUNTER:
		err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
					vhcr->in_param, &vhcr->out_param);
		break;

	case RES_XRCD:
		err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
				      vhcr->in_param, &vhcr->out_param);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param)
{
	int err;
	int count;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		base = get_param_l(&in_param) & 0x7fffff;
		count = get_param_h(&in_param);
		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
		if (err)
			break;
		__mlx4_qp_release_range(dev, base, count);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
					   NULL, 0);
		if (err)
			return err;

		if (!valid_reserved(dev, slave, qpn))
			__mlx4_qp_free_icm(dev, qpn);

		res_end_move(dev, slave, RES_QP, qpn);

		if (valid_reserved(dev, slave, qpn))
			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	base = get_param_l(&in_param);
	order = get_param_h(&in_param);
	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
	if (!err)
		__mlx4_free_mtt_range(dev, base, order);
	return err;
}

static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = get_res(dev, slave, id, RES_MPT, &mpt);
		if (err)
			break;
		index = mpt->key;
		put_res(dev, slave, id, RES_MPT);

		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
		if (err)
			break;
		__mlx4_mr_release(dev, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_RESERVED, &mpt);
		if (err)
			return err;

		__mlx4_mr_free_icm(dev, mpt->key);
		res_end_move(dev, slave, RES_MPT, id);
		return err;
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		cqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err)
			break;

		__mlx4_cq_free_icm(dev, cqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		srqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err)
			break;

		__mlx4_srq_free_icm(dev, srqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int port;
	int err = 0;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		port = get_param_l(out_param);
		mac_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_mac(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;

}

static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	return 0;
}

static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			    u64 in_param, u64 *out_param)
{
	int index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	index = get_param_l(&in_param);
	err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
	if (err)
		return err;

	__mlx4_counter_free(dev, index);

	return err;
}

static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)
{
	int xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	xrcdn = get_param_l(&in_param);
	err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		return err;

	__mlx4_xrcd_free(dev, xrcdn);

	return err;
}

int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err = -EINVAL;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier) {
	case RES_QP:
		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param);
		break;

	case RES_MTT:
		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param);
		break;

	case RES_CQ:
		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_VLAN:
		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_COUNTER:
		err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
				       vhcr->in_param, &vhcr->out_param);
		break;

	case RES_XRCD:
		err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);
		break;

	default:
		break;
	}
	return err;
}

/* ugly but other choices are uglier */
static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
{
	return (be32_to_cpu(mpt->flags) >> 9) & 1;
}

static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
{
	return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
}

static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->mtt_sz);
}

static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
}

static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
{
	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
}

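/*
 * Compute how many MTT entries a QP context needs: derive the SQ and
 * RQ buffer sizes from their log sizes and strides, skip the RQ when
 * the QP uses an SRQ, RSS or XRC, and round the total (plus the page
 * offset contribution) up to a power-of-two number of pages.
 */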
static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
{
	int page_shift = (qpc->log_page_size & 0x3f) + 12;
	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
	int log_sq_sride = qpc->sq_size_stride & 7;
	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
	int log_rq_stride = qpc->rq_size_stride & 7;
	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
	int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
	int sq_size;
	int rq_size;
	int total_pages;
	int total_mem;
	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;

	sq_size = 1 << (log_sq_size + log_sq_sride + 4);
	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
	total_mem = sq_size + rq_size;
	total_pages =
		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
				   page_shift);

	return total_pages;
}

static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
			   int size, struct res_mtt *mtt)
{
	int res_start = mtt->com.res_id;
	int res_size = (1 << mtt->order);

	if (start < res_start || start + size > res_start + res_size)
		return -EPERM;
	return 0;
}

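/*
 * The command wrappers below intercept the SW2HW/HW2SW/QUERY firmware
 * commands issued by slaves.  mlx4_SW2HW_MPT_wrapper() moves the MPT
 * to hardware ownership and, for non-physical MRs, takes a reference
 * on the MTT range named by the MPT entry before passing the command
 * on to firmware.
 */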
int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_mpt *mpt;
	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
	int phys;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
	if (err)
		return err;

	phys = mr_phys_mpt(inbox->buf);
	if (!phys) {
		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
		if (err)
			goto ex_abort;

		err = check_mtt_range(dev, slave, mtt_base,
				      mr_get_mtt_size(inbox->buf), mtt);
		if (err)
			goto ex_put;

		mpt->mtt = mtt;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	if (!phys) {
		atomic_inc(&mtt->ref_count);
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
	}

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_put:
	if (!phys)
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}

int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	if (mpt->mtt)
		atomic_dec(&mpt->mtt->ref_count);

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}

int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = get_res(dev, slave, id, RES_MPT, &mpt);
	if (err)
		return err;

	if (mpt->com.from_state != RES_MPT_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

out:
	put_res(dev, slave, id, RES_MPT);
	return err;
}

static int qp_get_rcqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
}

static int qp_get_scqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_send) & 0xffffff;
}

static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->srqn) & 0x1ffffff;
}

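/*
 * mlx4_RST2INIT_QP_wrapper() validates a slave's RST2INIT request: it
 * moves the QP to hardware ownership and takes references on the MTT
 * range, the send and receive CQs and (if used) the SRQ named in the
 * QP context, dropping everything again on any failure.
 */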
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_mtt *mtt;
	struct res_qp *qp;
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
	int mtt_size = qp_get_mtt_size(qpc);
	struct res_cq *rcq;
	struct res_cq *scq;
	int rcqn = qp_get_rcqn(qpc);
	int scqn = qp_get_scqn(qpc);
	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
	struct res_srq *srq;
	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
	if (err)
		return err;
	qp->local_qpn = local_qpn;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto ex_put_mtt;

	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
	if (err)
		goto ex_put_mtt;

	if (scqn != rcqn) {
		err = get_res(dev, slave, scqn, RES_CQ, &scq);
		if (err)
			goto ex_put_rcq;
	} else
		scq = rcq;

	if (use_srq) {
		err = get_res(dev, slave, srqn, RES_SRQ, &srq);
		if (err)
			goto ex_put_scq;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_srq;
	atomic_inc(&mtt->ref_count);
	qp->mtt = mtt;
	atomic_inc(&rcq->ref_count);
	qp->rcq = rcq;
	atomic_inc(&scq->ref_count);
	qp->scq = scq;

	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);

	if (use_srq) {
		atomic_inc(&srq->ref_count);
		put_res(dev, slave, srqn, RES_SRQ);
		qp->srq = srq;
	}
	put_res(dev, slave, rcqn, RES_CQ);
	put_res(dev, slave, mtt_base, RES_MTT);
	res_end_move(dev, slave, RES_QP, qpn);

	return 0;

ex_put_srq:
	if (use_srq)
		put_res(dev, slave, srqn, RES_SRQ);
ex_put_scq:
	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);
ex_put_rcq:
	put_res(dev, slave, rcqn, RES_CQ);
ex_put_mtt:
	put_res(dev, slave, mtt_base, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}

static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
{
	return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
}

static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
{
	int log_eq_size = eqc->log_eq_size & 0x1f;
	int page_shift = (eqc->log_page_size & 0x3f) + 12;

	if (log_eq_size + 5 < page_shift)
		return 1;

	return 1 << (log_eq_size + 5 - page_shift);
}

static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
{
	return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
}

static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
{
	int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
	int page_shift = (cqc->log_page_size & 0x3f) + 12;

	if (log_cq_size + 5 < page_shift)
		return 1;

	return 1 << (log_cq_size + 5 - page_shift);
}

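/*
 * EQ numbers are only unique per slave, so tracked EQ entries use a
 * composite id of (slave << 8) | eqn.  mlx4_SW2HW_EQ_wrapper() adds
 * the entry, pins the EQ's MTT range and hands the command to
 * firmware, unwinding each step on failure.
 */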
int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int eqn = vhcr->in_modifier;
	int res_id = (slave << 8) | eqn;
	struct mlx4_eq_context *eqc = inbox->buf;
	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
	int mtt_size = eq_get_mtt_size(eqc);
	struct res_eq *eq;
	struct res_mtt *mtt;

	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	if (err)
		return err;
	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
	if (err)
		goto out_add;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto out_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;

	atomic_inc(&mtt->ref_count);
	eq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_EQ, res_id);
out_add:
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	return err;
}

static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
			      int len, struct res_mtt **res)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mtt *mtt;
	int err = -EINVAL;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
			    com.list) {
		if (!check_mtt_range(dev, slave, start, len, mtt)) {
			*res = mtt;
			mtt->com.from_state = mtt->com.state;
			mtt->com.state = RES_MTT_BUSY;
			err = 0;
			break;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

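/*
 * mlx4_WRITE_MTT_wrapper() checks that the MTT range the slave wants
 * to write lies inside an MTT allocation it owns, then replays the
 * write through the software path (__mlx4_write_mtt) after converting
 * the mailbox entries to host-endian addresses.
 */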
int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_mtt mtt;
	__be64 *page_list = inbox->buf;
	u64 *pg_list = (u64 *)page_list;
	int i;
	struct res_mtt *rmtt = NULL;
	int start = be64_to_cpu(page_list[0]);
	int npages = vhcr->in_modifier;
	int err;

	err = get_containing_mtt(dev, slave, start, npages, &rmtt);
	if (err)
		return err;

	/* Call the SW implementation of write_mtt:
	 * - Prepare a dummy mtt struct
	 * - Translate inbox contents to simple addresses in host endianness */
	mtt.offset = 0; /* TBD this is broken but I don't handle it since
			   we don't really use it */
	mtt.order = 0;
	mtt.page_shift = 0;
	for (i = 0; i < npages; ++i)
		pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);

	err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
			       ((u64 *)page_list + 2));

	if (rmtt)
		put_res(dev, slave, rmtt->com.res_id, RES_MTT);

	return err;
}

int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 8);
	struct res_eq *eq;
	int err;

	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
	if (err)
		return err;

	err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
	if (err)
		goto ex_abort;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	atomic_dec(&eq->mtt->ref_count);
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);

	return 0;

ex_put:
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_EQ, res_id);

	return err;
}

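/*
 * mlx4_GEN_EQE() forwards an event to a slave: it looks up the EQ the
 * slave registered for this event type and, if one exists and is in
 * hardware ownership, asks the firmware to generate the EQE on it via
 * the GEN_EQE command.
 */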
2214int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
2215{
2216 struct mlx4_priv *priv = mlx4_priv(dev);
2217 struct mlx4_slave_event_eq_info *event_eq;
2218 struct mlx4_cmd_mailbox *mailbox;
2219 u32 in_modifier = 0;
2220 int err;
2221 int res_id;
2222 struct res_eq *req;
2223
2224 if (!priv->mfunc.master.slave_state)
2225 return -EINVAL;
2226
803143fb 2227 event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
c82e9aa0
EC
2228
2229 /* Create the event only if the slave is registered */
803143fb 2230 if (event_eq->eqn < 0)
c82e9aa0
EC
2231 return 0;
2232
2233 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2234 res_id = (slave << 8) | event_eq->eqn;
2235 err = get_res(dev, slave, res_id, RES_EQ, &req);
2236 if (err)
2237 goto unlock;
2238
2239 if (req->com.from_state != RES_EQ_HW) {
2240 err = -EINVAL;
2241 goto put;
2242 }
2243
2244 mailbox = mlx4_alloc_cmd_mailbox(dev);
2245 if (IS_ERR(mailbox)) {
2246 err = PTR_ERR(mailbox);
2247 goto put;
2248 }
2249
2250 if (eqe->type == MLX4_EVENT_TYPE_CMD) {
2251 ++event_eq->token;
2252 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
2253 }
2254
2255 memcpy(mailbox->buf, (u8 *) eqe, 28);
2256
2257 in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
2258
2259 err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
2260 MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
2261 MLX4_CMD_NATIVE);
2262
2263 put_res(dev, slave, res_id, RES_EQ);
2264 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2265 mlx4_free_cmd_mailbox(dev, mailbox);
2266 return err;
2267
2268put:
2269 put_res(dev, slave, res_id, RES_EQ);
2270
2271unlock:
2272 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2273 return err;
2274}
2275
2276int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
2277 struct mlx4_vhcr *vhcr,
2278 struct mlx4_cmd_mailbox *inbox,
2279 struct mlx4_cmd_mailbox *outbox,
2280 struct mlx4_cmd_info *cmd)
2281{
2282 int eqn = vhcr->in_modifier;
2283 int res_id = eqn | (slave << 8);
2284 struct res_eq *eq;
2285 int err;
2286
2287 err = get_res(dev, slave, res_id, RES_EQ, &eq);
2288 if (err)
2289 return err;
2290
2291 if (eq->com.from_state != RES_EQ_HW) {
2292 err = -EINVAL;
2293 goto ex_put;
2294 }
2295
2296 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2297
2298ex_put:
2299 put_res(dev, slave, res_id, RES_EQ);
2300 return err;
2301}
2302
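/*
 * Hand a slave's CQ over to hardware.  The CQ context supplied by the slave
 * is accepted only if the MTT range it points at is owned by that slave and
 * is large enough for the CQ; on success the CQ takes a reference on the MTT
 * and the tracked CQ moves to RES_CQ_HW.
 */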
2303int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2304 struct mlx4_vhcr *vhcr,
2305 struct mlx4_cmd_mailbox *inbox,
2306 struct mlx4_cmd_mailbox *outbox,
2307 struct mlx4_cmd_info *cmd)
2308{
2309 int err;
2310 int cqn = vhcr->in_modifier;
2311 struct mlx4_cq_context *cqc = inbox->buf;
2312	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2313 struct res_cq *cq;
2314 struct res_mtt *mtt;
2315
2316 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
2317 if (err)
2318 return err;
2319	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2320 if (err)
2321 goto out_move;
2322 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2323 if (err)
2324 goto out_put;
2325 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2326 if (err)
2327 goto out_put;
2328 atomic_inc(&mtt->ref_count);
2329 cq->mtt = mtt;
2330 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2331 res_end_move(dev, slave, RES_CQ, cqn);
2332 return 0;
2333
2334out_put:
2335 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2336out_move:
2337 res_abort_move(dev, slave, RES_CQ, cqn);
2338 return err;
2339}
2340
2341int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2342 struct mlx4_vhcr *vhcr,
2343 struct mlx4_cmd_mailbox *inbox,
2344 struct mlx4_cmd_mailbox *outbox,
2345 struct mlx4_cmd_info *cmd)
2346{
2347 int err;
2348 int cqn = vhcr->in_modifier;
2349 struct res_cq *cq;
2350
2351 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
2352 if (err)
2353 return err;
2354 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2355 if (err)
2356 goto out_move;
2357 atomic_dec(&cq->mtt->ref_count);
2358 res_end_move(dev, slave, RES_CQ, cqn);
2359 return 0;
2360
2361out_move:
2362 res_abort_move(dev, slave, RES_CQ, cqn);
2363 return err;
2364}
2365
2366int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2367 struct mlx4_vhcr *vhcr,
2368 struct mlx4_cmd_mailbox *inbox,
2369 struct mlx4_cmd_mailbox *outbox,
2370 struct mlx4_cmd_info *cmd)
2371{
2372 int cqn = vhcr->in_modifier;
2373 struct res_cq *cq;
2374 int err;
2375
2376 err = get_res(dev, slave, cqn, RES_CQ, &cq);
2377 if (err)
2378 return err;
2379
2380 if (cq->com.from_state != RES_CQ_HW)
2381 goto ex_put;
2382
2383 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2384ex_put:
2385 put_res(dev, slave, cqn, RES_CQ);
2386
2387 return err;
2388}
2389
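/*
 * CQ resize (MODIFY_CQ with op_modifier 0) re-targets the CQ onto a new MTT
 * range.  The helper below verifies that the currently referenced MTT still
 * matches the tracked CQ, validates the new range, and on success swaps the
 * reference counts from the old MTT to the new one.
 */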
2390static int handle_resize(struct mlx4_dev *dev, int slave,
2391 struct mlx4_vhcr *vhcr,
2392 struct mlx4_cmd_mailbox *inbox,
2393 struct mlx4_cmd_mailbox *outbox,
2394 struct mlx4_cmd_info *cmd,
2395 struct res_cq *cq)
2396{
2397 int err;
2398 struct res_mtt *orig_mtt;
2399 struct res_mtt *mtt;
2400 struct mlx4_cq_context *cqc = inbox->buf;
2401	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2402
2403 err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
2404 if (err)
2405 return err;
2406
2407 if (orig_mtt != cq->mtt) {
2408 err = -EINVAL;
2409 goto ex_put;
2410 }
2411
2412	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2413 if (err)
2414 goto ex_put;
2415
2416 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2417 if (err)
2418 goto ex_put1;
2419 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2420 if (err)
2421 goto ex_put1;
2422 atomic_dec(&orig_mtt->ref_count);
2423 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2424 atomic_inc(&mtt->ref_count);
2425 cq->mtt = mtt;
2426 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2427 return 0;
2428
2429ex_put1:
2430 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2431ex_put:
2432 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2433
2434 return err;
2435
2436}
2437
2438int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2439 struct mlx4_vhcr *vhcr,
2440 struct mlx4_cmd_mailbox *inbox,
2441 struct mlx4_cmd_mailbox *outbox,
2442 struct mlx4_cmd_info *cmd)
2443{
2444 int cqn = vhcr->in_modifier;
2445 struct res_cq *cq;
2446 int err;
2447
2448 err = get_res(dev, slave, cqn, RES_CQ, &cq);
2449 if (err)
2450 return err;
2451
2452 if (cq->com.from_state != RES_CQ_HW)
2453 goto ex_put;
2454
2455 if (vhcr->op_modifier == 0) {
2456 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
2457		goto ex_put;
2458 }
2459
2460 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2461ex_put:
2462 put_res(dev, slave, cqn, RES_CQ);
2463
2464 return err;
2465}
2466
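/*
 * Number of MTT entries needed by an SRQ: each of the 2^log_srq_size WQEs
 * is 2^(log_rq_stride + 4) bytes, so the queue spans
 * 2^(log_srq_size + log_rq_stride + 4 - page_shift) pages (minimum one).
 * For example, 1024 WQEs of 256 bytes on 4KB pages need 64 MTT entries.
 */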
2467static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
2468{
2469 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
2470 int log_rq_stride = srqc->logstride & 7;
2471 int page_shift = (srqc->log_page_size & 0x3f) + 12;
2472
2473 if (log_srq_size + log_rq_stride + 4 < page_shift)
2474 return 1;
2475
2476 return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
2477}
2478
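/*
 * Hand a slave's SRQ over to hardware.  The SRQ number encoded in the
 * context must match the command's in_modifier, and the MTT range the
 * context points at must belong to the slave and cover the SRQ; on success
 * the SRQ takes a reference on the MTT and moves to RES_SRQ_HW.
 */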
2479int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2480 struct mlx4_vhcr *vhcr,
2481 struct mlx4_cmd_mailbox *inbox,
2482 struct mlx4_cmd_mailbox *outbox,
2483 struct mlx4_cmd_info *cmd)
2484{
2485 int err;
2486 int srqn = vhcr->in_modifier;
2487 struct res_mtt *mtt;
2488 struct res_srq *srq;
2489 struct mlx4_srq_context *srqc = inbox->buf;
2490	int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
2491
2492 if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
2493 return -EINVAL;
2494
2495 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
2496 if (err)
2497 return err;
2498	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2499 if (err)
2500 goto ex_abort;
2501 err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
2502 mtt);
2503 if (err)
2504 goto ex_put_mtt;
2505
2506 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2507 if (err)
2508 goto ex_put_mtt;
2509
2510 atomic_inc(&mtt->ref_count);
2511 srq->mtt = mtt;
2512 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2513 res_end_move(dev, slave, RES_SRQ, srqn);
2514 return 0;
2515
2516ex_put_mtt:
2517 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2518ex_abort:
2519 res_abort_move(dev, slave, RES_SRQ, srqn);
2520
2521 return err;
2522}
2523
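/*
 * Return a slave's SRQ to software ownership: forward HW2SW_SRQ to the
 * firmware and, on success, drop the references the SRQ held on its MTT
 * (and on its CQ, if any) before completing the move to RES_SRQ_ALLOCATED.
 */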
2524int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2525 struct mlx4_vhcr *vhcr,
2526 struct mlx4_cmd_mailbox *inbox,
2527 struct mlx4_cmd_mailbox *outbox,
2528 struct mlx4_cmd_info *cmd)
2529{
2530 int err;
2531 int srqn = vhcr->in_modifier;
2532 struct res_srq *srq;
2533
2534 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
2535 if (err)
2536 return err;
2537 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2538 if (err)
2539 goto ex_abort;
2540 atomic_dec(&srq->mtt->ref_count);
2541 if (srq->cq)
2542 atomic_dec(&srq->cq->ref_count);
2543 res_end_move(dev, slave, RES_SRQ, srqn);
2544
2545 return 0;
2546
2547ex_abort:
2548 res_abort_move(dev, slave, RES_SRQ, srqn);
2549
2550 return err;
2551}
2552
2553int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2554 struct mlx4_vhcr *vhcr,
2555 struct mlx4_cmd_mailbox *inbox,
2556 struct mlx4_cmd_mailbox *outbox,
2557 struct mlx4_cmd_info *cmd)
2558{
2559 int err;
2560 int srqn = vhcr->in_modifier;
2561 struct res_srq *srq;
2562
2563 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2564 if (err)
2565 return err;
2566 if (srq->com.from_state != RES_SRQ_HW) {
2567 err = -EBUSY;
2568 goto out;
2569 }
2570 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2571out:
2572 put_res(dev, slave, srqn, RES_SRQ);
2573 return err;
2574}
2575
2576int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2577 struct mlx4_vhcr *vhcr,
2578 struct mlx4_cmd_mailbox *inbox,
2579 struct mlx4_cmd_mailbox *outbox,
2580 struct mlx4_cmd_info *cmd)
2581{
2582 int err;
2583 int srqn = vhcr->in_modifier;
2584 struct res_srq *srq;
2585
2586 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2587 if (err)
2588 return err;
2589
2590 if (srq->com.from_state != RES_SRQ_HW) {
2591 err = -EBUSY;
2592 goto out;
2593 }
2594
2595 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2596out:
2597 put_res(dev, slave, srqn, RES_SRQ);
2598 return err;
2599}
2600
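/*
 * Generic QP command wrapper: the command is forwarded to the firmware only
 * while the tracked QP is in HW ownership; otherwise -EBUSY is returned.
 */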
2601int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
2602 struct mlx4_vhcr *vhcr,
2603 struct mlx4_cmd_mailbox *inbox,
2604 struct mlx4_cmd_mailbox *outbox,
2605 struct mlx4_cmd_info *cmd)
2606{
2607 int err;
2608 int qpn = vhcr->in_modifier & 0x7fffff;
2609 struct res_qp *qp;
2610
2611 err = get_res(dev, slave, qpn, RES_QP, &qp);
2612 if (err)
2613 return err;
2614 if (qp->com.from_state != RES_QP_HW) {
2615 err = -EBUSY;
2616 goto out;
2617 }
2618
2619 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2620out:
2621 put_res(dev, slave, qpn, RES_QP);
2622 return err;
2623}
2624
2625int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
2626 struct mlx4_vhcr *vhcr,
2627 struct mlx4_cmd_mailbox *inbox,
2628 struct mlx4_cmd_mailbox *outbox,
2629 struct mlx4_cmd_info *cmd)
2630{
2631 struct mlx4_qp_context *qpc = inbox->buf + 8;
2632
2633 update_ud_gid(dev, qpc, (u8)slave);
2634
2635 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2636}
2637
2638int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
2639 struct mlx4_vhcr *vhcr,
2640 struct mlx4_cmd_mailbox *inbox,
2641 struct mlx4_cmd_mailbox *outbox,
2642 struct mlx4_cmd_info *cmd)
2643{
2644 int err;
2645 int qpn = vhcr->in_modifier & 0x7fffff;
2646 struct res_qp *qp;
2647
2648 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
2649 if (err)
2650 return err;
2651 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2652 if (err)
2653 goto ex_abort;
2654
2655 atomic_dec(&qp->mtt->ref_count);
2656 atomic_dec(&qp->rcq->ref_count);
2657 atomic_dec(&qp->scq->ref_count);
2658 if (qp->srq)
2659 atomic_dec(&qp->srq->ref_count);
2660 res_end_move(dev, slave, RES_QP, qpn);
2661 return 0;
2662
2663ex_abort:
2664 res_abort_move(dev, slave, RES_QP, qpn);
2665
2666 return err;
2667}
2668
2669static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
2670 struct res_qp *rqp, u8 *gid)
2671{
2672 struct res_gid *res;
2673
2674 list_for_each_entry(res, &rqp->mcg_list, list) {
2675 if (!memcmp(res->gid, gid, 16))
2676 return res;
2677 }
2678 return NULL;
2679}
2680
2681static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2682 u8 *gid, enum mlx4_protocol prot,
2683 enum mlx4_steer_type steer)
2684{
2685 struct res_gid *res;
2686 int err;
2687
2688 res = kzalloc(sizeof *res, GFP_KERNEL);
2689 if (!res)
2690 return -ENOMEM;
2691
2692 spin_lock_irq(&rqp->mcg_spl);
2693 if (find_gid(dev, slave, rqp, gid)) {
2694 kfree(res);
2695 err = -EEXIST;
2696 } else {
2697 memcpy(res->gid, gid, 16);
2698 res->prot = prot;
2699		res->steer = steer;
2700 list_add_tail(&res->list, &rqp->mcg_list);
2701 err = 0;
2702 }
2703 spin_unlock_irq(&rqp->mcg_spl);
2704
2705 return err;
2706}
2707
2708static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2709 u8 *gid, enum mlx4_protocol prot,
2710 enum mlx4_steer_type steer)
2711{
2712 struct res_gid *res;
2713 int err;
2714
2715 spin_lock_irq(&rqp->mcg_spl);
2716 res = find_gid(dev, slave, rqp, gid);
2717	if (!res || res->prot != prot || res->steer != steer)
2718 err = -EINVAL;
2719 else {
2720 list_del(&res->list);
2721 kfree(res);
2722 err = 0;
2723 }
2724 spin_unlock_irq(&rqp->mcg_spl);
2725
2726 return err;
2727}
2728
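/*
 * Multicast attach/detach on behalf of a slave.  Each attached GID is also
 * recorded on the QP's mcg_list (see add_mcg_res/rem_mcg_res above) so that
 * detach_qp() can drop any remaining attachments when the slave goes away.
 * The steering type is taken from bit 1 of gid[7], as encoded by the slave.
 */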
2729int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
2730 struct mlx4_vhcr *vhcr,
2731 struct mlx4_cmd_mailbox *inbox,
2732 struct mlx4_cmd_mailbox *outbox,
2733 struct mlx4_cmd_info *cmd)
2734{
2735 struct mlx4_qp qp; /* dummy for calling attach/detach */
2736 u8 *gid = inbox->buf;
2737 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
2738	int err;
2739 int qpn;
2740 struct res_qp *rqp;
2741 int attach = vhcr->op_modifier;
2742 int block_loopback = vhcr->in_modifier >> 31;
2743 u8 steer_type_mask = 2;
2744	enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
2745
2746 qpn = vhcr->in_modifier & 0xffffff;
2747 err = get_res(dev, slave, qpn, RES_QP, &rqp);
2748 if (err)
2749 return err;
2750
2751 qp.qpn = qpn;
2752 if (attach) {
2753		err = add_mcg_res(dev, slave, rqp, gid, prot, type);
2754 if (err)
2755 goto ex_put;
2756
2757 err = mlx4_qp_attach_common(dev, &qp, gid,
2758 block_loopback, prot, type);
2759 if (err)
2760 goto ex_rem;
2761 } else {
2762		err = rem_mcg_res(dev, slave, rqp, gid, prot, type);
2763 if (err)
2764 goto ex_put;
2765 err = mlx4_qp_detach_common(dev, &qp, gid, prot, type);
2766 }
2767
2768 put_res(dev, slave, qpn, RES_QP);
2769 return 0;
2770
2771ex_rem:
2772 /* ignore error return below, already in error */
2773	(void) rem_mcg_res(dev, slave, rqp, gid, prot, type);
2774ex_put:
2775 put_res(dev, slave, qpn, RES_QP);
2776
2777 return err;
2778}
2779
2780/*
2781 * MAC validation for Flow Steering rules.
2782 * VF can attach rules only with a mac address which is assigned to it.
2783 */
2784static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
2785 struct list_head *rlist)
2786{
2787 struct mac_res *res, *tmp;
2788 __be64 be_mac;
2789
2790 /* make sure it isn't multicast or broadcast mac*/
2791 if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
2792 !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
2793 list_for_each_entry_safe(res, tmp, rlist, list) {
2794 be_mac = cpu_to_be64(res->mac << 16);
2795 if (!memcmp(&be_mac, eth_header->eth.dst_mac, ETH_ALEN))
2796 return 0;
2797 }
2798 pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
2799 eth_header->eth.dst_mac, slave);
2800 return -EINVAL;
2801 }
2802 return 0;
2803}
2804
2805/*
2806 * In case of missing eth header, append eth header with a MAC address
2807 * assigned to the VF.
2808 */
2809static int add_eth_header(struct mlx4_dev *dev, int slave,
2810 struct mlx4_cmd_mailbox *inbox,
2811 struct list_head *rlist, int header_id)
2812{
2813 struct mac_res *res, *tmp;
2814 u8 port;
2815 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
2816 struct mlx4_net_trans_rule_hw_eth *eth_header;
2817 struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
2818 struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
2819 __be64 be_mac = 0;
2820 __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
2821
2822 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
2823 port = be32_to_cpu(ctrl->vf_vep_port) & 0xff;
2824 eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
2825
2826 /* Clear a space in the inbox for eth header */
2827 switch (header_id) {
2828 case MLX4_NET_TRANS_RULE_ID_IPV4:
2829 ip_header =
2830 (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
2831 memmove(ip_header, eth_header,
2832 sizeof(*ip_header) + sizeof(*l4_header));
2833 break;
2834 case MLX4_NET_TRANS_RULE_ID_TCP:
2835 case MLX4_NET_TRANS_RULE_ID_UDP:
2836 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
2837 (eth_header + 1);
2838 memmove(l4_header, eth_header, sizeof(*l4_header));
2839 break;
2840 default:
2841 return -EINVAL;
2842 }
2843 list_for_each_entry_safe(res, tmp, rlist, list) {
2844 if (port == res->port) {
2845 be_mac = cpu_to_be64(res->mac << 16);
2846 break;
2847 }
2848 }
2849 if (!be_mac) {
2850			pr_err("Failed adding eth header to FS rule, can't find matching MAC for port %d\n",
2851 port);
2852 return -EINVAL;
2853 }
2854
2855 memset(eth_header, 0, sizeof(*eth_header));
2856 eth_header->size = sizeof(*eth_header) >> 2;
2857 eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
2858 memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
2859 memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
2860
2861 return 0;
2862
2863}
2864
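/*
 * Flow steering attach on behalf of a slave (device-managed steering only).
 * Rules carrying an L2 header are accepted only for a MAC owned by the
 * slave; L3/L4-only rules get an L2 header with one of the slave's MACs
 * prepended.  The rule is then attached with mlx4_cmd_imm() and the returned
 * rule id is registered in the resource tracker so it can be cleaned up
 * later (see rem_slave_fs_rule() below).
 */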
2865int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
2866 struct mlx4_vhcr *vhcr,
2867 struct mlx4_cmd_mailbox *inbox,
2868 struct mlx4_cmd_mailbox *outbox,
2869 struct mlx4_cmd_info *cmd)
2870{
2871
2872 struct mlx4_priv *priv = mlx4_priv(dev);
2873 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2874 struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
2875	int err;
2876 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
2877 struct _rule_hw *rule_header;
2878 int header_id;
2879
2880 if (dev->caps.steering_mode !=
2881 MLX4_STEERING_MODE_DEVICE_MANAGED)
2882 return -EOPNOTSUPP;
2883
2884 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
2885 rule_header = (struct _rule_hw *)(ctrl + 1);
2886 header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
2887
2888 switch (header_id) {
2889 case MLX4_NET_TRANS_RULE_ID_ETH:
2890 if (validate_eth_header_mac(slave, rule_header, rlist))
2891 return -EINVAL;
2892 break;
2893 case MLX4_NET_TRANS_RULE_ID_IPV4:
2894 case MLX4_NET_TRANS_RULE_ID_TCP:
2895 case MLX4_NET_TRANS_RULE_ID_UDP:
2896 pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
2897 if (add_eth_header(dev, slave, inbox, rlist, header_id))
2898 return -EINVAL;
2899 vhcr->in_modifier +=
2900 sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
2901 break;
2902 default:
2903 pr_err("Corrupted mailbox.\n");
2904 return -EINVAL;
2905 }
2906
2907 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
2908 vhcr->in_modifier, 0,
2909 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
2910 MLX4_CMD_NATIVE);
2911 if (err)
2912 return err;
2913
2914 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, 0);
2915 if (err) {
2916		mlx4_err(dev, "Failed to add flow steering resources\n");
2917 /* detach rule*/
2918 mlx4_cmd(dev, vhcr->out_param, 0, 0,
2919 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
2920 MLX4_CMD_NATIVE);
2921 }
2922 return err;
2923}
2924
2925int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
2926 struct mlx4_vhcr *vhcr,
2927 struct mlx4_cmd_mailbox *inbox,
2928 struct mlx4_cmd_mailbox *outbox,
2929 struct mlx4_cmd_info *cmd)
2930{
2931 int err;
2932
2933 if (dev->caps.steering_mode !=
2934 MLX4_STEERING_MODE_DEVICE_MANAGED)
2935 return -EOPNOTSUPP;
2936
2937 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
2938 if (err) {
2939		mlx4_err(dev, "Failed to remove flow steering resources\n");
2940 return err;
2941 }
2942
2943 err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
2944 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
2945 MLX4_CMD_NATIVE);
2946 return err;
2947}
2948
2949enum {
2950 BUSY_MAX_RETRIES = 10
2951};
2952
2953int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
2954 struct mlx4_vhcr *vhcr,
2955 struct mlx4_cmd_mailbox *inbox,
2956 struct mlx4_cmd_mailbox *outbox,
2957 struct mlx4_cmd_info *cmd)
2958{
2959 int err;
2960 int index = vhcr->in_modifier & 0xffff;
2961
2962 err = get_res(dev, slave, index, RES_COUNTER, NULL);
2963 if (err)
2964 return err;
2965
2966 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2967 put_res(dev, slave, index, RES_COUNTER);
2968 return err;
2969}
2970
2971static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
2972{
2973 struct res_gid *rgid;
2974 struct res_gid *tmp;
2975 struct mlx4_qp qp; /* dummy for calling attach/detach */
2976
2977 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
2978 qp.qpn = rqp->local_qpn;
2979 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
2980 rgid->steer);
2981 list_del(&rgid->list);
2982 kfree(rgid);
2983 }
2984}
2985
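/*
 * Mark every resource of the given type owned by the slave as busy/removing
 * so that no new command wrapper can grab it.  move_all_busy() below retries
 * for up to five seconds to let in-flight users release resources that are
 * still busy, then reports (and logs) whatever remains busy.
 */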
2986static int _move_all_busy(struct mlx4_dev *dev, int slave,
2987 enum mlx4_resource type, int print)
2988{
2989 struct mlx4_priv *priv = mlx4_priv(dev);
2990 struct mlx4_resource_tracker *tracker =
2991 &priv->mfunc.master.res_tracker;
2992 struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
2993 struct res_common *r;
2994 struct res_common *tmp;
2995 int busy;
2996
2997 busy = 0;
2998 spin_lock_irq(mlx4_tlock(dev));
2999 list_for_each_entry_safe(r, tmp, rlist, list) {
3000 if (r->owner == slave) {
3001 if (!r->removing) {
3002 if (r->state == RES_ANY_BUSY) {
3003 if (print)
3004 mlx4_dbg(dev,
3005						 "%s id 0x%llx is busy\n",
3006 ResourceType(type),
3007 r->res_id);
3008 ++busy;
3009 } else {
3010 r->from_state = r->state;
3011 r->state = RES_ANY_BUSY;
3012 r->removing = 1;
3013 }
3014 }
3015 }
3016 }
3017 spin_unlock_irq(mlx4_tlock(dev));
3018
3019 return busy;
3020}
3021
3022static int move_all_busy(struct mlx4_dev *dev, int slave,
3023 enum mlx4_resource type)
3024{
3025 unsigned long begin;
3026 int busy;
3027
3028 begin = jiffies;
3029 do {
3030 busy = _move_all_busy(dev, slave, type, 0);
3031 if (time_after(jiffies, begin + 5 * HZ))
3032 break;
3033 if (busy)
3034 cond_resched();
3035 } while (busy);
3036
3037 if (busy)
3038 busy = _move_all_busy(dev, slave, type, 1);
3039
3040 return busy;
3041}
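/*
 * The rem_slave_*() helpers below walk the slave's resource lists and drive
 * each object from its current state back to "free": HW-owned objects are
 * first returned to SW ownership (or reset) with the corresponding firmware
 * command, then ICM mappings and reservations are released and the tracker
 * entry is erased.
 */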
3042static void rem_slave_qps(struct mlx4_dev *dev, int slave)
3043{
3044 struct mlx4_priv *priv = mlx4_priv(dev);
3045 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3046 struct list_head *qp_list =
3047 &tracker->slave_list[slave].res_list[RES_QP];
3048 struct res_qp *qp;
3049 struct res_qp *tmp;
3050 int state;
3051 u64 in_param;
3052 int qpn;
3053 int err;
3054
3055 err = move_all_busy(dev, slave, RES_QP);
3056 if (err)
3057		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy "
3058			  "for slave %d\n", slave);
3059
3060 spin_lock_irq(mlx4_tlock(dev));
3061 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
3062 spin_unlock_irq(mlx4_tlock(dev));
3063 if (qp->com.owner == slave) {
3064 qpn = qp->com.res_id;
3065 detach_qp(dev, slave, qp);
3066 state = qp->com.from_state;
3067 while (state != 0) {
3068 switch (state) {
3069 case RES_QP_RESERVED:
3070 spin_lock_irq(mlx4_tlock(dev));
3071 rb_erase(&qp->com.node,
3072 &tracker->res_tree[RES_QP]);
3073 list_del(&qp->com.list);
3074 spin_unlock_irq(mlx4_tlock(dev));
3075 kfree(qp);
3076 state = 0;
3077 break;
3078 case RES_QP_MAPPED:
3079 if (!valid_reserved(dev, slave, qpn))
3080 __mlx4_qp_free_icm(dev, qpn);
3081 state = RES_QP_RESERVED;
3082 break;
3083 case RES_QP_HW:
3084 in_param = slave;
3085 err = mlx4_cmd(dev, in_param,
3086 qp->local_qpn, 2,
3087 MLX4_CMD_2RST_QP,
3088 MLX4_CMD_TIME_CLASS_A,
3089 MLX4_CMD_NATIVE);
3090 if (err)
3091 mlx4_dbg(dev, "rem_slave_qps: failed"
3092 " to move slave %d qpn %d to"
3093 " reset\n", slave,
3094 qp->local_qpn);
3095 atomic_dec(&qp->rcq->ref_count);
3096 atomic_dec(&qp->scq->ref_count);
3097 atomic_dec(&qp->mtt->ref_count);
3098 if (qp->srq)
3099 atomic_dec(&qp->srq->ref_count);
3100 state = RES_QP_MAPPED;
3101 break;
3102 default:
3103 state = 0;
3104 }
3105 }
3106 }
3107 spin_lock_irq(mlx4_tlock(dev));
3108 }
3109 spin_unlock_irq(mlx4_tlock(dev));
3110}
3111
3112static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
3113{
3114 struct mlx4_priv *priv = mlx4_priv(dev);
3115 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3116 struct list_head *srq_list =
3117 &tracker->slave_list[slave].res_list[RES_SRQ];
3118 struct res_srq *srq;
3119 struct res_srq *tmp;
3120 int state;
3121 u64 in_param;
3122 LIST_HEAD(tlist);
3123 int srqn;
3124 int err;
3125
3126 err = move_all_busy(dev, slave, RES_SRQ);
3127 if (err)
3128 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
3129 "busy for slave %d\n", slave);
3130
3131 spin_lock_irq(mlx4_tlock(dev));
3132 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
3133 spin_unlock_irq(mlx4_tlock(dev));
3134 if (srq->com.owner == slave) {
3135 srqn = srq->com.res_id;
3136 state = srq->com.from_state;
3137 while (state != 0) {
3138 switch (state) {
3139 case RES_SRQ_ALLOCATED:
3140 __mlx4_srq_free_icm(dev, srqn);
3141 spin_lock_irq(mlx4_tlock(dev));
3142 rb_erase(&srq->com.node,
3143 &tracker->res_tree[RES_SRQ]);
3144 list_del(&srq->com.list);
3145 spin_unlock_irq(mlx4_tlock(dev));
3146 kfree(srq);
3147 state = 0;
3148 break;
3149
3150 case RES_SRQ_HW:
3151 in_param = slave;
3152 err = mlx4_cmd(dev, in_param, srqn, 1,
3153 MLX4_CMD_HW2SW_SRQ,
3154 MLX4_CMD_TIME_CLASS_A,
3155 MLX4_CMD_NATIVE);
3156 if (err)
3157 mlx4_dbg(dev, "rem_slave_srqs: failed"
3158 " to move slave %d srq %d to"
3159 " SW ownership\n",
3160 slave, srqn);
3161
3162 atomic_dec(&srq->mtt->ref_count);
3163 if (srq->cq)
3164 atomic_dec(&srq->cq->ref_count);
3165 state = RES_SRQ_ALLOCATED;
3166 break;
3167
3168 default:
3169 state = 0;
3170 }
3171 }
3172 }
3173 spin_lock_irq(mlx4_tlock(dev));
3174 }
3175 spin_unlock_irq(mlx4_tlock(dev));
3176}
3177
3178static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
3179{
3180 struct mlx4_priv *priv = mlx4_priv(dev);
3181 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3182 struct list_head *cq_list =
3183 &tracker->slave_list[slave].res_list[RES_CQ];
3184 struct res_cq *cq;
3185 struct res_cq *tmp;
3186 int state;
3187 u64 in_param;
3188 LIST_HEAD(tlist);
3189 int cqn;
3190 int err;
3191
3192 err = move_all_busy(dev, slave, RES_CQ);
3193 if (err)
3194 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
3195 "busy for slave %d\n", slave);
3196
3197 spin_lock_irq(mlx4_tlock(dev));
3198 list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
3199 spin_unlock_irq(mlx4_tlock(dev));
3200 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
3201 cqn = cq->com.res_id;
3202 state = cq->com.from_state;
3203 while (state != 0) {
3204 switch (state) {
3205 case RES_CQ_ALLOCATED:
3206 __mlx4_cq_free_icm(dev, cqn);
3207 spin_lock_irq(mlx4_tlock(dev));
3208 rb_erase(&cq->com.node,
3209 &tracker->res_tree[RES_CQ]);
3210 list_del(&cq->com.list);
3211 spin_unlock_irq(mlx4_tlock(dev));
3212 kfree(cq);
3213 state = 0;
3214 break;
3215
3216 case RES_CQ_HW:
3217 in_param = slave;
3218 err = mlx4_cmd(dev, in_param, cqn, 1,
3219 MLX4_CMD_HW2SW_CQ,
3220 MLX4_CMD_TIME_CLASS_A,
3221 MLX4_CMD_NATIVE);
3222 if (err)
3223 mlx4_dbg(dev, "rem_slave_cqs: failed"
3224 " to move slave %d cq %d to"
3225 " SW ownership\n",
3226 slave, cqn);
3227 atomic_dec(&cq->mtt->ref_count);
3228 state = RES_CQ_ALLOCATED;
3229 break;
3230
3231 default:
3232 state = 0;
3233 }
3234 }
3235 }
3236 spin_lock_irq(mlx4_tlock(dev));
3237 }
3238 spin_unlock_irq(mlx4_tlock(dev));
3239}
3240
3241static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
3242{
3243 struct mlx4_priv *priv = mlx4_priv(dev);
3244 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3245 struct list_head *mpt_list =
3246 &tracker->slave_list[slave].res_list[RES_MPT];
3247 struct res_mpt *mpt;
3248 struct res_mpt *tmp;
3249 int state;
3250 u64 in_param;
3251 LIST_HEAD(tlist);
3252 int mptn;
3253 int err;
3254
3255 err = move_all_busy(dev, slave, RES_MPT);
3256 if (err)
3257 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
3258 "busy for slave %d\n", slave);
3259
3260 spin_lock_irq(mlx4_tlock(dev));
3261 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
3262 spin_unlock_irq(mlx4_tlock(dev));
3263 if (mpt->com.owner == slave) {
3264 mptn = mpt->com.res_id;
3265 state = mpt->com.from_state;
3266 while (state != 0) {
3267 switch (state) {
3268 case RES_MPT_RESERVED:
3269 __mlx4_mr_release(dev, mpt->key);
3270 spin_lock_irq(mlx4_tlock(dev));
3271 rb_erase(&mpt->com.node,
3272 &tracker->res_tree[RES_MPT]);
3273 list_del(&mpt->com.list);
3274 spin_unlock_irq(mlx4_tlock(dev));
3275 kfree(mpt);
3276 state = 0;
3277 break;
3278
3279 case RES_MPT_MAPPED:
3280 __mlx4_mr_free_icm(dev, mpt->key);
3281 state = RES_MPT_RESERVED;
3282 break;
3283
3284 case RES_MPT_HW:
3285 in_param = slave;
3286 err = mlx4_cmd(dev, in_param, mptn, 0,
3287 MLX4_CMD_HW2SW_MPT,
3288 MLX4_CMD_TIME_CLASS_A,
3289 MLX4_CMD_NATIVE);
3290 if (err)
3291 mlx4_dbg(dev, "rem_slave_mrs: failed"
3292 " to move slave %d mpt %d to"
3293 " SW ownership\n",
3294 slave, mptn);
3295 if (mpt->mtt)
3296 atomic_dec(&mpt->mtt->ref_count);
3297 state = RES_MPT_MAPPED;
3298 break;
3299 default:
3300 state = 0;
3301 }
3302 }
3303 }
3304 spin_lock_irq(mlx4_tlock(dev));
3305 }
3306 spin_unlock_irq(mlx4_tlock(dev));
3307}
3308
3309static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
3310{
3311 struct mlx4_priv *priv = mlx4_priv(dev);
3312 struct mlx4_resource_tracker *tracker =
3313 &priv->mfunc.master.res_tracker;
3314 struct list_head *mtt_list =
3315 &tracker->slave_list[slave].res_list[RES_MTT];
3316 struct res_mtt *mtt;
3317 struct res_mtt *tmp;
3318 int state;
3319 LIST_HEAD(tlist);
3320 int base;
3321 int err;
3322
3323 err = move_all_busy(dev, slave, RES_MTT);
3324 if (err)
3325 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
3326 "busy for slave %d\n", slave);
3327
3328 spin_lock_irq(mlx4_tlock(dev));
3329 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
3330 spin_unlock_irq(mlx4_tlock(dev));
3331 if (mtt->com.owner == slave) {
3332 base = mtt->com.res_id;
3333 state = mtt->com.from_state;
3334 while (state != 0) {
3335 switch (state) {
3336 case RES_MTT_ALLOCATED:
3337 __mlx4_free_mtt_range(dev, base,
3338 mtt->order);
3339 spin_lock_irq(mlx4_tlock(dev));
3340 rb_erase(&mtt->com.node,
3341 &tracker->res_tree[RES_MTT]);
3342 list_del(&mtt->com.list);
3343 spin_unlock_irq(mlx4_tlock(dev));
3344 kfree(mtt);
3345 state = 0;
3346 break;
3347
3348 default:
3349 state = 0;
3350 }
3351 }
3352 }
3353 spin_lock_irq(mlx4_tlock(dev));
3354 }
3355 spin_unlock_irq(mlx4_tlock(dev));
3356}
3357
3358static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
3359{
3360 struct mlx4_priv *priv = mlx4_priv(dev);
3361 struct mlx4_resource_tracker *tracker =
3362 &priv->mfunc.master.res_tracker;
3363 struct list_head *fs_rule_list =
3364 &tracker->slave_list[slave].res_list[RES_FS_RULE];
3365 struct res_fs_rule *fs_rule;
3366 struct res_fs_rule *tmp;
3367 int state;
3368 u64 base;
3369 int err;
3370
3371 err = move_all_busy(dev, slave, RES_FS_RULE);
3372 if (err)
3373		mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
3374 slave);
3375
3376 spin_lock_irq(mlx4_tlock(dev));
3377 list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
3378 spin_unlock_irq(mlx4_tlock(dev));
3379 if (fs_rule->com.owner == slave) {
3380 base = fs_rule->com.res_id;
3381 state = fs_rule->com.from_state;
3382 while (state != 0) {
3383 switch (state) {
3384 case RES_FS_RULE_ALLOCATED:
3385 /* detach rule */
3386 err = mlx4_cmd(dev, base, 0, 0,
3387 MLX4_QP_FLOW_STEERING_DETACH,
3388 MLX4_CMD_TIME_CLASS_A,
3389 MLX4_CMD_NATIVE);
3390
3391 spin_lock_irq(mlx4_tlock(dev));
3392 rb_erase(&fs_rule->com.node,
3393 &tracker->res_tree[RES_FS_RULE]);
3394 list_del(&fs_rule->com.list);
3395 spin_unlock_irq(mlx4_tlock(dev));
3396 kfree(fs_rule);
3397 state = 0;
3398 break;
3399
3400 default:
3401 state = 0;
3402 }
3403 }
3404 }
3405 spin_lock_irq(mlx4_tlock(dev));
3406 }
3407 spin_unlock_irq(mlx4_tlock(dev));
3408}
3409
3410static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
3411{
3412 struct mlx4_priv *priv = mlx4_priv(dev);
3413 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3414 struct list_head *eq_list =
3415 &tracker->slave_list[slave].res_list[RES_EQ];
3416 struct res_eq *eq;
3417 struct res_eq *tmp;
3418 int err;
3419 int state;
3420 LIST_HEAD(tlist);
3421 int eqn;
3422 struct mlx4_cmd_mailbox *mailbox;
3423
3424 err = move_all_busy(dev, slave, RES_EQ);
3425 if (err)
3426 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
3427 "busy for slave %d\n", slave);
3428
3429 spin_lock_irq(mlx4_tlock(dev));
3430 list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
3431 spin_unlock_irq(mlx4_tlock(dev));
3432 if (eq->com.owner == slave) {
3433 eqn = eq->com.res_id;
3434 state = eq->com.from_state;
3435 while (state != 0) {
3436 switch (state) {
3437 case RES_EQ_RESERVED:
3438 spin_lock_irq(mlx4_tlock(dev));
3439 rb_erase(&eq->com.node,
3440 &tracker->res_tree[RES_EQ]);
3441 list_del(&eq->com.list);
3442 spin_unlock_irq(mlx4_tlock(dev));
3443 kfree(eq);
3444 state = 0;
3445 break;
3446
3447 case RES_EQ_HW:
3448 mailbox = mlx4_alloc_cmd_mailbox(dev);
3449 if (IS_ERR(mailbox)) {
3450 cond_resched();
3451 continue;
3452 }
3453 err = mlx4_cmd_box(dev, slave, 0,
3454 eqn & 0xff, 0,
3455 MLX4_CMD_HW2SW_EQ,
3456 MLX4_CMD_TIME_CLASS_A,
3457 MLX4_CMD_NATIVE);
3458 if (err)
3459 mlx4_dbg(dev, "rem_slave_eqs: failed"
3460 " to move slave %d eqs %d to"
3461 " SW ownership\n", slave, eqn);
3462					mlx4_free_cmd_mailbox(dev, mailbox);
3463 atomic_dec(&eq->mtt->ref_count);
3464 state = RES_EQ_RESERVED;
3465 break;
3466
3467 default:
3468 state = 0;
3469 }
3470 }
3471 }
3472 spin_lock_irq(mlx4_tlock(dev));
3473 }
3474 spin_unlock_irq(mlx4_tlock(dev));
3475}
3476
3477static void rem_slave_counters(struct mlx4_dev *dev, int slave)
3478{
3479 struct mlx4_priv *priv = mlx4_priv(dev);
3480 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3481 struct list_head *counter_list =
3482 &tracker->slave_list[slave].res_list[RES_COUNTER];
3483 struct res_counter *counter;
3484 struct res_counter *tmp;
3485 int err;
3486 int index;
3487
3488 err = move_all_busy(dev, slave, RES_COUNTER);
3489 if (err)
3490 mlx4_warn(dev, "rem_slave_counters: Could not move all counters to "
3491 "busy for slave %d\n", slave);
3492
3493 spin_lock_irq(mlx4_tlock(dev));
3494 list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
3495 if (counter->com.owner == slave) {
3496 index = counter->com.res_id;
3497 rb_erase(&counter->com.node,
3498 &tracker->res_tree[RES_COUNTER]);
3499 list_del(&counter->com.list);
3500 kfree(counter);
3501 __mlx4_counter_free(dev, index);
3502 }
3503 }
3504 spin_unlock_irq(mlx4_tlock(dev));
3505}
3506
3507static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
3508{
3509 struct mlx4_priv *priv = mlx4_priv(dev);
3510 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3511 struct list_head *xrcdn_list =
3512 &tracker->slave_list[slave].res_list[RES_XRCD];
3513 struct res_xrcdn *xrcd;
3514 struct res_xrcdn *tmp;
3515 int err;
3516 int xrcdn;
3517
3518 err = move_all_busy(dev, slave, RES_XRCD);
3519 if (err)
3520 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to "
3521 "busy for slave %d\n", slave);
3522
3523 spin_lock_irq(mlx4_tlock(dev));
3524 list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
3525 if (xrcd->com.owner == slave) {
3526 xrcdn = xrcd->com.res_id;
3527			rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
3528 list_del(&xrcd->com.list);
3529 kfree(xrcd);
3530 __mlx4_xrcd_free(dev, xrcdn);
3531 }
3532 }
3533 spin_unlock_irq(mlx4_tlock(dev));
3534}
3535
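/*
 * Clean up everything a slave owns, holding that slave's tracker mutex.
 * QPs are removed before the CQs, SRQs, MRs and EQs they reference, and
 * MTTs are removed only after all of their users are gone.
 */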
3536void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
3537{
3538 struct mlx4_priv *priv = mlx4_priv(dev);
3539
3540 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3541 /*VLAN*/
3542 rem_slave_macs(dev, slave);
3543 rem_slave_qps(dev, slave);
3544 rem_slave_srqs(dev, slave);
3545 rem_slave_cqs(dev, slave);
3546 rem_slave_mrs(dev, slave);
3547 rem_slave_eqs(dev, slave);
3548 rem_slave_mtts(dev, slave);
3549 rem_slave_counters(dev, slave);
3550 rem_slave_xrcdns(dev, slave);
3551	rem_slave_fs_rule(dev, slave);
3552 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3553}