mlx4_core: Fix integer overflow issues around MTT table
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
4 * All rights reserved.
5 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
35
36#include <linux/sched.h>
37#include <linux/pci.h>
38#include <linux/errno.h>
39#include <linux/kernel.h>
40#include <linux/io.h>
 41#include <linux/slab.h>
42#include <linux/mlx4/cmd.h>
43#include <linux/mlx4/qp.h>
 44#include <linux/if_ether.h>
45
46#include "mlx4.h"
47#include "fw.h"
48
49#define MLX4_MAC_VALID (1ull << 63)
50
51struct mac_res {
52 struct list_head list;
53 u64 mac;
54 u8 port;
55};
56
57struct res_common {
58 struct list_head list;
 59 struct rb_node node;
 60 u64 res_id;
61 int owner;
62 int state;
63 int from_state;
64 int to_state;
65 int removing;
66};
67
68enum {
69 RES_ANY_BUSY = 1
70};
71
72struct res_gid {
73 struct list_head list;
74 u8 gid[16];
75 enum mlx4_protocol prot;
 76 enum mlx4_steer_type steer;
77};
78
79enum res_qp_states {
80 RES_QP_BUSY = RES_ANY_BUSY,
81
82 /* QP number was allocated */
83 RES_QP_RESERVED,
84
85 /* ICM memory for QP context was mapped */
86 RES_QP_MAPPED,
87
88 /* QP is in hw ownership */
89 RES_QP_HW
90};
91
92struct res_qp {
93 struct res_common com;
94 struct res_mtt *mtt;
95 struct res_cq *rcq;
96 struct res_cq *scq;
97 struct res_srq *srq;
98 struct list_head mcg_list;
99 spinlock_t mcg_spl;
100 int local_qpn;
101};
102
103enum res_mtt_states {
104 RES_MTT_BUSY = RES_ANY_BUSY,
105 RES_MTT_ALLOCATED,
106};
107
108static inline const char *mtt_states_str(enum res_mtt_states state)
109{
110 switch (state) {
111 case RES_MTT_BUSY: return "RES_MTT_BUSY";
112 case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
113 default: return "Unknown";
114 }
115}
116
117struct res_mtt {
118 struct res_common com;
119 int order;
120 atomic_t ref_count;
121};
122
123enum res_mpt_states {
124 RES_MPT_BUSY = RES_ANY_BUSY,
125 RES_MPT_RESERVED,
126 RES_MPT_MAPPED,
127 RES_MPT_HW,
128};
129
130struct res_mpt {
131 struct res_common com;
132 struct res_mtt *mtt;
133 int key;
134};
135
136enum res_eq_states {
137 RES_EQ_BUSY = RES_ANY_BUSY,
138 RES_EQ_RESERVED,
139 RES_EQ_HW,
140};
141
142struct res_eq {
143 struct res_common com;
144 struct res_mtt *mtt;
145};
146
147enum res_cq_states {
148 RES_CQ_BUSY = RES_ANY_BUSY,
149 RES_CQ_ALLOCATED,
150 RES_CQ_HW,
151};
152
153struct res_cq {
154 struct res_common com;
155 struct res_mtt *mtt;
156 atomic_t ref_count;
157};
158
159enum res_srq_states {
160 RES_SRQ_BUSY = RES_ANY_BUSY,
161 RES_SRQ_ALLOCATED,
162 RES_SRQ_HW,
163};
164
165struct res_srq {
166 struct res_common com;
167 struct res_mtt *mtt;
168 struct res_cq *cq;
169 atomic_t ref_count;
170};
171
172enum res_counter_states {
173 RES_COUNTER_BUSY = RES_ANY_BUSY,
174 RES_COUNTER_ALLOCATED,
175};
176
177struct res_counter {
178 struct res_common com;
179 int port;
180};
181
182enum res_xrcdn_states {
183 RES_XRCD_BUSY = RES_ANY_BUSY,
184 RES_XRCD_ALLOCATED,
185};
186
187struct res_xrcdn {
188 struct res_common com;
189 int port;
190};
191
192enum res_fs_rule_states {
193 RES_FS_RULE_BUSY = RES_ANY_BUSY,
194 RES_FS_RULE_ALLOCATED,
195};
196
197struct res_fs_rule {
198 struct res_common com;
199};
200
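/* Look up a tracked resource by id in the per-type rb-tree; returns the
 * embedded res_common, or NULL if the id is not tracked.
 */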
201static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
202{
203 struct rb_node *node = root->rb_node;
204
205 while (node) {
206 struct res_common *res = container_of(node, struct res_common,
207 node);
208
209 if (res_id < res->res_id)
210 node = node->rb_left;
211 else if (res_id > res->res_id)
212 node = node->rb_right;
213 else
214 return res;
215 }
216 return NULL;
217}
218
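/* Insert a tracked resource into the rb-tree, keyed by res_id; fails with
 * -EEXIST if an entry with the same id is already present.
 */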
219static int res_tracker_insert(struct rb_root *root, struct res_common *res)
220{
221 struct rb_node **new = &(root->rb_node), *parent = NULL;
222
223 /* Figure out where to put new node */
224 while (*new) {
225 struct res_common *this = container_of(*new, struct res_common,
226 node);
227
228 parent = *new;
229 if (res->res_id < this->res_id)
230 new = &((*new)->rb_left);
231 else if (res->res_id > this->res_id)
232 new = &((*new)->rb_right);
233 else
234 return -EEXIST;
235 }
236
237 /* Add new node and rebalance tree. */
238 rb_link_node(&res->node, parent, new);
239 rb_insert_color(&res->node, root);
240
241 return 0;
242}
243
244/* For Debug uses */
245static const char *ResourceType(enum mlx4_resource rt)
246{
247 switch (rt) {
248 case RES_QP: return "RES_QP";
249 case RES_CQ: return "RES_CQ";
250 case RES_SRQ: return "RES_SRQ";
251 case RES_MPT: return "RES_MPT";
252 case RES_MTT: return "RES_MTT";
253 case RES_MAC: return "RES_MAC";
254 case RES_EQ: return "RES_EQ";
255 case RES_COUNTER: return "RES_COUNTER";
 256 case RES_FS_RULE: return "RES_FS_RULE";
 257 case RES_XRCD: return "RES_XRCD";
258 default: return "Unknown resource type !!!";
259 };
260}
261
262int mlx4_init_resource_tracker(struct mlx4_dev *dev)
263{
264 struct mlx4_priv *priv = mlx4_priv(dev);
265 int i;
266 int t;
267
268 priv->mfunc.master.res_tracker.slave_list =
269 kzalloc(dev->num_slaves * sizeof(struct slave_list),
270 GFP_KERNEL);
271 if (!priv->mfunc.master.res_tracker.slave_list)
272 return -ENOMEM;
273
274 for (i = 0 ; i < dev->num_slaves; i++) {
275 for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
276 INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
277 slave_list[i].res_list[t]);
278 mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
279 }
280
281 mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
282 dev->num_slaves);
283 for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
 284 priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
285
286 spin_lock_init(&priv->mfunc.master.res_tracker.lock);
287 return 0 ;
288}
289
290void mlx4_free_resource_tracker(struct mlx4_dev *dev,
291 enum mlx4_res_tracker_free_type type)
292{
293 struct mlx4_priv *priv = mlx4_priv(dev);
294 int i;
295
296 if (priv->mfunc.master.res_tracker.slave_list) {
297 if (type != RES_TR_FREE_STRUCTS_ONLY)
298 for (i = 0 ; i < dev->num_slaves; i++)
299 if (type == RES_TR_FREE_ALL ||
300 dev->caps.function != i)
301 mlx4_delete_all_resources_for_slave(dev, i);
302
303 if (type != RES_TR_FREE_SLAVES_ONLY) {
304 kfree(priv->mfunc.master.res_tracker.slave_list);
305 priv->mfunc.master.res_tracker.slave_list = NULL;
306 }
307 }
308}
309
310static void update_ud_gid(struct mlx4_dev *dev,
311 struct mlx4_qp_context *qp_ctx, u8 slave)
312{
313 u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
314
315 if (MLX4_QP_ST_UD == ts)
316 qp_ctx->pri_path.mgid_index = 0x80 | slave;
317
318 mlx4_dbg(dev, "slave %d, new gid index: 0x%x ",
319 slave, qp_ctx->pri_path.mgid_index);
320}
321
322static int mpt_mask(struct mlx4_dev *dev)
323{
324 return dev->caps.num_mpts - 1;
325}
326
327static void *find_res(struct mlx4_dev *dev, int res_id,
328 enum mlx4_resource type)
329{
330 struct mlx4_priv *priv = mlx4_priv(dev);
331
332 return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
333 res_id);
334}
335
 336static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
337 enum mlx4_resource type,
338 void *res)
339{
340 struct res_common *r;
341 int err = 0;
342
343 spin_lock_irq(mlx4_tlock(dev));
344 r = find_res(dev, res_id, type);
345 if (!r) {
346 err = -ENONET;
347 goto exit;
348 }
349
350 if (r->state == RES_ANY_BUSY) {
351 err = -EBUSY;
352 goto exit;
353 }
354
355 if (r->owner != slave) {
356 err = -EPERM;
357 goto exit;
358 }
359
360 r->from_state = r->state;
361 r->state = RES_ANY_BUSY;
 362 mlx4_dbg(dev, "res %s id 0x%llx to busy\n",
363 ResourceType(type), r->res_id);
364
365 if (res)
366 *((struct res_common **)res) = r;
367
368exit:
369 spin_unlock_irq(mlx4_tlock(dev));
370 return err;
371}
372
373int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
374 enum mlx4_resource type,
 375 u64 res_id, int *slave)
376{
377
378 struct res_common *r;
379 int err = -ENOENT;
380 int id = res_id;
381
382 if (type == RES_QP)
383 id &= 0x7fffff;
 384 spin_lock(mlx4_tlock(dev));
385
386 r = find_res(dev, id, type);
387 if (r) {
388 *slave = r->owner;
389 err = 0;
390 }
 391 spin_unlock(mlx4_tlock(dev));
392
393 return err;
394}
395
 396static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
397 enum mlx4_resource type)
398{
399 struct res_common *r;
400
401 spin_lock_irq(mlx4_tlock(dev));
402 r = find_res(dev, res_id, type);
403 if (r)
404 r->state = r->from_state;
405 spin_unlock_irq(mlx4_tlock(dev));
406}
407
408static struct res_common *alloc_qp_tr(int id)
409{
410 struct res_qp *ret;
411
412 ret = kzalloc(sizeof *ret, GFP_KERNEL);
413 if (!ret)
414 return NULL;
415
416 ret->com.res_id = id;
417 ret->com.state = RES_QP_RESERVED;
 418 ret->local_qpn = id;
419 INIT_LIST_HEAD(&ret->mcg_list);
420 spin_lock_init(&ret->mcg_spl);
421
422 return &ret->com;
423}
424
425static struct res_common *alloc_mtt_tr(int id, int order)
426{
427 struct res_mtt *ret;
428
429 ret = kzalloc(sizeof *ret, GFP_KERNEL);
430 if (!ret)
431 return NULL;
432
433 ret->com.res_id = id;
434 ret->order = order;
435 ret->com.state = RES_MTT_ALLOCATED;
436 atomic_set(&ret->ref_count, 0);
437
438 return &ret->com;
439}
440
441static struct res_common *alloc_mpt_tr(int id, int key)
442{
443 struct res_mpt *ret;
444
445 ret = kzalloc(sizeof *ret, GFP_KERNEL);
446 if (!ret)
447 return NULL;
448
449 ret->com.res_id = id;
450 ret->com.state = RES_MPT_RESERVED;
451 ret->key = key;
452
453 return &ret->com;
454}
455
456static struct res_common *alloc_eq_tr(int id)
457{
458 struct res_eq *ret;
459
460 ret = kzalloc(sizeof *ret, GFP_KERNEL);
461 if (!ret)
462 return NULL;
463
464 ret->com.res_id = id;
465 ret->com.state = RES_EQ_RESERVED;
466
467 return &ret->com;
468}
469
470static struct res_common *alloc_cq_tr(int id)
471{
472 struct res_cq *ret;
473
474 ret = kzalloc(sizeof *ret, GFP_KERNEL);
475 if (!ret)
476 return NULL;
477
478 ret->com.res_id = id;
479 ret->com.state = RES_CQ_ALLOCATED;
480 atomic_set(&ret->ref_count, 0);
481
482 return &ret->com;
483}
484
485static struct res_common *alloc_srq_tr(int id)
486{
487 struct res_srq *ret;
488
489 ret = kzalloc(sizeof *ret, GFP_KERNEL);
490 if (!ret)
491 return NULL;
492
493 ret->com.res_id = id;
494 ret->com.state = RES_SRQ_ALLOCATED;
495 atomic_set(&ret->ref_count, 0);
496
497 return &ret->com;
498}
499
500static struct res_common *alloc_counter_tr(int id)
501{
502 struct res_counter *ret;
503
504 ret = kzalloc(sizeof *ret, GFP_KERNEL);
505 if (!ret)
506 return NULL;
507
508 ret->com.res_id = id;
509 ret->com.state = RES_COUNTER_ALLOCATED;
510
511 return &ret->com;
512}
513
514static struct res_common *alloc_xrcdn_tr(int id)
515{
516 struct res_xrcdn *ret;
517
518 ret = kzalloc(sizeof *ret, GFP_KERNEL);
519 if (!ret)
520 return NULL;
521
522 ret->com.res_id = id;
523 ret->com.state = RES_XRCD_ALLOCATED;
524
525 return &ret->com;
526}
527
528static struct res_common *alloc_fs_rule_tr(u64 id)
529{
530 struct res_fs_rule *ret;
531
532 ret = kzalloc(sizeof *ret, GFP_KERNEL);
533 if (!ret)
534 return NULL;
535
536 ret->com.res_id = id;
537 ret->com.state = RES_FS_RULE_ALLOCATED;
538
539 return &ret->com;
540}
541
 542static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
543 int extra)
544{
545 struct res_common *ret;
546
547 switch (type) {
548 case RES_QP:
549 ret = alloc_qp_tr(id);
550 break;
551 case RES_MPT:
552 ret = alloc_mpt_tr(id, extra);
553 break;
554 case RES_MTT:
555 ret = alloc_mtt_tr(id, extra);
556 break;
557 case RES_EQ:
558 ret = alloc_eq_tr(id);
559 break;
560 case RES_CQ:
561 ret = alloc_cq_tr(id);
562 break;
563 case RES_SRQ:
564 ret = alloc_srq_tr(id);
565 break;
566 case RES_MAC:
567 printk(KERN_ERR "implementation missing\n");
568 return NULL;
569 case RES_COUNTER:
570 ret = alloc_counter_tr(id);
571 break;
572 case RES_XRCD:
573 ret = alloc_xrcdn_tr(id);
574 break;
575 case RES_FS_RULE:
576 ret = alloc_fs_rule_tr(id);
577 break;
578 default:
579 return NULL;
580 }
581 if (ret)
582 ret->owner = slave;
583
584 return ret;
585}
586
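/* Allocate tracker entries for ids [base, base + count) of the given type,
 * insert them into the rb-tree and the owning slave's list under the tracker
 * lock, and roll everything back on failure.
 */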
 587static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
588 enum mlx4_resource type, int extra)
589{
590 int i;
591 int err;
592 struct mlx4_priv *priv = mlx4_priv(dev);
593 struct res_common **res_arr;
594 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
 595 struct rb_root *root = &tracker->res_tree[type];
596
597 res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
598 if (!res_arr)
599 return -ENOMEM;
600
601 for (i = 0; i < count; ++i) {
602 res_arr[i] = alloc_tr(base + i, type, slave, extra);
603 if (!res_arr[i]) {
604 for (--i; i >= 0; --i)
605 kfree(res_arr[i]);
606
607 kfree(res_arr);
608 return -ENOMEM;
609 }
610 }
611
612 spin_lock_irq(mlx4_tlock(dev));
613 for (i = 0; i < count; ++i) {
614 if (find_res(dev, base + i, type)) {
615 err = -EEXIST;
616 goto undo;
617 }
 618 err = res_tracker_insert(root, res_arr[i]);
619 if (err)
620 goto undo;
621 list_add_tail(&res_arr[i]->list,
622 &tracker->slave_list[slave].res_list[type]);
623 }
624 spin_unlock_irq(mlx4_tlock(dev));
625 kfree(res_arr);
626
627 return 0;
628
629undo:
 630 for (--i; i >= 0; --i)
 631 rb_erase(&res_arr[i]->node, root);
632
633 spin_unlock_irq(mlx4_tlock(dev));
634
635 for (i = 0; i < count; ++i)
636 kfree(res_arr[i]);
637
638 kfree(res_arr);
639
640 return err;
641}
642
643static int remove_qp_ok(struct res_qp *res)
644{
645 if (res->com.state == RES_QP_BUSY)
646 return -EBUSY;
647 else if (res->com.state != RES_QP_RESERVED)
648 return -EPERM;
649
650 return 0;
651}
652
653static int remove_mtt_ok(struct res_mtt *res, int order)
654{
655 if (res->com.state == RES_MTT_BUSY ||
656 atomic_read(&res->ref_count)) {
657 printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
658 __func__, __LINE__,
659 mtt_states_str(res->com.state),
660 atomic_read(&res->ref_count));
661 return -EBUSY;
662 } else if (res->com.state != RES_MTT_ALLOCATED)
663 return -EPERM;
664 else if (res->order != order)
665 return -EINVAL;
666
667 return 0;
668}
669
670static int remove_mpt_ok(struct res_mpt *res)
671{
672 if (res->com.state == RES_MPT_BUSY)
673 return -EBUSY;
674 else if (res->com.state != RES_MPT_RESERVED)
675 return -EPERM;
676
677 return 0;
678}
679
680static int remove_eq_ok(struct res_eq *res)
681{
 682 if (res->com.state == RES_EQ_BUSY)
 683 return -EBUSY;
 684 else if (res->com.state != RES_EQ_RESERVED)
685 return -EPERM;
686
687 return 0;
688}
689
690static int remove_counter_ok(struct res_counter *res)
691{
692 if (res->com.state == RES_COUNTER_BUSY)
693 return -EBUSY;
694 else if (res->com.state != RES_COUNTER_ALLOCATED)
695 return -EPERM;
696
697 return 0;
698}
699
700static int remove_xrcdn_ok(struct res_xrcdn *res)
701{
702 if (res->com.state == RES_XRCD_BUSY)
703 return -EBUSY;
704 else if (res->com.state != RES_XRCD_ALLOCATED)
705 return -EPERM;
706
707 return 0;
708}
709
710static int remove_fs_rule_ok(struct res_fs_rule *res)
711{
712 if (res->com.state == RES_FS_RULE_BUSY)
713 return -EBUSY;
714 else if (res->com.state != RES_FS_RULE_ALLOCATED)
715 return -EPERM;
716
717 return 0;
718}
719
720static int remove_cq_ok(struct res_cq *res)
721{
722 if (res->com.state == RES_CQ_BUSY)
723 return -EBUSY;
724 else if (res->com.state != RES_CQ_ALLOCATED)
725 return -EPERM;
726
727 return 0;
728}
729
730static int remove_srq_ok(struct res_srq *res)
731{
732 if (res->com.state == RES_SRQ_BUSY)
733 return -EBUSY;
734 else if (res->com.state != RES_SRQ_ALLOCATED)
735 return -EPERM;
736
737 return 0;
738}
739
740static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
741{
742 switch (type) {
743 case RES_QP:
744 return remove_qp_ok((struct res_qp *)res);
745 case RES_CQ:
746 return remove_cq_ok((struct res_cq *)res);
747 case RES_SRQ:
748 return remove_srq_ok((struct res_srq *)res);
749 case RES_MPT:
750 return remove_mpt_ok((struct res_mpt *)res);
751 case RES_MTT:
752 return remove_mtt_ok((struct res_mtt *)res, extra);
753 case RES_MAC:
754 return -ENOSYS;
755 case RES_EQ:
756 return remove_eq_ok((struct res_eq *)res);
757 case RES_COUNTER:
758 return remove_counter_ok((struct res_counter *)res);
759 case RES_XRCD:
760 return remove_xrcdn_ok((struct res_xrcdn *)res);
761 case RES_FS_RULE:
762 return remove_fs_rule_ok((struct res_fs_rule *)res);
763 default:
764 return -EINVAL;
765 }
766}
767
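/* Remove tracker entries for ids [base, base + count). Every entry must
 * exist, belong to the slave and be in a removable state, otherwise nothing
 * is removed.
 */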
 768static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
769 enum mlx4_resource type, int extra)
770{
 771 u64 i;
772 int err;
773 struct mlx4_priv *priv = mlx4_priv(dev);
774 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
775 struct res_common *r;
776
777 spin_lock_irq(mlx4_tlock(dev));
778 for (i = base; i < base + count; ++i) {
 779 r = res_tracker_lookup(&tracker->res_tree[type], i);
780 if (!r) {
781 err = -ENOENT;
782 goto out;
783 }
784 if (r->owner != slave) {
785 err = -EPERM;
786 goto out;
787 }
788 err = remove_ok(r, type, extra);
789 if (err)
790 goto out;
791 }
792
793 for (i = base; i < base + count; ++i) {
794 r = res_tracker_lookup(&tracker->res_tree[type], i);
795 rb_erase(&r->node, &tracker->res_tree[type]);
796 list_del(&r->list);
797 kfree(r);
798 }
799 err = 0;
800
801out:
802 spin_unlock_irq(mlx4_tlock(dev));
803
804 return err;
805}
806
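/* Start a tracked QP state transition: validate the requested move, record
 * the source and target states and mark the entry busy. The caller finishes
 * with res_end_move() or rolls back with res_abort_move().
 */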
807static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
808 enum res_qp_states state, struct res_qp **qp,
809 int alloc)
810{
811 struct mlx4_priv *priv = mlx4_priv(dev);
812 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
813 struct res_qp *r;
814 int err = 0;
815
816 spin_lock_irq(mlx4_tlock(dev));
 817 r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
818 if (!r)
819 err = -ENOENT;
820 else if (r->com.owner != slave)
821 err = -EPERM;
822 else {
823 switch (state) {
824 case RES_QP_BUSY:
 825 mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
826 __func__, r->com.res_id);
827 err = -EBUSY;
828 break;
829
830 case RES_QP_RESERVED:
831 if (r->com.state == RES_QP_MAPPED && !alloc)
832 break;
833
 834 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
835 err = -EINVAL;
836 break;
837
838 case RES_QP_MAPPED:
839 if ((r->com.state == RES_QP_RESERVED && alloc) ||
840 r->com.state == RES_QP_HW)
841 break;
842 else {
 843 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
844 r->com.res_id);
845 err = -EINVAL;
846 }
847
848 break;
849
850 case RES_QP_HW:
851 if (r->com.state != RES_QP_MAPPED)
852 err = -EINVAL;
853 break;
854 default:
855 err = -EINVAL;
856 }
857
858 if (!err) {
859 r->com.from_state = r->com.state;
860 r->com.to_state = state;
861 r->com.state = RES_QP_BUSY;
862 if (qp)
 863 *qp = r;
864 }
865 }
866
867 spin_unlock_irq(mlx4_tlock(dev));
868
869 return err;
870}
871
872static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
873 enum res_mpt_states state, struct res_mpt **mpt)
874{
875 struct mlx4_priv *priv = mlx4_priv(dev);
876 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
877 struct res_mpt *r;
878 int err = 0;
879
880 spin_lock_irq(mlx4_tlock(dev));
 881 r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
882 if (!r)
883 err = -ENOENT;
884 else if (r->com.owner != slave)
885 err = -EPERM;
886 else {
887 switch (state) {
888 case RES_MPT_BUSY:
889 err = -EINVAL;
890 break;
891
892 case RES_MPT_RESERVED:
893 if (r->com.state != RES_MPT_MAPPED)
894 err = -EINVAL;
895 break;
896
897 case RES_MPT_MAPPED:
898 if (r->com.state != RES_MPT_RESERVED &&
899 r->com.state != RES_MPT_HW)
900 err = -EINVAL;
901 break;
902
903 case RES_MPT_HW:
904 if (r->com.state != RES_MPT_MAPPED)
905 err = -EINVAL;
906 break;
907 default:
908 err = -EINVAL;
909 }
910
911 if (!err) {
912 r->com.from_state = r->com.state;
913 r->com.to_state = state;
914 r->com.state = RES_MPT_BUSY;
915 if (mpt)
 916 *mpt = r;
917 }
918 }
919
920 spin_unlock_irq(mlx4_tlock(dev));
921
922 return err;
923}
924
925static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
926 enum res_eq_states state, struct res_eq **eq)
927{
928 struct mlx4_priv *priv = mlx4_priv(dev);
929 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
930 struct res_eq *r;
931 int err = 0;
932
933 spin_lock_irq(mlx4_tlock(dev));
 934 r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
935 if (!r)
936 err = -ENOENT;
937 else if (r->com.owner != slave)
938 err = -EPERM;
939 else {
940 switch (state) {
941 case RES_EQ_BUSY:
942 err = -EINVAL;
943 break;
944
945 case RES_EQ_RESERVED:
946 if (r->com.state != RES_EQ_HW)
947 err = -EINVAL;
948 break;
949
950 case RES_EQ_HW:
951 if (r->com.state != RES_EQ_RESERVED)
952 err = -EINVAL;
953 break;
954
955 default:
956 err = -EINVAL;
957 }
958
959 if (!err) {
960 r->com.from_state = r->com.state;
961 r->com.to_state = state;
962 r->com.state = RES_EQ_BUSY;
963 if (eq)
964 *eq = r;
965 }
966 }
967
968 spin_unlock_irq(mlx4_tlock(dev));
969
970 return err;
971}
972
973static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
974 enum res_cq_states state, struct res_cq **cq)
975{
976 struct mlx4_priv *priv = mlx4_priv(dev);
977 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
978 struct res_cq *r;
979 int err;
980
981 spin_lock_irq(mlx4_tlock(dev));
 982 r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
983 if (!r)
984 err = -ENOENT;
985 else if (r->com.owner != slave)
986 err = -EPERM;
987 else {
988 switch (state) {
989 case RES_CQ_BUSY:
990 err = -EBUSY;
991 break;
992
993 case RES_CQ_ALLOCATED:
994 if (r->com.state != RES_CQ_HW)
995 err = -EINVAL;
996 else if (atomic_read(&r->ref_count))
997 err = -EBUSY;
998 else
999 err = 0;
1000 break;
1001
1002 case RES_CQ_HW:
1003 if (r->com.state != RES_CQ_ALLOCATED)
1004 err = -EINVAL;
1005 else
1006 err = 0;
1007 break;
1008
1009 default:
1010 err = -EINVAL;
1011 }
1012
1013 if (!err) {
1014 r->com.from_state = r->com.state;
1015 r->com.to_state = state;
1016 r->com.state = RES_CQ_BUSY;
1017 if (cq)
1018 *cq = r;
1019 }
1020 }
1021
1022 spin_unlock_irq(mlx4_tlock(dev));
1023
1024 return err;
1025}
1026
1027static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1028 enum res_cq_states state, struct res_srq **srq)
1029{
1030 struct mlx4_priv *priv = mlx4_priv(dev);
1031 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1032 struct res_srq *r;
1033 int err = 0;
1034
1035 spin_lock_irq(mlx4_tlock(dev));
 1036 r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
1037 if (!r)
1038 err = -ENOENT;
1039 else if (r->com.owner != slave)
1040 err = -EPERM;
1041 else {
1042 switch (state) {
1043 case RES_SRQ_BUSY:
1044 err = -EINVAL;
1045 break;
1046
1047 case RES_SRQ_ALLOCATED:
1048 if (r->com.state != RES_SRQ_HW)
1049 err = -EINVAL;
1050 else if (atomic_read(&r->ref_count))
1051 err = -EBUSY;
1052 break;
1053
1054 case RES_SRQ_HW:
1055 if (r->com.state != RES_SRQ_ALLOCATED)
1056 err = -EINVAL;
1057 break;
1058
1059 default:
1060 err = -EINVAL;
1061 }
1062
1063 if (!err) {
1064 r->com.from_state = r->com.state;
1065 r->com.to_state = state;
1066 r->com.state = RES_SRQ_BUSY;
1067 if (srq)
1068 *srq = r;
1069 }
1070 }
1071
1072 spin_unlock_irq(mlx4_tlock(dev));
1073
1074 return err;
1075}
1076
1077static void res_abort_move(struct mlx4_dev *dev, int slave,
1078 enum mlx4_resource type, int id)
1079{
1080 struct mlx4_priv *priv = mlx4_priv(dev);
1081 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1082 struct res_common *r;
1083
1084 spin_lock_irq(mlx4_tlock(dev));
 1085 r = res_tracker_lookup(&tracker->res_tree[type], id);
1086 if (r && (r->owner == slave))
1087 r->state = r->from_state;
1088 spin_unlock_irq(mlx4_tlock(dev));
1089}
1090
1091static void res_end_move(struct mlx4_dev *dev, int slave,
1092 enum mlx4_resource type, int id)
1093{
1094 struct mlx4_priv *priv = mlx4_priv(dev);
1095 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1096 struct res_common *r;
1097
1098 spin_lock_irq(mlx4_tlock(dev));
 1099 r = res_tracker_lookup(&tracker->res_tree[type], id);
1100 if (r && (r->owner == slave))
1101 r->state = r->to_state;
1102 spin_unlock_irq(mlx4_tlock(dev));
1103}
1104
1105static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1106{
1107 return mlx4_is_qp_reserved(dev, qpn);
1108}
1109
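/* ALLOC_RES handler for QPs: RES_OP_RESERVE reserves a range of QP numbers
 * for the slave, RES_OP_MAP_ICM maps ICM memory for one QP and moves it to
 * RES_QP_MAPPED.
 */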
1110static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1111 u64 in_param, u64 *out_param)
1112{
1113 int err;
1114 int count;
1115 int align;
1116 int base;
1117 int qpn;
1118
1119 switch (op) {
1120 case RES_OP_RESERVE:
1121 count = get_param_l(&in_param);
1122 align = get_param_h(&in_param);
1123 err = __mlx4_qp_reserve_range(dev, count, align, &base);
1124 if (err)
1125 return err;
1126
1127 err = add_res_range(dev, slave, base, count, RES_QP, 0);
1128 if (err) {
1129 __mlx4_qp_release_range(dev, base, count);
1130 return err;
1131 }
1132 set_param_l(out_param, base);
1133 break;
1134 case RES_OP_MAP_ICM:
1135 qpn = get_param_l(&in_param) & 0x7fffff;
1136 if (valid_reserved(dev, slave, qpn)) {
1137 err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1138 if (err)
1139 return err;
1140 }
1141
1142 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1143 NULL, 1);
1144 if (err)
1145 return err;
1146
1147 if (!valid_reserved(dev, slave, qpn)) {
1148 err = __mlx4_qp_alloc_icm(dev, qpn);
1149 if (err) {
1150 res_abort_move(dev, slave, RES_QP, qpn);
1151 return err;
1152 }
1153 }
1154
1155 res_end_move(dev, slave, RES_QP, qpn);
1156 break;
1157
1158 default:
1159 err = -EINVAL;
1160 break;
1161 }
1162 return err;
1163}
1164
1165static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1166 u64 in_param, u64 *out_param)
1167{
1168 int err = -EINVAL;
1169 int base;
1170 int order;
1171
1172 if (op != RES_OP_RESERVE_AND_MAP)
1173 return err;
1174
1175 order = get_param_l(&in_param);
1176 base = __mlx4_alloc_mtt_range(dev, order);
1177 if (base == -1)
1178 return -ENOMEM;
1179
1180 err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1181 if (err)
1182 __mlx4_free_mtt_range(dev, base, order);
1183 else
1184 set_param_l(out_param, base);
1185
1186 return err;
1187}
1188
1189static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1190 u64 in_param, u64 *out_param)
1191{
1192 int err = -EINVAL;
1193 int index;
1194 int id;
1195 struct res_mpt *mpt;
1196
1197 switch (op) {
1198 case RES_OP_RESERVE:
1199 index = __mlx4_mr_reserve(dev);
1200 if (index == -1)
1201 break;
1202 id = index & mpt_mask(dev);
1203
1204 err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1205 if (err) {
1206 __mlx4_mr_release(dev, index);
1207 break;
1208 }
1209 set_param_l(out_param, index);
1210 break;
1211 case RES_OP_MAP_ICM:
1212 index = get_param_l(&in_param);
1213 id = index & mpt_mask(dev);
1214 err = mr_res_start_move_to(dev, slave, id,
1215 RES_MPT_MAPPED, &mpt);
1216 if (err)
1217 return err;
1218
1219 err = __mlx4_mr_alloc_icm(dev, mpt->key);
1220 if (err) {
1221 res_abort_move(dev, slave, RES_MPT, id);
1222 return err;
1223 }
1224
1225 res_end_move(dev, slave, RES_MPT, id);
1226 break;
1227 }
1228 return err;
1229}
1230
1231static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1232 u64 in_param, u64 *out_param)
1233{
1234 int cqn;
1235 int err;
1236
1237 switch (op) {
1238 case RES_OP_RESERVE_AND_MAP:
1239 err = __mlx4_cq_alloc_icm(dev, &cqn);
1240 if (err)
1241 break;
1242
1243 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1244 if (err) {
1245 __mlx4_cq_free_icm(dev, cqn);
1246 break;
1247 }
1248
1249 set_param_l(out_param, cqn);
1250 break;
1251
1252 default:
1253 err = -EINVAL;
1254 }
1255
1256 return err;
1257}
1258
1259static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1260 u64 in_param, u64 *out_param)
1261{
1262 int srqn;
1263 int err;
1264
1265 switch (op) {
1266 case RES_OP_RESERVE_AND_MAP:
1267 err = __mlx4_srq_alloc_icm(dev, &srqn);
1268 if (err)
1269 break;
1270
1271 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1272 if (err) {
1273 __mlx4_srq_free_icm(dev, srqn);
1274 break;
1275 }
1276
1277 set_param_l(out_param, srqn);
1278 break;
1279
1280 default:
1281 err = -EINVAL;
1282 }
1283
1284 return err;
1285}
1286
1287static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
1288{
1289 struct mlx4_priv *priv = mlx4_priv(dev);
1290 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1291 struct mac_res *res;
1292
1293 res = kzalloc(sizeof *res, GFP_KERNEL);
1294 if (!res)
1295 return -ENOMEM;
1296 res->mac = mac;
1297 res->port = (u8) port;
1298 list_add_tail(&res->list,
1299 &tracker->slave_list[slave].res_list[RES_MAC]);
1300 return 0;
1301}
1302
1303static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1304 int port)
1305{
1306 struct mlx4_priv *priv = mlx4_priv(dev);
1307 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1308 struct list_head *mac_list =
1309 &tracker->slave_list[slave].res_list[RES_MAC];
1310 struct mac_res *res, *tmp;
1311
1312 list_for_each_entry_safe(res, tmp, mac_list, list) {
1313 if (res->mac == mac && res->port == (u8) port) {
1314 list_del(&res->list);
1315 kfree(res);
1316 break;
1317 }
1318 }
1319}
1320
1321static void rem_slave_macs(struct mlx4_dev *dev, int slave)
1322{
1323 struct mlx4_priv *priv = mlx4_priv(dev);
1324 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1325 struct list_head *mac_list =
1326 &tracker->slave_list[slave].res_list[RES_MAC];
1327 struct mac_res *res, *tmp;
1328
1329 list_for_each_entry_safe(res, tmp, mac_list, list) {
1330 list_del(&res->list);
1331 __mlx4_unregister_mac(dev, res->port, res->mac);
1332 kfree(res);
1333 }
1334}
1335
1336static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1337 u64 in_param, u64 *out_param)
1338{
1339 int err = -EINVAL;
1340 int port;
1341 u64 mac;
1342
1343 if (op != RES_OP_RESERVE_AND_MAP)
1344 return err;
1345
1346 port = get_param_l(out_param);
1347 mac = in_param;
1348
1349 err = __mlx4_register_mac(dev, port, mac);
1350 if (err >= 0) {
1351 set_param_l(out_param, err);
1352 err = 0;
1353 }
1354
1355 if (!err) {
1356 err = mac_add_to_slave(dev, slave, mac, port);
1357 if (err)
1358 __mlx4_unregister_mac(dev, port, mac);
1359 }
1360 return err;
1361}
1362
1363static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1364 u64 in_param, u64 *out_param)
1365{
1366 return 0;
1367}
1368
1369static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1370 u64 in_param, u64 *out_param)
1371{
1372 u32 index;
1373 int err;
1374
1375 if (op != RES_OP_RESERVE)
1376 return -EINVAL;
1377
1378 err = __mlx4_counter_alloc(dev, &index);
1379 if (err)
1380 return err;
1381
1382 err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
1383 if (err)
1384 __mlx4_counter_free(dev, index);
1385 else
1386 set_param_l(out_param, index);
1387
1388 return err;
1389}
1390
1391static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1392 u64 in_param, u64 *out_param)
1393{
1394 u32 xrcdn;
1395 int err;
1396
1397 if (op != RES_OP_RESERVE)
1398 return -EINVAL;
1399
1400 err = __mlx4_xrcd_alloc(dev, &xrcdn);
1401 if (err)
1402 return err;
1403
1404 err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
1405 if (err)
1406 __mlx4_xrcd_free(dev, xrcdn);
1407 else
1408 set_param_l(out_param, xrcdn);
1409
1410 return err;
1411}
1412
1413int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
1414 struct mlx4_vhcr *vhcr,
1415 struct mlx4_cmd_mailbox *inbox,
1416 struct mlx4_cmd_mailbox *outbox,
1417 struct mlx4_cmd_info *cmd)
1418{
1419 int err;
1420 int alop = vhcr->op_modifier;
1421
1422 switch (vhcr->in_modifier) {
1423 case RES_QP:
1424 err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
1425 vhcr->in_param, &vhcr->out_param);
1426 break;
1427
1428 case RES_MTT:
1429 err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
1430 vhcr->in_param, &vhcr->out_param);
1431 break;
1432
1433 case RES_MPT:
1434 err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
1435 vhcr->in_param, &vhcr->out_param);
1436 break;
1437
1438 case RES_CQ:
1439 err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
1440 vhcr->in_param, &vhcr->out_param);
1441 break;
1442
1443 case RES_SRQ:
1444 err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
1445 vhcr->in_param, &vhcr->out_param);
1446 break;
1447
1448 case RES_MAC:
1449 err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
1450 vhcr->in_param, &vhcr->out_param);
1451 break;
1452
1453 case RES_VLAN:
1454 err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
1455 vhcr->in_param, &vhcr->out_param);
1456 break;
1457
1458 case RES_COUNTER:
1459 err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
1460 vhcr->in_param, &vhcr->out_param);
1461 break;
1462
1463 case RES_XRCD:
1464 err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
1465 vhcr->in_param, &vhcr->out_param);
1466 break;
1467
1468 default:
1469 err = -EINVAL;
1470 break;
1471 }
1472
1473 return err;
1474}
1475
1476static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1477 u64 in_param)
1478{
1479 int err;
1480 int count;
1481 int base;
1482 int qpn;
1483
1484 switch (op) {
1485 case RES_OP_RESERVE:
1486 base = get_param_l(&in_param) & 0x7fffff;
1487 count = get_param_h(&in_param);
1488 err = rem_res_range(dev, slave, base, count, RES_QP, 0);
1489 if (err)
1490 break;
1491 __mlx4_qp_release_range(dev, base, count);
1492 break;
1493 case RES_OP_MAP_ICM:
1494 qpn = get_param_l(&in_param) & 0x7fffff;
1495 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
1496 NULL, 0);
1497 if (err)
1498 return err;
1499
1500 if (!valid_reserved(dev, slave, qpn))
1501 __mlx4_qp_free_icm(dev, qpn);
1502
1503 res_end_move(dev, slave, RES_QP, qpn);
1504
1505 if (valid_reserved(dev, slave, qpn))
1506 err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
1507 break;
1508 default:
1509 err = -EINVAL;
1510 break;
1511 }
1512 return err;
1513}
1514
1515static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1516 u64 in_param, u64 *out_param)
1517{
1518 int err = -EINVAL;
1519 int base;
1520 int order;
1521
1522 if (op != RES_OP_RESERVE_AND_MAP)
1523 return err;
1524
1525 base = get_param_l(&in_param);
1526 order = get_param_h(&in_param);
1527 err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
1528 if (!err)
1529 __mlx4_free_mtt_range(dev, base, order);
1530 return err;
1531}
1532
1533static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1534 u64 in_param)
1535{
1536 int err = -EINVAL;
1537 int index;
1538 int id;
1539 struct res_mpt *mpt;
1540
1541 switch (op) {
1542 case RES_OP_RESERVE:
1543 index = get_param_l(&in_param);
1544 id = index & mpt_mask(dev);
1545 err = get_res(dev, slave, id, RES_MPT, &mpt);
1546 if (err)
1547 break;
1548 index = mpt->key;
1549 put_res(dev, slave, id, RES_MPT);
1550
1551 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
1552 if (err)
1553 break;
1554 __mlx4_mr_release(dev, index);
1555 break;
1556 case RES_OP_MAP_ICM:
1557 index = get_param_l(&in_param);
1558 id = index & mpt_mask(dev);
1559 err = mr_res_start_move_to(dev, slave, id,
1560 RES_MPT_RESERVED, &mpt);
1561 if (err)
1562 return err;
1563
1564 __mlx4_mr_free_icm(dev, mpt->key);
1565 res_end_move(dev, slave, RES_MPT, id);
1566 return err;
1567 break;
1568 default:
1569 err = -EINVAL;
1570 break;
1571 }
1572 return err;
1573}
1574
1575static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1576 u64 in_param, u64 *out_param)
1577{
1578 int cqn;
1579 int err;
1580
1581 switch (op) {
1582 case RES_OP_RESERVE_AND_MAP:
1583 cqn = get_param_l(&in_param);
1584 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1585 if (err)
1586 break;
1587
1588 __mlx4_cq_free_icm(dev, cqn);
1589 break;
1590
1591 default:
1592 err = -EINVAL;
1593 break;
1594 }
1595
1596 return err;
1597}
1598
1599static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1600 u64 in_param, u64 *out_param)
1601{
1602 int srqn;
1603 int err;
1604
1605 switch (op) {
1606 case RES_OP_RESERVE_AND_MAP:
1607 srqn = get_param_l(&in_param);
1608 err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1609 if (err)
1610 break;
1611
1612 __mlx4_srq_free_icm(dev, srqn);
1613 break;
1614
1615 default:
1616 err = -EINVAL;
1617 break;
1618 }
1619
1620 return err;
1621}
1622
1623static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1624 u64 in_param, u64 *out_param)
1625{
1626 int port;
1627 int err = 0;
1628
1629 switch (op) {
1630 case RES_OP_RESERVE_AND_MAP:
1631 port = get_param_l(out_param);
1632 mac_del_from_slave(dev, slave, in_param, port);
1633 __mlx4_unregister_mac(dev, port, in_param);
1634 break;
1635 default:
1636 err = -EINVAL;
1637 break;
1638 }
1639
1640 return err;
1641
1642}
1643
1644static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1645 u64 in_param, u64 *out_param)
1646{
1647 return 0;
1648}
1649
1650static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1651 u64 in_param, u64 *out_param)
1652{
1653 int index;
1654 int err;
1655
1656 if (op != RES_OP_RESERVE)
1657 return -EINVAL;
1658
1659 index = get_param_l(&in_param);
1660 err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
1661 if (err)
1662 return err;
1663
1664 __mlx4_counter_free(dev, index);
1665
1666 return err;
1667}
1668
1669static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1670 u64 in_param, u64 *out_param)
1671{
1672 int xrcdn;
1673 int err;
1674
1675 if (op != RES_OP_RESERVE)
1676 return -EINVAL;
1677
1678 xrcdn = get_param_l(&in_param);
1679 err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
1680 if (err)
1681 return err;
1682
1683 __mlx4_xrcd_free(dev, xrcdn);
1684
1685 return err;
1686}
1687
1688int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
1689 struct mlx4_vhcr *vhcr,
1690 struct mlx4_cmd_mailbox *inbox,
1691 struct mlx4_cmd_mailbox *outbox,
1692 struct mlx4_cmd_info *cmd)
1693{
1694 int err = -EINVAL;
1695 int alop = vhcr->op_modifier;
1696
1697 switch (vhcr->in_modifier) {
1698 case RES_QP:
1699 err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
1700 vhcr->in_param);
1701 break;
1702
1703 case RES_MTT:
1704 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
1705 vhcr->in_param, &vhcr->out_param);
1706 break;
1707
1708 case RES_MPT:
1709 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
1710 vhcr->in_param);
1711 break;
1712
1713 case RES_CQ:
1714 err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
1715 vhcr->in_param, &vhcr->out_param);
1716 break;
1717
1718 case RES_SRQ:
1719 err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
1720 vhcr->in_param, &vhcr->out_param);
1721 break;
1722
1723 case RES_MAC:
1724 err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
1725 vhcr->in_param, &vhcr->out_param);
1726 break;
1727
1728 case RES_VLAN:
1729 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
1730 vhcr->in_param, &vhcr->out_param);
1731 break;
1732
1733 case RES_COUNTER:
1734 err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
1735 vhcr->in_param, &vhcr->out_param);
1736 break;
1737
1738 case RES_XRCD:
1739 err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
1740 vhcr->in_param, &vhcr->out_param);
1741
1742 default:
1743 break;
1744 }
1745 return err;
1746}
1747
1748/* ugly but other choices are uglier */
1749static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
1750{
1751 return (be32_to_cpu(mpt->flags) >> 9) & 1;
1752}
1753
 1754static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
 1755{
 1756 return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
1757}
1758
1759static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
1760{
1761 return be32_to_cpu(mpt->mtt_sz);
1762}
1763
 1764static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
1765{
1766 return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
1767}
1768
 1769static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
1770{
1771 return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
1772}
1773
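/* Number of MTT pages a QP needs: sum the SQ and RQ buffer sizes (the RQ is
 * absent for SRQ/RSS/XRC QPs), add the page offset and round the page count
 * up to a power of two.
 */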
1774static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
1775{
1776 int page_shift = (qpc->log_page_size & 0x3f) + 12;
1777 int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
1778 int log_sq_sride = qpc->sq_size_stride & 7;
1779 int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
1780 int log_rq_stride = qpc->rq_size_stride & 7;
1781 int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
1782 int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
1783 int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
1784 int sq_size;
1785 int rq_size;
1786 int total_pages;
1787 int total_mem;
1788 int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
1789
1790 sq_size = 1 << (log_sq_size + log_sq_sride + 4);
1791 rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
1792 total_mem = sq_size + rq_size;
1793 total_pages =
1794 roundup_pow_of_two((total_mem + (page_offset << 6)) >>
1795 page_shift);
1796
1797 return total_pages;
1798}
1799
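/* Check that the MTT window [start, start + size) requested by the slave
 * lies entirely within the tracked MTT segment.
 */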
1800static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
1801 int size, struct res_mtt *mtt)
1802{
1803 int res_start = mtt->com.res_id;
1804 int res_size = (1 << mtt->order);
1805
1806 if (start < res_start || start + size > res_start + res_size)
1807 return -EPERM;
1808 return 0;
1809}
1810
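/* SW2HW_MPT wrapper: move the MPT to hardware ownership; for non-physical
 * MRs, validate the referenced MTT range and take a reference on it.
 */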
1811int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
1812 struct mlx4_vhcr *vhcr,
1813 struct mlx4_cmd_mailbox *inbox,
1814 struct mlx4_cmd_mailbox *outbox,
1815 struct mlx4_cmd_info *cmd)
1816{
1817 int err;
1818 int index = vhcr->in_modifier;
1819 struct res_mtt *mtt;
1820 struct res_mpt *mpt;
 1821 int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
1822 int phys;
1823 int id;
1824
1825 id = index & mpt_mask(dev);
1826 err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
1827 if (err)
1828 return err;
1829
1830 phys = mr_phys_mpt(inbox->buf);
1831 if (!phys) {
 1832 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
1833 if (err)
1834 goto ex_abort;
1835
1836 err = check_mtt_range(dev, slave, mtt_base,
1837 mr_get_mtt_size(inbox->buf), mtt);
1838 if (err)
1839 goto ex_put;
1840
1841 mpt->mtt = mtt;
1842 }
1843
1844 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1845 if (err)
1846 goto ex_put;
1847
1848 if (!phys) {
1849 atomic_inc(&mtt->ref_count);
1850 put_res(dev, slave, mtt->com.res_id, RES_MTT);
1851 }
1852
1853 res_end_move(dev, slave, RES_MPT, id);
1854 return 0;
1855
1856ex_put:
1857 if (!phys)
1858 put_res(dev, slave, mtt->com.res_id, RES_MTT);
1859ex_abort:
1860 res_abort_move(dev, slave, RES_MPT, id);
1861
1862 return err;
1863}
1864
1865int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
1866 struct mlx4_vhcr *vhcr,
1867 struct mlx4_cmd_mailbox *inbox,
1868 struct mlx4_cmd_mailbox *outbox,
1869 struct mlx4_cmd_info *cmd)
1870{
1871 int err;
1872 int index = vhcr->in_modifier;
1873 struct res_mpt *mpt;
1874 int id;
1875
1876 id = index & mpt_mask(dev);
1877 err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
1878 if (err)
1879 return err;
1880
1881 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1882 if (err)
1883 goto ex_abort;
1884
1885 if (mpt->mtt)
1886 atomic_dec(&mpt->mtt->ref_count);
1887
1888 res_end_move(dev, slave, RES_MPT, id);
1889 return 0;
1890
1891ex_abort:
1892 res_abort_move(dev, slave, RES_MPT, id);
1893
1894 return err;
1895}
1896
1897int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
1898 struct mlx4_vhcr *vhcr,
1899 struct mlx4_cmd_mailbox *inbox,
1900 struct mlx4_cmd_mailbox *outbox,
1901 struct mlx4_cmd_info *cmd)
1902{
1903 int err;
1904 int index = vhcr->in_modifier;
1905 struct res_mpt *mpt;
1906 int id;
1907
1908 id = index & mpt_mask(dev);
1909 err = get_res(dev, slave, id, RES_MPT, &mpt);
1910 if (err)
1911 return err;
1912
1913 if (mpt->com.from_state != RES_MPT_HW) {
1914 err = -EBUSY;
1915 goto out;
1916 }
1917
1918 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1919
1920out:
1921 put_res(dev, slave, id, RES_MPT);
1922 return err;
1923}
1924
1925static int qp_get_rcqn(struct mlx4_qp_context *qpc)
1926{
1927 return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
1928}
1929
1930static int qp_get_scqn(struct mlx4_qp_context *qpc)
1931{
1932 return be32_to_cpu(qpc->cqn_send) & 0xffffff;
1933}
1934
1935static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
1936{
1937 return be32_to_cpu(qpc->srqn) & 0x1ffffff;
1938}
1939
1940int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
1941 struct mlx4_vhcr *vhcr,
1942 struct mlx4_cmd_mailbox *inbox,
1943 struct mlx4_cmd_mailbox *outbox,
1944 struct mlx4_cmd_info *cmd)
1945{
1946 int err;
1947 int qpn = vhcr->in_modifier & 0x7fffff;
1948 struct res_mtt *mtt;
1949 struct res_qp *qp;
1950 struct mlx4_qp_context *qpc = inbox->buf + 8;
 1951 int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
1952 int mtt_size = qp_get_mtt_size(qpc);
1953 struct res_cq *rcq;
1954 struct res_cq *scq;
1955 int rcqn = qp_get_rcqn(qpc);
1956 int scqn = qp_get_scqn(qpc);
1957 u32 srqn = qp_get_srqn(qpc) & 0xffffff;
1958 int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
1959 struct res_srq *srq;
1960 int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
1961
1962 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
1963 if (err)
1964 return err;
1965 qp->local_qpn = local_qpn;
1966
 1967 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
1968 if (err)
1969 goto ex_abort;
1970
1971 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
1972 if (err)
1973 goto ex_put_mtt;
1974
1975 err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
1976 if (err)
1977 goto ex_put_mtt;
1978
1979 if (scqn != rcqn) {
1980 err = get_res(dev, slave, scqn, RES_CQ, &scq);
1981 if (err)
1982 goto ex_put_rcq;
1983 } else
1984 scq = rcq;
1985
1986 if (use_srq) {
1987 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
1988 if (err)
1989 goto ex_put_scq;
1990 }
1991
1992 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1993 if (err)
1994 goto ex_put_srq;
1995 atomic_inc(&mtt->ref_count);
1996 qp->mtt = mtt;
1997 atomic_inc(&rcq->ref_count);
1998 qp->rcq = rcq;
1999 atomic_inc(&scq->ref_count);
2000 qp->scq = scq;
2001
2002 if (scqn != rcqn)
2003 put_res(dev, slave, scqn, RES_CQ);
2004
2005 if (use_srq) {
2006 atomic_inc(&srq->ref_count);
2007 put_res(dev, slave, srqn, RES_SRQ);
2008 qp->srq = srq;
2009 }
2010 put_res(dev, slave, rcqn, RES_CQ);
 2011 put_res(dev, slave, mtt_base, RES_MTT);
2012 res_end_move(dev, slave, RES_QP, qpn);
2013
2014 return 0;
2015
2016ex_put_srq:
2017 if (use_srq)
2018 put_res(dev, slave, srqn, RES_SRQ);
2019ex_put_scq:
2020 if (scqn != rcqn)
2021 put_res(dev, slave, scqn, RES_CQ);
2022ex_put_rcq:
2023 put_res(dev, slave, rcqn, RES_CQ);
2024ex_put_mtt:
 2025 put_res(dev, slave, mtt_base, RES_MTT);
2026ex_abort:
2027 res_abort_move(dev, slave, RES_QP, qpn);
2028
2029 return err;
2030}
2031
 2032static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
2033{
2034 return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
2035}
2036
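/* Number of MTT pages backing an EQ: EQEs are 32 bytes, so the queue takes
 * 2^(log_eq_size + 5) bytes, divided into pages (at least one).
 */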
2037static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
2038{
2039 int log_eq_size = eqc->log_eq_size & 0x1f;
2040 int page_shift = (eqc->log_page_size & 0x3f) + 12;
2041
2042 if (log_eq_size + 5 < page_shift)
2043 return 1;
2044
2045 return 1 << (log_eq_size + 5 - page_shift);
2046}
2047
 2048static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
2049{
2050 return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
2051}
2052
2053static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
2054{
2055 int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
2056 int page_shift = (cqc->log_page_size & 0x3f) + 12;
2057
2058 if (log_cq_size + 5 < page_shift)
2059 return 1;
2060
2061 return 1 << (log_cq_size + 5 - page_shift);
2062}
2063
2064int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2065 struct mlx4_vhcr *vhcr,
2066 struct mlx4_cmd_mailbox *inbox,
2067 struct mlx4_cmd_mailbox *outbox,
2068 struct mlx4_cmd_info *cmd)
2069{
2070 int err;
2071 int eqn = vhcr->in_modifier;
2072 int res_id = (slave << 8) | eqn;
2073 struct mlx4_eq_context *eqc = inbox->buf;
 2074 int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
2075 int mtt_size = eq_get_mtt_size(eqc);
2076 struct res_eq *eq;
2077 struct res_mtt *mtt;
2078
2079 err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2080 if (err)
2081 return err;
2082 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
2083 if (err)
2084 goto out_add;
2085
 2086 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2087 if (err)
2088 goto out_move;
2089
2090 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2091 if (err)
2092 goto out_put;
2093
2094 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2095 if (err)
2096 goto out_put;
2097
2098 atomic_inc(&mtt->ref_count);
2099 eq->mtt = mtt;
2100 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2101 res_end_move(dev, slave, RES_EQ, res_id);
2102 return 0;
2103
2104out_put:
2105 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2106out_move:
2107 res_abort_move(dev, slave, RES_EQ, res_id);
2108out_add:
2109 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2110 return err;
2111}
2112
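/* Find the slave's tracked MTT segment that covers [start, start + len) and
 * mark it busy; the caller releases it with put_res().
 */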
2113static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
2114 int len, struct res_mtt **res)
2115{
2116 struct mlx4_priv *priv = mlx4_priv(dev);
2117 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2118 struct res_mtt *mtt;
2119 int err = -EINVAL;
2120
2121 spin_lock_irq(mlx4_tlock(dev));
2122 list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
2123 com.list) {
2124 if (!check_mtt_range(dev, slave, start, len, mtt)) {
2125 *res = mtt;
2126 mtt->com.from_state = mtt->com.state;
2127 mtt->com.state = RES_MTT_BUSY;
2128 err = 0;
2129 break;
2130 }
2131 }
2132 spin_unlock_irq(mlx4_tlock(dev));
2133
2134 return err;
2135}
2136
2137int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
2138 struct mlx4_vhcr *vhcr,
2139 struct mlx4_cmd_mailbox *inbox,
2140 struct mlx4_cmd_mailbox *outbox,
2141 struct mlx4_cmd_info *cmd)
2142{
2143 struct mlx4_mtt mtt;
2144 __be64 *page_list = inbox->buf;
2145 u64 *pg_list = (u64 *)page_list;
2146 int i;
2147 struct res_mtt *rmtt = NULL;
2148 int start = be64_to_cpu(page_list[0]);
2149 int npages = vhcr->in_modifier;
2150 int err;
2151
2152 err = get_containing_mtt(dev, slave, start, npages, &rmtt);
2153 if (err)
2154 return err;
2155
2156 /* Call the SW implementation of write_mtt:
2157 * - Prepare a dummy mtt struct
 2158 * - Translate inbox contents to simple addresses in host endianness */
2159 mtt.offset = 0; /* TBD this is broken but I don't handle it since
2160 we don't really use it */
2161 mtt.order = 0;
2162 mtt.page_shift = 0;
2163 for (i = 0; i < npages; ++i)
2164 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
2165
2166 err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
2167 ((u64 *)page_list + 2));
2168
2169 if (rmtt)
2170 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
2171
2172 return err;
2173}
2174
2175int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2176 struct mlx4_vhcr *vhcr,
2177 struct mlx4_cmd_mailbox *inbox,
2178 struct mlx4_cmd_mailbox *outbox,
2179 struct mlx4_cmd_info *cmd)
2180{
2181 int eqn = vhcr->in_modifier;
2182 int res_id = eqn | (slave << 8);
2183 struct res_eq *eq;
2184 int err;
2185
2186 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
2187 if (err)
2188 return err;
2189
2190 err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
2191 if (err)
2192 goto ex_abort;
2193
2194 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2195 if (err)
2196 goto ex_put;
2197
2198 atomic_dec(&eq->mtt->ref_count);
2199 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2200 res_end_move(dev, slave, RES_EQ, res_id);
2201 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2202
2203 return 0;
2204
2205ex_put:
2206 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2207ex_abort:
2208 res_abort_move(dev, slave, RES_EQ, res_id);
2209
2210 return err;
2211}
2212
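/* Generate an event on a slave's event EQ: look up the EQ the slave
 * registered for this event type and post the EQE with the GEN_EQE command.
 */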
2213int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
2214{
2215 struct mlx4_priv *priv = mlx4_priv(dev);
2216 struct mlx4_slave_event_eq_info *event_eq;
2217 struct mlx4_cmd_mailbox *mailbox;
2218 u32 in_modifier = 0;
2219 int err;
2220 int res_id;
2221 struct res_eq *req;
2222
2223 if (!priv->mfunc.master.slave_state)
2224 return -EINVAL;
2225
 2226 event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
2227
2228 /* Create the event only if the slave is registered */
 2229 if (event_eq->eqn < 0)
2230 return 0;
2231
2232 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2233 res_id = (slave << 8) | event_eq->eqn;
2234 err = get_res(dev, slave, res_id, RES_EQ, &req);
2235 if (err)
2236 goto unlock;
2237
2238 if (req->com.from_state != RES_EQ_HW) {
2239 err = -EINVAL;
2240 goto put;
2241 }
2242
2243 mailbox = mlx4_alloc_cmd_mailbox(dev);
2244 if (IS_ERR(mailbox)) {
2245 err = PTR_ERR(mailbox);
2246 goto put;
2247 }
2248
2249 if (eqe->type == MLX4_EVENT_TYPE_CMD) {
2250 ++event_eq->token;
2251 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
2252 }
2253
2254 memcpy(mailbox->buf, (u8 *) eqe, 28);
2255
2256 in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
2257
2258 err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
2259 MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
2260 MLX4_CMD_NATIVE);
2261
2262 put_res(dev, slave, res_id, RES_EQ);
2263 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2264 mlx4_free_cmd_mailbox(dev, mailbox);
2265 return err;
2266
2267put:
2268 put_res(dev, slave, res_id, RES_EQ);
2269
2270unlock:
2271 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2272 return err;
2273}
2274
2275int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
2276 struct mlx4_vhcr *vhcr,
2277 struct mlx4_cmd_mailbox *inbox,
2278 struct mlx4_cmd_mailbox *outbox,
2279 struct mlx4_cmd_info *cmd)
2280{
2281 int eqn = vhcr->in_modifier;
2282 int res_id = eqn | (slave << 8);
2283 struct res_eq *eq;
2284 int err;
2285
2286 err = get_res(dev, slave, res_id, RES_EQ, &eq);
2287 if (err)
2288 return err;
2289
2290 if (eq->com.from_state != RES_EQ_HW) {
2291 err = -EINVAL;
2292 goto ex_put;
2293 }
2294
2295 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2296
2297ex_put:
2298 put_res(dev, slave, res_id, RES_EQ);
2299 return err;
2300}
2301
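/*
 * SW2HW_CQ: before handing a slave's CQ to the HW, verify that the MTT
 * range referenced by the CQ context really belongs to that slave and is
 * large enough for the CQE buffer, then take a reference on the MTT so
 * it cannot be freed while the CQ is in HW ownership.
 */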
2302int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2303 struct mlx4_vhcr *vhcr,
2304 struct mlx4_cmd_mailbox *inbox,
2305 struct mlx4_cmd_mailbox *outbox,
2306 struct mlx4_cmd_info *cmd)
2307{
2308 int err;
2309 int cqn = vhcr->in_modifier;
2310 struct mlx4_cq_context *cqc = inbox->buf;
2b8fb286 2311 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2312 struct res_cq *cq;
2313 struct res_mtt *mtt;
2314
2315 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
2316 if (err)
2317 return err;
2b8fb286 2318 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2319 if (err)
2320 goto out_move;
2321 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2322 if (err)
2323 goto out_put;
2324 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2325 if (err)
2326 goto out_put;
2327 atomic_inc(&mtt->ref_count);
2328 cq->mtt = mtt;
2329 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2330 res_end_move(dev, slave, RES_CQ, cqn);
2331 return 0;
2332
2333out_put:
2334 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2335out_move:
2336 res_abort_move(dev, slave, RES_CQ, cqn);
2337 return err;
2338}
2339
2340int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2341 struct mlx4_vhcr *vhcr,
2342 struct mlx4_cmd_mailbox *inbox,
2343 struct mlx4_cmd_mailbox *outbox,
2344 struct mlx4_cmd_info *cmd)
2345{
2346 int err;
2347 int cqn = vhcr->in_modifier;
2348 struct res_cq *cq;
2349
2350 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
2351 if (err)
2352 return err;
2353 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2354 if (err)
2355 goto out_move;
2356 atomic_dec(&cq->mtt->ref_count);
2357 res_end_move(dev, slave, RES_CQ, cqn);
2358 return 0;
2359
2360out_move:
2361 res_abort_move(dev, slave, RES_CQ, cqn);
2362 return err;
2363}
2364
2365int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2366 struct mlx4_vhcr *vhcr,
2367 struct mlx4_cmd_mailbox *inbox,
2368 struct mlx4_cmd_mailbox *outbox,
2369 struct mlx4_cmd_info *cmd)
2370{
2371 int cqn = vhcr->in_modifier;
2372 struct res_cq *cq;
2373 int err;
2374
2375 err = get_res(dev, slave, cqn, RES_CQ, &cq);
2376 if (err)
2377 return err;
2378
2379 if (cq->com.from_state != RES_CQ_HW)
2380 goto ex_put;
2381
2382 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2383ex_put:
2384 put_res(dev, slave, cqn, RES_CQ);
2385
2386 return err;
2387}
2388
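/*
 * MODIFY_CQ with op_modifier 0 is a CQ resize.  Validate that the CQ's
 * currently tracked MTT is the one being replaced, check that the new
 * MTT range fits the new CQE buffer, and on success move the reference
 * count from the old MTT to the new one.
 */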
2389static int handle_resize(struct mlx4_dev *dev, int slave,
2390 struct mlx4_vhcr *vhcr,
2391 struct mlx4_cmd_mailbox *inbox,
2392 struct mlx4_cmd_mailbox *outbox,
2393 struct mlx4_cmd_info *cmd,
2394 struct res_cq *cq)
2395{
2396 int err;
2397 struct res_mtt *orig_mtt;
2398 struct res_mtt *mtt;
2399 struct mlx4_cq_context *cqc = inbox->buf;
2b8fb286 2400 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2401
2402 err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
2403 if (err)
2404 return err;
2405
2406 if (orig_mtt != cq->mtt) {
2407 err = -EINVAL;
2408 goto ex_put;
2409 }
2410
2b8fb286 2411 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2412 if (err)
2413 goto ex_put;
2414
2415 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2416 if (err)
2417 goto ex_put1;
2418 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2419 if (err)
2420 goto ex_put1;
2421 atomic_dec(&orig_mtt->ref_count);
2422 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2423 atomic_inc(&mtt->ref_count);
2424 cq->mtt = mtt;
2425 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2426 return 0;
2427
2428ex_put1:
2429 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2430ex_put:
2431 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2432
2433 return err;
2434
2435}
2436
2437int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2438 struct mlx4_vhcr *vhcr,
2439 struct mlx4_cmd_mailbox *inbox,
2440 struct mlx4_cmd_mailbox *outbox,
2441 struct mlx4_cmd_info *cmd)
2442{
2443 int cqn = vhcr->in_modifier;
2444 struct res_cq *cq;
2445 int err;
2446
2447 err = get_res(dev, slave, cqn, RES_CQ, &cq);
2448 if (err)
2449 return err;
2450
2451 if (cq->com.from_state != RES_CQ_HW)
2452 goto ex_put;
2453
2454 if (vhcr->op_modifier == 0) {
2455 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
dcf353b1 2456 goto ex_put;
2457 }
2458
2459 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2460ex_put:
2461 put_res(dev, slave, cqn, RES_CQ);
2462
2463 return err;
2464}
2465
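/*
 * Number of MTT entries needed for an SRQ buffer, derived from its
 * context: 2^log_srq_size WQEs of 2^(log_rq_stride + 4) bytes each
 * (the +4 presumably reflects the 16-byte stride unit used by the HW),
 * divided by the page size 2^page_shift, with a minimum of one entry.
 * E.g. log_srq_size = 10, log_rq_stride = 2 and page_shift = 12 give
 * 1 << (10 + 2 + 4 - 12) = 16 entries.
 */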
2466static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
2467{
2468 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
2469 int log_rq_stride = srqc->logstride & 7;
2470 int page_shift = (srqc->log_page_size & 0x3f) + 12;
2471
2472 if (log_srq_size + log_rq_stride + 4 < page_shift)
2473 return 1;
2474
2475 return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
2476}
2477
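/*
 * SW2HW_SRQ: sanity-check that the SRQ number embedded in the context
 * matches the command's input modifier, verify the slave owns a large
 * enough MTT range for the SRQ buffer, and take a reference on that MTT
 * before moving the SRQ into HW ownership.
 */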
2478int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2479 struct mlx4_vhcr *vhcr,
2480 struct mlx4_cmd_mailbox *inbox,
2481 struct mlx4_cmd_mailbox *outbox,
2482 struct mlx4_cmd_info *cmd)
2483{
2484 int err;
2485 int srqn = vhcr->in_modifier;
2486 struct res_mtt *mtt;
2487 struct res_srq *srq;
2488 struct mlx4_srq_context *srqc = inbox->buf;
2b8fb286 2489 int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
2490
2491 if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
2492 return -EINVAL;
2493
2494 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
2495 if (err)
2496 return err;
2b8fb286 2497 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2498 if (err)
2499 goto ex_abort;
2500 err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
2501 mtt);
2502 if (err)
2503 goto ex_put_mtt;
2504
2505 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2506 if (err)
2507 goto ex_put_mtt;
2508
2509 atomic_inc(&mtt->ref_count);
2510 srq->mtt = mtt;
2511 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2512 res_end_move(dev, slave, RES_SRQ, srqn);
2513 return 0;
2514
2515ex_put_mtt:
2516 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2517ex_abort:
2518 res_abort_move(dev, slave, RES_SRQ, srqn);
2519
2520 return err;
2521}
2522
2523int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2524 struct mlx4_vhcr *vhcr,
2525 struct mlx4_cmd_mailbox *inbox,
2526 struct mlx4_cmd_mailbox *outbox,
2527 struct mlx4_cmd_info *cmd)
2528{
2529 int err;
2530 int srqn = vhcr->in_modifier;
2531 struct res_srq *srq;
2532
2533 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
2534 if (err)
2535 return err;
2536 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2537 if (err)
2538 goto ex_abort;
2539 atomic_dec(&srq->mtt->ref_count);
2540 if (srq->cq)
2541 atomic_dec(&srq->cq->ref_count);
2542 res_end_move(dev, slave, RES_SRQ, srqn);
2543
2544 return 0;
2545
2546ex_abort:
2547 res_abort_move(dev, slave, RES_SRQ, srqn);
2548
2549 return err;
2550}
2551
2552int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2553 struct mlx4_vhcr *vhcr,
2554 struct mlx4_cmd_mailbox *inbox,
2555 struct mlx4_cmd_mailbox *outbox,
2556 struct mlx4_cmd_info *cmd)
2557{
2558 int err;
2559 int srqn = vhcr->in_modifier;
2560 struct res_srq *srq;
2561
2562 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2563 if (err)
2564 return err;
2565 if (srq->com.from_state != RES_SRQ_HW) {
2566 err = -EBUSY;
2567 goto out;
2568 }
2569 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2570out:
2571 put_res(dev, slave, srqn, RES_SRQ);
2572 return err;
2573}
2574
2575int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2576 struct mlx4_vhcr *vhcr,
2577 struct mlx4_cmd_mailbox *inbox,
2578 struct mlx4_cmd_mailbox *outbox,
2579 struct mlx4_cmd_info *cmd)
2580{
2581 int err;
2582 int srqn = vhcr->in_modifier;
2583 struct res_srq *srq;
2584
2585 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2586 if (err)
2587 return err;
2588
2589 if (srq->com.from_state != RES_SRQ_HW) {
2590 err = -EBUSY;
2591 goto out;
2592 }
2593
2594 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2595out:
2596 put_res(dev, slave, srqn, RES_SRQ);
2597 return err;
2598}
2599
2600int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
2601 struct mlx4_vhcr *vhcr,
2602 struct mlx4_cmd_mailbox *inbox,
2603 struct mlx4_cmd_mailbox *outbox,
2604 struct mlx4_cmd_info *cmd)
2605{
2606 int err;
2607 int qpn = vhcr->in_modifier & 0x7fffff;
2608 struct res_qp *qp;
2609
2610 err = get_res(dev, slave, qpn, RES_QP, &qp);
2611 if (err)
2612 return err;
2613 if (qp->com.from_state != RES_QP_HW) {
2614 err = -EBUSY;
2615 goto out;
2616 }
2617
2618 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2619out:
2620 put_res(dev, slave, qpn, RES_QP);
2621 return err;
2622}
2623
2624int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
2625 struct mlx4_vhcr *vhcr,
2626 struct mlx4_cmd_mailbox *inbox,
2627 struct mlx4_cmd_mailbox *outbox,
2628 struct mlx4_cmd_info *cmd)
2629{
2630 struct mlx4_qp_context *qpc = inbox->buf + 8;
2631
2632 update_ud_gid(dev, qpc, (u8)slave);
2633
2634 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2635}
2636
2637int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
2638 struct mlx4_vhcr *vhcr,
2639 struct mlx4_cmd_mailbox *inbox,
2640 struct mlx4_cmd_mailbox *outbox,
2641 struct mlx4_cmd_info *cmd)
2642{
2643 int err;
2644 int qpn = vhcr->in_modifier & 0x7fffff;
2645 struct res_qp *qp;
2646
2647 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
2648 if (err)
2649 return err;
2650 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2651 if (err)
2652 goto ex_abort;
2653
2654 atomic_dec(&qp->mtt->ref_count);
2655 atomic_dec(&qp->rcq->ref_count);
2656 atomic_dec(&qp->scq->ref_count);
2657 if (qp->srq)
2658 atomic_dec(&qp->srq->ref_count);
2659 res_end_move(dev, slave, RES_QP, qpn);
2660 return 0;
2661
2662ex_abort:
2663 res_abort_move(dev, slave, RES_QP, qpn);
2664
2665 return err;
2666}
2667
2668static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
2669 struct res_qp *rqp, u8 *gid)
2670{
2671 struct res_gid *res;
2672
2673 list_for_each_entry(res, &rqp->mcg_list, list) {
2674 if (!memcmp(res->gid, gid, 16))
2675 return res;
2676 }
2677 return NULL;
2678}
2679
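/*
 * add_mcg_res()/rem_mcg_res() keep a per-QP list of multicast GIDs the
 * slave has attached, protected by mcg_spl, so that any attachments
 * left behind can be undone in detach_qp() when the slave is cleaned up.
 */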
2680static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2681 u8 *gid, enum mlx4_protocol prot,
2682 enum mlx4_steer_type steer)
2683{
2684 struct res_gid *res;
2685 int err;
2686
2687 res = kzalloc(sizeof *res, GFP_KERNEL);
2688 if (!res)
2689 return -ENOMEM;
2690
2691 spin_lock_irq(&rqp->mcg_spl);
2692 if (find_gid(dev, slave, rqp, gid)) {
2693 kfree(res);
2694 err = -EEXIST;
2695 } else {
2696 memcpy(res->gid, gid, 16);
2697 res->prot = prot;
9f5b6c63 2698 res->steer = steer;
2699 list_add_tail(&res->list, &rqp->mcg_list);
2700 err = 0;
2701 }
2702 spin_unlock_irq(&rqp->mcg_spl);
2703
2704 return err;
2705}
2706
2707static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2708 u8 *gid, enum mlx4_protocol prot,
2709 enum mlx4_steer_type steer)
2710{
2711 struct res_gid *res;
2712 int err;
2713
2714 spin_lock_irq(&rqp->mcg_spl);
2715 res = find_gid(dev, slave, rqp, gid);
9f5b6c63 2716 if (!res || res->prot != prot || res->steer != steer)
2717 err = -EINVAL;
2718 else {
2719 list_del(&res->list);
2720 kfree(res);
2721 err = 0;
2722 }
2723 spin_unlock_irq(&rqp->mcg_spl);
2724
2725 return err;
2726}
2727
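/*
 * QP multicast attach/detach on behalf of a slave.  The GID is recorded
 * in (or removed from) the QP's tracked list before the actual
 * attach/detach, so a failed attach can be rolled back and a dead
 * slave's leftover attachments can later be detached by the master.
 */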
2728int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
2729 struct mlx4_vhcr *vhcr,
2730 struct mlx4_cmd_mailbox *inbox,
2731 struct mlx4_cmd_mailbox *outbox,
2732 struct mlx4_cmd_info *cmd)
2733{
2734 struct mlx4_qp qp; /* dummy for calling attach/detach */
2735 u8 *gid = inbox->buf;
2736 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
162344ed 2737 int err;
2738 int qpn;
2739 struct res_qp *rqp;
2740 int attach = vhcr->op_modifier;
2741 int block_loopback = vhcr->in_modifier >> 31;
2742 u8 steer_type_mask = 2;
75c6062c 2743 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
2744
2745 qpn = vhcr->in_modifier & 0xffffff;
2746 err = get_res(dev, slave, qpn, RES_QP, &rqp);
2747 if (err)
2748 return err;
2749
2750 qp.qpn = qpn;
2751 if (attach) {
9f5b6c63 2752 err = add_mcg_res(dev, slave, rqp, gid, prot, type);
2753 if (err)
2754 goto ex_put;
2755
2756 err = mlx4_qp_attach_common(dev, &qp, gid,
2757 block_loopback, prot, type);
2758 if (err)
2759 goto ex_rem;
2760 } else {
9f5b6c63 2761 err = rem_mcg_res(dev, slave, rqp, gid, prot, type);
2762 if (err)
2763 goto ex_put;
2764 err = mlx4_qp_detach_common(dev, &qp, gid, prot, type);
2765 }
2766
2767 put_res(dev, slave, qpn, RES_QP);
2768 return 0;
2769
2770ex_rem:
2771 /* ignore error return below, already in error */
162344ed 2772 (void) rem_mcg_res(dev, slave, rqp, gid, prot, type);
2773ex_put:
2774 put_res(dev, slave, qpn, RES_QP);
2775
2776 return err;
2777}
2778
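/*
 * Flow steering rules can only be managed for slaves when the device is
 * in device-managed steering mode.  The rule id returned by the firmware
 * is registered in the resource tracker so it can be detached if the
 * slave goes away; if tracking fails, the rule is detached immediately.
 */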
2779int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
2780 struct mlx4_vhcr *vhcr,
2781 struct mlx4_cmd_mailbox *inbox,
2782 struct mlx4_cmd_mailbox *outbox,
2783 struct mlx4_cmd_info *cmd)
2784{
2785 int err;
2786
2787 if (dev->caps.steering_mode !=
2788 MLX4_STEERING_MODE_DEVICE_MANAGED)
2789 return -EOPNOTSUPP;
2790
2791 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
2792 vhcr->in_modifier, 0,
2793 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
2794 MLX4_CMD_NATIVE);
2795 if (err)
2796 return err;
2797
2798 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, 0);
2799 if (err) {
2800 mlx4_err(dev, "Failed to add flow steering resources\n");
2801 /* detach rule */
2802 mlx4_cmd(dev, vhcr->out_param, 0, 0,
2803 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
2804 MLX4_CMD_NATIVE);
2805 }
2806 return err;
2807}
2808
2809int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
2810 struct mlx4_vhcr *vhcr,
2811 struct mlx4_cmd_mailbox *inbox,
2812 struct mlx4_cmd_mailbox *outbox,
2813 struct mlx4_cmd_info *cmd)
2814{
2815 int err;
2816
2817 if (dev->caps.steering_mode !=
2818 MLX4_STEERING_MODE_DEVICE_MANAGED)
2819 return -EOPNOTSUPP;
2820
2821 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
2822 if (err) {
2823 mlx4_err(dev, "Failed to remove flow steering resources\n");
2824 return err;
2825 }
2826
2827 err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
2828 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
2829 MLX4_CMD_NATIVE);
2830 return err;
2831}
2832
2833enum {
2834 BUSY_MAX_RETRIES = 10
2835};
2836
2837int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
2838 struct mlx4_vhcr *vhcr,
2839 struct mlx4_cmd_mailbox *inbox,
2840 struct mlx4_cmd_mailbox *outbox,
2841 struct mlx4_cmd_info *cmd)
2842{
2843 int err;
2844 int index = vhcr->in_modifier & 0xffff;
2845
2846 err = get_res(dev, slave, index, RES_COUNTER, NULL);
2847 if (err)
2848 return err;
2849
2850 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2851 put_res(dev, slave, index, RES_COUNTER);
2852 return err;
2853}
2854
2855static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
2856{
2857 struct res_gid *rgid;
2858 struct res_gid *tmp;
2859 struct mlx4_qp qp; /* dummy for calling attach/detach */
2860
2861 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
2862 qp.qpn = rqp->local_qpn;
2863 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
2864 rgid->steer);
2865 list_del(&rgid->list);
2866 kfree(rgid);
2867 }
2868}
2869
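/*
 * Mark every resource of the given type owned by the slave as busy and
 * flag it for removal.  Resources that are already busy (still held by
 * an in-flight command) are counted; move_all_busy() retries for up to
 * five seconds before giving up and, on the final pass, printing them.
 */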
2870static int _move_all_busy(struct mlx4_dev *dev, int slave,
2871 enum mlx4_resource type, int print)
2872{
2873 struct mlx4_priv *priv = mlx4_priv(dev);
2874 struct mlx4_resource_tracker *tracker =
2875 &priv->mfunc.master.res_tracker;
2876 struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
2877 struct res_common *r;
2878 struct res_common *tmp;
2879 int busy;
2880
2881 busy = 0;
2882 spin_lock_irq(mlx4_tlock(dev));
2883 list_for_each_entry_safe(r, tmp, rlist, list) {
2884 if (r->owner == slave) {
2885 if (!r->removing) {
2886 if (r->state == RES_ANY_BUSY) {
2887 if (print)
2888 mlx4_dbg(dev,
aa1ec3dd 2889 "%s id 0x%llx is busy\n",
2890 ResourceType(type),
2891 r->res_id);
2892 ++busy;
2893 } else {
2894 r->from_state = r->state;
2895 r->state = RES_ANY_BUSY;
2896 r->removing = 1;
2897 }
2898 }
2899 }
2900 }
2901 spin_unlock_irq(mlx4_tlock(dev));
2902
2903 return busy;
2904}
2905
2906static int move_all_busy(struct mlx4_dev *dev, int slave,
2907 enum mlx4_resource type)
2908{
2909 unsigned long begin;
2910 int busy;
2911
2912 begin = jiffies;
2913 do {
2914 busy = _move_all_busy(dev, slave, type, 0);
2915 if (time_after(jiffies, begin + 5 * HZ))
2916 break;
2917 if (busy)
2918 cond_resched();
2919 } while (busy);
2920
2921 if (busy)
2922 busy = _move_all_busy(dev, slave, type, 1);
2923
2924 return busy;
2925}
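
/*
 * The rem_slave_*() helpers below all follow the same pattern: walk the
 * slave's tracked resources of one type and, for each one it owns, step
 * the resource back from its current state to freed (e.g. HW -> MAPPED
 * -> RESERVED for QPs), issuing the corresponding reset/HW2SW commands
 * and dropping the references it held on other resources along the way.
 */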
2926static void rem_slave_qps(struct mlx4_dev *dev, int slave)
2927{
2928 struct mlx4_priv *priv = mlx4_priv(dev);
2929 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2930 struct list_head *qp_list =
2931 &tracker->slave_list[slave].res_list[RES_QP];
2932 struct res_qp *qp;
2933 struct res_qp *tmp;
2934 int state;
2935 u64 in_param;
2936 int qpn;
2937 int err;
2938
2939 err = move_all_busy(dev, slave, RES_QP);
2940 if (err)
2941 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy "
2942 "for slave %d\n", slave);
2943
2944 spin_lock_irq(mlx4_tlock(dev));
2945 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
2946 spin_unlock_irq(mlx4_tlock(dev));
2947 if (qp->com.owner == slave) {
2948 qpn = qp->com.res_id;
2949 detach_qp(dev, slave, qp);
2950 state = qp->com.from_state;
2951 while (state != 0) {
2952 switch (state) {
2953 case RES_QP_RESERVED:
2954 spin_lock_irq(mlx4_tlock(dev));
2955 rb_erase(&qp->com.node,
2956 &tracker->res_tree[RES_QP]);
2957 list_del(&qp->com.list);
2958 spin_unlock_irq(mlx4_tlock(dev));
2959 kfree(qp);
2960 state = 0;
2961 break;
2962 case RES_QP_MAPPED:
2963 if (!valid_reserved(dev, slave, qpn))
2964 __mlx4_qp_free_icm(dev, qpn);
2965 state = RES_QP_RESERVED;
2966 break;
2967 case RES_QP_HW:
2968 in_param = slave;
2969 err = mlx4_cmd(dev, in_param,
2970 qp->local_qpn, 2,
2971 MLX4_CMD_2RST_QP,
2972 MLX4_CMD_TIME_CLASS_A,
2973 MLX4_CMD_NATIVE);
2974 if (err)
2975 mlx4_dbg(dev, "rem_slave_qps: failed"
2976 " to move slave %d qpn %d to"
2977 " reset\n", slave,
2978 qp->local_qpn);
2979 atomic_dec(&qp->rcq->ref_count);
2980 atomic_dec(&qp->scq->ref_count);
2981 atomic_dec(&qp->mtt->ref_count);
2982 if (qp->srq)
2983 atomic_dec(&qp->srq->ref_count);
2984 state = RES_QP_MAPPED;
2985 break;
2986 default:
2987 state = 0;
2988 }
2989 }
2990 }
2991 spin_lock_irq(mlx4_tlock(dev));
2992 }
2993 spin_unlock_irq(mlx4_tlock(dev));
2994}
2995
2996static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
2997{
2998 struct mlx4_priv *priv = mlx4_priv(dev);
2999 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3000 struct list_head *srq_list =
3001 &tracker->slave_list[slave].res_list[RES_SRQ];
3002 struct res_srq *srq;
3003 struct res_srq *tmp;
3004 int state;
3005 u64 in_param;
3006 LIST_HEAD(tlist);
3007 int srqn;
3008 int err;
3009
3010 err = move_all_busy(dev, slave, RES_SRQ);
3011 if (err)
3012 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
3013 "busy for slave %d\n", slave);
3014
3015 spin_lock_irq(mlx4_tlock(dev));
3016 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
3017 spin_unlock_irq(mlx4_tlock(dev));
3018 if (srq->com.owner == slave) {
3019 srqn = srq->com.res_id;
3020 state = srq->com.from_state;
3021 while (state != 0) {
3022 switch (state) {
3023 case RES_SRQ_ALLOCATED:
3024 __mlx4_srq_free_icm(dev, srqn);
3025 spin_lock_irq(mlx4_tlock(dev));
3026 rb_erase(&srq->com.node,
3027 &tracker->res_tree[RES_SRQ]);
3028 list_del(&srq->com.list);
3029 spin_unlock_irq(mlx4_tlock(dev));
3030 kfree(srq);
3031 state = 0;
3032 break;
3033
3034 case RES_SRQ_HW:
3035 in_param = slave;
3036 err = mlx4_cmd(dev, in_param, srqn, 1,
3037 MLX4_CMD_HW2SW_SRQ,
3038 MLX4_CMD_TIME_CLASS_A,
3039 MLX4_CMD_NATIVE);
3040 if (err)
3041 mlx4_dbg(dev, "rem_slave_srqs: failed"
3042 " to move slave %d srq %d to"
3043 " SW ownership\n",
3044 slave, srqn);
3045
3046 atomic_dec(&srq->mtt->ref_count);
3047 if (srq->cq)
3048 atomic_dec(&srq->cq->ref_count);
3049 state = RES_SRQ_ALLOCATED;
3050 break;
3051
3052 default:
3053 state = 0;
3054 }
3055 }
3056 }
3057 spin_lock_irq(mlx4_tlock(dev));
3058 }
3059 spin_unlock_irq(mlx4_tlock(dev));
3060}
3061
3062static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
3063{
3064 struct mlx4_priv *priv = mlx4_priv(dev);
3065 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3066 struct list_head *cq_list =
3067 &tracker->slave_list[slave].res_list[RES_CQ];
3068 struct res_cq *cq;
3069 struct res_cq *tmp;
3070 int state;
3071 u64 in_param;
3072 LIST_HEAD(tlist);
3073 int cqn;
3074 int err;
3075
3076 err = move_all_busy(dev, slave, RES_CQ);
3077 if (err)
3078 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
3079 "busy for slave %d\n", slave);
3080
3081 spin_lock_irq(mlx4_tlock(dev));
3082 list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
3083 spin_unlock_irq(mlx4_tlock(dev));
3084 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
3085 cqn = cq->com.res_id;
3086 state = cq->com.from_state;
3087 while (state != 0) {
3088 switch (state) {
3089 case RES_CQ_ALLOCATED:
3090 __mlx4_cq_free_icm(dev, cqn);
3091 spin_lock_irq(mlx4_tlock(dev));
3092 rb_erase(&cq->com.node,
3093 &tracker->res_tree[RES_CQ]);
3094 list_del(&cq->com.list);
3095 spin_unlock_irq(mlx4_tlock(dev));
3096 kfree(cq);
3097 state = 0;
3098 break;
3099
3100 case RES_CQ_HW:
3101 in_param = slave;
3102 err = mlx4_cmd(dev, in_param, cqn, 1,
3103 MLX4_CMD_HW2SW_CQ,
3104 MLX4_CMD_TIME_CLASS_A,
3105 MLX4_CMD_NATIVE);
3106 if (err)
3107 mlx4_dbg(dev, "rem_slave_cqs: failed"
3108 " to move slave %d cq %d to"
3109 " SW ownership\n",
3110 slave, cqn);
3111 atomic_dec(&cq->mtt->ref_count);
3112 state = RES_CQ_ALLOCATED;
3113 break;
3114
3115 default:
3116 state = 0;
3117 }
3118 }
3119 }
3120 spin_lock_irq(mlx4_tlock(dev));
3121 }
3122 spin_unlock_irq(mlx4_tlock(dev));
3123}
3124
3125static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
3126{
3127 struct mlx4_priv *priv = mlx4_priv(dev);
3128 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3129 struct list_head *mpt_list =
3130 &tracker->slave_list[slave].res_list[RES_MPT];
3131 struct res_mpt *mpt;
3132 struct res_mpt *tmp;
3133 int state;
3134 u64 in_param;
3135 LIST_HEAD(tlist);
3136 int mptn;
3137 int err;
3138
3139 err = move_all_busy(dev, slave, RES_MPT);
3140 if (err)
3141 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
3142 "busy for slave %d\n", slave);
3143
3144 spin_lock_irq(mlx4_tlock(dev));
3145 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
3146 spin_unlock_irq(mlx4_tlock(dev));
3147 if (mpt->com.owner == slave) {
3148 mptn = mpt->com.res_id;
3149 state = mpt->com.from_state;
3150 while (state != 0) {
3151 switch (state) {
3152 case RES_MPT_RESERVED:
3153 __mlx4_mr_release(dev, mpt->key);
3154 spin_lock_irq(mlx4_tlock(dev));
3155 rb_erase(&mpt->com.node,
3156 &tracker->res_tree[RES_MPT]);
3157 list_del(&mpt->com.list);
3158 spin_unlock_irq(mlx4_tlock(dev));
3159 kfree(mpt);
3160 state = 0;
3161 break;
3162
3163 case RES_MPT_MAPPED:
3164 __mlx4_mr_free_icm(dev, mpt->key);
3165 state = RES_MPT_RESERVED;
3166 break;
3167
3168 case RES_MPT_HW:
3169 in_param = slave;
3170 err = mlx4_cmd(dev, in_param, mptn, 0,
3171 MLX4_CMD_HW2SW_MPT,
3172 MLX4_CMD_TIME_CLASS_A,
3173 MLX4_CMD_NATIVE);
3174 if (err)
3175 mlx4_dbg(dev, "rem_slave_mrs: failed"
3176 " to move slave %d mpt %d to"
3177 " SW ownership\n",
3178 slave, mptn);
3179 if (mpt->mtt)
3180 atomic_dec(&mpt->mtt->ref_count);
3181 state = RES_MPT_MAPPED;
3182 break;
3183 default:
3184 state = 0;
3185 }
3186 }
3187 }
3188 spin_lock_irq(mlx4_tlock(dev));
3189 }
3190 spin_unlock_irq(mlx4_tlock(dev));
3191}
3192
3193static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
3194{
3195 struct mlx4_priv *priv = mlx4_priv(dev);
3196 struct mlx4_resource_tracker *tracker =
3197 &priv->mfunc.master.res_tracker;
3198 struct list_head *mtt_list =
3199 &tracker->slave_list[slave].res_list[RES_MTT];
3200 struct res_mtt *mtt;
3201 struct res_mtt *tmp;
3202 int state;
3203 LIST_HEAD(tlist);
3204 int base;
3205 int err;
3206
3207 err = move_all_busy(dev, slave, RES_MTT);
3208 if (err)
3209 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
3210 "busy for slave %d\n", slave);
3211
3212 spin_lock_irq(mlx4_tlock(dev));
3213 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
3214 spin_unlock_irq(mlx4_tlock(dev));
3215 if (mtt->com.owner == slave) {
3216 base = mtt->com.res_id;
3217 state = mtt->com.from_state;
3218 while (state != 0) {
3219 switch (state) {
3220 case RES_MTT_ALLOCATED:
3221 __mlx4_free_mtt_range(dev, base,
3222 mtt->order);
3223 spin_lock_irq(mlx4_tlock(dev));
3224 rb_erase(&mtt->com.node,
3225 &tracker->res_tree[RES_MTT]);
3226 list_del(&mtt->com.list);
3227 spin_unlock_irq(mlx4_tlock(dev));
3228 kfree(mtt);
3229 state = 0;
3230 break;
3231
3232 default:
3233 state = 0;
3234 }
3235 }
3236 }
3237 spin_lock_irq(mlx4_tlock(dev));
3238 }
3239 spin_unlock_irq(mlx4_tlock(dev));
3240}
3241
3242static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
3243{
3244 struct mlx4_priv *priv = mlx4_priv(dev);
3245 struct mlx4_resource_tracker *tracker =
3246 &priv->mfunc.master.res_tracker;
3247 struct list_head *fs_rule_list =
3248 &tracker->slave_list[slave].res_list[RES_FS_RULE];
3249 struct res_fs_rule *fs_rule;
3250 struct res_fs_rule *tmp;
3251 int state;
3252 u64 base;
3253 int err;
3254
3255 err = move_all_busy(dev, slave, RES_FS_RULE);
3256 if (err)
3257 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
3258 slave);
3259
3260 spin_lock_irq(mlx4_tlock(dev));
3261 list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
3262 spin_unlock_irq(mlx4_tlock(dev));
3263 if (fs_rule->com.owner == slave) {
3264 base = fs_rule->com.res_id;
3265 state = fs_rule->com.from_state;
3266 while (state != 0) {
3267 switch (state) {
3268 case RES_FS_RULE_ALLOCATED:
3269 /* detach rule */
3270 err = mlx4_cmd(dev, base, 0, 0,
3271 MLX4_QP_FLOW_STEERING_DETACH,
3272 MLX4_CMD_TIME_CLASS_A,
3273 MLX4_CMD_NATIVE);
3274
3275 spin_lock_irq(mlx4_tlock(dev));
3276 rb_erase(&fs_rule->com.node,
3277 &tracker->res_tree[RES_FS_RULE]);
3278 list_del(&fs_rule->com.list);
3279 spin_unlock_irq(mlx4_tlock(dev));
3280 kfree(fs_rule);
3281 state = 0;
3282 break;
3283
3284 default:
3285 state = 0;
3286 }
3287 }
3288 }
3289 spin_lock_irq(mlx4_tlock(dev));
3290 }
3291 spin_unlock_irq(mlx4_tlock(dev));
3292}
3293
3294static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
3295{
3296 struct mlx4_priv *priv = mlx4_priv(dev);
3297 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3298 struct list_head *eq_list =
3299 &tracker->slave_list[slave].res_list[RES_EQ];
3300 struct res_eq *eq;
3301 struct res_eq *tmp;
3302 int err;
3303 int state;
3304 LIST_HEAD(tlist);
3305 int eqn;
3306 struct mlx4_cmd_mailbox *mailbox;
3307
3308 err = move_all_busy(dev, slave, RES_EQ);
3309 if (err)
3310 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
3311 "busy for slave %d\n", slave);
3312
3313 spin_lock_irq(mlx4_tlock(dev));
3314 list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
3315 spin_unlock_irq(mlx4_tlock(dev));
3316 if (eq->com.owner == slave) {
3317 eqn = eq->com.res_id;
3318 state = eq->com.from_state;
3319 while (state != 0) {
3320 switch (state) {
3321 case RES_EQ_RESERVED:
3322 spin_lock_irq(mlx4_tlock(dev));
3323 rb_erase(&eq->com.node,
3324 &tracker->res_tree[RES_EQ]);
3325 list_del(&eq->com.list);
3326 spin_unlock_irq(mlx4_tlock(dev));
3327 kfree(eq);
3328 state = 0;
3329 break;
3330
3331 case RES_EQ_HW:
3332 mailbox = mlx4_alloc_cmd_mailbox(dev);
3333 if (IS_ERR(mailbox)) {
3334 cond_resched();
3335 continue;
3336 }
3337 err = mlx4_cmd_box(dev, slave, 0,
3338 eqn & 0xff, 0,
3339 MLX4_CMD_HW2SW_EQ,
3340 MLX4_CMD_TIME_CLASS_A,
3341 MLX4_CMD_NATIVE);
3342 if (err)
3343 mlx4_dbg(dev, "rem_slave_eqs: failed"
3344 " to move slave %d eqs %d to"
3345 " SW ownership\n", slave, eqn);
c82e9aa0 3346 mlx4_free_cmd_mailbox(dev, mailbox);
3347 atomic_dec(&eq->mtt->ref_count);
3348 state = RES_EQ_RESERVED;
3349 break;
3350
3351 default:
3352 state = 0;
3353 }
3354 }
3355 }
3356 spin_lock_irq(mlx4_tlock(dev));
3357 }
3358 spin_unlock_irq(mlx4_tlock(dev));
3359}
3360
3361static void rem_slave_counters(struct mlx4_dev *dev, int slave)
3362{
3363 struct mlx4_priv *priv = mlx4_priv(dev);
3364 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3365 struct list_head *counter_list =
3366 &tracker->slave_list[slave].res_list[RES_COUNTER];
3367 struct res_counter *counter;
3368 struct res_counter *tmp;
3369 int err;
3370 int index;
3371
3372 err = move_all_busy(dev, slave, RES_COUNTER);
3373 if (err)
3374 mlx4_warn(dev, "rem_slave_counters: Could not move all counters to "
3375 "busy for slave %d\n", slave);
3376
3377 spin_lock_irq(mlx4_tlock(dev));
3378 list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
3379 if (counter->com.owner == slave) {
3380 index = counter->com.res_id;
3381 rb_erase(&counter->com.node,
3382 &tracker->res_tree[RES_COUNTER]);
3383 list_del(&counter->com.list);
3384 kfree(counter);
3385 __mlx4_counter_free(dev, index);
3386 }
3387 }
3388 spin_unlock_irq(mlx4_tlock(dev));
3389}
3390
3391static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
3392{
3393 struct mlx4_priv *priv = mlx4_priv(dev);
3394 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3395 struct list_head *xrcdn_list =
3396 &tracker->slave_list[slave].res_list[RES_XRCD];
3397 struct res_xrcdn *xrcd;
3398 struct res_xrcdn *tmp;
3399 int err;
3400 int xrcdn;
3401
3402 err = move_all_busy(dev, slave, RES_XRCD);
3403 if (err)
3404 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to "
3405 "busy for slave %d\n", slave);
3406
3407 spin_lock_irq(mlx4_tlock(dev));
3408 list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
3409 if (xrcd->com.owner == slave) {
3410 xrcdn = xrcd->com.res_id;
4af1c048 3411 rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
3412 list_del(&xrcd->com.list);
3413 kfree(xrcd);
3414 __mlx4_xrcd_free(dev, xrcdn);
3415 }
3416 }
3417 spin_unlock_irq(mlx4_tlock(dev));
3418}
3419
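/*
 * Called when a slave's resources must be torn down (e.g. on FLR or
 * shutdown): under the slave's tracker mutex, release everything it
 * still owns.  MAC entries and QPs are cleaned first (QP cleanup also
 * detaches their multicast groups), and MTTs are only freed after the
 * QPs, SRQs, CQs, MRs and EQs that reference them.
 */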
3420void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
3421{
3422 struct mlx4_priv *priv = mlx4_priv(dev);
3423
3424 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3425 /*VLAN*/
3426 rem_slave_macs(dev, slave);
3427 rem_slave_qps(dev, slave);
3428 rem_slave_srqs(dev, slave);
3429 rem_slave_cqs(dev, slave);
3430 rem_slave_mrs(dev, slave);
3431 rem_slave_eqs(dev, slave);
3432 rem_slave_mtts(dev, slave);
3433 rem_slave_counters(dev, slave);
3434 rem_slave_xrcdns(dev, slave);
1b9c6b06 3435 rem_slave_fs_rule(dev, slave);
3436 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3437}