/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 * copyright notice, this list of conditions and the following
 * disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following
 * disclaimer in the documentation and/or other materials
 * provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>

#include "mlx4.h"
#include "fw.h"

#define MLX4_MAC_VALID		(1ull << 63)
#define MLX4_MAC_MASK		0x7fffffffffffffffULL
#define ETH_ALEN		6

struct mac_res {
	struct list_head list;
	u64 mac;
	u8 port;
};

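/*
 * Common tracking state shared by every resource type: each tracked
 * resource sits on its owning slave's per-type list and, keyed by
 * res_id, in the per-type red-black tree of the resource tracker.
 */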
struct res_common {
	struct list_head	list;
	struct rb_node		node;
	u64			res_id;
	int			owner;
	int			state;
	int			from_state;
	int			to_state;
	int			removing;
};

enum {
	RES_ANY_BUSY = 1
};

struct res_gid {
	struct list_head	list;
	u8			gid[16];
	enum mlx4_protocol	prot;
	enum mlx4_steer_type	steer;
};

enum res_qp_states {
	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */
	RES_QP_RESERVED,

	/* ICM memory for QP context was mapped */
	RES_QP_MAPPED,

	/* QP is in hw ownership */
	RES_QP_HW
};

struct res_qp {
	struct res_common	com;
	struct res_mtt		*mtt;
	struct res_cq		*rcq;
	struct res_cq		*scq;
	struct res_srq		*srq;
	struct list_head	mcg_list;
	spinlock_t		mcg_spl;
	int			local_qpn;
};

enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,
	RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
	switch (state) {
	case RES_MTT_BUSY: return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
	default: return "Unknown";
	}
}

struct res_mtt {
	struct res_common	com;
	int			order;
	atomic_t		ref_count;
};

enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,
	RES_MPT_RESERVED,
	RES_MPT_MAPPED,
	RES_MPT_HW,
};

struct res_mpt {
	struct res_common	com;
	struct res_mtt		*mtt;
	int			key;
};

enum res_eq_states {
	RES_EQ_BUSY = RES_ANY_BUSY,
	RES_EQ_RESERVED,
	RES_EQ_HW,
};

struct res_eq {
	struct res_common	com;
	struct res_mtt		*mtt;
};

enum res_cq_states {
	RES_CQ_BUSY = RES_ANY_BUSY,
	RES_CQ_ALLOCATED,
	RES_CQ_HW,
};

struct res_cq {
	struct res_common	com;
	struct res_mtt		*mtt;
	atomic_t		ref_count;
};

enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,
	RES_SRQ_ALLOCATED,
	RES_SRQ_HW,
};

struct res_srq {
	struct res_common	com;
	struct res_mtt		*mtt;
	struct res_cq		*cq;
	atomic_t		ref_count;
};

enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,
};

struct res_counter {
	struct res_common	com;
	int			port;
};

enum res_xrcdn_states {
	RES_XRCD_BUSY = RES_ANY_BUSY,
	RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
	struct res_common	com;
	int			port;
};

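/*
 * Look up a tracked resource by id in a per-type red-black tree.
 * Callers serialize tree accesses with mlx4_tlock().
 */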
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct res_common *res = container_of(node, struct res_common,
						      node);

		if (res_id < res->res_id)
			node = node->rb_left;
		else if (res_id > res->res_id)
			node = node->rb_right;
		else
			return res;
	}
	return NULL;
}

static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/* Figure out where to put the new node */
	while (*new) {
		struct res_common *this = container_of(*new, struct res_common,
						       node);

		parent = *new;
		if (res->res_id < this->res_id)
			new = &((*new)->rb_left);
		else if (res->res_id > this->res_id)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&res->node, parent, new);
	rb_insert_color(&res->node, root);

	return 0;
}

/* For debug purposes */
static const char *ResourceType(enum mlx4_resource rt)
{
	switch (rt) {
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	case RES_XRCD: return "RES_XRCD";
	default: return "Unknown resource type !!!";
	}
}

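/*
 * Allocate and initialize the per-slave resource lists and reset the
 * per-type resource trees.
 */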
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;
	int t;

	priv->mfunc.master.res_tracker.slave_list =
		kzalloc(dev->num_slaves * sizeof(struct slave_list),
			GFP_KERNEL);
	if (!priv->mfunc.master.res_tracker.slave_list)
		return -ENOMEM;

	for (i = 0; i < dev->num_slaves; i++) {
		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
	}

	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
		 dev->num_slaves);
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
	return 0;
}

void mlx4_free_resource_tracker(struct mlx4_dev *dev,
				enum mlx4_res_tracker_free_type type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (priv->mfunc.master.res_tracker.slave_list) {
		if (type != RES_TR_FREE_STRUCTS_ONLY)
			for (i = 0; i < dev->num_slaves; i++)
				if (type == RES_TR_FREE_ALL ||
				    dev->caps.function != i)
					mlx4_delete_all_resources_for_slave(dev, i);

		if (type != RES_TR_FREE_SLAVES_ONLY) {
			kfree(priv->mfunc.master.res_tracker.slave_list);
			priv->mfunc.master.res_tracker.slave_list = NULL;
		}
	}
}

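/*
 * For UD QPs, force the GID index in the QP context to the slot
 * reserved for this slave (0x80 | slave).
 */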
static void update_ud_gid(struct mlx4_dev *dev,
			  struct mlx4_qp_context *qp_ctx, u8 slave)
{
	u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;

	if (MLX4_QP_ST_UD == ts)
		qp_ctx->pri_path.mgid_index = 0x80 | slave;

	mlx4_dbg(dev, "slave %d, new gid index: 0x%x\n",
		 slave, qp_ctx->pri_path.mgid_index);
}

static int mpt_mask(struct mlx4_dev *dev)
{
	return dev->caps.num_mpts - 1;
}

static void *find_res(struct mlx4_dev *dev, int res_id,
		      enum mlx4_resource type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
				  res_id);
}

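/*
 * Look up a resource, verify that it belongs to @slave and is not
 * busy, and mark it busy. Balanced by put_res(), which restores the
 * saved state.
 */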
static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
		   enum mlx4_resource type,
		   void *res)
{
	struct res_common *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (!r) {
		err = -ENOENT;
		goto exit;
	}

	if (r->state == RES_ANY_BUSY) {
		err = -EBUSY;
		goto exit;
	}

	if (r->owner != slave) {
		err = -EPERM;
		goto exit;
	}

	r->from_state = r->state;
	r->state = RES_ANY_BUSY;
	mlx4_dbg(dev, "res %s id 0x%llx to busy\n",
		 ResourceType(type), r->res_id);

	if (res)
		*((struct res_common **)res) = r;

exit:
	spin_unlock_irq(mlx4_tlock(dev));
	return err;
}

int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource type,
				    u64 res_id, int *slave)
{
	struct res_common *r;
	int err = -ENOENT;
	int id = res_id;

	if (type == RES_QP)
		id &= 0x7fffff;
	spin_lock(mlx4_tlock(dev));

	r = find_res(dev, id, type);
	if (r) {
		*slave = r->owner;
		err = 0;
	}
	spin_unlock(mlx4_tlock(dev));

	return err;
}

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
		    enum mlx4_resource type)
{
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (r)
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static struct res_common *alloc_qp_tr(int id)
{
	struct res_qp *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_QP_RESERVED;
	ret->local_qpn = id;
	INIT_LIST_HEAD(&ret->mcg_list);
	spin_lock_init(&ret->mcg_spl);

	return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
	struct res_mtt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->order = order;
	ret->com.state = RES_MTT_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
	struct res_mpt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_MPT_RESERVED;
	ret->key = key;

	return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
	struct res_eq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_EQ_RESERVED;

	return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
	struct res_cq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_CQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
	struct res_srq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_SRQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_counter_tr(int id)
{
	struct res_counter *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_COUNTER_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
	struct res_xrcdn *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_XRCD_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
				   int extra)
{
	struct res_common *ret;

	switch (type) {
	case RES_QP:
		ret = alloc_qp_tr(id);
		break;
	case RES_MPT:
		ret = alloc_mpt_tr(id, extra);
		break;
	case RES_MTT:
		ret = alloc_mtt_tr(id, extra);
		break;
	case RES_EQ:
		ret = alloc_eq_tr(id);
		break;
	case RES_CQ:
		ret = alloc_cq_tr(id);
		break;
	case RES_SRQ:
		ret = alloc_srq_tr(id);
		break;
	case RES_MAC:
		printk(KERN_ERR "implementation missing\n");
		return NULL;
	case RES_COUNTER:
		ret = alloc_counter_tr(id);
		break;
	case RES_XRCD:
		ret = alloc_xrcdn_tr(id);
		break;
	default:
		return NULL;
	}
	if (ret)
		ret->owner = slave;

	return ret;
}

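/*
 * Track @count consecutive resource ids starting at @base on behalf of
 * @slave: allocate a tracker entry per id and insert each into the
 * per-type tree and the slave's list. All-or-nothing: any failure
 * rolls back the entries added so far.
 */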
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct res_common **res_arr;
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct rb_root *root = &tracker->res_tree[type];

	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
	if (!res_arr)
		return -ENOMEM;

	for (i = 0; i < count; ++i) {
		res_arr[i] = alloc_tr(base + i, type, slave, extra);
		if (!res_arr[i]) {
			for (--i; i >= 0; --i)
				kfree(res_arr[i]);

			kfree(res_arr);
			return -ENOMEM;
		}
	}

	spin_lock_irq(mlx4_tlock(dev));
	for (i = 0; i < count; ++i) {
		if (find_res(dev, base + i, type)) {
			err = -EEXIST;
			goto undo;
		}
		err = res_tracker_insert(root, res_arr[i]);
		if (err)
			goto undo;
		list_add_tail(&res_arr[i]->list,
			      &tracker->slave_list[slave].res_list[type]);
	}
	spin_unlock_irq(mlx4_tlock(dev));
	kfree(res_arr);

	return 0;

undo:
	/* Roll back the entries inserted so far; i indexes res_arr,
	 * so walk array indices, not resource ids. */
	for (--i; i >= 0; --i) {
		rb_erase(&res_arr[i]->node, root);
		list_del(&res_arr[i]->list);
	}

	spin_unlock_irq(mlx4_tlock(dev));

	for (i = 0; i < count; ++i)
		kfree(res_arr[i]);

	kfree(res_arr);

	return err;
}

static int remove_qp_ok(struct res_qp *res)
{
	if (res->com.state == RES_QP_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_QP_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
	if (res->com.state == RES_MTT_BUSY ||
	    atomic_read(&res->ref_count)) {
		printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
		       __func__, __LINE__,
		       mtt_states_str(res->com.state),
		       atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_MTT_ALLOCATED)
		return -EPERM;
	else if (res->order != order)
		return -EINVAL;

	return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
	if (res->com.state == RES_MPT_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_MPT_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
	if (res->com.state == RES_EQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_EQ_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
	if (res->com.state == RES_COUNTER_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_COUNTER_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
	if (res->com.state == RES_XRCD_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_XRCD_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
	if (res->com.state == RES_CQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_CQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
	if (res->com.state == RES_SRQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_SRQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
	switch (type) {
	case RES_QP:
		return remove_qp_ok((struct res_qp *)res);
	case RES_CQ:
		return remove_cq_ok((struct res_cq *)res);
	case RES_SRQ:
		return remove_srq_ok((struct res_srq *)res);
	case RES_MPT:
		return remove_mpt_ok((struct res_mpt *)res);
	case RES_MTT:
		return remove_mtt_ok((struct res_mtt *)res, extra);
	case RES_MAC:
		return -ENOSYS;
	case RES_EQ:
		return remove_eq_ok((struct res_eq *)res);
	case RES_COUNTER:
		return remove_counter_ok((struct res_counter *)res);
	case RES_XRCD:
		return remove_xrcdn_ok((struct res_xrcdn *)res);
	default:
		return -EINVAL;
	}
}

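/*
 * Reverse of add_res_range(): verify that every id in the range is
 * owned by @slave and in a removable state, then drop all the tracker
 * entries.
 */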
static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	u64 i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		if (!r) {
			err = -ENOENT;
			goto out;
		}
		if (r->owner != slave) {
			err = -EPERM;
			goto out;
		}
		err = remove_ok(r, type, extra);
		if (err)
			goto out;
	}

	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		rb_erase(&r->node, &tracker->res_tree[type]);
		list_del(&r->list);
		kfree(r);
	}
	err = 0;

out:
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

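/*
 * Begin a QP state transition: validate the move from the current
 * state to @state, then hold the resource in RES_QP_BUSY until the
 * caller commits with res_end_move() or rolls back with
 * res_abort_move(). The mr/eq/cq/srq variants below follow the same
 * pattern.
 */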
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
				enum res_qp_states state, struct res_qp **qp,
				int alloc)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_qp *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_QP_BUSY:
			mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
				 __func__, r->com.res_id);
			err = -EBUSY;
			break;

		case RES_QP_RESERVED:
			if (r->com.state == RES_QP_MAPPED && !alloc)
				break;

			mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
			err = -EINVAL;
			break;

		case RES_QP_MAPPED:
			if ((r->com.state == RES_QP_RESERVED && alloc) ||
			    r->com.state == RES_QP_HW)
				break;
			else {
				mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
					 r->com.res_id);
				err = -EINVAL;
			}

			break;

		case RES_QP_HW:
			if (r->com.state != RES_QP_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_QP_BUSY;
			if (qp)
				*qp = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_mpt_states state, struct res_mpt **mpt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mpt *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_MPT_BUSY:
			err = -EINVAL;
			break;

		case RES_MPT_RESERVED:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;

		case RES_MPT_MAPPED:
			if (r->com.state != RES_MPT_RESERVED &&
			    r->com.state != RES_MPT_HW)
				err = -EINVAL;
			break;

		case RES_MPT_HW:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_MPT_BUSY;
			if (mpt)
				*mpt = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_eq_states state, struct res_eq **eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_eq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_EQ_BUSY:
			err = -EINVAL;
			break;

		case RES_EQ_RESERVED:
			if (r->com.state != RES_EQ_HW)
				err = -EINVAL;
			break;

		case RES_EQ_HW:
			if (r->com.state != RES_EQ_RESERVED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_EQ_BUSY;
			if (eq)
				*eq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
				enum res_cq_states state, struct res_cq **cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_cq *r;
	int err;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_CQ_BUSY:
			err = -EBUSY;
			break;

		case RES_CQ_ALLOCATED:
			if (r->com.state != RES_CQ_HW)
				err = -EINVAL;
			else if (atomic_read(&r->ref_count))
				err = -EBUSY;
			else
				err = 0;
			break;

		case RES_CQ_HW:
			if (r->com.state != RES_CQ_ALLOCATED)
				err = -EINVAL;
			else
				err = 0;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_CQ_BUSY;
			if (cq)
				*cq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				 enum res_srq_states state, struct res_srq **srq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_srq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_SRQ_BUSY:
			err = -EINVAL;
			break;

		case RES_SRQ_ALLOCATED:
			if (r->com.state != RES_SRQ_HW)
				err = -EINVAL;
			else if (atomic_read(&r->ref_count))
				err = -EBUSY;
			break;

		case RES_SRQ_HW:
			if (r->com.state != RES_SRQ_ALLOCATED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_SRQ_BUSY;
			if (srq)
				*srq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static void res_abort_move(struct mlx4_dev *dev, int slave,
			   enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static void res_end_move(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->to_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
	return mlx4_is_qp_reserved(dev, qpn);
}

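/*
 * ALLOC_RES(RES_QP): RES_OP_RESERVE reserves a range of QP numbers for
 * the slave; RES_OP_MAP_ICM maps ICM memory for one QP and moves it to
 * RES_QP_MAPPED.
 */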
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err;
	int count;
	int align;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		count = get_param_l(&in_param);
		align = get_param_h(&in_param);
		err = __mlx4_qp_reserve_range(dev, count, align, &base);
		if (err)
			return err;

		err = add_res_range(dev, slave, base, count, RES_QP, 0);
		if (err) {
			__mlx4_qp_release_range(dev, base, count);
			return err;
		}
		set_param_l(out_param, base);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		if (valid_reserved(dev, slave, qpn)) {
			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
			if (err)
				return err;
		}

		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
					   NULL, 1);
		if (err)
			return err;

		if (!valid_reserved(dev, slave, qpn)) {
			err = __mlx4_qp_alloc_icm(dev, qpn);
			if (err) {
				res_abort_move(dev, slave, RES_QP, qpn);
				return err;
			}
		}

		res_end_move(dev, slave, RES_QP, qpn);
		break;

	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	order = get_param_l(&in_param);
	base = __mlx4_alloc_mtt_range(dev, order);
	if (base == -1)
		return -ENOMEM;

	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
	if (err)
		__mlx4_free_mtt_range(dev, base, order);
	else
		set_param_l(out_param, base);

	return err;
}

static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = __mlx4_mr_reserve(dev);
		if (index == -1)
			break;
		id = index & mpt_mask(dev);

		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
		if (err) {
			__mlx4_mr_release(dev, index);
			break;
		}
		set_param_l(out_param, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_MAPPED, &mpt);
		if (err)
			return err;

		err = __mlx4_mr_alloc_icm(dev, mpt->key);
		if (err) {
			res_abort_move(dev, slave, RES_MPT, id);
			return err;
		}

		res_end_move(dev, slave, RES_MPT, id);
		break;
	}
	return err;
}

static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = __mlx4_cq_alloc_icm(dev, &cqn);
		if (err)
			break;

		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err) {
			__mlx4_cq_free_icm(dev, cqn);
			break;
		}

		set_param_l(out_param, cqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}

static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = __mlx4_srq_alloc_icm(dev, &srqn);
		if (err)
			break;

		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err) {
			__mlx4_srq_free_icm(dev, srqn);
			break;
		}

		set_param_l(out_param, srqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}

static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct mac_res *res;

	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res)
		return -ENOMEM;
	res->mac = mac;
	res->port = (u8) port;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_MAC]);
	return 0;
}

static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
			       int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			list_del(&res->list);
			kfree(res);
			break;
		}
	}
}

static void rem_slave_macs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		list_del(&res->list);
		__mlx4_unregister_mac(dev, res->port, res->mac);
		kfree(res);
	}
}

static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int port;
	u64 mac;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	port = get_param_l(out_param);
	mac = in_param;

	err = __mlx4_register_mac(dev, port, mac);
	if (err >= 0) {
		set_param_l(out_param, err);
		err = 0;
	}

	if (!err) {
		err = mac_add_to_slave(dev, slave, mac, port);
		if (err)
			__mlx4_unregister_mac(dev, port, mac);
	}
	return err;
}

static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)
{
	return 0;
}

static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			     u64 in_param, u64 *out_param)
{
	u32 index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = __mlx4_counter_alloc(dev, &index);
	if (err)
		return err;

	err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
	if (err)
		__mlx4_counter_free(dev, index);
	else
		set_param_l(out_param, index);

	return err;
}

static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			   u64 in_param, u64 *out_param)
{
	u32 xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = __mlx4_xrcd_alloc(dev, &xrcdn);
	if (err)
		return err;

	err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		__mlx4_xrcd_free(dev, xrcdn);
	else
		set_param_l(out_param, xrcdn);

	return err;
}

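/*
 * Dispatch a slave's ALLOC_RES command to the per-type handler above;
 * vhcr->in_modifier selects the resource type and vhcr->op_modifier
 * the operation.
 */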
int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier) {
	case RES_QP:
		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MTT:
		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_CQ:
		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_VLAN:
		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);
		break;

	case RES_COUNTER:
		err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
					vhcr->in_param, &vhcr->out_param);
		break;

	case RES_XRCD:
		err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
				      vhcr->in_param, &vhcr->out_param);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param)
{
	int err;
	int count;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		base = get_param_l(&in_param) & 0x7fffff;
		count = get_param_h(&in_param);
		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
		if (err)
			break;
		__mlx4_qp_release_range(dev, base, count);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
					   NULL, 0);
		if (err)
			return err;

		if (!valid_reserved(dev, slave, qpn))
			__mlx4_qp_free_icm(dev, qpn);

		res_end_move(dev, slave, RES_QP, qpn);

		if (valid_reserved(dev, slave, qpn))
			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	base = get_param_l(&in_param);
	order = get_param_h(&in_param);
	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
	if (!err)
		__mlx4_free_mtt_range(dev, base, order);
	return err;
}

static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = get_res(dev, slave, id, RES_MPT, &mpt);
		if (err)
			break;
		index = mpt->key;
		put_res(dev, slave, id, RES_MPT);

		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
		if (err)
			break;
		__mlx4_mr_release(dev, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_RESERVED, &mpt);
		if (err)
			return err;

		__mlx4_mr_free_icm(dev, mpt->key);
		res_end_move(dev, slave, RES_MPT, id);
		return err;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		cqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err)
			break;

		__mlx4_cq_free_icm(dev, cqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		srqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err)
			break;

		__mlx4_srq_free_icm(dev, srqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int port;
	int err = 0;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		port = get_param_l(out_param);
		mac_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_mac(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	return 0;
}

static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			    u64 in_param, u64 *out_param)
{
	int index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	index = get_param_l(&in_param);
	err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
	if (err)
		return err;

	__mlx4_counter_free(dev, index);

	return err;
}

static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)
{
	int xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	xrcdn = get_param_l(&in_param);
	err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		return err;

	__mlx4_xrcd_free(dev, xrcdn);

	return err;
}

int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err = -EINVAL;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier) {
	case RES_QP:
		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param);
		break;

	case RES_MTT:
		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param);
		break;

	case RES_CQ:
		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_VLAN:
		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_COUNTER:
		err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
				       vhcr->in_param, &vhcr->out_param);
		break;

	case RES_XRCD:
		err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);
		break;

	default:
		break;
	}
	return err;
}

/* ugly but other choices are uglier */
static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
{
	return (be32_to_cpu(mpt->flags) >> 9) & 1;
}

static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
{
	return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
}

static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->mtt_sz);
}

static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
}

static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
{
	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
}

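/*
 * Number of MTT entries spanned by a QP's work queues: SQ and RQ sizes
 * are decoded from the context (the RQ is absent for SRQ, RSS and XRC
 * QPs) and the total, including the page offset, is rounded up to a
 * power-of-two page count.
 */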
static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
{
	int page_shift = (qpc->log_page_size & 0x3f) + 12;
	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
	int log_sq_stride = qpc->sq_size_stride & 7;
	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
	int log_rq_stride = qpc->rq_size_stride & 7;
	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
	int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
	int sq_size;
	int rq_size;
	int total_pages;
	int total_mem;
	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;

	sq_size = 1 << (log_sq_size + log_sq_stride + 4);
	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
	total_mem = sq_size + rq_size;
	total_pages =
		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
				   page_shift);

	return total_pages;
}

static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
			   int size, struct res_mtt *mtt)
{
	int res_start = mtt->com.res_id;
	int res_size = (1 << mtt->order);

	if (start < res_start || start + size > res_start + res_size)
		return -EPERM;
	return 0;
}

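/*
 * SW2HW_MPT: move an MPT into hardware ownership. For non-physical
 * MRs, the MTT range referenced by the MPT entry must be owned by the
 * slave; its refcount is raised while the MPT is in HW.
 */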
int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_mpt *mpt;
	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
	int phys;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
	if (err)
		return err;

	phys = mr_phys_mpt(inbox->buf);
	if (!phys) {
		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
		if (err)
			goto ex_abort;

		err = check_mtt_range(dev, slave, mtt_base,
				      mr_get_mtt_size(inbox->buf), mtt);
		if (err)
			goto ex_put;

		mpt->mtt = mtt;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	if (!phys) {
		atomic_inc(&mtt->ref_count);
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
	}

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_put:
	if (!phys)
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}

int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	if (mpt->mtt)
		atomic_dec(&mpt->mtt->ref_count);

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}

int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = get_res(dev, slave, id, RES_MPT, &mpt);
	if (err)
		return err;

	if (mpt->com.from_state != RES_MPT_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

out:
	put_res(dev, slave, id, RES_MPT);
	return err;
}

static int qp_get_rcqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
}

static int qp_get_scqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_send) & 0xffffff;
}

static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->srqn) & 0x1ffffff;
}

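/*
 * RST2INIT: before handing the QP to hardware, resolve every resource
 * the QP context references (MTT range, receive/send CQs, optional
 * SRQ) and take a reference on each so they cannot be removed while
 * the QP uses them.
 */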
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_mtt *mtt;
	struct res_qp *qp;
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
	int mtt_size = qp_get_mtt_size(qpc);
	struct res_cq *rcq;
	struct res_cq *scq;
	int rcqn = qp_get_rcqn(qpc);
	int scqn = qp_get_scqn(qpc);
	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
	struct res_srq *srq;
	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
	if (err)
		return err;
	qp->local_qpn = local_qpn;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto ex_put_mtt;

	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
	if (err)
		goto ex_put_mtt;

	if (scqn != rcqn) {
		err = get_res(dev, slave, scqn, RES_CQ, &scq);
		if (err)
			goto ex_put_rcq;
	} else
		scq = rcq;

	if (use_srq) {
		err = get_res(dev, slave, srqn, RES_SRQ, &srq);
		if (err)
			goto ex_put_scq;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_srq;
	atomic_inc(&mtt->ref_count);
	qp->mtt = mtt;
	atomic_inc(&rcq->ref_count);
	qp->rcq = rcq;
	atomic_inc(&scq->ref_count);
	qp->scq = scq;

	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);

	if (use_srq) {
		atomic_inc(&srq->ref_count);
		put_res(dev, slave, srqn, RES_SRQ);
		qp->srq = srq;
	}
	put_res(dev, slave, rcqn, RES_CQ);
	put_res(dev, slave, mtt_base, RES_MTT);
	res_end_move(dev, slave, RES_QP, qpn);

	return 0;

ex_put_srq:
	if (use_srq)
		put_res(dev, slave, srqn, RES_SRQ);
ex_put_scq:
	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);
ex_put_rcq:
	put_res(dev, slave, rcqn, RES_CQ);
ex_put_mtt:
	put_res(dev, slave, mtt_base, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}

static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
{
	return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
}

static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
{
	int log_eq_size = eqc->log_eq_size & 0x1f;
	int page_shift = (eqc->log_page_size & 0x3f) + 12;

	if (log_eq_size + 5 < page_shift)
		return 1;

	return 1 << (log_eq_size + 5 - page_shift);
}

static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
{
	return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
}

static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
{
	int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
	int page_shift = (cqc->log_page_size & 0x3f) + 12;

	if (log_cq_size + 5 < page_shift)
		return 1;

	return 1 << (log_cq_size + 5 - page_shift);
}

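/*
 * EQs are tracked under the composite id (slave << 8 | eqn), since EQ
 * numbers are only unique within a slave.
 */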
int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int eqn = vhcr->in_modifier;
	int res_id = (slave << 8) | eqn;
	struct mlx4_eq_context *eqc = inbox->buf;
	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
	int mtt_size = eq_get_mtt_size(eqc);
	struct res_eq *eq;
	struct res_mtt *mtt;

	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	if (err)
		return err;
	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
	if (err)
		goto out_add;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto out_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;

	atomic_inc(&mtt->ref_count);
	eq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_EQ, res_id);
out_add:
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	return err;
}

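/*
 * Find the slave-owned MTT range that fully contains
 * [start, start + len) and mark it busy; used to validate WRITE_MTT
 * requests against the slave's own allocations.
 */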
static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
			      int len, struct res_mtt **res)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mtt *mtt;
	int err = -EINVAL;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
			    com.list) {
		if (!check_mtt_range(dev, slave, start, len, mtt)) {
			*res = mtt;
			mtt->com.from_state = mtt->com.state;
			mtt->com.state = RES_MTT_BUSY;
			err = 0;
			break;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_mtt mtt;
	__be64 *page_list = inbox->buf;
	u64 *pg_list = (u64 *)page_list;
	int i;
	struct res_mtt *rmtt = NULL;
	int start = be64_to_cpu(page_list[0]);
	int npages = vhcr->in_modifier;
	int err;

	err = get_containing_mtt(dev, slave, start, npages, &rmtt);
	if (err)
		return err;

	/* Call the SW implementation of write_mtt:
	 * - Prepare a dummy mtt struct
	 * - Translate inbox contents to simple addresses in host endianness */
	mtt.offset = 0; /* TBD this is broken but I don't handle it since
			   we don't really use it */
	mtt.order = 0;
	mtt.page_shift = 0;
	for (i = 0; i < npages; ++i)
		pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);

	err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
			       ((u64 *)page_list + 2));

	if (rmtt)
		put_res(dev, slave, rmtt->com.res_id, RES_MTT);

	return err;
}

int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 8);
	struct res_eq *eq;
	int err;

	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
	if (err)
		return err;

	err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
	if (err)
		goto ex_abort;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	atomic_dec(&eq->mtt->ref_count);
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);

	return 0;

ex_put:
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_EQ, res_id);

	return err;
}

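/*
 * Deliver an event to a slave by injecting an EQE into the event queue
 * the slave registered for this event type, using the GEN_EQE firmware
 * command.
 */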
int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq_info *event_eq;
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_modifier = 0;
	int err;
	int res_id;
	struct res_eq *req;

	if (!priv->mfunc.master.slave_state)
		return -EINVAL;

	event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];

	/* Create the event only if the slave is registered */
	if (event_eq->eqn < 0)
		return 0;

	mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	res_id = (slave << 8) | event_eq->eqn;
	err = get_res(dev, slave, res_id, RES_EQ, &req);
	if (err)
		goto unlock;

	if (req->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto put;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto put;
	}

	if (eqe->type == MLX4_EVENT_TYPE_CMD) {
		++event_eq->token;
		eqe->event.cmd.token = cpu_to_be16(event_eq->token);
	}

	memcpy(mailbox->buf, (u8 *) eqe, 28);

	in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);

	err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
		       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_NATIVE);

	put_res(dev, slave, res_id, RES_EQ);
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;

put:
	put_res(dev, slave, res_id, RES_EQ);

unlock:
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	return err;
}

2237int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
2238 struct mlx4_vhcr *vhcr,
2239 struct mlx4_cmd_mailbox *inbox,
2240 struct mlx4_cmd_mailbox *outbox,
2241 struct mlx4_cmd_info *cmd)
2242{
2243 int eqn = vhcr->in_modifier;
2244 int res_id = eqn | (slave << 8);
2245 struct res_eq *eq;
2246 int err;
2247
2248 err = get_res(dev, slave, res_id, RES_EQ, &eq);
2249 if (err)
2250 return err;
2251
2252 if (eq->com.from_state != RES_EQ_HW) {
2253 err = -EINVAL;
2254 goto ex_put;
2255 }
2256
2257 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2258
2259ex_put:
2260 put_res(dev, slave, res_id, RES_EQ);
2261 return err;
2262}
2263
int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
	struct res_cq *cq;
	struct res_mtt *mtt;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
	if (err)
		return err;
	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;
	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	if (err)
		goto out_put;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}
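
/* Return a slave's CQ to SW ownership and drop the reference on its MTT. */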
int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_move;
	atomic_dec(&cq->mtt->ref_count);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}

int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;
	int err;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);
	if (err)
		return err;

	if (cq->com.from_state != RES_CQ_HW)
		goto ex_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
	put_res(dev, slave, cqn, RES_CQ);

	return err;
}
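
/*
 * MODIFY_CQ with op_modifier 0 is a CQ resize: verify that the tracked
 * MTT is still the one the CQ points at, validate the new MTT range
 * from the mailbox, and on success move the CQ's reference from the
 * old range to the new one.
 */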
static int handle_resize(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd,
			 struct res_cq *cq)
{
	int err;
	struct res_mtt *orig_mtt;
	struct res_mtt *mtt;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;

	err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
	if (err)
		return err;

	if (orig_mtt != cq->mtt) {
		err = -EINVAL;
		goto ex_put;
	}

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_put;

	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	if (err)
		goto ex_put1;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put1;
	atomic_dec(&orig_mtt->ref_count);
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	return 0;

ex_put1:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_put:
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);

	return err;
}
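
/* CQ modify; the resize variant (op_modifier 0) needs MTT bookkeeping. */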
int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;
	int err;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);
	if (err)
		return err;

	if (cq->com.from_state != RES_CQ_HW)
		goto ex_put;

	if (vhcr->op_modifier == 0) {
		err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
	put_res(dev, slave, cqn, RES_CQ);

	return err;
}
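
/*
 * Number of MTT entries spanned by an SRQ: (1 << log_srq_size) WQEs of
 * (1 << (log_rq_stride + 4)) bytes each, divided by the page size, with
 * a minimum of one entry.
 */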
static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
{
	int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
	int log_rq_stride = srqc->logstride & 7;
	int page_shift = (srqc->log_page_size & 0x3f) + 12;

	if (log_srq_size + log_rq_stride + 4 < page_shift)
		return 1;

	return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
}
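
/*
 * Move a slave's SRQ to HW ownership.  The SRQ number embedded in the
 * context must match the command's in_modifier, and the referenced MTT
 * range must belong to the slave and be large enough.
 */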
int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_srq *srq;
	struct mlx4_srq_context *srqc = inbox->buf;
	int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;

	if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
		return -EINVAL;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
	if (err)
		return err;
	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;
	err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
			      mtt);
	if (err)
		goto ex_put_mtt;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_mtt;

	atomic_inc(&mtt->ref_count);
	srq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_SRQ, srqn);
	return 0;

ex_put_mtt:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_SRQ, srqn);

	return err;
}

int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;
	atomic_dec(&srq->mtt->ref_count);
	if (srq->cq)
		atomic_dec(&srq->cq->ref_count);
	res_end_move(dev, slave, RES_SRQ, srqn);

	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_SRQ, srqn);

	return err;
}

int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
	if (err)
		return err;
	if (srq->com.from_state != RES_SRQ_HW) {
		err = -EBUSY;
		goto out;
	}
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, srqn, RES_SRQ);
	return err;
}

int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
	if (err)
		return err;

	if (srq->com.from_state != RES_SRQ_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, srqn, RES_SRQ);
	return err;
}

int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
			struct mlx4_vhcr *vhcr,
			struct mlx4_cmd_mailbox *inbox,
			struct mlx4_cmd_mailbox *outbox,
			struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;

	err = get_res(dev, slave, qpn, RES_QP, &qp);
	if (err)
		return err;
	if (qp->com.from_state != RES_QP_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}

int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *qpc = inbox->buf + 8;

	update_ud_gid(dev, qpc, (u8)slave);

	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
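
/*
 * Move a QP back to RESET and drop the tracker references on its MTT,
 * receive/send CQs and optional SRQ (presumably taken when the QP was
 * moved to HW ownership).
 */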
int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	atomic_dec(&qp->mtt->ref_count);
	atomic_dec(&qp->rcq->ref_count);
	atomic_dec(&qp->scq->ref_count);
	if (qp->srq)
		atomic_dec(&qp->srq->ref_count);
	res_end_move(dev, slave, RES_QP, qpn);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}

static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
				struct res_qp *rqp, u8 *gid)
{
	struct res_gid *res;

	list_for_each_entry(res, &rqp->mcg_list, list) {
		if (!memcmp(res->gid, gid, 16))
			return res;
	}
	return NULL;
}
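
/*
 * Each tracked QP keeps a list of the multicast groups it is attached
 * to, guarded by the QP's mcg spinlock, so that detach_qp() can clean
 * up when the slave goes away.
 */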
static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot,
		       enum mlx4_steer_type steer)
{
	struct res_gid *res;
	int err;

	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	spin_lock_irq(&rqp->mcg_spl);
	if (find_gid(dev, slave, rqp, gid)) {
		kfree(res);
		err = -EEXIST;
	} else {
		memcpy(res->gid, gid, 16);
		res->prot = prot;
		res->steer = steer;
		list_add_tail(&res->list, &rqp->mcg_list);
		err = 0;
	}
	spin_unlock_irq(&rqp->mcg_spl);

	return err;
}

static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot,
		       enum mlx4_steer_type steer)
{
	struct res_gid *res;
	int err;

	spin_lock_irq(&rqp->mcg_spl);
	res = find_gid(dev, slave, rqp, gid);
	if (!res || res->prot != prot || res->steer != steer) {
		err = -EINVAL;
	} else {
		list_del(&res->list);
		kfree(res);
		err = 0;
	}
	spin_unlock_irq(&rqp->mcg_spl);

	return err;
}
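
/*
 * Multicast attach/detach for a slave QP.  op_modifier selects attach
 * vs detach, bits 28-30 of in_modifier carry the protocol, bit 31
 * blocks loopback, and bit 1 of gid[7] encodes the steering type.
 * The tracker bookkeeping is updated first so that a firmware failure
 * on attach can be unwound.
 */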
int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp qp; /* dummy for calling attach/detach */
	u8 *gid = inbox->buf;
	enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
	int err;
	int qpn;
	struct res_qp *rqp;
	int attach = vhcr->op_modifier;
	int block_loopback = vhcr->in_modifier >> 31;
	u8 steer_type_mask = 2;
	enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;

	qpn = vhcr->in_modifier & 0xffffff;
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err)
		return err;

	qp.qpn = qpn;
	if (attach) {
		err = add_mcg_res(dev, slave, rqp, gid, prot, type);
		if (err)
			goto ex_put;

		err = mlx4_qp_attach_common(dev, &qp, gid,
					    block_loopback, prot, type);
		if (err)
			goto ex_rem;
	} else {
		err = rem_mcg_res(dev, slave, rqp, gid, prot, type);
		if (err)
			goto ex_put;
		err = mlx4_qp_detach_common(dev, &qp, gid, prot, type);
	}

	put_res(dev, slave, qpn, RES_QP);
	return 0;

ex_rem:
	/* ignore error return below, already in error */
	(void) rem_mcg_res(dev, slave, rqp, gid, prot, type);
ex_put:
	put_res(dev, slave, qpn, RES_QP);

	return err;
}

enum {
	BUSY_MAX_RETRIES = 10
};

int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier & 0xffff;

	err = get_res(dev, slave, index, RES_COUNTER, NULL);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	put_res(dev, slave, index, RES_COUNTER);
	return err;
}
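
/* Detach a dying QP from every multicast group it is still attached to. */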
static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
{
	struct res_gid *rgid;
	struct res_gid *tmp;
	struct mlx4_qp qp; /* dummy for calling attach/detach */

	list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
		qp.qpn = rqp->local_qpn;
		(void) mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
					     rgid->steer);
		list_del(&rgid->list);
		kfree(rgid);
	}
}
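
/*
 * Claim every resource of the given type owned by the slave by marking
 * it busy and "removing", so no command wrapper can grab it afterwards.
 * Returns how many resources were already busy (held by a wrapper);
 * move_all_busy() below keeps retrying those for up to five seconds
 * before reporting the stragglers.
 */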
static int _move_all_busy(struct mlx4_dev *dev, int slave,
			  enum mlx4_resource type, int print)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
	struct res_common *r;
	struct res_common *tmp;
	int busy;

	busy = 0;
	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(r, tmp, rlist, list) {
		if (r->owner == slave) {
			if (!r->removing) {
				if (r->state == RES_ANY_BUSY) {
					if (print)
						mlx4_dbg(dev,
							 "%s id 0x%llx is busy\n",
							 ResourceType(type),
							 r->res_id);
					++busy;
				} else {
					r->from_state = r->state;
					r->state = RES_ANY_BUSY;
					r->removing = 1;
				}
			}
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return busy;
}

static int move_all_busy(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type)
{
	unsigned long begin;
	int busy;

	begin = jiffies;
	do {
		busy = _move_all_busy(dev, slave, type, 0);
		if (time_after(jiffies, begin + 5 * HZ))
			break;
		if (busy)
			cond_resched();
	} while (busy);

	if (busy)
		busy = _move_all_busy(dev, slave, type, 1);

	return busy;
}
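
/*
 * The rem_slave_*() helpers below walk a slave's list of tracked
 * resources and unwind each one state by state (e.g. HW -> mapped ->
 * reserved for QPs), issuing the native firmware command for each
 * downward step before freeing the tracker entry itself.
 */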
static void rem_slave_qps(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
	int state;
	u64 in_param;
	int qpn;
	int err;

	err = move_all_busy(dev, slave, RES_QP);
	if (err)
		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == slave) {
			qpn = qp->com.res_id;
			detach_qp(dev, slave, qp);
			state = qp->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_QP_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&qp->com.node,
						 &tracker->res_tree[RES_QP]);
					list_del(&qp->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(qp);
					state = 0;
					break;
				case RES_QP_MAPPED:
					if (!valid_reserved(dev, slave, qpn))
						__mlx4_qp_free_icm(dev, qpn);
					state = RES_QP_RESERVED;
					break;
				case RES_QP_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param,
						       qp->local_qpn, 2,
						       MLX4_CMD_2RST_QP,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_qps: failed"
							 " to move slave %d qpn %d to"
							 " reset\n", slave,
							 qp->local_qpn);
					atomic_dec(&qp->rcq->ref_count);
					atomic_dec(&qp->scq->ref_count);
					atomic_dec(&qp->mtt->ref_count);
					if (qp->srq)
						atomic_dec(&qp->srq->ref_count);
					state = RES_QP_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *srq_list =
		&tracker->slave_list[slave].res_list[RES_SRQ];
	struct res_srq *srq;
	struct res_srq *tmp;
	int state;
	u64 in_param;
	LIST_HEAD(tlist);
	int srqn;
	int err;

	err = move_all_busy(dev, slave, RES_SRQ);
	if (err)
		mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (srq->com.owner == slave) {
			srqn = srq->com.res_id;
			state = srq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_SRQ_ALLOCATED:
					__mlx4_srq_free_icm(dev, srqn);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&srq->com.node,
						 &tracker->res_tree[RES_SRQ]);
					list_del(&srq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(srq);
					state = 0;
					break;

				case RES_SRQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, srqn, 1,
						       MLX4_CMD_HW2SW_SRQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_srqs: failed"
							 " to move slave %d srq %d to"
							 " SW ownership\n",
							 slave, srqn);

					atomic_dec(&srq->mtt->ref_count);
					if (srq->cq)
						atomic_dec(&srq->cq->ref_count);
					state = RES_SRQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *cq_list =
		&tracker->slave_list[slave].res_list[RES_CQ];
	struct res_cq *cq;
	struct res_cq *tmp;
	int state;
	u64 in_param;
	LIST_HEAD(tlist);
	int cqn;
	int err;

	err = move_all_busy(dev, slave, RES_CQ);
	if (err)
		mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
			cqn = cq->com.res_id;
			state = cq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_CQ_ALLOCATED:
					__mlx4_cq_free_icm(dev, cqn);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&cq->com.node,
						 &tracker->res_tree[RES_CQ]);
					list_del(&cq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(cq);
					state = 0;
					break;

				case RES_CQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, cqn, 1,
						       MLX4_CMD_HW2SW_CQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_cqs: failed"
							 " to move slave %d cq %d to"
							 " SW ownership\n",
							 slave, cqn);
					atomic_dec(&cq->mtt->ref_count);
					state = RES_CQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mpt_list =
		&tracker->slave_list[slave].res_list[RES_MPT];
	struct res_mpt *mpt;
	struct res_mpt *tmp;
	int state;
	u64 in_param;
	LIST_HEAD(tlist);
	int mptn;
	int err;

	err = move_all_busy(dev, slave, RES_MPT);
	if (err)
		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mpt->com.owner == slave) {
			mptn = mpt->com.res_id;
			state = mpt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MPT_RESERVED:
					__mlx4_mr_release(dev, mpt->key);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mpt->com.node,
						 &tracker->res_tree[RES_MPT]);
					list_del(&mpt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(mpt);
					state = 0;
					break;

				case RES_MPT_MAPPED:
					__mlx4_mr_free_icm(dev, mpt->key);
					state = RES_MPT_RESERVED;
					break;

				case RES_MPT_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, mptn, 0,
						       MLX4_CMD_HW2SW_MPT,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_mrs: failed"
							 " to move slave %d mpt %d to"
							 " SW ownership\n",
							 slave, mptn);
					if (mpt->mtt)
						atomic_dec(&mpt->mtt->ref_count);
					state = RES_MPT_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *mtt_list =
		&tracker->slave_list[slave].res_list[RES_MTT];
	struct res_mtt *mtt;
	struct res_mtt *tmp;
	int state;
	LIST_HEAD(tlist);
	int base;
	int err;

	err = move_all_busy(dev, slave, RES_MTT);
	if (err)
		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mtt->com.owner == slave) {
			base = mtt->com.res_id;
			state = mtt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MTT_ALLOCATED:
					__mlx4_free_mtt_range(dev, base,
							      mtt->order);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mtt->com.node,
						 &tracker->res_tree[RES_MTT]);
					list_del(&mtt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(mtt);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
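
/*
 * EQs still in HW ownership need a mailbox for HW2SW_EQ; if allocation
 * fails, back off and retry the state step rather than leak the EQ.
 */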
static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *eq_list =
		&tracker->slave_list[slave].res_list[RES_EQ];
	struct res_eq *eq;
	struct res_eq *tmp;
	int err;
	int state;
	LIST_HEAD(tlist);
	int eqn;
	struct mlx4_cmd_mailbox *mailbox;

	err = move_all_busy(dev, slave, RES_EQ);
	if (err)
		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (eq->com.owner == slave) {
			eqn = eq->com.res_id;
			state = eq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_EQ_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&eq->com.node,
						 &tracker->res_tree[RES_EQ]);
					list_del(&eq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(eq);
					state = 0;
					break;

				case RES_EQ_HW:
					mailbox = mlx4_alloc_cmd_mailbox(dev);
					if (IS_ERR(mailbox)) {
						cond_resched();
						continue;
					}
					err = mlx4_cmd_box(dev, slave, 0,
							   eqn & 0xff, 0,
							   MLX4_CMD_HW2SW_EQ,
							   MLX4_CMD_TIME_CLASS_A,
							   MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_eqs: failed"
							 " to move slave %d eqs %d to"
							 " SW ownership\n", slave, eqn);
					mlx4_free_cmd_mailbox(dev, mailbox);
					atomic_dec(&eq->mtt->ref_count);
					state = RES_EQ_RESERVED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_counters(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *counter_list =
		&tracker->slave_list[slave].res_list[RES_COUNTER];
	struct res_counter *counter;
	struct res_counter *tmp;
	int err;
	int index;

	err = move_all_busy(dev, slave, RES_COUNTER);
	if (err)
		mlx4_warn(dev, "rem_slave_counters: Could not move all counters to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
		if (counter->com.owner == slave) {
			index = counter->com.res_id;
			rb_erase(&counter->com.node,
				 &tracker->res_tree[RES_COUNTER]);
			list_del(&counter->com.list);
			kfree(counter);
			__mlx4_counter_free(dev, index);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *xrcdn_list =
		&tracker->slave_list[slave].res_list[RES_XRCD];
	struct res_xrcdn *xrcd;
	struct res_xrcdn *tmp;
	int err;
	int xrcdn;

	err = move_all_busy(dev, slave, RES_XRCD);
	if (err)
		mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
		if (xrcd->com.owner == slave) {
			xrcdn = xrcd->com.res_id;
			rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
			list_del(&xrcd->com.list);
			kfree(xrcd);
			__mlx4_xrcd_free(dev, xrcdn);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
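
/*
 * Called when a slave goes away: release, under the slave's tracker
 * mutex, every resource the resource tracker still holds for it.
 */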
void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
	/*VLAN*/
	rem_slave_macs(dev, slave);
	rem_slave_qps(dev, slave);
	rem_slave_srqs(dev, slave);
	rem_slave_cqs(dev, slave);
	rem_slave_mrs(dev, slave);
	rem_slave_eqs(dev, slave);
	rem_slave_mtts(dev, slave);
	rem_slave_counters(dev, slave);
	rem_slave_xrcdns(dev, slave);
	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}