mlx4_core: Fix mtt profile issue
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
4 * All rights reserved.
5 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
35
36#include <linux/sched.h>
37#include <linux/pci.h>
38#include <linux/errno.h>
39#include <linux/kernel.h>
40#include <linux/io.h>
 41#include <linux/slab.h>
42#include <linux/mlx4/cmd.h>
43#include <linux/mlx4/qp.h>
44
45#include "mlx4.h"
46#include "fw.h"
47
48#define MLX4_MAC_VALID (1ull << 63)
49#define MLX4_MAC_MASK 0x7fffffffffffffffULL
50#define ETH_ALEN 6
51
52struct mac_res {
53 struct list_head list;
54 u64 mac;
55 u8 port;
56};
57
58struct res_common {
59 struct list_head list;
60 u32 res_id;
61 int owner;
62 int state;
63 int from_state;
64 int to_state;
65 int removing;
66};
67
68enum {
69 RES_ANY_BUSY = 1
70};
71
72struct res_gid {
73 struct list_head list;
74 u8 gid[16];
75 enum mlx4_protocol prot;
76};
77
78enum res_qp_states {
79 RES_QP_BUSY = RES_ANY_BUSY,
80
81 /* QP number was allocated */
82 RES_QP_RESERVED,
83
84 /* ICM memory for QP context was mapped */
85 RES_QP_MAPPED,
86
87 /* QP is in hw ownership */
88 RES_QP_HW
89};
90
91static inline const char *qp_states_str(enum res_qp_states state)
92{
93 switch (state) {
94 case RES_QP_BUSY: return "RES_QP_BUSY";
95 case RES_QP_RESERVED: return "RES_QP_RESERVED";
96 case RES_QP_MAPPED: return "RES_QP_MAPPED";
97 case RES_QP_HW: return "RES_QP_HW";
98 default: return "Unknown";
99 }
100}
101
102struct res_qp {
103 struct res_common com;
104 struct res_mtt *mtt;
105 struct res_cq *rcq;
106 struct res_cq *scq;
107 struct res_srq *srq;
108 struct list_head mcg_list;
109 spinlock_t mcg_spl;
110 int local_qpn;
111};
112
113enum res_mtt_states {
114 RES_MTT_BUSY = RES_ANY_BUSY,
115 RES_MTT_ALLOCATED,
116};
117
118static inline const char *mtt_states_str(enum res_mtt_states state)
119{
120 switch (state) {
121 case RES_MTT_BUSY: return "RES_MTT_BUSY";
122 case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
123 default: return "Unknown";
124 }
125}
126
127struct res_mtt {
128 struct res_common com;
129 int order;
130 atomic_t ref_count;
131};
132
133enum res_mpt_states {
134 RES_MPT_BUSY = RES_ANY_BUSY,
135 RES_MPT_RESERVED,
136 RES_MPT_MAPPED,
137 RES_MPT_HW,
138};
139
140struct res_mpt {
141 struct res_common com;
142 struct res_mtt *mtt;
143 int key;
144};
145
146enum res_eq_states {
147 RES_EQ_BUSY = RES_ANY_BUSY,
148 RES_EQ_RESERVED,
149 RES_EQ_HW,
150};
151
152struct res_eq {
153 struct res_common com;
154 struct res_mtt *mtt;
155};
156
157enum res_cq_states {
158 RES_CQ_BUSY = RES_ANY_BUSY,
159 RES_CQ_ALLOCATED,
160 RES_CQ_HW,
161};
162
163struct res_cq {
164 struct res_common com;
165 struct res_mtt *mtt;
166 atomic_t ref_count;
167};
168
169enum res_srq_states {
170 RES_SRQ_BUSY = RES_ANY_BUSY,
171 RES_SRQ_ALLOCATED,
172 RES_SRQ_HW,
173};
174
175static inline const char *srq_states_str(enum res_srq_states state)
176{
177 switch (state) {
178 case RES_SRQ_BUSY: return "RES_SRQ_BUSY";
179 case RES_SRQ_ALLOCATED: return "RES_SRQ_ALLOCATED";
180 case RES_SRQ_HW: return "RES_SRQ_HW";
181 default: return "Unknown";
182 }
183}
184
185struct res_srq {
186 struct res_common com;
187 struct res_mtt *mtt;
188 struct res_cq *cq;
189 atomic_t ref_count;
190};
191
192enum res_counter_states {
193 RES_COUNTER_BUSY = RES_ANY_BUSY,
194 RES_COUNTER_ALLOCATED,
195};
196
197static inline const char *counter_states_str(enum res_counter_states state)
198{
199 switch (state) {
200 case RES_COUNTER_BUSY: return "RES_COUNTER_BUSY";
201 case RES_COUNTER_ALLOCATED: return "RES_COUNTER_ALLOCATED";
202 default: return "Unknown";
203 }
204}
205
206struct res_counter {
207 struct res_common com;
208 int port;
209};
210
 211/* For debug purposes only */
212static const char *ResourceType(enum mlx4_resource rt)
213{
214 switch (rt) {
215 case RES_QP: return "RES_QP";
216 case RES_CQ: return "RES_CQ";
217 case RES_SRQ: return "RES_SRQ";
218 case RES_MPT: return "RES_MPT";
219 case RES_MTT: return "RES_MTT";
220 case RES_MAC: return "RES_MAC";
221 case RES_EQ: return "RES_EQ";
222 case RES_COUNTER: return "RES_COUNTER";
223 default: return "Unknown resource type !!!";
224 };
225}
226
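/*
 * Resource tracker bookkeeping (how the structures below are used): each
 * slave gets a per-type list of the resources it owns, and each resource
 * type has a radix tree keyed by resource id so the master can look up an
 * entry and its owner quickly.  Both views are protected by the tracker
 * spinlock (mlx4_tlock).
 */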
227int mlx4_init_resource_tracker(struct mlx4_dev *dev)
228{
229 struct mlx4_priv *priv = mlx4_priv(dev);
230 int i;
231 int t;
232
233 priv->mfunc.master.res_tracker.slave_list =
234 kzalloc(dev->num_slaves * sizeof(struct slave_list),
235 GFP_KERNEL);
236 if (!priv->mfunc.master.res_tracker.slave_list)
237 return -ENOMEM;
238
239 for (i = 0 ; i < dev->num_slaves; i++) {
240 for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
241 INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
242 slave_list[i].res_list[t]);
243 mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
244 }
245
246 mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
247 dev->num_slaves);
248 for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
249 INIT_RADIX_TREE(&priv->mfunc.master.res_tracker.res_tree[i],
250 GFP_ATOMIC|__GFP_NOWARN);
251
252 spin_lock_init(&priv->mfunc.master.res_tracker.lock);
 253 return 0;
254}
255
256void mlx4_free_resource_tracker(struct mlx4_dev *dev)
257{
258 struct mlx4_priv *priv = mlx4_priv(dev);
259 int i;
260
261 if (priv->mfunc.master.res_tracker.slave_list) {
262 for (i = 0 ; i < dev->num_slaves; i++)
263 mlx4_delete_all_resources_for_slave(dev, i);
264
265 kfree(priv->mfunc.master.res_tracker.slave_list);
266 }
267}
268
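/*
 * For UD QPs the paravirtualized GID index is rewritten so that bit 7 is
 * set and the low bits carry the slave number (0x80 | slave); other
 * transport types are left untouched.
 */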
269static void update_ud_gid(struct mlx4_dev *dev,
270 struct mlx4_qp_context *qp_ctx, u8 slave)
271{
272 u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
273
274 if (MLX4_QP_ST_UD == ts)
275 qp_ctx->pri_path.mgid_index = 0x80 | slave;
276
277 mlx4_dbg(dev, "slave %d, new gid index: 0x%x ",
278 slave, qp_ctx->pri_path.mgid_index);
279}
280
281static int mpt_mask(struct mlx4_dev *dev)
282{
283 return dev->caps.num_mpts - 1;
284}
285
286static void *find_res(struct mlx4_dev *dev, int res_id,
287 enum mlx4_resource type)
288{
289 struct mlx4_priv *priv = mlx4_priv(dev);
290
291 return radix_tree_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
292 res_id);
293}
294
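/*
 * get_res()/put_res() implement a simple busy-locking protocol: get_res()
 * verifies that the resource exists and is owned by @slave, saves its
 * current state and marks it RES_ANY_BUSY so no other flow can move it;
 * put_res() restores the saved state.
 */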
295static int get_res(struct mlx4_dev *dev, int slave, int res_id,
296 enum mlx4_resource type,
297 void *res)
298{
299 struct res_common *r;
300 int err = 0;
301
302 spin_lock_irq(mlx4_tlock(dev));
303 r = find_res(dev, res_id, type);
304 if (!r) {
 305 err = -ENOENT;
306 goto exit;
307 }
308
309 if (r->state == RES_ANY_BUSY) {
310 err = -EBUSY;
311 goto exit;
312 }
313
314 if (r->owner != slave) {
315 err = -EPERM;
316 goto exit;
317 }
318
319 r->from_state = r->state;
320 r->state = RES_ANY_BUSY;
321 mlx4_dbg(dev, "res %s id 0x%x to busy\n",
322 ResourceType(type), r->res_id);
323
324 if (res)
325 *((struct res_common **)res) = r;
326
327exit:
328 spin_unlock_irq(mlx4_tlock(dev));
329 return err;
330}
331
332int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
333 enum mlx4_resource type,
334 int res_id, int *slave)
335{
336
337 struct res_common *r;
338 int err = -ENOENT;
339 int id = res_id;
340
341 if (type == RES_QP)
342 id &= 0x7fffff;
 343 spin_lock(mlx4_tlock(dev));
344
345 r = find_res(dev, id, type);
346 if (r) {
347 *slave = r->owner;
348 err = 0;
349 }
 350 spin_unlock(mlx4_tlock(dev));
351
352 return err;
353}
354
355static void put_res(struct mlx4_dev *dev, int slave, int res_id,
356 enum mlx4_resource type)
357{
358 struct res_common *r;
359
360 spin_lock_irq(mlx4_tlock(dev));
361 r = find_res(dev, res_id, type);
362 if (r)
363 r->state = r->from_state;
364 spin_unlock_irq(mlx4_tlock(dev));
365}
366
367static struct res_common *alloc_qp_tr(int id)
368{
369 struct res_qp *ret;
370
371 ret = kzalloc(sizeof *ret, GFP_KERNEL);
372 if (!ret)
373 return NULL;
374
375 ret->com.res_id = id;
376 ret->com.state = RES_QP_RESERVED;
377 INIT_LIST_HEAD(&ret->mcg_list);
378 spin_lock_init(&ret->mcg_spl);
379
380 return &ret->com;
381}
382
383static struct res_common *alloc_mtt_tr(int id, int order)
384{
385 struct res_mtt *ret;
386
387 ret = kzalloc(sizeof *ret, GFP_KERNEL);
388 if (!ret)
389 return NULL;
390
391 ret->com.res_id = id;
392 ret->order = order;
393 ret->com.state = RES_MTT_ALLOCATED;
394 atomic_set(&ret->ref_count, 0);
395
396 return &ret->com;
397}
398
399static struct res_common *alloc_mpt_tr(int id, int key)
400{
401 struct res_mpt *ret;
402
403 ret = kzalloc(sizeof *ret, GFP_KERNEL);
404 if (!ret)
405 return NULL;
406
407 ret->com.res_id = id;
408 ret->com.state = RES_MPT_RESERVED;
409 ret->key = key;
410
411 return &ret->com;
412}
413
414static struct res_common *alloc_eq_tr(int id)
415{
416 struct res_eq *ret;
417
418 ret = kzalloc(sizeof *ret, GFP_KERNEL);
419 if (!ret)
420 return NULL;
421
422 ret->com.res_id = id;
423 ret->com.state = RES_EQ_RESERVED;
424
425 return &ret->com;
426}
427
428static struct res_common *alloc_cq_tr(int id)
429{
430 struct res_cq *ret;
431
432 ret = kzalloc(sizeof *ret, GFP_KERNEL);
433 if (!ret)
434 return NULL;
435
436 ret->com.res_id = id;
437 ret->com.state = RES_CQ_ALLOCATED;
438 atomic_set(&ret->ref_count, 0);
439
440 return &ret->com;
441}
442
443static struct res_common *alloc_srq_tr(int id)
444{
445 struct res_srq *ret;
446
447 ret = kzalloc(sizeof *ret, GFP_KERNEL);
448 if (!ret)
449 return NULL;
450
451 ret->com.res_id = id;
452 ret->com.state = RES_SRQ_ALLOCATED;
453 atomic_set(&ret->ref_count, 0);
454
455 return &ret->com;
456}
457
458static struct res_common *alloc_counter_tr(int id)
459{
460 struct res_counter *ret;
461
462 ret = kzalloc(sizeof *ret, GFP_KERNEL);
463 if (!ret)
464 return NULL;
465
466 ret->com.res_id = id;
467 ret->com.state = RES_COUNTER_ALLOCATED;
468
469 return &ret->com;
470}
471
472static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave,
473 int extra)
474{
475 struct res_common *ret;
476
477 switch (type) {
478 case RES_QP:
479 ret = alloc_qp_tr(id);
480 break;
481 case RES_MPT:
482 ret = alloc_mpt_tr(id, extra);
483 break;
484 case RES_MTT:
485 ret = alloc_mtt_tr(id, extra);
486 break;
487 case RES_EQ:
488 ret = alloc_eq_tr(id);
489 break;
490 case RES_CQ:
491 ret = alloc_cq_tr(id);
492 break;
493 case RES_SRQ:
494 ret = alloc_srq_tr(id);
495 break;
496 case RES_MAC:
497 printk(KERN_ERR "implementation missing\n");
498 return NULL;
499 case RES_COUNTER:
500 ret = alloc_counter_tr(id);
501 break;
502
503 default:
504 return NULL;
505 }
506 if (ret)
507 ret->owner = slave;
508
509 return ret;
510}
511
512static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
513 enum mlx4_resource type, int extra)
514{
515 int i;
516 int err;
517 struct mlx4_priv *priv = mlx4_priv(dev);
518 struct res_common **res_arr;
519 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
520 struct radix_tree_root *root = &tracker->res_tree[type];
521
522 res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
523 if (!res_arr)
524 return -ENOMEM;
525
526 for (i = 0; i < count; ++i) {
527 res_arr[i] = alloc_tr(base + i, type, slave, extra);
528 if (!res_arr[i]) {
529 for (--i; i >= 0; --i)
530 kfree(res_arr[i]);
531
532 kfree(res_arr);
533 return -ENOMEM;
534 }
535 }
536
537 spin_lock_irq(mlx4_tlock(dev));
538 for (i = 0; i < count; ++i) {
539 if (find_res(dev, base + i, type)) {
540 err = -EEXIST;
541 goto undo;
542 }
543 err = radix_tree_insert(root, base + i, res_arr[i]);
544 if (err)
545 goto undo;
546 list_add_tail(&res_arr[i]->list,
547 &tracker->slave_list[slave].res_list[type]);
548 }
549 spin_unlock_irq(mlx4_tlock(dev));
550 kfree(res_arr);
551
552 return 0;
553
554undo:
 555 for (--i; i >= 0; --i)
 556 radix_tree_delete(&tracker->res_tree[type], base + i);
557
558 spin_unlock_irq(mlx4_tlock(dev));
559
560 for (i = 0; i < count; ++i)
561 kfree(res_arr[i]);
562
563 kfree(res_arr);
564
565 return err;
566}
567
568static int remove_qp_ok(struct res_qp *res)
569{
570 if (res->com.state == RES_QP_BUSY)
571 return -EBUSY;
572 else if (res->com.state != RES_QP_RESERVED)
573 return -EPERM;
574
575 return 0;
576}
577
578static int remove_mtt_ok(struct res_mtt *res, int order)
579{
580 if (res->com.state == RES_MTT_BUSY ||
581 atomic_read(&res->ref_count)) {
582 printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
583 __func__, __LINE__,
584 mtt_states_str(res->com.state),
585 atomic_read(&res->ref_count));
586 return -EBUSY;
587 } else if (res->com.state != RES_MTT_ALLOCATED)
588 return -EPERM;
589 else if (res->order != order)
590 return -EINVAL;
591
592 return 0;
593}
594
595static int remove_mpt_ok(struct res_mpt *res)
596{
597 if (res->com.state == RES_MPT_BUSY)
598 return -EBUSY;
599 else if (res->com.state != RES_MPT_RESERVED)
600 return -EPERM;
601
602 return 0;
603}
604
605static int remove_eq_ok(struct res_eq *res)
606{
 607 if (res->com.state == RES_EQ_BUSY)
 608 return -EBUSY;
 609 else if (res->com.state != RES_EQ_RESERVED)
 610 return -EPERM;
611
612 return 0;
613}
614
615static int remove_counter_ok(struct res_counter *res)
616{
617 if (res->com.state == RES_COUNTER_BUSY)
618 return -EBUSY;
619 else if (res->com.state != RES_COUNTER_ALLOCATED)
620 return -EPERM;
621
622 return 0;
623}
624
625static int remove_cq_ok(struct res_cq *res)
626{
627 if (res->com.state == RES_CQ_BUSY)
628 return -EBUSY;
629 else if (res->com.state != RES_CQ_ALLOCATED)
630 return -EPERM;
631
632 return 0;
633}
634
635static int remove_srq_ok(struct res_srq *res)
636{
637 if (res->com.state == RES_SRQ_BUSY)
638 return -EBUSY;
639 else if (res->com.state != RES_SRQ_ALLOCATED)
640 return -EPERM;
641
642 return 0;
643}
644
645static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
646{
647 switch (type) {
648 case RES_QP:
649 return remove_qp_ok((struct res_qp *)res);
650 case RES_CQ:
651 return remove_cq_ok((struct res_cq *)res);
652 case RES_SRQ:
653 return remove_srq_ok((struct res_srq *)res);
654 case RES_MPT:
655 return remove_mpt_ok((struct res_mpt *)res);
656 case RES_MTT:
657 return remove_mtt_ok((struct res_mtt *)res, extra);
658 case RES_MAC:
659 return -ENOSYS;
660 case RES_EQ:
661 return remove_eq_ok((struct res_eq *)res);
662 case RES_COUNTER:
663 return remove_counter_ok((struct res_counter *)res);
664 default:
665 return -EINVAL;
666 }
667}
668
669static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count,
670 enum mlx4_resource type, int extra)
671{
672 int i;
673 int err;
674 struct mlx4_priv *priv = mlx4_priv(dev);
675 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
676 struct res_common *r;
677
678 spin_lock_irq(mlx4_tlock(dev));
679 for (i = base; i < base + count; ++i) {
680 r = radix_tree_lookup(&tracker->res_tree[type], i);
681 if (!r) {
682 err = -ENOENT;
683 goto out;
684 }
685 if (r->owner != slave) {
686 err = -EPERM;
687 goto out;
688 }
689 err = remove_ok(r, type, extra);
690 if (err)
691 goto out;
692 }
693
694 for (i = base; i < base + count; ++i) {
695 r = radix_tree_lookup(&tracker->res_tree[type], i);
696 radix_tree_delete(&tracker->res_tree[type], i);
697 list_del(&r->list);
698 kfree(r);
699 }
700 err = 0;
701
702out:
703 spin_unlock_irq(mlx4_tlock(dev));
704
705 return err;
706}
707
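/*
 * State transitions are done in two phases: *_res_start_move_to() checks
 * that the requested transition is legal, records from_state/to_state and
 * marks the resource busy; the caller then either commits the move with
 * res_end_move() or rolls it back with res_abort_move() if the firmware
 * command failed.
 */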
708static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
709 enum res_qp_states state, struct res_qp **qp,
710 int alloc)
711{
712 struct mlx4_priv *priv = mlx4_priv(dev);
713 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
714 struct res_qp *r;
715 int err = 0;
716
717 spin_lock_irq(mlx4_tlock(dev));
718 r = radix_tree_lookup(&tracker->res_tree[RES_QP], qpn);
719 if (!r)
720 err = -ENOENT;
721 else if (r->com.owner != slave)
722 err = -EPERM;
723 else {
724 switch (state) {
725 case RES_QP_BUSY:
726 mlx4_dbg(dev, "%s: failed RES_QP, 0x%x\n",
727 __func__, r->com.res_id);
728 err = -EBUSY;
729 break;
730
731 case RES_QP_RESERVED:
732 if (r->com.state == RES_QP_MAPPED && !alloc)
733 break;
734
735 mlx4_dbg(dev, "failed RES_QP, 0x%x\n", r->com.res_id);
736 err = -EINVAL;
737 break;
738
739 case RES_QP_MAPPED:
740 if ((r->com.state == RES_QP_RESERVED && alloc) ||
741 r->com.state == RES_QP_HW)
742 break;
743 else {
744 mlx4_dbg(dev, "failed RES_QP, 0x%x\n",
745 r->com.res_id);
746 err = -EINVAL;
747 }
748
749 break;
750
751 case RES_QP_HW:
752 if (r->com.state != RES_QP_MAPPED)
753 err = -EINVAL;
754 break;
755 default:
756 err = -EINVAL;
757 }
758
759 if (!err) {
760 r->com.from_state = r->com.state;
761 r->com.to_state = state;
762 r->com.state = RES_QP_BUSY;
763 if (qp)
764 *qp = (struct res_qp *)r;
765 }
766 }
767
768 spin_unlock_irq(mlx4_tlock(dev));
769
770 return err;
771}
772
773static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
774 enum res_mpt_states state, struct res_mpt **mpt)
775{
776 struct mlx4_priv *priv = mlx4_priv(dev);
777 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
778 struct res_mpt *r;
779 int err = 0;
780
781 spin_lock_irq(mlx4_tlock(dev));
782 r = radix_tree_lookup(&tracker->res_tree[RES_MPT], index);
783 if (!r)
784 err = -ENOENT;
785 else if (r->com.owner != slave)
786 err = -EPERM;
787 else {
788 switch (state) {
789 case RES_MPT_BUSY:
790 err = -EINVAL;
791 break;
792
793 case RES_MPT_RESERVED:
794 if (r->com.state != RES_MPT_MAPPED)
795 err = -EINVAL;
796 break;
797
798 case RES_MPT_MAPPED:
799 if (r->com.state != RES_MPT_RESERVED &&
800 r->com.state != RES_MPT_HW)
801 err = -EINVAL;
802 break;
803
804 case RES_MPT_HW:
805 if (r->com.state != RES_MPT_MAPPED)
806 err = -EINVAL;
807 break;
808 default:
809 err = -EINVAL;
810 }
811
812 if (!err) {
813 r->com.from_state = r->com.state;
814 r->com.to_state = state;
815 r->com.state = RES_MPT_BUSY;
816 if (mpt)
817 *mpt = (struct res_mpt *)r;
818 }
819 }
820
821 spin_unlock_irq(mlx4_tlock(dev));
822
823 return err;
824}
825
826static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
827 enum res_eq_states state, struct res_eq **eq)
828{
829 struct mlx4_priv *priv = mlx4_priv(dev);
830 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
831 struct res_eq *r;
832 int err = 0;
833
834 spin_lock_irq(mlx4_tlock(dev));
835 r = radix_tree_lookup(&tracker->res_tree[RES_EQ], index);
836 if (!r)
837 err = -ENOENT;
838 else if (r->com.owner != slave)
839 err = -EPERM;
840 else {
841 switch (state) {
842 case RES_EQ_BUSY:
843 err = -EINVAL;
844 break;
845
846 case RES_EQ_RESERVED:
847 if (r->com.state != RES_EQ_HW)
848 err = -EINVAL;
849 break;
850
851 case RES_EQ_HW:
852 if (r->com.state != RES_EQ_RESERVED)
853 err = -EINVAL;
854 break;
855
856 default:
857 err = -EINVAL;
858 }
859
860 if (!err) {
861 r->com.from_state = r->com.state;
862 r->com.to_state = state;
863 r->com.state = RES_EQ_BUSY;
864 if (eq)
865 *eq = r;
866 }
867 }
868
869 spin_unlock_irq(mlx4_tlock(dev));
870
871 return err;
872}
873
874static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
875 enum res_cq_states state, struct res_cq **cq)
876{
877 struct mlx4_priv *priv = mlx4_priv(dev);
878 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
879 struct res_cq *r;
880 int err;
881
882 spin_lock_irq(mlx4_tlock(dev));
883 r = radix_tree_lookup(&tracker->res_tree[RES_CQ], cqn);
884 if (!r)
885 err = -ENOENT;
886 else if (r->com.owner != slave)
887 err = -EPERM;
888 else {
889 switch (state) {
890 case RES_CQ_BUSY:
891 err = -EBUSY;
892 break;
893
894 case RES_CQ_ALLOCATED:
895 if (r->com.state != RES_CQ_HW)
896 err = -EINVAL;
897 else if (atomic_read(&r->ref_count))
898 err = -EBUSY;
899 else
900 err = 0;
901 break;
902
903 case RES_CQ_HW:
904 if (r->com.state != RES_CQ_ALLOCATED)
905 err = -EINVAL;
906 else
907 err = 0;
908 break;
909
910 default:
911 err = -EINVAL;
912 }
913
914 if (!err) {
915 r->com.from_state = r->com.state;
916 r->com.to_state = state;
917 r->com.state = RES_CQ_BUSY;
918 if (cq)
919 *cq = r;
920 }
921 }
922
923 spin_unlock_irq(mlx4_tlock(dev));
924
925 return err;
926}
927
928static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
929 enum res_cq_states state, struct res_srq **srq)
930{
931 struct mlx4_priv *priv = mlx4_priv(dev);
932 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
933 struct res_srq *r;
934 int err = 0;
935
936 spin_lock_irq(mlx4_tlock(dev));
937 r = radix_tree_lookup(&tracker->res_tree[RES_SRQ], index);
938 if (!r)
939 err = -ENOENT;
940 else if (r->com.owner != slave)
941 err = -EPERM;
942 else {
943 switch (state) {
944 case RES_SRQ_BUSY:
945 err = -EINVAL;
946 break;
947
948 case RES_SRQ_ALLOCATED:
949 if (r->com.state != RES_SRQ_HW)
950 err = -EINVAL;
951 else if (atomic_read(&r->ref_count))
952 err = -EBUSY;
953 break;
954
955 case RES_SRQ_HW:
956 if (r->com.state != RES_SRQ_ALLOCATED)
957 err = -EINVAL;
958 break;
959
960 default:
961 err = -EINVAL;
962 }
963
964 if (!err) {
965 r->com.from_state = r->com.state;
966 r->com.to_state = state;
967 r->com.state = RES_SRQ_BUSY;
968 if (srq)
969 *srq = r;
970 }
971 }
972
973 spin_unlock_irq(mlx4_tlock(dev));
974
975 return err;
976}
977
978static void res_abort_move(struct mlx4_dev *dev, int slave,
979 enum mlx4_resource type, int id)
980{
981 struct mlx4_priv *priv = mlx4_priv(dev);
982 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
983 struct res_common *r;
984
985 spin_lock_irq(mlx4_tlock(dev));
986 r = radix_tree_lookup(&tracker->res_tree[type], id);
987 if (r && (r->owner == slave))
988 r->state = r->from_state;
989 spin_unlock_irq(mlx4_tlock(dev));
990}
991
992static void res_end_move(struct mlx4_dev *dev, int slave,
993 enum mlx4_resource type, int id)
994{
995 struct mlx4_priv *priv = mlx4_priv(dev);
996 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
997 struct res_common *r;
998
999 spin_lock_irq(mlx4_tlock(dev));
1000 r = radix_tree_lookup(&tracker->res_tree[type], id);
1001 if (r && (r->owner == slave))
1002 r->state = r->to_state;
1003 spin_unlock_irq(mlx4_tlock(dev));
1004}
1005
1006static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1007{
1008 return mlx4_is_qp_reserved(dev, qpn);
1009}
1010
1011static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1012 u64 in_param, u64 *out_param)
1013{
1014 int err;
1015 int count;
1016 int align;
1017 int base;
1018 int qpn;
1019
1020 switch (op) {
1021 case RES_OP_RESERVE:
1022 count = get_param_l(&in_param);
1023 align = get_param_h(&in_param);
1024 err = __mlx4_qp_reserve_range(dev, count, align, &base);
1025 if (err)
1026 return err;
1027
1028 err = add_res_range(dev, slave, base, count, RES_QP, 0);
1029 if (err) {
1030 __mlx4_qp_release_range(dev, base, count);
1031 return err;
1032 }
1033 set_param_l(out_param, base);
1034 break;
1035 case RES_OP_MAP_ICM:
1036 qpn = get_param_l(&in_param) & 0x7fffff;
1037 if (valid_reserved(dev, slave, qpn)) {
1038 err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1039 if (err)
1040 return err;
1041 }
1042
1043 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1044 NULL, 1);
1045 if (err)
1046 return err;
1047
1048 if (!valid_reserved(dev, slave, qpn)) {
1049 err = __mlx4_qp_alloc_icm(dev, qpn);
1050 if (err) {
1051 res_abort_move(dev, slave, RES_QP, qpn);
1052 return err;
1053 }
1054 }
1055
1056 res_end_move(dev, slave, RES_QP, qpn);
1057 break;
1058
1059 default:
1060 err = -EINVAL;
1061 break;
1062 }
1063 return err;
1064}
1065
1066static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1067 u64 in_param, u64 *out_param)
1068{
1069 int err = -EINVAL;
1070 int base;
1071 int order;
1072
1073 if (op != RES_OP_RESERVE_AND_MAP)
1074 return err;
1075
1076 order = get_param_l(&in_param);
1077 base = __mlx4_alloc_mtt_range(dev, order);
1078 if (base == -1)
1079 return -ENOMEM;
1080
1081 err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1082 if (err)
1083 __mlx4_free_mtt_range(dev, base, order);
1084 else
1085 set_param_l(out_param, base);
1086
1087 return err;
1088}
1089
1090static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1091 u64 in_param, u64 *out_param)
1092{
1093 int err = -EINVAL;
1094 int index;
1095 int id;
1096 struct res_mpt *mpt;
1097
1098 switch (op) {
1099 case RES_OP_RESERVE:
1100 index = __mlx4_mr_reserve(dev);
1101 if (index == -1)
1102 break;
1103 id = index & mpt_mask(dev);
1104
1105 err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1106 if (err) {
1107 __mlx4_mr_release(dev, index);
1108 break;
1109 }
1110 set_param_l(out_param, index);
1111 break;
1112 case RES_OP_MAP_ICM:
1113 index = get_param_l(&in_param);
1114 id = index & mpt_mask(dev);
1115 err = mr_res_start_move_to(dev, slave, id,
1116 RES_MPT_MAPPED, &mpt);
1117 if (err)
1118 return err;
1119
1120 err = __mlx4_mr_alloc_icm(dev, mpt->key);
1121 if (err) {
1122 res_abort_move(dev, slave, RES_MPT, id);
1123 return err;
1124 }
1125
1126 res_end_move(dev, slave, RES_MPT, id);
1127 break;
1128 }
1129 return err;
1130}
1131
1132static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1133 u64 in_param, u64 *out_param)
1134{
1135 int cqn;
1136 int err;
1137
1138 switch (op) {
1139 case RES_OP_RESERVE_AND_MAP:
1140 err = __mlx4_cq_alloc_icm(dev, &cqn);
1141 if (err)
1142 break;
1143
1144 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1145 if (err) {
1146 __mlx4_cq_free_icm(dev, cqn);
1147 break;
1148 }
1149
1150 set_param_l(out_param, cqn);
1151 break;
1152
1153 default:
1154 err = -EINVAL;
1155 }
1156
1157 return err;
1158}
1159
1160static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1161 u64 in_param, u64 *out_param)
1162{
1163 int srqn;
1164 int err;
1165
1166 switch (op) {
1167 case RES_OP_RESERVE_AND_MAP:
1168 err = __mlx4_srq_alloc_icm(dev, &srqn);
1169 if (err)
1170 break;
1171
1172 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1173 if (err) {
1174 __mlx4_srq_free_icm(dev, srqn);
1175 break;
1176 }
1177
1178 set_param_l(out_param, srqn);
1179 break;
1180
1181 default:
1182 err = -EINVAL;
1183 }
1184
1185 return err;
1186}
1187
1188static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
1189{
1190 struct mlx4_priv *priv = mlx4_priv(dev);
1191 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1192 struct mac_res *res;
1193
1194 res = kzalloc(sizeof *res, GFP_KERNEL);
1195 if (!res)
1196 return -ENOMEM;
1197 res->mac = mac;
1198 res->port = (u8) port;
1199 list_add_tail(&res->list,
1200 &tracker->slave_list[slave].res_list[RES_MAC]);
1201 return 0;
1202}
1203
1204static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1205 int port)
1206{
1207 struct mlx4_priv *priv = mlx4_priv(dev);
1208 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1209 struct list_head *mac_list =
1210 &tracker->slave_list[slave].res_list[RES_MAC];
1211 struct mac_res *res, *tmp;
1212
1213 list_for_each_entry_safe(res, tmp, mac_list, list) {
1214 if (res->mac == mac && res->port == (u8) port) {
1215 list_del(&res->list);
1216 kfree(res);
1217 break;
1218 }
1219 }
1220}
1221
1222static void rem_slave_macs(struct mlx4_dev *dev, int slave)
1223{
1224 struct mlx4_priv *priv = mlx4_priv(dev);
1225 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1226 struct list_head *mac_list =
1227 &tracker->slave_list[slave].res_list[RES_MAC];
1228 struct mac_res *res, *tmp;
1229
1230 list_for_each_entry_safe(res, tmp, mac_list, list) {
1231 list_del(&res->list);
1232 __mlx4_unregister_mac(dev, res->port, res->mac);
1233 kfree(res);
1234 }
1235}
1236
1237static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1238 u64 in_param, u64 *out_param)
1239{
1240 int err = -EINVAL;
1241 int port;
1242 u64 mac;
1243
1244 if (op != RES_OP_RESERVE_AND_MAP)
1245 return err;
1246
1247 port = get_param_l(out_param);
1248 mac = in_param;
1249
1250 err = __mlx4_register_mac(dev, port, mac);
1251 if (err >= 0) {
1252 set_param_l(out_param, err);
1253 err = 0;
1254 }
1255
1256 if (!err) {
1257 err = mac_add_to_slave(dev, slave, mac, port);
1258 if (err)
1259 __mlx4_unregister_mac(dev, port, mac);
1260 }
1261 return err;
1262}
1263
1264static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1265 u64 in_param, u64 *out_param)
1266{
1267 return 0;
1268}
1269
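/*
 * ALLOC_RES dispatcher: vhcr->in_modifier selects the resource type and
 * vhcr->op_modifier selects the operation (reserve, map ICM, or both).
 */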
1270int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
1271 struct mlx4_vhcr *vhcr,
1272 struct mlx4_cmd_mailbox *inbox,
1273 struct mlx4_cmd_mailbox *outbox,
1274 struct mlx4_cmd_info *cmd)
1275{
1276 int err;
1277 int alop = vhcr->op_modifier;
1278
1279 switch (vhcr->in_modifier) {
1280 case RES_QP:
1281 err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
1282 vhcr->in_param, &vhcr->out_param);
1283 break;
1284
1285 case RES_MTT:
1286 err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
1287 vhcr->in_param, &vhcr->out_param);
1288 break;
1289
1290 case RES_MPT:
1291 err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
1292 vhcr->in_param, &vhcr->out_param);
1293 break;
1294
1295 case RES_CQ:
1296 err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
1297 vhcr->in_param, &vhcr->out_param);
1298 break;
1299
1300 case RES_SRQ:
1301 err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
1302 vhcr->in_param, &vhcr->out_param);
1303 break;
1304
1305 case RES_MAC:
1306 err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
1307 vhcr->in_param, &vhcr->out_param);
1308 break;
1309
1310 case RES_VLAN:
1311 err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
1312 vhcr->in_param, &vhcr->out_param);
1313 break;
1314
1315 default:
1316 err = -EINVAL;
1317 break;
1318 }
1319
1320 return err;
1321}
1322
1323static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1324 u64 in_param)
1325{
1326 int err;
1327 int count;
1328 int base;
1329 int qpn;
1330
1331 switch (op) {
1332 case RES_OP_RESERVE:
1333 base = get_param_l(&in_param) & 0x7fffff;
1334 count = get_param_h(&in_param);
1335 err = rem_res_range(dev, slave, base, count, RES_QP, 0);
1336 if (err)
1337 break;
1338 __mlx4_qp_release_range(dev, base, count);
1339 break;
1340 case RES_OP_MAP_ICM:
1341 qpn = get_param_l(&in_param) & 0x7fffff;
1342 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
1343 NULL, 0);
1344 if (err)
1345 return err;
1346
1347 if (!valid_reserved(dev, slave, qpn))
1348 __mlx4_qp_free_icm(dev, qpn);
1349
1350 res_end_move(dev, slave, RES_QP, qpn);
1351
1352 if (valid_reserved(dev, slave, qpn))
1353 err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
1354 break;
1355 default:
1356 err = -EINVAL;
1357 break;
1358 }
1359 return err;
1360}
1361
1362static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1363 u64 in_param, u64 *out_param)
1364{
1365 int err = -EINVAL;
1366 int base;
1367 int order;
1368
1369 if (op != RES_OP_RESERVE_AND_MAP)
1370 return err;
1371
1372 base = get_param_l(&in_param);
1373 order = get_param_h(&in_param);
1374 err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
1375 if (!err)
1376 __mlx4_free_mtt_range(dev, base, order);
1377 return err;
1378}
1379
1380static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1381 u64 in_param)
1382{
1383 int err = -EINVAL;
1384 int index;
1385 int id;
1386 struct res_mpt *mpt;
1387
1388 switch (op) {
1389 case RES_OP_RESERVE:
1390 index = get_param_l(&in_param);
1391 id = index & mpt_mask(dev);
1392 err = get_res(dev, slave, id, RES_MPT, &mpt);
1393 if (err)
1394 break;
1395 index = mpt->key;
1396 put_res(dev, slave, id, RES_MPT);
1397
1398 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
1399 if (err)
1400 break;
1401 __mlx4_mr_release(dev, index);
1402 break;
1403 case RES_OP_MAP_ICM:
1404 index = get_param_l(&in_param);
1405 id = index & mpt_mask(dev);
1406 err = mr_res_start_move_to(dev, slave, id,
1407 RES_MPT_RESERVED, &mpt);
1408 if (err)
1409 return err;
1410
1411 __mlx4_mr_free_icm(dev, mpt->key);
1412 res_end_move(dev, slave, RES_MPT, id);
1413 return err;
1414 break;
1415 default:
1416 err = -EINVAL;
1417 break;
1418 }
1419 return err;
1420}
1421
1422static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1423 u64 in_param, u64 *out_param)
1424{
1425 int cqn;
1426 int err;
1427
1428 switch (op) {
1429 case RES_OP_RESERVE_AND_MAP:
1430 cqn = get_param_l(&in_param);
1431 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1432 if (err)
1433 break;
1434
1435 __mlx4_cq_free_icm(dev, cqn);
1436 break;
1437
1438 default:
1439 err = -EINVAL;
1440 break;
1441 }
1442
1443 return err;
1444}
1445
1446static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1447 u64 in_param, u64 *out_param)
1448{
1449 int srqn;
1450 int err;
1451
1452 switch (op) {
1453 case RES_OP_RESERVE_AND_MAP:
1454 srqn = get_param_l(&in_param);
1455 err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1456 if (err)
1457 break;
1458
1459 __mlx4_srq_free_icm(dev, srqn);
1460 break;
1461
1462 default:
1463 err = -EINVAL;
1464 break;
1465 }
1466
1467 return err;
1468}
1469
1470static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1471 u64 in_param, u64 *out_param)
1472{
1473 int port;
1474 int err = 0;
1475
1476 switch (op) {
1477 case RES_OP_RESERVE_AND_MAP:
1478 port = get_param_l(out_param);
1479 mac_del_from_slave(dev, slave, in_param, port);
1480 __mlx4_unregister_mac(dev, port, in_param);
1481 break;
1482 default:
1483 err = -EINVAL;
1484 break;
1485 }
1486
1487 return err;
1488
1489}
1490
1491static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1492 u64 in_param, u64 *out_param)
1493{
1494 return 0;
1495}
1496
1497int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
1498 struct mlx4_vhcr *vhcr,
1499 struct mlx4_cmd_mailbox *inbox,
1500 struct mlx4_cmd_mailbox *outbox,
1501 struct mlx4_cmd_info *cmd)
1502{
1503 int err = -EINVAL;
1504 int alop = vhcr->op_modifier;
1505
1506 switch (vhcr->in_modifier) {
1507 case RES_QP:
1508 err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
1509 vhcr->in_param);
1510 break;
1511
1512 case RES_MTT:
1513 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
1514 vhcr->in_param, &vhcr->out_param);
1515 break;
1516
1517 case RES_MPT:
1518 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
1519 vhcr->in_param);
1520 break;
1521
1522 case RES_CQ:
1523 err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
1524 vhcr->in_param, &vhcr->out_param);
1525 break;
1526
1527 case RES_SRQ:
1528 err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
1529 vhcr->in_param, &vhcr->out_param);
1530 break;
1531
1532 case RES_MAC:
1533 err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
1534 vhcr->in_param, &vhcr->out_param);
1535 break;
1536
1537 case RES_VLAN:
1538 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
1539 vhcr->in_param, &vhcr->out_param);
1540 break;
1541
1542 default:
1543 break;
1544 }
1545 return err;
1546}
1547
1548/* ugly but other choices are uglier */
1549static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
1550{
1551 return (be32_to_cpu(mpt->flags) >> 9) & 1;
1552}
1553
 1554static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
 1555{
 1556 return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
1557}
1558
1559static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
1560{
1561 return be32_to_cpu(mpt->mtt_sz);
1562}
1563
 1564static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
1565{
1566 return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
1567}
1568
 1569static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
1570{
1571 return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
1572}
1573
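/*
 * Number of MTT entries a QP needs: SQ and RQ byte sizes are derived from
 * the log sizes and strides in the context (the RQ is skipped for SRQ, RSS
 * and XRC QPs), the page offset is added, and the total is rounded up to a
 * power-of-two number of pages, where the page size is 4KB << log_page_size.
 */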
1574static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
1575{
1576 int page_shift = (qpc->log_page_size & 0x3f) + 12;
1577 int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
 1578 int log_sq_stride = qpc->sq_size_stride & 7;
1579 int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
1580 int log_rq_stride = qpc->rq_size_stride & 7;
1581 int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
1582 int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
1583 int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
1584 int sq_size;
1585 int rq_size;
1586 int total_pages;
1587 int total_mem;
1588 int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
1589
 1590 sq_size = 1 << (log_sq_size + log_sq_stride + 4);
1591 rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
1592 total_mem = sq_size + rq_size;
1593 total_pages =
1594 roundup_pow_of_two((total_mem + (page_offset << 6)) >>
1595 page_shift);
1596
1597 return total_pages;
1598}
1599
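/*
 * MTT ranges are tracked by their first entry index and order; a request
 * for [start, start + size) is valid only if it lies entirely inside the
 * range owned by the slave.
 */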
1600static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
1601 int size, struct res_mtt *mtt)
1602{
1603 int res_start = mtt->com.res_id;
1604 int res_size = (1 << mtt->order);
1605
1606 if (start < res_start || start + size > res_start + res_size)
1607 return -EPERM;
1608 return 0;
1609}
1610
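/*
 * SW2HW_MPT: move the MPT to hardware ownership.  For non-physical MRs the
 * MTT range referenced by the entry (mtt_addr / mtt_entry_sz) must belong
 * to the slave and cover the MR; on success its reference count is bumped
 * so the MTT range cannot be freed while the MR uses it.
 */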
1611int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
1612 struct mlx4_vhcr *vhcr,
1613 struct mlx4_cmd_mailbox *inbox,
1614 struct mlx4_cmd_mailbox *outbox,
1615 struct mlx4_cmd_info *cmd)
1616{
1617 int err;
1618 int index = vhcr->in_modifier;
1619 struct res_mtt *mtt;
1620 struct res_mpt *mpt;
 1621 int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
1622 int phys;
1623 int id;
1624
1625 id = index & mpt_mask(dev);
1626 err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
1627 if (err)
1628 return err;
1629
1630 phys = mr_phys_mpt(inbox->buf);
1631 if (!phys) {
 1632 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
1633 if (err)
1634 goto ex_abort;
1635
1636 err = check_mtt_range(dev, slave, mtt_base,
1637 mr_get_mtt_size(inbox->buf), mtt);
1638 if (err)
1639 goto ex_put;
1640
1641 mpt->mtt = mtt;
1642 }
1643
1644 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1645 if (err)
1646 goto ex_put;
1647
1648 if (!phys) {
1649 atomic_inc(&mtt->ref_count);
1650 put_res(dev, slave, mtt->com.res_id, RES_MTT);
1651 }
1652
1653 res_end_move(dev, slave, RES_MPT, id);
1654 return 0;
1655
1656ex_put:
1657 if (!phys)
1658 put_res(dev, slave, mtt->com.res_id, RES_MTT);
1659ex_abort:
1660 res_abort_move(dev, slave, RES_MPT, id);
1661
1662 return err;
1663}
1664
1665int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
1666 struct mlx4_vhcr *vhcr,
1667 struct mlx4_cmd_mailbox *inbox,
1668 struct mlx4_cmd_mailbox *outbox,
1669 struct mlx4_cmd_info *cmd)
1670{
1671 int err;
1672 int index = vhcr->in_modifier;
1673 struct res_mpt *mpt;
1674 int id;
1675
1676 id = index & mpt_mask(dev);
1677 err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
1678 if (err)
1679 return err;
1680
1681 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1682 if (err)
1683 goto ex_abort;
1684
1685 if (mpt->mtt)
1686 atomic_dec(&mpt->mtt->ref_count);
1687
1688 res_end_move(dev, slave, RES_MPT, id);
1689 return 0;
1690
1691ex_abort:
1692 res_abort_move(dev, slave, RES_MPT, id);
1693
1694 return err;
1695}
1696
1697int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
1698 struct mlx4_vhcr *vhcr,
1699 struct mlx4_cmd_mailbox *inbox,
1700 struct mlx4_cmd_mailbox *outbox,
1701 struct mlx4_cmd_info *cmd)
1702{
1703 int err;
1704 int index = vhcr->in_modifier;
1705 struct res_mpt *mpt;
1706 int id;
1707
1708 id = index & mpt_mask(dev);
1709 err = get_res(dev, slave, id, RES_MPT, &mpt);
1710 if (err)
1711 return err;
1712
1713 if (mpt->com.from_state != RES_MPT_HW) {
1714 err = -EBUSY;
1715 goto out;
1716 }
1717
1718 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1719
1720out:
1721 put_res(dev, slave, id, RES_MPT);
1722 return err;
1723}
1724
1725static int qp_get_rcqn(struct mlx4_qp_context *qpc)
1726{
1727 return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
1728}
1729
1730static int qp_get_scqn(struct mlx4_qp_context *qpc)
1731{
1732 return be32_to_cpu(qpc->cqn_send) & 0xffffff;
1733}
1734
1735static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
1736{
1737 return be32_to_cpu(qpc->srqn) & 0x1ffffff;
1738}
1739
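/*
 * RST2INIT: before passing the command to firmware, verify that the MTT
 * range, receive/send CQs and (optionally) the SRQ named in the QP context
 * are owned by the slave, and take a reference on each so they cannot be
 * destroyed while the QP exists.
 */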
1740int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
1741 struct mlx4_vhcr *vhcr,
1742 struct mlx4_cmd_mailbox *inbox,
1743 struct mlx4_cmd_mailbox *outbox,
1744 struct mlx4_cmd_info *cmd)
1745{
1746 int err;
1747 int qpn = vhcr->in_modifier & 0x7fffff;
1748 struct res_mtt *mtt;
1749 struct res_qp *qp;
1750 struct mlx4_qp_context *qpc = inbox->buf + 8;
 1751 int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
1752 int mtt_size = qp_get_mtt_size(qpc);
1753 struct res_cq *rcq;
1754 struct res_cq *scq;
1755 int rcqn = qp_get_rcqn(qpc);
1756 int scqn = qp_get_scqn(qpc);
1757 u32 srqn = qp_get_srqn(qpc) & 0xffffff;
1758 int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
1759 struct res_srq *srq;
1760 int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
1761
1762 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
1763 if (err)
1764 return err;
1765 qp->local_qpn = local_qpn;
1766
 1767 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
1768 if (err)
1769 goto ex_abort;
1770
1771 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
1772 if (err)
1773 goto ex_put_mtt;
1774
1775 err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
1776 if (err)
1777 goto ex_put_mtt;
1778
1779 if (scqn != rcqn) {
1780 err = get_res(dev, slave, scqn, RES_CQ, &scq);
1781 if (err)
1782 goto ex_put_rcq;
1783 } else
1784 scq = rcq;
1785
1786 if (use_srq) {
1787 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
1788 if (err)
1789 goto ex_put_scq;
1790 }
1791
1792 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1793 if (err)
1794 goto ex_put_srq;
1795 atomic_inc(&mtt->ref_count);
1796 qp->mtt = mtt;
1797 atomic_inc(&rcq->ref_count);
1798 qp->rcq = rcq;
1799 atomic_inc(&scq->ref_count);
1800 qp->scq = scq;
1801
1802 if (scqn != rcqn)
1803 put_res(dev, slave, scqn, RES_CQ);
1804
1805 if (use_srq) {
1806 atomic_inc(&srq->ref_count);
1807 put_res(dev, slave, srqn, RES_SRQ);
1808 qp->srq = srq;
1809 }
1810 put_res(dev, slave, rcqn, RES_CQ);
 1811 put_res(dev, slave, mtt_base, RES_MTT);
1812 res_end_move(dev, slave, RES_QP, qpn);
1813
1814 return 0;
1815
1816ex_put_srq:
1817 if (use_srq)
1818 put_res(dev, slave, srqn, RES_SRQ);
1819ex_put_scq:
1820 if (scqn != rcqn)
1821 put_res(dev, slave, scqn, RES_CQ);
1822ex_put_rcq:
1823 put_res(dev, slave, rcqn, RES_CQ);
1824ex_put_mtt:
 1825 put_res(dev, slave, mtt_base, RES_MTT);
1826ex_abort:
1827 res_abort_move(dev, slave, RES_QP, qpn);
1828
1829 return err;
1830}
1831
 1832static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
1833{
1834 return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
1835}
1836
1837static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
1838{
1839 int log_eq_size = eqc->log_eq_size & 0x1f;
1840 int page_shift = (eqc->log_page_size & 0x3f) + 12;
1841
1842 if (log_eq_size + 5 < page_shift)
1843 return 1;
1844
1845 return 1 << (log_eq_size + 5 - page_shift);
1846}
1847
 1848static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
1849{
1850 return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
1851}
1852
1853static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
1854{
1855 int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
1856 int page_shift = (cqc->log_page_size & 0x3f) + 12;
1857
1858 if (log_cq_size + 5 < page_shift)
1859 return 1;
1860
1861 return 1 << (log_cq_size + 5 - page_shift);
1862}
1863
1864int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
1865 struct mlx4_vhcr *vhcr,
1866 struct mlx4_cmd_mailbox *inbox,
1867 struct mlx4_cmd_mailbox *outbox,
1868 struct mlx4_cmd_info *cmd)
1869{
1870 int err;
1871 int eqn = vhcr->in_modifier;
1872 int res_id = (slave << 8) | eqn;
1873 struct mlx4_eq_context *eqc = inbox->buf;
 1874 int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
1875 int mtt_size = eq_get_mtt_size(eqc);
1876 struct res_eq *eq;
1877 struct res_mtt *mtt;
1878
1879 err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
1880 if (err)
1881 return err;
1882 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
1883 if (err)
1884 goto out_add;
1885
 1886 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
1887 if (err)
1888 goto out_move;
1889
1890 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
1891 if (err)
1892 goto out_put;
1893
1894 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1895 if (err)
1896 goto out_put;
1897
1898 atomic_inc(&mtt->ref_count);
1899 eq->mtt = mtt;
1900 put_res(dev, slave, mtt->com.res_id, RES_MTT);
1901 res_end_move(dev, slave, RES_EQ, res_id);
1902 return 0;
1903
1904out_put:
1905 put_res(dev, slave, mtt->com.res_id, RES_MTT);
1906out_move:
1907 res_abort_move(dev, slave, RES_EQ, res_id);
1908out_add:
1909 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
1910 return err;
1911}
1912
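/*
 * WRITE_MTT arrives with a starting MTT index and a list of pages; the
 * slave's MTT ranges are scanned for one that contains the whole write,
 * that range is marked busy for the duration, and the addresses are
 * converted to host byte order before calling the software write_mtt.
 */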
1913static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
1914 int len, struct res_mtt **res)
1915{
1916 struct mlx4_priv *priv = mlx4_priv(dev);
1917 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1918 struct res_mtt *mtt;
1919 int err = -EINVAL;
1920
1921 spin_lock_irq(mlx4_tlock(dev));
1922 list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
1923 com.list) {
1924 if (!check_mtt_range(dev, slave, start, len, mtt)) {
1925 *res = mtt;
1926 mtt->com.from_state = mtt->com.state;
1927 mtt->com.state = RES_MTT_BUSY;
1928 err = 0;
1929 break;
1930 }
1931 }
1932 spin_unlock_irq(mlx4_tlock(dev));
1933
1934 return err;
1935}
1936
1937int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
1938 struct mlx4_vhcr *vhcr,
1939 struct mlx4_cmd_mailbox *inbox,
1940 struct mlx4_cmd_mailbox *outbox,
1941 struct mlx4_cmd_info *cmd)
1942{
1943 struct mlx4_mtt mtt;
1944 __be64 *page_list = inbox->buf;
1945 u64 *pg_list = (u64 *)page_list;
1946 int i;
1947 struct res_mtt *rmtt = NULL;
1948 int start = be64_to_cpu(page_list[0]);
1949 int npages = vhcr->in_modifier;
1950 int err;
1951
1952 err = get_containing_mtt(dev, slave, start, npages, &rmtt);
1953 if (err)
1954 return err;
1955
1956 /* Call the SW implementation of write_mtt:
1957 * - Prepare a dummy mtt struct
 1958 * - Translate inbox contents to simple addresses in host endianness */
1959 mtt.offset = 0; /* TBD this is broken but I don't handle it since
1960 we don't really use it */
1961 mtt.order = 0;
1962 mtt.page_shift = 0;
1963 for (i = 0; i < npages; ++i)
1964 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
1965
1966 err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
1967 ((u64 *)page_list + 2));
1968
1969 if (rmtt)
1970 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
1971
1972 return err;
1973}
1974
1975int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
1976 struct mlx4_vhcr *vhcr,
1977 struct mlx4_cmd_mailbox *inbox,
1978 struct mlx4_cmd_mailbox *outbox,
1979 struct mlx4_cmd_info *cmd)
1980{
1981 int eqn = vhcr->in_modifier;
1982 int res_id = eqn | (slave << 8);
1983 struct res_eq *eq;
1984 int err;
1985
1986 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
1987 if (err)
1988 return err;
1989
1990 err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
1991 if (err)
1992 goto ex_abort;
1993
1994 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1995 if (err)
1996 goto ex_put;
1997
1998 atomic_dec(&eq->mtt->ref_count);
1999 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2000 res_end_move(dev, slave, RES_EQ, res_id);
2001 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2002
2003 return 0;
2004
2005ex_put:
2006 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2007ex_abort:
2008 res_abort_move(dev, slave, RES_EQ, res_id);
2009
2010 return err;
2011}
2012
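/*
 * mlx4_GEN_EQE() forwards an event to a slave: if the slave registered for
 * this event type, the EQE is copied into a mailbox and injected into the
 * slave's event EQ with the GEN_EQE firmware command.
 */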
2013int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
2014{
2015 struct mlx4_priv *priv = mlx4_priv(dev);
2016 struct mlx4_slave_event_eq_info *event_eq;
2017 struct mlx4_cmd_mailbox *mailbox;
2018 u32 in_modifier = 0;
2019 int err;
2020 int res_id;
2021 struct res_eq *req;
2022
2023 if (!priv->mfunc.master.slave_state)
2024 return -EINVAL;
2025
2026 event_eq = &priv->mfunc.master.slave_state[slave].event_eq;
2027
2028 /* Create the event only if the slave is registered */
2029 if ((event_eq->event_type & (1 << eqe->type)) == 0)
2030 return 0;
2031
2032 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2033 res_id = (slave << 8) | event_eq->eqn;
2034 err = get_res(dev, slave, res_id, RES_EQ, &req);
2035 if (err)
2036 goto unlock;
2037
2038 if (req->com.from_state != RES_EQ_HW) {
2039 err = -EINVAL;
2040 goto put;
2041 }
2042
2043 mailbox = mlx4_alloc_cmd_mailbox(dev);
2044 if (IS_ERR(mailbox)) {
2045 err = PTR_ERR(mailbox);
2046 goto put;
2047 }
2048
2049 if (eqe->type == MLX4_EVENT_TYPE_CMD) {
2050 ++event_eq->token;
2051 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
2052 }
2053
2054 memcpy(mailbox->buf, (u8 *) eqe, 28);
2055
2056 in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
2057
2058 err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
2059 MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
2060 MLX4_CMD_NATIVE);
2061
2062 put_res(dev, slave, res_id, RES_EQ);
2063 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2064 mlx4_free_cmd_mailbox(dev, mailbox);
2065 return err;
2066
2067put:
2068 put_res(dev, slave, res_id, RES_EQ);
2069
2070unlock:
2071 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2072 return err;
2073}
2074
2075int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
2076 struct mlx4_vhcr *vhcr,
2077 struct mlx4_cmd_mailbox *inbox,
2078 struct mlx4_cmd_mailbox *outbox,
2079 struct mlx4_cmd_info *cmd)
2080{
2081 int eqn = vhcr->in_modifier;
2082 int res_id = eqn | (slave << 8);
2083 struct res_eq *eq;
2084 int err;
2085
2086 err = get_res(dev, slave, res_id, RES_EQ, &eq);
2087 if (err)
2088 return err;
2089
2090 if (eq->com.from_state != RES_EQ_HW) {
2091 err = -EINVAL;
2092 goto ex_put;
2093 }
2094
2095 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2096
2097ex_put:
2098 put_res(dev, slave, res_id, RES_EQ);
2099 return err;
2100}
2101
2102int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2103 struct mlx4_vhcr *vhcr,
2104 struct mlx4_cmd_mailbox *inbox,
2105 struct mlx4_cmd_mailbox *outbox,
2106 struct mlx4_cmd_info *cmd)
2107{
2108 int err;
2109 int cqn = vhcr->in_modifier;
2110 struct mlx4_cq_context *cqc = inbox->buf;
 2111 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2112 struct res_cq *cq;
2113 struct res_mtt *mtt;
2114
2115 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
2116 if (err)
2117 return err;
 2118 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2119 if (err)
2120 goto out_move;
2121 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2122 if (err)
2123 goto out_put;
2124 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2125 if (err)
2126 goto out_put;
2127 atomic_inc(&mtt->ref_count);
2128 cq->mtt = mtt;
2129 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2130 res_end_move(dev, slave, RES_CQ, cqn);
2131 return 0;
2132
2133out_put:
2134 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2135out_move:
2136 res_abort_move(dev, slave, RES_CQ, cqn);
2137 return err;
2138}
2139
2140int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2141 struct mlx4_vhcr *vhcr,
2142 struct mlx4_cmd_mailbox *inbox,
2143 struct mlx4_cmd_mailbox *outbox,
2144 struct mlx4_cmd_info *cmd)
2145{
2146 int err;
2147 int cqn = vhcr->in_modifier;
2148 struct res_cq *cq;
2149
2150 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
2151 if (err)
2152 return err;
2153 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2154 if (err)
2155 goto out_move;
2156 atomic_dec(&cq->mtt->ref_count);
2157 res_end_move(dev, slave, RES_CQ, cqn);
2158 return 0;
2159
2160out_move:
2161 res_abort_move(dev, slave, RES_CQ, cqn);
2162 return err;
2163}
2164
2165int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2166 struct mlx4_vhcr *vhcr,
2167 struct mlx4_cmd_mailbox *inbox,
2168 struct mlx4_cmd_mailbox *outbox,
2169 struct mlx4_cmd_info *cmd)
2170{
2171 int cqn = vhcr->in_modifier;
2172 struct res_cq *cq;
2173 int err;
2174
2175 err = get_res(dev, slave, cqn, RES_CQ, &cq);
2176 if (err)
2177 return err;
2178
2179 if (cq->com.from_state != RES_CQ_HW)
2180 goto ex_put;
2181
2182 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2183ex_put:
2184 put_res(dev, slave, cqn, RES_CQ);
2185
2186 return err;
2187}
2188
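/*
 * CQ resize: the new MTT range from the modify mailbox must belong to the
 * slave and be large enough for the resized CQ; on success the tracked
 * reference moves from the old MTT range to the new one.
 */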
2189static int handle_resize(struct mlx4_dev *dev, int slave,
2190 struct mlx4_vhcr *vhcr,
2191 struct mlx4_cmd_mailbox *inbox,
2192 struct mlx4_cmd_mailbox *outbox,
2193 struct mlx4_cmd_info *cmd,
2194 struct res_cq *cq)
2195{
2196 int err;
2197 struct res_mtt *orig_mtt;
2198 struct res_mtt *mtt;
2199 struct mlx4_cq_context *cqc = inbox->buf;
 2200 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2201
2202 err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
2203 if (err)
2204 return err;
2205
2206 if (orig_mtt != cq->mtt) {
2207 err = -EINVAL;
2208 goto ex_put;
2209 }
2210
 2211 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2212 if (err)
2213 goto ex_put;
2214
2215 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2216 if (err)
2217 goto ex_put1;
2218 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2219 if (err)
2220 goto ex_put1;
2221 atomic_dec(&orig_mtt->ref_count);
2222 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2223 atomic_inc(&mtt->ref_count);
2224 cq->mtt = mtt;
2225 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2226 return 0;
2227
2228ex_put1:
2229 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2230ex_put:
2231 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2232
2233 return err;
2234
2235}
2236
2237int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2238 struct mlx4_vhcr *vhcr,
2239 struct mlx4_cmd_mailbox *inbox,
2240 struct mlx4_cmd_mailbox *outbox,
2241 struct mlx4_cmd_info *cmd)
2242{
2243 int cqn = vhcr->in_modifier;
2244 struct res_cq *cq;
2245 int err;
2246
2247 err = get_res(dev, slave, cqn, RES_CQ, &cq);
2248 if (err)
2249 return err;
2250
2251 if (cq->com.from_state != RES_CQ_HW)
2252 goto ex_put;
2253
2254 if (vhcr->op_modifier == 0) {
2255 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
2256 if (err)
2257 goto ex_put;
2258 }
2259
2260 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2261ex_put:
2262 put_res(dev, slave, cqn, RES_CQ);
2263
2264 return err;
2265}
2266
2267static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
2268{
2269 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
2270 int log_rq_stride = srqc->logstride & 7;
2271 int page_shift = (srqc->log_page_size & 0x3f) + 12;
2272
2273 if (log_srq_size + log_rq_stride + 4 < page_shift)
2274 return 1;
2275
2276 return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
2277}
2278
2279int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2280 struct mlx4_vhcr *vhcr,
2281 struct mlx4_cmd_mailbox *inbox,
2282 struct mlx4_cmd_mailbox *outbox,
2283 struct mlx4_cmd_info *cmd)
2284{
2285 int err;
2286 int srqn = vhcr->in_modifier;
2287 struct res_mtt *mtt;
2288 struct res_srq *srq;
2289 struct mlx4_srq_context *srqc = inbox->buf;
2b8fb286 2290 int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
2291
2292 if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
2293 return -EINVAL;
2294
2295 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
2296 if (err)
2297 return err;
2b8fb286 2298 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2299 if (err)
2300 goto ex_abort;
2301 err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
2302 mtt);
2303 if (err)
2304 goto ex_put_mtt;
2305
2306 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2307 if (err)
2308 goto ex_put_mtt;
2309
2310 atomic_inc(&mtt->ref_count);
2311 srq->mtt = mtt;
2312 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2313 res_end_move(dev, slave, RES_SRQ, srqn);
2314 return 0;
2315
2316ex_put_mtt:
2317 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2318ex_abort:
2319 res_abort_move(dev, slave, RES_SRQ, srqn);
2320
2321 return err;
2322}
2323
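/*
 * HW2SW_SRQ: return the SRQ to software ownership and drop the references
 * it held on its MTT range and, if set, its CQ.
 */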
2324int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2325 struct mlx4_vhcr *vhcr,
2326 struct mlx4_cmd_mailbox *inbox,
2327 struct mlx4_cmd_mailbox *outbox,
2328 struct mlx4_cmd_info *cmd)
2329{
2330 int err;
2331 int srqn = vhcr->in_modifier;
2332 struct res_srq *srq;
2333
2334 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
2335 if (err)
2336 return err;
2337 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2338 if (err)
2339 goto ex_abort;
2340 atomic_dec(&srq->mtt->ref_count);
2341 if (srq->cq)
2342 atomic_dec(&srq->cq->ref_count);
2343 res_end_move(dev, slave, RES_SRQ, srqn);
2344
2345 return 0;
2346
2347ex_abort:
2348 res_abort_move(dev, slave, RES_SRQ, srqn);
2349
2350 return err;
2351}
2352
2353int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2354 struct mlx4_vhcr *vhcr,
2355 struct mlx4_cmd_mailbox *inbox,
2356 struct mlx4_cmd_mailbox *outbox,
2357 struct mlx4_cmd_info *cmd)
2358{
2359 int err;
2360 int srqn = vhcr->in_modifier;
2361 struct res_srq *srq;
2362
2363 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2364 if (err)
2365 return err;
2366 if (srq->com.from_state != RES_SRQ_HW) {
2367 err = -EBUSY;
2368 goto out;
2369 }
2370 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2371out:
2372 put_res(dev, slave, srqn, RES_SRQ);
2373 return err;
2374}
2375
2376int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2377 struct mlx4_vhcr *vhcr,
2378 struct mlx4_cmd_mailbox *inbox,
2379 struct mlx4_cmd_mailbox *outbox,
2380 struct mlx4_cmd_info *cmd)
2381{
2382 int err;
2383 int srqn = vhcr->in_modifier;
2384 struct res_srq *srq;
2385
2386 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2387 if (err)
2388 return err;
2389
2390 if (srq->com.from_state != RES_SRQ_HW) {
2391 err = -EBUSY;
2392 goto out;
2393 }
2394
2395 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2396out:
2397 put_res(dev, slave, srqn, RES_SRQ);
2398 return err;
2399}
2400
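/*
 * Generic QP command wrapper: forward the command only while the slave's
 * QP is tracked in HW ownership.
 */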
2401int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
2402 struct mlx4_vhcr *vhcr,
2403 struct mlx4_cmd_mailbox *inbox,
2404 struct mlx4_cmd_mailbox *outbox,
2405 struct mlx4_cmd_info *cmd)
2406{
2407 int err;
2408 int qpn = vhcr->in_modifier & 0x7fffff;
2409 struct res_qp *qp;
2410
2411 err = get_res(dev, slave, qpn, RES_QP, &qp);
2412 if (err)
2413 return err;
2414 if (qp->com.from_state != RES_QP_HW) {
2415 err = -EBUSY;
2416 goto out;
2417 }
2418
2419 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2420out:
2421 put_res(dev, slave, qpn, RES_QP);
2422 return err;
2423}
2424
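/*
 * INIT2RTR additionally rewrites the UD GID in the QP context for the
 * calling slave before handing off to the generic QP wrapper.
 */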
2425int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
2426 struct mlx4_vhcr *vhcr,
2427 struct mlx4_cmd_mailbox *inbox,
2428 struct mlx4_cmd_mailbox *outbox,
2429 struct mlx4_cmd_info *cmd)
2430{
2431 struct mlx4_qp_context *qpc = inbox->buf + 8;
2432
2433 update_ud_gid(dev, qpc, (u8)slave);
2434
2435 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2436}
2437
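/*
 * 2RST_QP: once the QP is back in reset, drop the references it held on
 * its MTT, its receive and send CQs and, if set, its SRQ, and move the
 * tracker entry back to the MAPPED state.
 */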
2438int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
2439 struct mlx4_vhcr *vhcr,
2440 struct mlx4_cmd_mailbox *inbox,
2441 struct mlx4_cmd_mailbox *outbox,
2442 struct mlx4_cmd_info *cmd)
2443{
2444 int err;
2445 int qpn = vhcr->in_modifier & 0x7fffff;
2446 struct res_qp *qp;
2447
2448 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
2449 if (err)
2450 return err;
2451 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2452 if (err)
2453 goto ex_abort;
2454
2455 atomic_dec(&qp->mtt->ref_count);
2456 atomic_dec(&qp->rcq->ref_count);
2457 atomic_dec(&qp->scq->ref_count);
2458 if (qp->srq)
2459 atomic_dec(&qp->srq->ref_count);
2460 res_end_move(dev, slave, RES_QP, qpn);
2461 return 0;
2462
2463ex_abort:
2464 res_abort_move(dev, slave, RES_QP, qpn);
2465
2466 return err;
2467}
2468
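/*
 * Per-QP multicast bookkeeping: every GID a slave attaches is recorded on
 * the QP's mcg_list so the master can detach it on the slave's behalf if
 * the slave goes away without cleaning up (see detach_qp()).
 */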
2469static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
2470 struct res_qp *rqp, u8 *gid)
2471{
2472 struct res_gid *res;
2473
2474 list_for_each_entry(res, &rqp->mcg_list, list) {
2475 if (!memcmp(res->gid, gid, 16))
2476 return res;
2477 }
2478 return NULL;
2479}
2480
2481static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2482 u8 *gid, enum mlx4_protocol prot)
2483{
2484 struct res_gid *res;
2485 int err;
2486
2487 res = kzalloc(sizeof *res, GFP_KERNEL);
2488 if (!res)
2489 return -ENOMEM;
2490
2491 spin_lock_irq(&rqp->mcg_spl);
2492 if (find_gid(dev, slave, rqp, gid)) {
2493 kfree(res);
2494 err = -EEXIST;
2495 } else {
2496 memcpy(res->gid, gid, 16);
2497 res->prot = prot;
2498 list_add_tail(&res->list, &rqp->mcg_list);
2499 err = 0;
2500 }
2501 spin_unlock_irq(&rqp->mcg_spl);
2502
2503 return err;
2504}
2505
2506static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2507 u8 *gid, enum mlx4_protocol prot)
2508{
2509 struct res_gid *res;
2510 int err;
2511
2512 spin_lock_irq(&rqp->mcg_spl);
2513 res = find_gid(dev, slave, rqp, gid);
2514 if (!res || res->prot != prot)
2515 err = -EINVAL;
2516 else {
2517 list_del(&res->list);
2518 kfree(res);
2519 err = 0;
2520 }
2521 spin_unlock_irq(&rqp->mcg_spl);
2522
2523 return err;
2524}
2525
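/*
 * QP_ATTACH/DETACH: op_modifier selects attach vs. detach.  The GID is
 * recorded on (or removed from) the QP's mcg_list before the attach or
 * detach is performed, and a failed attach is rolled back.
 */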
2526int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
2527 struct mlx4_vhcr *vhcr,
2528 struct mlx4_cmd_mailbox *inbox,
2529 struct mlx4_cmd_mailbox *outbox,
2530 struct mlx4_cmd_info *cmd)
2531{
2532 struct mlx4_qp qp; /* dummy for calling attach/detach */
2533 u8 *gid = inbox->buf;
2534 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
2535 int err, err1;
2536 int qpn;
2537 struct res_qp *rqp;
2538 int attach = vhcr->op_modifier;
2539 int block_loopback = vhcr->in_modifier >> 31;
2540 u8 steer_type_mask = 2;
2541 enum mlx4_steer_type type = gid[7] & steer_type_mask;
2542
2543 qpn = vhcr->in_modifier & 0xffffff;
2544 err = get_res(dev, slave, qpn, RES_QP, &rqp);
2545 if (err)
2546 return err;
2547
2548 qp.qpn = qpn;
2549 if (attach) {
2550 err = add_mcg_res(dev, slave, rqp, gid, prot);
2551 if (err)
2552 goto ex_put;
2553
2554 err = mlx4_qp_attach_common(dev, &qp, gid,
2555 block_loopback, prot, type);
2556 if (err)
2557 goto ex_rem;
2558 } else {
2559 err = rem_mcg_res(dev, slave, rqp, gid, prot);
2560 if (err)
2561 goto ex_put;
2562 err = mlx4_qp_detach_common(dev, &qp, gid, prot, type);
2563 }
2564
2565 put_res(dev, slave, qpn, RES_QP);
2566 return 0;
2567
2568ex_rem:
2569 /* ignore error return below, already in error */
2570 err1 = rem_mcg_res(dev, slave, rqp, gid, prot);
2571ex_put:
2572 put_res(dev, slave, qpn, RES_QP);
2573
2574 return err;
2575}
2576
2577enum {
2578 BUSY_MAX_RETRIES = 10
2579};
2580
2581int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
2582 struct mlx4_vhcr *vhcr,
2583 struct mlx4_cmd_mailbox *inbox,
2584 struct mlx4_cmd_mailbox *outbox,
2585 struct mlx4_cmd_info *cmd)
2586{
2587 int err;
2588 int index = vhcr->in_modifier & 0xffff;
2589
2590 err = get_res(dev, slave, index, RES_COUNTER, NULL);
2591 if (err)
2592 return err;
2593
2594 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2595 put_res(dev, slave, index, RES_COUNTER);
2596 return err;
2597}
2598
2599static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
2600{
2601 struct res_gid *rgid;
2602 struct res_gid *tmp;
2603 int err;
2604 struct mlx4_qp qp; /* dummy for calling attach/detach */
2605
2606 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
2607 qp.qpn = rqp->local_qpn;
2608 err = mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
2609 MLX4_MC_STEER);
2610 list_del(&rgid->list);
2611 kfree(rgid);
2612 }
2613}
2614
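/*
 * Mark every resource of @type owned by @slave as busy and "removing" so
 * no new references can be taken while the rem_slave_*() routines below
 * tear the resources down.  Returns the number of entries that were
 * already busy and could not be claimed.
 */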
2615static int _move_all_busy(struct mlx4_dev *dev, int slave,
2616 enum mlx4_resource type, int print)
2617{
2618 struct mlx4_priv *priv = mlx4_priv(dev);
2619 struct mlx4_resource_tracker *tracker =
2620 &priv->mfunc.master.res_tracker;
2621 struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
2622 struct res_common *r;
2623 struct res_common *tmp;
2624 int busy;
2625
2626 busy = 0;
2627 spin_lock_irq(mlx4_tlock(dev));
2628 list_for_each_entry_safe(r, tmp, rlist, list) {
2629 if (r->owner == slave) {
2630 if (!r->removing) {
2631 if (r->state == RES_ANY_BUSY) {
2632 if (print)
2633 mlx4_dbg(dev,
2634 "%s id 0x%x is busy\n",
2635 ResourceType(type),
2636 r->res_id);
2637 ++busy;
2638 } else {
2639 r->from_state = r->state;
2640 r->state = RES_ANY_BUSY;
2641 r->removing = 1;
2642 }
2643 }
2644 }
2645 }
2646 spin_unlock_irq(mlx4_tlock(dev));
2647
2648 return busy;
2649}
2650
2651static int move_all_busy(struct mlx4_dev *dev, int slave,
2652 enum mlx4_resource type)
2653{
2654 unsigned long begin;
2655 int busy;
2656
2657 begin = jiffies;
2658 do {
2659 busy = _move_all_busy(dev, slave, type, 0);
2660 if (time_after(jiffies, begin + 5 * HZ))
2661 break;
2662 if (busy)
2663 cond_resched();
2664 } while (busy);
2665
2666 if (busy)
2667 busy = _move_all_busy(dev, slave, type, 1);
2668
2669 return busy;
2670}
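
/*
 * Tear down all QPs still owned by @slave: detach their multicast GIDs,
 * walk each QP back from HW through MAPPED to RESERVED (issuing 2RST_QP
 * and freeing ICM as needed, dropping CQ/SRQ/MTT references), and finally
 * remove the entry from the tracker.
 */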
2671static void rem_slave_qps(struct mlx4_dev *dev, int slave)
2672{
2673 struct mlx4_priv *priv = mlx4_priv(dev);
2674 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2675 struct list_head *qp_list =
2676 &tracker->slave_list[slave].res_list[RES_QP];
2677 struct res_qp *qp;
2678 struct res_qp *tmp;
2679 int state;
2680 u64 in_param;
2681 int qpn;
2682 int err;
2683
2684 err = move_all_busy(dev, slave, RES_QP);
2685 if (err)
2686 		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy "
2687 			  "for slave %d\n", slave);
2688
2689 spin_lock_irq(mlx4_tlock(dev));
2690 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
2691 spin_unlock_irq(mlx4_tlock(dev));
2692 if (qp->com.owner == slave) {
2693 qpn = qp->com.res_id;
2694 detach_qp(dev, slave, qp);
2695 state = qp->com.from_state;
2696 while (state != 0) {
2697 switch (state) {
2698 case RES_QP_RESERVED:
2699 spin_lock_irq(mlx4_tlock(dev));
2700 radix_tree_delete(&tracker->res_tree[RES_QP],
2701 qp->com.res_id);
2702 list_del(&qp->com.list);
2703 spin_unlock_irq(mlx4_tlock(dev));
2704 kfree(qp);
2705 state = 0;
2706 break;
2707 case RES_QP_MAPPED:
2708 if (!valid_reserved(dev, slave, qpn))
2709 __mlx4_qp_free_icm(dev, qpn);
2710 state = RES_QP_RESERVED;
2711 break;
2712 case RES_QP_HW:
2713 in_param = slave;
2714 err = mlx4_cmd(dev, in_param,
2715 qp->local_qpn, 2,
2716 MLX4_CMD_2RST_QP,
2717 MLX4_CMD_TIME_CLASS_A,
2718 MLX4_CMD_NATIVE);
2719 if (err)
2720 mlx4_dbg(dev, "rem_slave_qps: failed"
2721 " to move slave %d qpn %d to"
2722 " reset\n", slave,
2723 qp->local_qpn);
2724 atomic_dec(&qp->rcq->ref_count);
2725 atomic_dec(&qp->scq->ref_count);
2726 atomic_dec(&qp->mtt->ref_count);
2727 if (qp->srq)
2728 atomic_dec(&qp->srq->ref_count);
2729 state = RES_QP_MAPPED;
2730 break;
2731 default:
2732 state = 0;
2733 }
2734 }
2735 }
2736 spin_lock_irq(mlx4_tlock(dev));
2737 }
2738 spin_unlock_irq(mlx4_tlock(dev));
2739}
2740
2741static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
2742{
2743 struct mlx4_priv *priv = mlx4_priv(dev);
2744 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2745 struct list_head *srq_list =
2746 &tracker->slave_list[slave].res_list[RES_SRQ];
2747 struct res_srq *srq;
2748 struct res_srq *tmp;
2749 int state;
2750 u64 in_param;
2751 LIST_HEAD(tlist);
2752 int srqn;
2753 int err;
2754
2755 err = move_all_busy(dev, slave, RES_SRQ);
2756 if (err)
2757 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
2758 "busy for slave %d\n", slave);
2759
2760 spin_lock_irq(mlx4_tlock(dev));
2761 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
2762 spin_unlock_irq(mlx4_tlock(dev));
2763 if (srq->com.owner == slave) {
2764 srqn = srq->com.res_id;
2765 state = srq->com.from_state;
2766 while (state != 0) {
2767 switch (state) {
2768 case RES_SRQ_ALLOCATED:
2769 __mlx4_srq_free_icm(dev, srqn);
2770 spin_lock_irq(mlx4_tlock(dev));
2771 radix_tree_delete(&tracker->res_tree[RES_SRQ],
2772 srqn);
2773 list_del(&srq->com.list);
2774 spin_unlock_irq(mlx4_tlock(dev));
2775 kfree(srq);
2776 state = 0;
2777 break;
2778
2779 case RES_SRQ_HW:
2780 in_param = slave;
2781 err = mlx4_cmd(dev, in_param, srqn, 1,
2782 MLX4_CMD_HW2SW_SRQ,
2783 MLX4_CMD_TIME_CLASS_A,
2784 MLX4_CMD_NATIVE);
2785 if (err)
2786 mlx4_dbg(dev, "rem_slave_srqs: failed"
2787 " to move slave %d srq %d to"
2788 " SW ownership\n",
2789 slave, srqn);
2790
2791 atomic_dec(&srq->mtt->ref_count);
2792 if (srq->cq)
2793 atomic_dec(&srq->cq->ref_count);
2794 state = RES_SRQ_ALLOCATED;
2795 break;
2796
2797 default:
2798 state = 0;
2799 }
2800 }
2801 }
2802 spin_lock_irq(mlx4_tlock(dev));
2803 }
2804 spin_unlock_irq(mlx4_tlock(dev));
2805}
2806
2807static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
2808{
2809 struct mlx4_priv *priv = mlx4_priv(dev);
2810 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2811 struct list_head *cq_list =
2812 &tracker->slave_list[slave].res_list[RES_CQ];
2813 struct res_cq *cq;
2814 struct res_cq *tmp;
2815 int state;
2816 u64 in_param;
2817 LIST_HEAD(tlist);
2818 int cqn;
2819 int err;
2820
2821 err = move_all_busy(dev, slave, RES_CQ);
2822 if (err)
2823 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
2824 "busy for slave %d\n", slave);
2825
2826 spin_lock_irq(mlx4_tlock(dev));
2827 list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
2828 spin_unlock_irq(mlx4_tlock(dev));
2829 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
2830 cqn = cq->com.res_id;
2831 state = cq->com.from_state;
2832 while (state != 0) {
2833 switch (state) {
2834 case RES_CQ_ALLOCATED:
2835 __mlx4_cq_free_icm(dev, cqn);
2836 spin_lock_irq(mlx4_tlock(dev));
2837 radix_tree_delete(&tracker->res_tree[RES_CQ],
2838 cqn);
2839 list_del(&cq->com.list);
2840 spin_unlock_irq(mlx4_tlock(dev));
2841 kfree(cq);
2842 state = 0;
2843 break;
2844
2845 case RES_CQ_HW:
2846 in_param = slave;
2847 err = mlx4_cmd(dev, in_param, cqn, 1,
2848 MLX4_CMD_HW2SW_CQ,
2849 MLX4_CMD_TIME_CLASS_A,
2850 MLX4_CMD_NATIVE);
2851 if (err)
2852 mlx4_dbg(dev, "rem_slave_cqs: failed"
2853 " to move slave %d cq %d to"
2854 " SW ownership\n",
2855 slave, cqn);
2856 atomic_dec(&cq->mtt->ref_count);
2857 state = RES_CQ_ALLOCATED;
2858 break;
2859
2860 default:
2861 state = 0;
2862 }
2863 }
2864 }
2865 spin_lock_irq(mlx4_tlock(dev));
2866 }
2867 spin_unlock_irq(mlx4_tlock(dev));
2868}
2869
2870static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
2871{
2872 struct mlx4_priv *priv = mlx4_priv(dev);
2873 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2874 struct list_head *mpt_list =
2875 &tracker->slave_list[slave].res_list[RES_MPT];
2876 struct res_mpt *mpt;
2877 struct res_mpt *tmp;
2878 int state;
2879 u64 in_param;
2880 LIST_HEAD(tlist);
2881 int mptn;
2882 int err;
2883
2884 err = move_all_busy(dev, slave, RES_MPT);
2885 if (err)
2886 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
2887 "busy for slave %d\n", slave);
2888
2889 spin_lock_irq(mlx4_tlock(dev));
2890 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
2891 spin_unlock_irq(mlx4_tlock(dev));
2892 if (mpt->com.owner == slave) {
2893 mptn = mpt->com.res_id;
2894 state = mpt->com.from_state;
2895 while (state != 0) {
2896 switch (state) {
2897 case RES_MPT_RESERVED:
2898 __mlx4_mr_release(dev, mpt->key);
2899 spin_lock_irq(mlx4_tlock(dev));
2900 radix_tree_delete(&tracker->res_tree[RES_MPT],
2901 mptn);
2902 list_del(&mpt->com.list);
2903 spin_unlock_irq(mlx4_tlock(dev));
2904 kfree(mpt);
2905 state = 0;
2906 break;
2907
2908 case RES_MPT_MAPPED:
2909 __mlx4_mr_free_icm(dev, mpt->key);
2910 state = RES_MPT_RESERVED;
2911 break;
2912
2913 case RES_MPT_HW:
2914 in_param = slave;
2915 err = mlx4_cmd(dev, in_param, mptn, 0,
2916 MLX4_CMD_HW2SW_MPT,
2917 MLX4_CMD_TIME_CLASS_A,
2918 MLX4_CMD_NATIVE);
2919 if (err)
2920 mlx4_dbg(dev, "rem_slave_mrs: failed"
2921 " to move slave %d mpt %d to"
2922 " SW ownership\n",
2923 slave, mptn);
2924 if (mpt->mtt)
2925 atomic_dec(&mpt->mtt->ref_count);
2926 state = RES_MPT_MAPPED;
2927 break;
2928 default:
2929 state = 0;
2930 }
2931 }
2932 }
2933 spin_lock_irq(mlx4_tlock(dev));
2934 }
2935 spin_unlock_irq(mlx4_tlock(dev));
2936}
2937
2938static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
2939{
2940 struct mlx4_priv *priv = mlx4_priv(dev);
2941 struct mlx4_resource_tracker *tracker =
2942 &priv->mfunc.master.res_tracker;
2943 struct list_head *mtt_list =
2944 &tracker->slave_list[slave].res_list[RES_MTT];
2945 struct res_mtt *mtt;
2946 struct res_mtt *tmp;
2947 int state;
2948 LIST_HEAD(tlist);
2949 int base;
2950 int err;
2951
2952 err = move_all_busy(dev, slave, RES_MTT);
2953 if (err)
2954 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
2955 "busy for slave %d\n", slave);
2956
2957 spin_lock_irq(mlx4_tlock(dev));
2958 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
2959 spin_unlock_irq(mlx4_tlock(dev));
2960 if (mtt->com.owner == slave) {
2961 base = mtt->com.res_id;
2962 state = mtt->com.from_state;
2963 while (state != 0) {
2964 switch (state) {
2965 case RES_MTT_ALLOCATED:
2966 __mlx4_free_mtt_range(dev, base,
2967 mtt->order);
2968 spin_lock_irq(mlx4_tlock(dev));
2969 radix_tree_delete(&tracker->res_tree[RES_MTT],
2970 base);
2971 list_del(&mtt->com.list);
2972 spin_unlock_irq(mlx4_tlock(dev));
2973 kfree(mtt);
2974 state = 0;
2975 break;
2976
2977 default:
2978 state = 0;
2979 }
2980 }
2981 }
2982 spin_lock_irq(mlx4_tlock(dev));
2983 }
2984 spin_unlock_irq(mlx4_tlock(dev));
2985}
2986
2987static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
2988{
2989 struct mlx4_priv *priv = mlx4_priv(dev);
2990 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2991 struct list_head *eq_list =
2992 &tracker->slave_list[slave].res_list[RES_EQ];
2993 struct res_eq *eq;
2994 struct res_eq *tmp;
2995 int err;
2996 int state;
2997 LIST_HEAD(tlist);
2998 int eqn;
2999 struct mlx4_cmd_mailbox *mailbox;
3000
3001 err = move_all_busy(dev, slave, RES_EQ);
3002 if (err)
3003 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
3004 "busy for slave %d\n", slave);
3005
3006 spin_lock_irq(mlx4_tlock(dev));
3007 list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
3008 spin_unlock_irq(mlx4_tlock(dev));
3009 if (eq->com.owner == slave) {
3010 eqn = eq->com.res_id;
3011 state = eq->com.from_state;
3012 while (state != 0) {
3013 switch (state) {
3014 case RES_EQ_RESERVED:
3015 spin_lock_irq(mlx4_tlock(dev));
3016 radix_tree_delete(&tracker->res_tree[RES_EQ],
3017 eqn);
3018 list_del(&eq->com.list);
3019 spin_unlock_irq(mlx4_tlock(dev));
3020 kfree(eq);
3021 state = 0;
3022 break;
3023
3024 case RES_EQ_HW:
3025 mailbox = mlx4_alloc_cmd_mailbox(dev);
3026 if (IS_ERR(mailbox)) {
3027 cond_resched();
3028 continue;
3029 }
3030 err = mlx4_cmd_box(dev, slave, 0,
3031 eqn & 0xff, 0,
3032 MLX4_CMD_HW2SW_EQ,
3033 MLX4_CMD_TIME_CLASS_A,
3034 MLX4_CMD_NATIVE);
3035 				if (err)
3036 					mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
3037 						 slave, eqn);
3038 mlx4_free_cmd_mailbox(dev, mailbox);
3039 if (!err) {
3040 atomic_dec(&eq->mtt->ref_count);
3041 state = RES_EQ_RESERVED;
3042 }
3043 break;
3044
3045 default:
3046 state = 0;
3047 }
3048 }
3049 }
3050 spin_lock_irq(mlx4_tlock(dev));
3051 }
3052 spin_unlock_irq(mlx4_tlock(dev));
3053}
3054
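/*
 * Called when a slave goes away: release everything it still owns, with
 * MAC addresses first and with QPs, SRQs, CQs, MRs and EQs released
 * before the MTTs they reference.
 */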
3055void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
3056{
3057 struct mlx4_priv *priv = mlx4_priv(dev);
3058
3059 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3060 	/* VLAN */
3061 rem_slave_macs(dev, slave);
3062 rem_slave_qps(dev, slave);
3063 rem_slave_srqs(dev, slave);
3064 rem_slave_cqs(dev, slave);
3065 rem_slave_mrs(dev, slave);
3066 rem_slave_eqs(dev, slave);
3067 rem_slave_mtts(dev, slave);
3068 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3069}