net/mlx5_core: Set priority attributes
[deliverable/linux.git] / drivers / net / ethernet / mellanox / mlx5 / core / fs_core.c
CommitLineData
de8575e0
MG
1/*
2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/mutex.h>
34#include <linux/mlx5/driver.h>
35
36#include "mlx5_core.h"
37#include "fs_core.h"
0c56b975
MG
38#include "fs_cmd.h"
39
25302363
MG
/* Number of elements in a compound-literal array of struct init_tree_node;
 * used to size the .children arrays built by the INIT_* macros below.
 */
#define INIT_TREE_NODE_ARRAY_SIZE(...)	(sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
					 sizeof(struct init_tree_node))

/* Build a priority node with an explicit minimum required flow-table
 * level and a maximum number of flow tables; children are the variadic
 * arguments.
 */
#define INIT_PRIO(min_level_val, max_ft_val,\
		  ...) {.type = FS_TYPE_PRIO,\
	.min_ft_level = min_level_val,\
	.max_ft = max_ft_val,\
	.children = (struct init_tree_node[]) {__VA_ARGS__},\
	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
}

/* Priority that requires the device to support at least min_level_val
 * flow-table levels.
 */
#define ADD_PRIO(min_level_val, max_ft_val, ...)\
	INIT_PRIO(min_level_val, max_ft_val,\
		  __VA_ARGS__)\

/* Flow-table priority with no minimum-level requirement */
#define ADD_FT_PRIO(max_ft_val, ...)\
	INIT_PRIO(0, max_ft_val,\
		  __VA_ARGS__)\

/* Namespace node whose children are the variadic arguments */
#define ADD_NS(...) {.type = FS_TYPE_NAMESPACE,\
	.children = (struct init_tree_node[]) {__VA_ARGS__},\
	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
}
63
25302363
MG
64#define KERNEL_MAX_FT 2
65#define KENREL_MIN_LEVEL 2
66static struct init_tree_node {
67 enum fs_node_type type;
68 struct init_tree_node *children;
69 int ar_size;
70 int min_ft_level;
71 int prio;
72 int max_ft;
25302363
MG
73} root_fs = {
74 .type = FS_TYPE_NAMESPACE,
75 .ar_size = 1,
76 .children = (struct init_tree_node[]) {
655227ed
MG
77 ADD_PRIO(KENREL_MIN_LEVEL, 0,
78 ADD_NS(ADD_FT_PRIO(KERNEL_MAX_FT))),
25302363
MG
79 }
80};
81
f0d22d18
MG
/* Lockdep subclasses for the fs_node mutexes taken in nested order
 * while walking table -> group -> fte (grandparent -> parent -> child).
 */
enum fs_i_mutex_lock_class {
	FS_MUTEX_GRANDPARENT,
	FS_MUTEX_PARENT,
	FS_MUTEX_CHILD
};
87
0c56b975
MG
/* Node removal callbacks, installed via tree_init_node() and invoked
 * by tree_put_node() when the last reference is dropped.
 */
static void del_rule(struct fs_node *node);
static void del_flow_table(struct fs_node *node);
static void del_flow_group(struct fs_node *node);
static void del_fte(struct fs_node *node);
de8575e0
MG
92
93static void tree_init_node(struct fs_node *node,
94 unsigned int refcount,
95 void (*remove_func)(struct fs_node *))
96{
97 atomic_set(&node->refcount, refcount);
98 INIT_LIST_HEAD(&node->list);
99 INIT_LIST_HEAD(&node->children);
100 mutex_init(&node->lock);
101 node->remove_func = remove_func;
102}
103
104static void tree_add_node(struct fs_node *node, struct fs_node *parent)
105{
106 if (parent)
107 atomic_inc(&parent->refcount);
108 node->parent = parent;
109
110 /* Parent is the root */
111 if (!parent)
112 node->root = node;
113 else
114 node->root = parent->root;
115}
116
/* Take an additional reference on node */
static void tree_get_node(struct fs_node *node)
{
	atomic_inc(&node->refcount);
}
121
f0d22d18
MG
122static void nested_lock_ref_node(struct fs_node *node,
123 enum fs_i_mutex_lock_class class)
de8575e0
MG
124{
125 if (node) {
f0d22d18 126 mutex_lock_nested(&node->lock, class);
de8575e0
MG
127 atomic_inc(&node->refcount);
128 }
129}
130
131static void lock_ref_node(struct fs_node *node)
132{
133 if (node) {
134 mutex_lock(&node->lock);
135 atomic_inc(&node->refcount);
136 }
137}
138
139static void unlock_ref_node(struct fs_node *node)
140{
141 if (node) {
142 atomic_dec(&node->refcount);
143 mutex_unlock(&node->lock);
144 }
145}
146
/* Drop a reference on node.  When it reaches zero, unlink the node from
 * its parent (under the parent's lock), run its removal callback and
 * free it; the freed node's reference on its parent is then released,
 * which may cascade up the tree.
 */
static void tree_put_node(struct fs_node *node)
{
	struct fs_node *parent_node = node->parent;

	lock_ref_node(parent_node);
	if (atomic_dec_and_test(&node->refcount)) {
		if (parent_node)
			list_del_init(&node->list);
		if (node->remove_func)
			node->remove_func(node);
		kfree(node);
		node = NULL;	/* flags the cascaded put below */
	}
	unlock_ref_node(parent_node);
	if (!node && parent_node)
		tree_put_node(parent_node);
}
164
/* Explicitly remove a node: fails with -EPERM while other holders still
 * reference it (refcount > 1), otherwise drops the last reference.
 */
static int tree_remove_node(struct fs_node *node)
{
	if (atomic_read(&node->refcount) > 1)
		return -EPERM;
	tree_put_node(node);
	return 0;
}
5e1626c0
MG
172
173static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
174 unsigned int prio)
175{
176 struct fs_prio *iter_prio;
177
178 fs_for_each_prio(iter_prio, ns) {
179 if (iter_prio->prio == prio)
180 return iter_prio;
181 }
182
183 return NULL;
184}
185
186static unsigned int find_next_free_level(struct fs_prio *prio)
187{
188 if (!list_empty(&prio->node.children)) {
189 struct mlx5_flow_table *ft;
190
191 ft = list_last_entry(&prio->node.children,
192 struct mlx5_flow_table,
193 node.list);
194 return ft->level + 1;
195 }
196 return prio->start_level;
197}
198
/* Masked byte-wise comparison: true iff val1 and val2 agree on every
 * bit that is set in mask, over size bytes.
 */
static bool masked_memcmp(void *mask, void *val1, void *val2, size_t size)
{
	const unsigned char *m = mask;
	const unsigned char *a = val1;
	const unsigned char *b = val2;
	size_t i;

	for (i = 0; i < size; i++)
		if ((a[i] & m[i]) != (b[i] & m[i]))
			return false;

	return true;
}
210
/* Compare two FTE match-parameter blobs under the group mask.  Only the
 * criteria families enabled in the mask (outer headers, misc
 * parameters, inner headers) are compared, each restricted to the bits
 * set in the corresponding mask section.
 */
static bool compare_match_value(struct mlx5_flow_group_mask *mask,
				void *fte_param1, void *fte_param2)
{
	if (mask->match_criteria_enable &
	    1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS) {
		void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
						fte_param1, outer_headers);
		void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
						fte_param2, outer_headers);
		void *fte_mask = MLX5_ADDR_OF(fte_match_param,
					      mask->match_criteria, outer_headers);

		if (!masked_memcmp(fte_mask, fte_match1, fte_match2,
				   MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)))
			return false;
	}

	if (mask->match_criteria_enable &
	    1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS) {
		void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
						fte_param1, misc_parameters);
		void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
						fte_param2, misc_parameters);
		void *fte_mask = MLX5_ADDR_OF(fte_match_param,
					      mask->match_criteria, misc_parameters);

		if (!masked_memcmp(fte_mask, fte_match1, fte_match2,
				   MLX5_ST_SZ_BYTES(fte_match_set_misc)))
			return false;
	}

	if (mask->match_criteria_enable &
	    1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS) {
		void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
						fte_param1, inner_headers);
		void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
						fte_param2, inner_headers);
		void *fte_mask = MLX5_ADDR_OF(fte_match_param,
					      mask->match_criteria, inner_headers);

		if (!masked_memcmp(fte_mask, fte_match1, fte_match2,
				   MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)))
			return false;
	}
	return true;
}
257
258static bool compare_match_criteria(u8 match_criteria_enable1,
259 u8 match_criteria_enable2,
260 void *mask1, void *mask2)
261{
262 return match_criteria_enable1 == match_criteria_enable2 &&
263 !memcmp(mask1, mask2, MLX5_ST_SZ_BYTES(fte_match_param));
264}
0c56b975
MG
265
/* Resolve the root-namespace container of the tree node belongs to via
 * its cached root pointer.  Returns NULL (with a warning) when the root
 * pointer does not name a namespace node, i.e. the node is detached or
 * corrupted.
 */
static struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
{
	struct fs_node *root;
	struct mlx5_flow_namespace *ns;

	root = node->root;

	if (WARN_ON(root->type != FS_TYPE_NAMESPACE)) {
		pr_warn("mlx5: flow steering node is not in tree or garbaged\n");
		return NULL;
	}

	/* The root fs_node is embedded in a namespace which is itself
	 * embedded in the root-namespace container.
	 */
	ns = container_of(root, struct mlx5_flow_namespace, node);
	return container_of(ns, struct mlx5_flow_root_namespace, ns);
}
281
282static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
283{
284 struct mlx5_flow_root_namespace *root = find_root(node);
285
286 if (root)
287 return root->dev;
288 return NULL;
289}
290
/* Removal callback for flow tables: destroy the table in firmware and
 * decrement the owning priority's table count.
 */
static void del_flow_table(struct fs_node *node)
{
	struct mlx5_flow_table *ft;
	struct mlx5_core_dev *dev;
	struct fs_prio *prio;
	int err;

	fs_get_obj(ft, node);
	dev = get_dev(&ft->node);

	err = mlx5_cmd_destroy_flow_table(dev, ft);
	if (err)
		pr_warn("flow steering can't destroy ft\n");
	fs_get_obj(prio, ft->node.parent);
	prio->num_ft--;
}
307
308static void del_rule(struct fs_node *node)
309{
310 struct mlx5_flow_rule *rule;
311 struct mlx5_flow_table *ft;
312 struct mlx5_flow_group *fg;
313 struct fs_fte *fte;
314 u32 *match_value;
315 struct mlx5_core_dev *dev = get_dev(node);
316 int match_len = MLX5_ST_SZ_BYTES(fte_match_param);
317 int err;
318
319 match_value = mlx5_vzalloc(match_len);
320 if (!match_value) {
321 pr_warn("failed to allocate inbox\n");
322 return;
323 }
324
325 fs_get_obj(rule, node);
326 fs_get_obj(fte, rule->node.parent);
327 fs_get_obj(fg, fte->node.parent);
328 memcpy(match_value, fte->val, sizeof(fte->val));
329 fs_get_obj(ft, fg->node.parent);
330 list_del(&rule->node.list);
331 fte->dests_size--;
332 if (fte->dests_size) {
333 err = mlx5_cmd_update_fte(dev, ft,
334 fg->id, fte);
335 if (err)
336 pr_warn("%s can't del rule fg id=%d fte_index=%d\n",
337 __func__, fg->id, fte->index);
338 }
339 kvfree(match_value);
340}
341
/* Removal callback for FTEs: delete the entry from firmware, clear its
 * status and release its slot in the owning flow group.
 */
static void del_fte(struct fs_node *node)
{
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct mlx5_core_dev *dev;
	struct fs_fte *fte;
	int err;

	fs_get_obj(fte, node);
	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);

	dev = get_dev(&ft->node);
	err = mlx5_cmd_delete_fte(dev, ft,
				  fte->index);
	if (err)
		pr_warn("flow steering can't delete fte in index %d of flow group id %d\n",
			fte->index, fg->id);

	fte->status = 0;
	fg->num_ftes--;
}
364
/* Removal callback for flow groups: destroy the group in firmware */
static void del_flow_group(struct fs_node *node)
{
	struct mlx5_flow_group *fg;
	struct mlx5_flow_table *ft;
	struct mlx5_core_dev *dev;

	fs_get_obj(fg, node);
	fs_get_obj(ft, fg->node.parent);
	dev = get_dev(&ft->node);

	if (mlx5_cmd_destroy_flow_group(dev, ft, fg->id))
		pr_warn("flow steering can't destroy fg %d of ft %d\n",
			fg->id, ft->id);
}
379
380static struct fs_fte *alloc_fte(u8 action,
381 u32 flow_tag,
382 u32 *match_value,
383 unsigned int index)
384{
385 struct fs_fte *fte;
386
387 fte = kzalloc(sizeof(*fte), GFP_KERNEL);
388 if (!fte)
389 return ERR_PTR(-ENOMEM);
390
391 memcpy(fte->val, match_value, sizeof(fte->val));
392 fte->node.type = FS_TYPE_FLOW_ENTRY;
393 fte->flow_tag = flow_tag;
394 fte->index = index;
395 fte->action = action;
396
397 return fte;
398}
399
/* Allocate a flow group object from a FW create_flow_group_in mailbox,
 * caching the match criteria mask and the group's FTE index range.
 * Returns ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct mlx5_flow_group *alloc_flow_group(u32 *create_fg_in)
{
	struct mlx5_flow_group *fg;
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    create_fg_in, match_criteria);
	u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
					    create_fg_in,
					    match_criteria_enable);
	fg = kzalloc(sizeof(*fg), GFP_KERNEL);
	if (!fg)
		return ERR_PTR(-ENOMEM);

	fg->mask.match_criteria_enable = match_criteria_enable;
	memcpy(&fg->mask.match_criteria, match_criteria,
	       sizeof(fg->mask.match_criteria));
	fg->node.type =  FS_TYPE_FLOW_GROUP;
	fg->start_index = MLX5_GET(create_flow_group_in, create_fg_in,
				   start_flow_index);
	/* index range is inclusive: [start_flow_index, end_flow_index] */
	fg->max_ftes = MLX5_GET(create_flow_group_in, create_fg_in,
				end_flow_index) - fg->start_index + 1;
	return fg;
}
422
423static struct mlx5_flow_table *alloc_flow_table(int level, int max_fte,
424 enum fs_flow_table_type table_type)
425{
426 struct mlx5_flow_table *ft;
427
428 ft = kzalloc(sizeof(*ft), GFP_KERNEL);
429 if (!ft)
430 return NULL;
431
432 ft->level = level;
433 ft->node.type = FS_TYPE_FLOW_TABLE;
434 ft->type = table_type;
435 ft->max_fte = max_fte;
436
437 return ft;
438}
439
fdb6896f
MG
/* If reverse is false, then we search for the first flow table in the
 * root sub-tree from start(closest from right), else we search for the
 * last flow table in the root sub-tree till start(closest from left).
 */
static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node  *root,
							 struct list_head *start,
							 bool reverse)
{
#define list_advance_entry(pos, reverse)		\
	((reverse) ? list_prev_entry(pos, list) : list_next_entry(pos, list))

#define list_for_each_advance_continue(pos, head, reverse)	\
	for (pos = list_advance_entry(pos, reverse);		\
	     &pos->list != (head);				\
	     pos = list_advance_entry(pos, reverse))

	struct fs_node *iter = list_entry(start, struct fs_node, list);
	struct mlx5_flow_table *ft = NULL;

	if (!root)
		return NULL;

	list_for_each_advance_continue(iter, &root->children, reverse) {
		if (iter->type == FS_TYPE_FLOW_TABLE) {
			/* Flow tables are leaves - first hit wins */
			fs_get_obj(ft, iter);
			return ft;
		}
		/* Prio/namespace node - descend into its children */
		ft = find_closest_ft_recursive(iter, &iter->children, reverse);
		if (ft)
			return ft;
	}

	return ft;
}
474
/* If reverse if false then return the first flow table in next priority of
 * prio in the tree, else return the last flow table in the previous priority
 * of prio in the tree.
 */
static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool reverse)
{
	struct mlx5_flow_table *ft = NULL;
	struct fs_node *curr_node;
	struct fs_node *parent;

	parent = prio->node.parent;
	curr_node = &prio->node;
	/* Widen the search one ancestor at a time until a table is found
	 * or the tree root is exhausted.
	 */
	while (!ft && parent) {
		ft = find_closest_ft_recursive(parent, &curr_node->list, reverse);
		curr_node = parent;
		parent = curr_node->parent;
	}
	return ft;
}
494
/* Assuming all the tree is locked by mutex chain lock */
/* First flow table of the priority chained after prio, or NULL */
static struct mlx5_flow_table *find_next_chained_ft(struct fs_prio *prio)
{
	return find_closest_ft(prio, false);
}
500
/* Assuming all the tree is locked by mutex chain lock */
/* Last flow table of the priority chained before prio, or NULL */
static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
{
	return find_closest_ft(prio, true);
}
506
f90edfd2
MG
/* Re-point every flow table in prio so that its misses forward to ft */
static int connect_fts_in_prio(struct mlx5_core_dev *dev,
			       struct fs_prio *prio,
			       struct mlx5_flow_table *ft)
{
	struct mlx5_flow_table *iter;
	int i = 0;
	int err;

	fs_for_each_ft(iter, prio) {
		i++;
		err = mlx5_cmd_modify_flow_table(dev,
						 iter,
						 ft);
		if (err) {
			mlx5_core_warn(dev, "Failed to modify flow table %d\n",
				       iter->id);
			/* The driver is out of sync with the FW */
			if (i > 1)
				WARN_ON(true);
			return err;
		}
	}
	return 0;
}
531
532/* Connect flow tables from previous priority of prio to ft */
533static int connect_prev_fts(struct mlx5_core_dev *dev,
534 struct mlx5_flow_table *ft,
535 struct fs_prio *prio)
536{
537 struct mlx5_flow_table *prev_ft;
538
539 prev_ft = find_prev_chained_ft(prio);
540 if (prev_ft) {
541 struct fs_prio *prev_prio;
542
543 fs_get_obj(prev_prio, prev_ft->node.parent);
544 return connect_fts_in_prio(dev, prev_prio, ft);
545 }
546 return 0;
547}
548
2cc43b49
MG
/* Make ft the root flow table of its namespace if it sits at a lower
 * (earlier) level than the current root table.
 */
static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
				 *prio)
{
	struct mlx5_flow_root_namespace *root = find_root(&prio->node);
	int min_level = INT_MAX;	/* no current root: any level wins */
	int err;

	if (root->root_ft)
		min_level = root->root_ft->level;

	if (ft->level >= min_level)
		return 0;

	err = mlx5_cmd_update_root_ft(root->dev, ft);
	if (err)
		mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n",
			       ft->id);
	else
		root->root_ft = ft;

	return err;
}
571
f90edfd2
MG
/* Wire a newly created ft into the chain: when it is the first table of
 * its priority, the previous priority's tables are re-pointed at it;
 * it may also become the namespace root table if the device supports
 * modifying the root.
 */
static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
			      struct fs_prio *prio)
{
	int err = 0;

	/* Connect_prev_fts and update_root_ft_create are mutually exclusive */

	if (list_empty(&prio->node.children)) {
		err = connect_prev_fts(dev, ft, prio);
		if (err)
			return err;
	}

	if (MLX5_CAP_FLOWTABLE(dev,
			       flow_table_properties_nic_receive.modify_root))
		err = update_root_ft_create(ft, prio);
	return err;
}
590
86d722ad
MG
/* Create a flow table in the given priority of ns: allocate it in
 * firmware (sized to the next power of two of max_fte), connect it into
 * the table chain and add it to the steering tree.  Returns the table
 * or an ERR_PTR on failure.
 */
struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
					       int prio,
					       int max_fte)
{
	struct mlx5_flow_table *next_ft = NULL;
	struct mlx5_flow_table *ft;
	int err;
	int log_table_sz;
	struct mlx5_flow_root_namespace *root =
		find_root(&ns->node);
	struct fs_prio *fs_prio = NULL;

	if (!root) {
		pr_err("mlx5: flow steering failed to find root of namespace\n");
		return ERR_PTR(-ENODEV);
	}

	/* chain_lock serializes table create/destroy across the namespace */
	mutex_lock(&root->chain_lock);
	fs_prio = find_prio(ns, prio);
	if (!fs_prio) {
		err = -EINVAL;
		goto unlock_root;
	}
	if (fs_prio->num_ft == fs_prio->max_ft) {
		err = -ENOSPC;
		goto unlock_root;
	}

	ft = alloc_flow_table(find_next_free_level(fs_prio),
			      roundup_pow_of_two(max_fte),
			      root->table_type);
	if (!ft) {
		err = -ENOMEM;
		goto unlock_root;
	}

	tree_init_node(&ft->node, 1, del_flow_table);
	log_table_sz = ilog2(ft->max_fte);
	/* the new table's misses forward to the next chained table */
	next_ft = find_next_chained_ft(fs_prio);
	err = mlx5_cmd_create_flow_table(root->dev, ft->type, ft->level,
					 log_table_sz, next_ft, &ft->id);
	if (err)
		goto free_ft;

	err = connect_flow_table(root->dev, ft, fs_prio);
	if (err)
		goto destroy_ft;
	lock_ref_node(&fs_prio->node);
	tree_add_node(&ft->node, &fs_prio->node);
	list_add_tail(&ft->node.list, &fs_prio->node.children);
	fs_prio->num_ft++;
	unlock_ref_node(&fs_prio->node);
	mutex_unlock(&root->chain_lock);
	return ft;
destroy_ft:
	mlx5_cmd_destroy_flow_table(root->dev, ft);
free_ft:
	kfree(ft);
unlock_root:
	mutex_unlock(&root->chain_lock);
	return ERR_PTR(err);
}
653
f0d22d18
MG
654struct mlx5_flow_table *mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
655 int prio,
656 int num_flow_table_entries,
657 int max_num_groups)
658{
659 struct mlx5_flow_table *ft;
660
661 if (max_num_groups > num_flow_table_entries)
662 return ERR_PTR(-EINVAL);
663
664 ft = mlx5_create_flow_table(ns, prio, num_flow_table_entries);
665 if (IS_ERR(ft))
666 return ft;
667
668 ft->autogroup.active = true;
669 ft->autogroup.required_groups = max_num_groups;
670
671 return ft;
672}
673
/* Flow table should be locked */
/* Create a flow group from the FW mailbox fg_in and insert it into
 * ft's child list.  Auto-created groups (is_auto_fg) start with a zero
 * refcount so they disappear together with their last FTE.
 * NOTE(review): prev_fg is currently unused - the group is always
 * appended at the tail of ft's children; confirm against callers.
 */
static struct mlx5_flow_group *create_flow_group_common(struct mlx5_flow_table *ft,
							u32 *fg_in,
							struct list_head
							*prev_fg,
							bool is_auto_fg)
{
	struct mlx5_flow_group *fg;
	struct mlx5_core_dev *dev = get_dev(&ft->node);
	int err;

	if (!dev)
		return ERR_PTR(-ENODEV);

	fg = alloc_flow_group(fg_in);
	if (IS_ERR(fg))
		return fg;

	err = mlx5_cmd_create_flow_group(dev, ft, fg_in, &fg->id);
	if (err) {
		kfree(fg);
		return ERR_PTR(err);
	}

	if (ft->autogroup.active)
		ft->autogroup.num_groups++;
	/* Add node to tree */
	tree_init_node(&fg->node, !is_auto_fg, del_flow_group);
	tree_add_node(&fg->node, &ft->node);
	/* Add node to group list */
	list_add(&fg->node.list, ft->node.children.prev);

	return fg;
}
708
/* Create an explicit flow group in ft from the FW mailbox fg_in.
 * Not permitted on autogrouped tables (-EPERM).
 */
struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
					       u32 *fg_in)
{
	struct mlx5_flow_group *fg;

	if (ft->autogroup.active)
		return ERR_PTR(-EPERM);

	lock_ref_node(&ft->node);
	fg = create_flow_group_common(ft, fg_in, &ft->node.children, false);
	unlock_ref_node(&ft->node);

	return fg;
}
723
724static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
725{
726 struct mlx5_flow_rule *rule;
727
728 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
729 if (!rule)
730 return NULL;
731
732 rule->node.type = FS_TYPE_FLOW_DEST;
733 memcpy(&rule->dest_attr, dest, sizeof(*dest));
734
735 return rule;
736}
737
/* fte should not be deleted while calling this function */
/* Add a destination rule under fte and push the FTE to firmware:
 * created on the first destination, updated on subsequent ones.  On
 * firmware failure the rule is unlinked and an ERR_PTR returned.
 */
static struct mlx5_flow_rule *add_rule_fte(struct fs_fte *fte,
					   struct mlx5_flow_group *fg,
					   struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_table *ft;
	struct mlx5_flow_rule *rule;
	int err;

	rule = alloc_rule(dest);
	if (!rule)
		return ERR_PTR(-ENOMEM);

	fs_get_obj(ft, fg->node.parent);
	/* Add dest to dests list- added as first element after the head */
	tree_init_node(&rule->node, 1, del_rule);
	list_add_tail(&rule->node.list, &fte->node.children);
	fte->dests_size++;
	if (fte->dests_size == 1)
		err = mlx5_cmd_create_fte(get_dev(&ft->node),
					  ft, fg->id, fte);
	else
		err = mlx5_cmd_update_fte(get_dev(&ft->node),
					  ft, fg->id, fte);
	if (err)
		goto free_rule;

	fte->status |= FS_FTE_STATUS_EXISTING;

	return rule;

free_rule:
	list_del(&rule->node.list);
	kfree(rule);
	fte->dests_size--;
	return ERR_PTR(err);
}
775
/* Assumed fg is locked */
/* Return the lowest unused FTE index in fg; when prev is non-NULL it is
 * set to the list position after which the new FTE should be inserted.
 */
static unsigned int get_free_fte_index(struct mlx5_flow_group *fg,
				       struct list_head **prev)
{
	struct fs_fte *fte;
	unsigned int start = fg->start_index;

	if (prev)
		*prev = &fg->node.children;

	/* assumed list is sorted by index */
	fs_for_each_fte(fte, fg) {
		if (fte->index != start)
			return start;	/* gap found before this entry */
		start++;
		if (prev)
			*prev = &fte->node.list;
	}

	return start;
}
797
798/* prev is output, prev->next = new_fte */
799static struct fs_fte *create_fte(struct mlx5_flow_group *fg,
800 u32 *match_value,
801 u8 action,
802 u32 flow_tag,
803 struct list_head **prev)
804{
805 struct fs_fte *fte;
806 int index;
807
808 index = get_free_fte_index(fg, prev);
809 fte = alloc_fte(action, flow_tag, match_value, index);
810 if (IS_ERR(fte))
811 return fte;
812
813 return fte;
814}
815
f0d22d18
MG
/* Carve a new flow group out of an autogrouped table for the given
 * match criteria, picking the first index range large enough between
 * the existing (start_index-sorted) groups.
 */
static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft,
						u8 match_criteria_enable,
						u32 *match_criteria)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct list_head *prev = &ft->node.children;
	unsigned int candidate_index = 0;
	struct mlx5_flow_group *fg;
	void *match_criteria_addr;
	unsigned int group_size = 0;
	u32 *in;

	if (!ft->autogroup.active)
		return ERR_PTR(-ENOENT);

	in = mlx5_vzalloc(inlen);
	if (!in)
		return ERR_PTR(-ENOMEM);

	if (ft->autogroup.num_groups < ft->autogroup.required_groups)
		/* We save place for flow groups in addition to max types */
		group_size = ft->max_fte / (ft->autogroup.required_groups + 1);

	/* ft->max_fte == ft->autogroup.max_types */
	if (group_size == 0)
		group_size = 1;

	/* sorted by start_index */
	fs_for_each_fg(fg, ft) {
		if (candidate_index + group_size > fg->start_index)
			candidate_index = fg->start_index + fg->max_ftes;
		else
			break;	/* gap before this group is big enough */
		prev = &fg->node.list;
	}

	if (candidate_index + group_size > ft->max_fte) {
		fg = ERR_PTR(-ENOSPC);
		goto out;
	}

	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 match_criteria_enable);
	MLX5_SET(create_flow_group_in, in, start_flow_index, candidate_index);
	MLX5_SET(create_flow_group_in, in, end_flow_index, candidate_index +
		 group_size - 1);
	match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in,
					   in, match_criteria);
	memcpy(match_criteria_addr, match_criteria,
	       MLX5_ST_SZ_BYTES(fte_match_param));

	fg = create_flow_group_common(ft, in, prev, true);
out:
	kvfree(in);
	return fg;
}
872
0c56b975
MG
/* Add a rule to fg: reuse an existing FTE with identical masked match
 * value, action and flow tag when one exists, otherwise create a new
 * FTE at the first free index.  Takes the group lock, then each FTE
 * lock with the nested (child) lockdep class.
 */
static struct mlx5_flow_rule *add_rule_fg(struct mlx5_flow_group *fg,
					  u32 *match_value,
					  u8 action,
					  u32 flow_tag,
					  struct mlx5_flow_destination *dest)
{
	struct fs_fte *fte;
	struct mlx5_flow_rule *rule;
	struct mlx5_flow_table *ft;
	struct list_head *prev;

	nested_lock_ref_node(&fg->node, FS_MUTEX_PARENT);
	fs_for_each_fte(fte, fg) {
		nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD);
		if (compare_match_value(&fg->mask, match_value, &fte->val) &&
		    action == fte->action && flow_tag == fte->flow_tag) {
			rule = add_rule_fte(fte, fg, dest);
			unlock_ref_node(&fte->node);
			if (IS_ERR(rule))
				goto unlock_fg;
			else
				goto add_rule;
		}
		unlock_ref_node(&fte->node);
	}
	fs_get_obj(ft, fg->node.parent);
	if (fg->num_ftes >= fg->max_ftes) {
		rule = ERR_PTR(-ENOSPC);
		goto unlock_fg;
	}

	fte = create_fte(fg, match_value, action, flow_tag, &prev);
	if (IS_ERR(fte)) {
		rule = (void *)fte;
		goto unlock_fg;
	}
	/* refcount 0: the FTE lives only as long as it has rules */
	tree_init_node(&fte->node, 0, del_fte);
	rule = add_rule_fte(fte, fg, dest);
	if (IS_ERR(rule)) {
		kfree(fte);
		goto unlock_fg;
	}

	fg->num_ftes++;

	tree_add_node(&fte->node, &fg->node);
	list_add(&fte->node.list, prev);
add_rule:
	tree_add_node(&rule->node, &fte->node);
unlock_fg:
	unlock_ref_node(&fg->node);
	return rule;
}
926
f0d22d18
MG
/* Create an autogroup for the given criteria and add the rule to it;
 * on failure the freshly created (empty) group is torn down again.
 */
static struct mlx5_flow_rule *add_rule_to_auto_fg(struct mlx5_flow_table *ft,
						  u8 match_criteria_enable,
						  u32 *match_criteria,
						  u32 *match_value,
						  u8 action,
						  u32 flow_tag,
						  struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_rule *rule;
	struct mlx5_flow_group *g;

	g = create_autogroup(ft, match_criteria_enable, match_criteria);
	if (IS_ERR(g))
		return (void *)g;

	rule = add_rule_fg(g, match_value,
			   action, flow_tag, dest);
	if (IS_ERR(rule)) {
		/* Remove assumes refcount > 0 and autogroup creates a group
		 * with a refcount = 0.
		 */
		tree_get_node(&g->node);
		tree_remove_node(&g->node);
	}
	return rule;
}
953
86d722ad 954struct mlx5_flow_rule *
0c56b975
MG
955mlx5_add_flow_rule(struct mlx5_flow_table *ft,
956 u8 match_criteria_enable,
957 u32 *match_criteria,
958 u32 *match_value,
959 u32 action,
960 u32 flow_tag,
961 struct mlx5_flow_destination *dest)
962{
963 struct mlx5_flow_group *g;
f0d22d18 964 struct mlx5_flow_rule *rule;
0c56b975 965
f0d22d18 966 nested_lock_ref_node(&ft->node, FS_MUTEX_GRANDPARENT);
0c56b975
MG
967 fs_for_each_fg(g, ft)
968 if (compare_match_criteria(g->mask.match_criteria_enable,
969 match_criteria_enable,
970 g->mask.match_criteria,
971 match_criteria)) {
0c56b975
MG
972 rule = add_rule_fg(g, match_value,
973 action, flow_tag, dest);
f0d22d18
MG
974 if (!IS_ERR(rule) || PTR_ERR(rule) != -ENOSPC)
975 goto unlock;
0c56b975 976 }
f0d22d18
MG
977
978 rule = add_rule_to_auto_fg(ft, match_criteria_enable, match_criteria,
979 match_value, action, flow_tag, dest);
980unlock:
0c56b975 981 unlock_ref_node(&ft->node);
0c56b975
MG
982 return rule;
983}
984
86d722ad 985void mlx5_del_flow_rule(struct mlx5_flow_rule *rule)
0c56b975
MG
986{
987 tree_remove_node(&rule->node);
988}
989
2cc43b49
MG
990/* Assuming prio->node.children(flow tables) is sorted by level */
991static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft)
992{
993 struct fs_prio *prio;
994
995 fs_get_obj(prio, ft->node.parent);
996
997 if (!list_is_last(&ft->node.list, &prio->node.children))
998 return list_next_entry(ft, node.list);
999 return find_next_chained_ft(prio);
1000}
1001
/* If ft is the namespace root table, promote the next table in the
 * chain (when one exists) to root before ft is destroyed.
 */
static int update_root_ft_destroy(struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	struct mlx5_flow_table *new_root_ft = NULL;

	if (root->root_ft != ft)
		return 0;

	new_root_ft = find_next_ft(ft);
	if (new_root_ft) {
		int err = mlx5_cmd_update_root_ft(root->dev, new_root_ft);

		if (err) {
			mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n",
				       ft->id);
			return err;
		}
		root->root_ft = new_root_ft;
	}
	return 0;
}
1023
f90edfd2
MG
/* Connect flow table from previous priority to
 * the next flow table.
 */
static int disconnect_flow_table(struct mlx5_flow_table *ft)
{
	struct mlx5_core_dev *dev = get_dev(&ft->node);
	struct mlx5_flow_table *next_ft;
	struct fs_prio *prio;
	int err = 0;

	err = update_root_ft_destroy(ft);
	if (err)
		return err;

	fs_get_obj(prio, ft->node.parent);
	/* Only the first table of a priority is chained to from the
	 * previous priority; removing any other table needs no rewiring.
	 */
	if  (!(list_first_entry(&prio->node.children,
				struct mlx5_flow_table,
				node.list) == ft))
		return 0;

	next_ft = find_next_chained_ft(prio);
	err = connect_prev_fts(dev, next_ft, prio);
	if (err)
		mlx5_core_warn(dev, "Failed to disconnect flow table %d\n",
			       ft->id);
	return err;
}
1051
86d722ad 1052int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
0c56b975 1053{
2cc43b49
MG
1054 struct mlx5_flow_root_namespace *root = find_root(&ft->node);
1055 int err = 0;
1056
1057 mutex_lock(&root->chain_lock);
f90edfd2 1058 err = disconnect_flow_table(ft);
2cc43b49
MG
1059 if (err) {
1060 mutex_unlock(&root->chain_lock);
1061 return err;
1062 }
0c56b975
MG
1063 if (tree_remove_node(&ft->node))
1064 mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
1065 ft->id);
2cc43b49 1066 mutex_unlock(&root->chain_lock);
0c56b975 1067
2cc43b49 1068 return err;
0c56b975
MG
1069}
1070
86d722ad 1071void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
0c56b975
MG
1072{
1073 if (tree_remove_node(&fg->node))
1074 mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
1075 fg->id);
1076}
25302363 1077
86d722ad
MG
1078struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
1079 enum mlx5_flow_namespace_type type)
25302363
MG
1080{
1081 struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns;
1082 int prio;
1083 static struct fs_prio *fs_prio;
1084 struct mlx5_flow_namespace *ns;
1085
1086 if (!root_ns)
1087 return NULL;
1088
1089 switch (type) {
1090 case MLX5_FLOW_NAMESPACE_KERNEL:
1091 prio = 0;
1092 break;
1093 case MLX5_FLOW_NAMESPACE_FDB:
1094 if (dev->priv.fdb_root_ns)
1095 return &dev->priv.fdb_root_ns->ns;
1096 else
1097 return NULL;
1098 default:
1099 return NULL;
1100 }
1101
1102 fs_prio = find_prio(&root_ns->ns, prio);
1103 if (!fs_prio)
1104 return NULL;
1105
1106 ns = list_first_entry(&fs_prio->node.children,
1107 typeof(*ns),
1108 node.list);
1109
1110 return ns;
1111}
1112
/* Allocate a priority node with the given priority value and flow-table
 * capacity, and append it to ns's children.
 */
static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
				      unsigned prio, int max_ft)
{
	struct fs_prio *fs_prio;

	fs_prio = kzalloc(sizeof(*fs_prio), GFP_KERNEL);
	if (!fs_prio)
		return ERR_PTR(-ENOMEM);

	fs_prio->node.type = FS_TYPE_PRIO;
	tree_init_node(&fs_prio->node, 1, NULL);
	tree_add_node(&fs_prio->node, &ns->node);
	fs_prio->max_ft = max_ft;
	fs_prio->prio = prio;
	list_add_tail(&fs_prio->node.list, &ns->node.children);

	return fs_prio;
}
1131
1132static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace
1133 *ns)
1134{
1135 ns->node.type = FS_TYPE_NAMESPACE;
1136
1137 return ns;
1138}
1139
/* Allocate a namespace node and append it to prio's children */
static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)
{
	struct mlx5_flow_namespace	*ns;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return ERR_PTR(-ENOMEM);

	fs_init_namespace(ns);
	tree_init_node(&ns->node, 1, NULL);
	tree_add_node(&ns->node, &prio->node);
	list_add_tail(&ns->node.list, &prio->node.children);

	return ns;
}
1155
1156static int init_root_tree_recursive(int max_ft_level, struct init_tree_node *init_node,
1157 struct fs_node *fs_parent_node,
1158 struct init_tree_node *init_parent_node,
1159 int index)
1160{
1161 struct mlx5_flow_namespace *fs_ns;
1162 struct fs_prio *fs_prio;
1163 struct fs_node *base;
1164 int i;
1165 int err;
1166
1167 if (init_node->type == FS_TYPE_PRIO) {
1168 if (init_node->min_ft_level > max_ft_level)
1169 return -ENOTSUPP;
1170
1171 fs_get_obj(fs_ns, fs_parent_node);
655227ed 1172 fs_prio = fs_create_prio(fs_ns, index, init_node->max_ft);
25302363
MG
1173 if (IS_ERR(fs_prio))
1174 return PTR_ERR(fs_prio);
1175 base = &fs_prio->node;
1176 } else if (init_node->type == FS_TYPE_NAMESPACE) {
1177 fs_get_obj(fs_prio, fs_parent_node);
1178 fs_ns = fs_create_namespace(fs_prio);
1179 if (IS_ERR(fs_ns))
1180 return PTR_ERR(fs_ns);
1181 base = &fs_ns->node;
1182 } else {
1183 return -EINVAL;
1184 }
1185 for (i = 0; i < init_node->ar_size; i++) {
1186 err = init_root_tree_recursive(max_ft_level,
1187 &init_node->children[i], base,
1188 init_node, i);
1189 if (err)
1190 return err;
1191 }
1192
1193 return 0;
1194}
1195
1196static int init_root_tree(int max_ft_level, struct init_tree_node *init_node,
1197 struct fs_node *fs_parent_node)
1198{
1199 int i;
1200 struct mlx5_flow_namespace *fs_ns;
1201 int err;
1202
1203 fs_get_obj(fs_ns, fs_parent_node);
1204 for (i = 0; i < init_node->ar_size; i++) {
1205 err = init_root_tree_recursive(max_ft_level,
1206 &init_node->children[i],
1207 &fs_ns->node,
1208 init_node, i);
1209 if (err)
1210 return err;
1211 }
1212 return 0;
1213}
1214
1215static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_core_dev *dev,
1216 enum fs_flow_table_type
1217 table_type)
1218{
1219 struct mlx5_flow_root_namespace *root_ns;
1220 struct mlx5_flow_namespace *ns;
1221
86d722ad 1222 /* Create the root namespace */
25302363
MG
1223 root_ns = mlx5_vzalloc(sizeof(*root_ns));
1224 if (!root_ns)
1225 return NULL;
1226
1227 root_ns->dev = dev;
1228 root_ns->table_type = table_type;
1229
1230 ns = &root_ns->ns;
1231 fs_init_namespace(ns);
2cc43b49 1232 mutex_init(&root_ns->chain_lock);
25302363
MG
1233 tree_init_node(&ns->node, 1, NULL);
1234 tree_add_node(&ns->node, NULL);
1235
1236 return root_ns;
1237}
1238
655227ed
MG
1239static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level);
1240
1241static int set_prio_attrs_in_ns(struct mlx5_flow_namespace *ns, int acc_level)
1242{
1243 struct fs_prio *prio;
1244
1245 fs_for_each_prio(prio, ns) {
1246 /* This updates prio start_level and max_ft */
1247 set_prio_attrs_in_prio(prio, acc_level);
1248 acc_level += prio->max_ft;
1249 }
1250 return acc_level;
1251}
1252
1253static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
1254{
1255 struct mlx5_flow_namespace *ns;
1256 int acc_level_ns = acc_level;
1257
1258 prio->start_level = acc_level;
1259 fs_for_each_ns(ns, prio)
1260 /* This updates start_level and max_ft of ns's priority descendants */
1261 acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);
1262 if (!prio->max_ft)
1263 prio->max_ft = acc_level_ns - prio->start_level;
1264 WARN_ON(prio->max_ft < acc_level_ns - prio->start_level);
1265}
1266
1267static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
1268{
1269 struct mlx5_flow_namespace *ns = &root_ns->ns;
1270 struct fs_prio *prio;
1271 int start_level = 0;
1272
1273 fs_for_each_prio(prio, ns) {
1274 set_prio_attrs_in_prio(prio, start_level);
1275 start_level += prio->max_ft;
1276 }
1277}
1278
25302363
MG
1279static int init_root_ns(struct mlx5_core_dev *dev)
1280{
1281 int max_ft_level = MLX5_CAP_FLOWTABLE(dev,
1282 flow_table_properties_nic_receive.
1283 max_ft_level);
1284
1285 dev->priv.root_ns = create_root_ns(dev, FS_FT_NIC_RX);
1286 if (IS_ERR_OR_NULL(dev->priv.root_ns))
1287 goto cleanup;
1288
1289 if (init_root_tree(max_ft_level, &root_fs, &dev->priv.root_ns->ns.node))
1290 goto cleanup;
1291
655227ed
MG
1292 set_prio_attrs(dev->priv.root_ns);
1293
25302363
MG
1294 return 0;
1295
1296cleanup:
1297 mlx5_cleanup_fs(dev);
1298 return -ENOMEM;
1299}
1300
1301static void cleanup_single_prio_root_ns(struct mlx5_core_dev *dev,
1302 struct mlx5_flow_root_namespace *root_ns)
1303{
1304 struct fs_node *prio;
1305
1306 if (!root_ns)
1307 return;
1308
1309 if (!list_empty(&root_ns->ns.node.children)) {
1310 prio = list_first_entry(&root_ns->ns.node.children,
1311 struct fs_node,
1312 list);
1313 if (tree_remove_node(prio))
1314 mlx5_core_warn(dev,
1315 "Flow steering priority wasn't destroyed, refcount > 1\n");
1316 }
1317 if (tree_remove_node(&root_ns->ns.node))
1318 mlx5_core_warn(dev,
1319 "Flow steering namespace wasn't destroyed, refcount > 1\n");
1320 root_ns = NULL;
1321}
1322
/* Tear down the NIC RX root namespace tree bottom-up in three stages:
 *   1) remove prios nested inside second-level namespaces,
 *   2) remove those namespaces from the top-level prios,
 *   3) remove the top-level prios themselves, then the root node.
 * Any refcount leak aborts the teardown early with a warning, leaving
 * dev->priv.root_ns set so the leak is not papered over.
 */
static void cleanup_root_ns(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns;
	struct fs_prio *iter_prio;

	if (!MLX5_CAP_GEN(dev, nic_flow_table))
		return;

	if (!root_ns)
		return;

	/* stage 1: delete the innermost prios (children of the namespaces
	 * that sit under each top-level prio); flow tables are skipped here.
	 */
	fs_for_each_prio(iter_prio, &root_ns->ns) {
		struct fs_node *node;
		struct mlx5_flow_namespace *iter_ns;

		fs_for_each_ns_or_ft(node, iter_prio) {
			if (node->type == FS_TYPE_FLOW_TABLE)
				continue;
			fs_get_obj(iter_ns, node);
			while (!list_empty(&iter_ns->node.children)) {
				struct fs_prio *obj_iter_prio2;
				struct fs_node *iter_prio2 =
					list_first_entry(&iter_ns->node.children,
							 struct fs_node,
							 list);

				fs_get_obj(obj_iter_prio2, iter_prio2);
				if (tree_remove_node(iter_prio2)) {
					mlx5_core_warn(dev,
						       "Priority %d wasn't destroyed, refcount > 1\n",
						       obj_iter_prio2->prio);
					return;
				}
			}
		}
	}

	/* stage 2: delete the now-empty namespaces under each top-level prio */
	fs_for_each_prio(iter_prio, &root_ns->ns) {
		while (!list_empty(&iter_prio->node.children)) {
			struct fs_node *iter_ns =
				list_first_entry(&iter_prio->node.children,
						 struct fs_node,
						 list);
			if (tree_remove_node(iter_ns)) {
				mlx5_core_warn(dev,
					       "Namespace wasn't destroyed, refcount > 1\n");
				return;
			}
		}
	}

	/* stage 3: delete the top-level prios hanging off the root namespace */
	while (!list_empty(&root_ns->ns.node.children)) {
		struct fs_prio *obj_prio_node;
		struct fs_node *prio_node =
			list_first_entry(&root_ns->ns.node.children,
					 struct fs_node,
					 list);

		fs_get_obj(obj_prio_node, prio_node);
		if (tree_remove_node(prio_node)) {
			mlx5_core_warn(dev,
				       "Priority %d wasn't destroyed, refcount > 1\n",
				       obj_prio_node->prio);
			return;
		}
	}

	/* finally release the root namespace node itself */
	if (tree_remove_node(&root_ns->ns.node)) {
		mlx5_core_warn(dev,
			       "root namespace wasn't destroyed, refcount > 1\n");
		return;
	}

	dev->priv.root_ns = NULL;
}
1401
/* Release all flow steering state created by mlx5_init_fs(): the NIC RX
 * namespace tree first, then the single-prio FDB (eswitch) tree.
 */
void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
{
	cleanup_root_ns(dev);
	cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns);
}
1407
1408static int init_fdb_root_ns(struct mlx5_core_dev *dev)
1409{
1410 struct fs_prio *prio;
1411
1412 dev->priv.fdb_root_ns = create_root_ns(dev, FS_FT_FDB);
1413 if (!dev->priv.fdb_root_ns)
1414 return -ENOMEM;
1415
86d722ad 1416 /* Create single prio */
655227ed 1417 prio = fs_create_prio(&dev->priv.fdb_root_ns->ns, 0, 1);
25302363
MG
1418 if (IS_ERR(prio)) {
1419 cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns);
1420 return PTR_ERR(prio);
1421 } else {
1422 return 0;
1423 }
1424}
1425
1426int mlx5_init_fs(struct mlx5_core_dev *dev)
1427{
1428 int err = 0;
1429
1430 if (MLX5_CAP_GEN(dev, nic_flow_table)) {
1431 err = init_root_ns(dev);
1432 if (err)
1433 return err;
1434 }
1435 if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
1436 err = init_fdb_root_ns(dev);
1437 if (err)
1438 cleanup_root_ns(dev);
1439 }
1440
1441 return err;
1442}
This page took 0.098129 seconds and 5 git commands to generate.