Commit | Line | Data |
---|---|---|
c27a02cd YP |
1 | /* |
2 | * Copyright (c) 2007 Mellanox Technologies. All rights reserved. | |
3 | * | |
4 | * This software is available to you under a choice of one of two | |
5 | * licenses. You may choose to be licensed under the terms of the GNU | |
6 | * General Public License (GPL) Version 2, available from the file | |
7 | * COPYING in the main directory of this source tree, or the | |
8 | * OpenIB.org BSD license below: | |
9 | * | |
10 | * Redistribution and use in source and binary forms, with or | |
11 | * without modification, are permitted provided that the following | |
12 | * conditions are met: | |
13 | * | |
14 | * - Redistributions of source code must retain the above | |
15 | * copyright notice, this list of conditions and the following | |
16 | * disclaimer. | |
17 | * | |
18 | * - Redistributions in binary form must reproduce the above | |
19 | * copyright notice, this list of conditions and the following | |
20 | * disclaimer in the documentation and/or other materials | |
21 | * provided with the distribution. | |
22 | * | |
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
30 | * SOFTWARE. | |
31 | * | |
32 | */ | |
33 | ||
5a0e3ad6 | 34 | #include <linux/slab.h> |
c27a02cd YP |
35 | #include <linux/vmalloc.h> |
36 | #include <linux/mlx4/qp.h> | |
37 | ||
38 | #include "mlx4_en.h" | |
39 | ||
/*
 * mlx4_en_fill_qp_context() - populate a mlx4_qp_context for an en TX/RX QP.
 *
 * @priv:      per-netdev mlx4_en private state (supplies mdev, port, doorbell)
 * @size:      queue size in entries; must be a power of two (ilog2() is used)
 * @stride:    WQE stride in bytes; must be a power of two, >= 16
 * @is_tx:     nonzero for a send queue, zero for a receive queue
 * @rss:       nonzero when the QP is an RSS parent (no own RQ sizing then)
 * @qpn:       QP number to program as the local QPN
 * @cqn:       CQ number used for both send and receive completions
 * @user_prio: ethernet priority to force into sched_queue, or negative to skip
 * @context:   out parameter; fully overwritten (memset to zero first)
 *
 * Pure software initialization: fills @context only; the caller is expected
 * to hand the context to firmware (e.g. via a RST2INIT transition) afterwards.
 */
void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
			     int is_tx, int rss, int qpn, int cqn,
			     int user_prio, struct mlx4_qp_context *context)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	memset(context, 0, sizeof *context);
	/* 7 << 16 in the flags word plus the RSS enable bit at its QPC
	 * offset; exact meaning of the 7 is a firmware contract —
	 * NOTE(review): not derivable from this file, presumably service
	 * type/state bits per the mlx4 PRM.
	 */
	context->flags = cpu_to_be32(7 << 16 | rss << MLX4_RSS_QPC_FLAG_OFFSET);
	context->pd = cpu_to_be32(mdev->priv_pdn);
	/* 0xff = maximum MTU/message-size encoding for an Ethernet QP */
	context->mtu_msgmax = 0xff;
	/* RQ sizing only applies to a real RX queue; RSS parents and TX
	 * queues leave rq_size_stride at zero.
	 */
	if (!is_tx && !rss)
		context->rq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
	/* sq_size_stride packs log2(entries) in bits 7:3 and
	 * log2(stride)-4 in bits 2:0; RX QPs advertise only the TXBB
	 * stride with a zero size.
	 */
	if (is_tx)
		context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
	else
		context->sq_size_stride = ilog2(TXBB_SIZE) - 4;
	context->usr_page = cpu_to_be32(mdev->priv_uar.index);
	context->local_qpn = cpu_to_be32(qpn);
	/* ack timeout field is 3 bits wide; program the minimum value 1 */
	context->pri_path.ackto = 1 & 0x07;
	/* 0x83 base with the zero-based port number in bits 7:6 */
	context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6;
	if (user_prio >= 0) {
		/* user priority occupies bits 5:3 of sched_queue */
		context->pri_path.sched_queue |= user_prio << 3;
		/* force ethernet link up so the prio takes effect */
		context->pri_path.feup = MLX4_FEUP_FORCE_ETH_UP;
	}
	/* 0xff disables per-QP counters — TODO confirm against mlx4 PRM */
	context->pri_path.counter_index = 0xff;
	/* single CQ services both directions */
	context->cqn_send = cpu_to_be32(cqn);
	context->cqn_recv = cpu_to_be32(cqn);
	/* doorbell record DMA address, shifted per the QPC field layout */
	context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2);
	/* bit 30 of param3: keep the VLAN tag in the packet when the
	 * stack has not enabled HW CTAG stripping
	 */
	if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX))
		context->param3 |= cpu_to_be32(1 << 30);

	/* With VXLAN offload active, RX QPs must accept both tunneled and
	 * non-tunneled traffic; the srqn field doubles as tunnel-mode bits.
	 */
	if (!is_tx && !rss &&
	    (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)) {
		en_dbg(HW, priv, "Setting RX qp %x tunnel mode to RX tunneled & non-tunneled\n", qpn);
		context->srqn = cpu_to_be32(7 << 28); /* this fills bits 30:28 */
	}
}
78 | ||
79 | ||
80 | int mlx4_en_map_buffer(struct mlx4_buf *buf) | |
81 | { | |
82 | struct page **pages; | |
83 | int i; | |
84 | ||
85 | if (BITS_PER_LONG == 64 || buf->nbufs == 1) | |
86 | return 0; | |
87 | ||
88 | pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL); | |
89 | if (!pages) | |
90 | return -ENOMEM; | |
91 | ||
92 | for (i = 0; i < buf->nbufs; ++i) | |
93 | pages[i] = virt_to_page(buf->page_list[i].buf); | |
94 | ||
95 | buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL); | |
96 | kfree(pages); | |
97 | if (!buf->direct.buf) | |
98 | return -ENOMEM; | |
99 | ||
100 | return 0; | |
101 | } | |
102 | ||
103 | void mlx4_en_unmap_buffer(struct mlx4_buf *buf) | |
104 | { | |
105 | if (BITS_PER_LONG == 64 || buf->nbufs == 1) | |
106 | return; | |
107 | ||
108 | vunmap(buf->direct.buf); | |
109 | } | |
966508f7 YP |
110 | |
111 | void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event) | |
112 | { | |
113 | return; | |
114 | } | |
115 |