Commit | Line | Data |
---|---|---|
225c7b1f RD |
1 | /* |
2 | * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. | |
3 | * Copyright (c) 2005 Mellanox Technologies. All rights reserved. | |
4 | * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. | |
5 | * | |
6 | * This software is available to you under a choice of one of two | |
7 | * licenses. You may choose to be licensed under the terms of the GNU | |
8 | * General Public License (GPL) Version 2, available from the file | |
9 | * COPYING in the main directory of this source tree, or the | |
10 | * OpenIB.org BSD license below: | |
11 | * | |
12 | * Redistribution and use in source and binary forms, with or | |
13 | * without modification, are permitted provided that the following | |
14 | * conditions are met: | |
15 | * | |
16 | * - Redistributions of source code must retain the above | |
17 | * copyright notice, this list of conditions and the following | |
18 | * disclaimer. | |
19 | * | |
20 | * - Redistributions in binary form must reproduce the above | |
21 | * copyright notice, this list of conditions and the following | |
22 | * disclaimer in the documentation and/or other materials | |
23 | * provided with the distribution. | |
24 | * | |
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
32 | * SOFTWARE. | |
33 | */ | |
34 | ||
35 | #include <linux/sched.h> | |
36 | #include <linux/pci.h> | |
37 | #include <linux/errno.h> | |
38 | ||
39 | #include <linux/mlx4/cmd.h> | |
40 | ||
41 | #include <asm/io.h> | |
42 | ||
43 | #include "mlx4.h" | |
44 | ||
45 | #define CMD_POLL_TOKEN 0xffff | |
46 | ||
/* Command status codes returned by firmware in the HCR status field. */
enum {
	CMD_STAT_OK		= 0x00,	/* command completed successfully */
	CMD_STAT_INTERNAL_ERR	= 0x01,	/* internal error (e.g. bus error) during processing */
	CMD_STAT_BAD_OP		= 0x02,	/* command or opcode modifier not supported */
	CMD_STAT_BAD_PARAM	= 0x03,	/* parameter not supported or out of range */
	CMD_STAT_BAD_SYS_STATE	= 0x04,	/* system not enabled or bad system state */
	CMD_STAT_BAD_RESOURCE	= 0x05,	/* access to reserved or unallocated resource */
	CMD_STAT_RESOURCE_BUSY	= 0x06,	/* resource busy executing a command, or otherwise busy */
	CMD_STAT_EXCEED_LIM	= 0x08,	/* required capability exceeds device limits */
	CMD_STAT_BAD_RES_STATE	= 0x09,	/* resource not in the appropriate state or ownership */
	CMD_STAT_BAD_INDEX	= 0x0a,	/* index out of range */
	CMD_STAT_BAD_NVMEM	= 0x0b,	/* FW image corrupted */
	CMD_STAT_BAD_QP_STATE	= 0x10,	/* QP/EE not in the presumed state */
	CMD_STAT_BAD_SEG_PARAM	= 0x20,	/* bad segment parameters (address/size) */
	CMD_STAT_REG_BOUND	= 0x21,	/* memory region has memory windows bound to it */
	CMD_STAT_LAM_NOT_PRE	= 0x22,	/* HCA local attached memory not present */
	CMD_STAT_BAD_PKT	= 0x30,	/* bad management packet (silently discarded) */
	CMD_STAT_BAD_SIZE	= 0x40	/* more outstanding CQEs in CQ than new CQ size */
};
83 | ||
/* HCR register layout: byte offsets of command fields and dword-6 bit positions. */
enum {
	HCR_IN_PARAM_OFFSET	= 0x00,
	HCR_IN_MODIFIER_OFFSET	= 0x08,
	HCR_OUT_PARAM_OFFSET	= 0x0c,
	HCR_TOKEN_OFFSET	= 0x14,
	HCR_STATUS_OFFSET	= 0x18,

	HCR_OPMOD_SHIFT		= 12,	/* opcode modifier position */
	HCR_T_BIT		= 21,	/* toggle bit */
	HCR_E_BIT		= 22,	/* event (EQE completion) bit */
	HCR_GO_BIT		= 23	/* ownership/doorbell bit */
};
96 | ||
enum {
	/* how long to wait for the HCR GO bit to clear before giving up */
	GO_BIT_TIMEOUT_MSECS	= 10000
};
100 | ||
101 | struct mlx4_cmd_context { | |
102 | struct completion done; | |
103 | int result; | |
104 | int next; | |
105 | u64 out_param; | |
106 | u16 token; | |
107 | }; | |
108 | ||
ca281211 RD |
109 | static int mlx4_status_to_errno(u8 status) |
110 | { | |
225c7b1f RD |
111 | static const int trans_table[] = { |
112 | [CMD_STAT_INTERNAL_ERR] = -EIO, | |
113 | [CMD_STAT_BAD_OP] = -EPERM, | |
114 | [CMD_STAT_BAD_PARAM] = -EINVAL, | |
115 | [CMD_STAT_BAD_SYS_STATE] = -ENXIO, | |
116 | [CMD_STAT_BAD_RESOURCE] = -EBADF, | |
117 | [CMD_STAT_RESOURCE_BUSY] = -EBUSY, | |
118 | [CMD_STAT_EXCEED_LIM] = -ENOMEM, | |
119 | [CMD_STAT_BAD_RES_STATE] = -EBADF, | |
120 | [CMD_STAT_BAD_INDEX] = -EBADF, | |
121 | [CMD_STAT_BAD_NVMEM] = -EFAULT, | |
122 | [CMD_STAT_BAD_QP_STATE] = -EINVAL, | |
123 | [CMD_STAT_BAD_SEG_PARAM] = -EFAULT, | |
124 | [CMD_STAT_REG_BOUND] = -EBUSY, | |
125 | [CMD_STAT_LAM_NOT_PRE] = -EAGAIN, | |
126 | [CMD_STAT_BAD_PKT] = -EINVAL, | |
127 | [CMD_STAT_BAD_SIZE] = -ENOMEM, | |
128 | }; | |
129 | ||
130 | if (status >= ARRAY_SIZE(trans_table) || | |
131 | (status != CMD_STAT_OK && trans_table[status] == 0)) | |
132 | return -EIO; | |
133 | ||
134 | return trans_table[status]; | |
135 | } | |
136 | ||
137 | static int cmd_pending(struct mlx4_dev *dev) | |
138 | { | |
139 | u32 status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET); | |
140 | ||
141 | return (status & swab32(1 << HCR_GO_BIT)) || | |
142 | (mlx4_priv(dev)->cmd.toggle == | |
143 | !!(status & swab32(1 << HCR_T_BIT))); | |
144 | } | |
145 | ||
146 | static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param, | |
147 | u32 in_modifier, u8 op_modifier, u16 op, u16 token, | |
148 | int event) | |
149 | { | |
150 | struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd; | |
151 | u32 __iomem *hcr = cmd->hcr; | |
152 | int ret = -EAGAIN; | |
153 | unsigned long end; | |
154 | ||
155 | mutex_lock(&cmd->hcr_mutex); | |
156 | ||
157 | end = jiffies; | |
158 | if (event) | |
36ce10d3 | 159 | end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS); |
225c7b1f RD |
160 | |
161 | while (cmd_pending(dev)) { | |
162 | if (time_after_eq(jiffies, end)) | |
163 | goto out; | |
164 | cond_resched(); | |
165 | } | |
166 | ||
167 | /* | |
168 | * We use writel (instead of something like memcpy_toio) | |
169 | * because writes of less than 32 bits to the HCR don't work | |
170 | * (and some architectures such as ia64 implement memcpy_toio | |
171 | * in terms of writeb). | |
172 | */ | |
173 | __raw_writel((__force u32) cpu_to_be32(in_param >> 32), hcr + 0); | |
174 | __raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful), hcr + 1); | |
175 | __raw_writel((__force u32) cpu_to_be32(in_modifier), hcr + 2); | |
176 | __raw_writel((__force u32) cpu_to_be32(out_param >> 32), hcr + 3); | |
177 | __raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4); | |
178 | __raw_writel((__force u32) cpu_to_be32(token << 16), hcr + 5); | |
179 | ||
180 | /* __raw_writel may not order writes. */ | |
181 | wmb(); | |
182 | ||
183 | __raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT) | | |
184 | (cmd->toggle << HCR_T_BIT) | | |
185 | (event ? (1 << HCR_E_BIT) : 0) | | |
186 | (op_modifier << HCR_OPMOD_SHIFT) | | |
187 | op), hcr + 6); | |
2e61c646 RD |
188 | |
189 | /* | |
190 | * Make sure that our HCR writes don't get mixed in with | |
191 | * writes from another CPU starting a FW command. | |
192 | */ | |
193 | mmiowb(); | |
194 | ||
225c7b1f RD |
195 | cmd->toggle = cmd->toggle ^ 1; |
196 | ||
197 | ret = 0; | |
198 | ||
199 | out: | |
200 | mutex_unlock(&cmd->hcr_mutex); | |
201 | return ret; | |
202 | } | |
203 | ||
204 | static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param, | |
205 | int out_is_imm, u32 in_modifier, u8 op_modifier, | |
206 | u16 op, unsigned long timeout) | |
207 | { | |
208 | struct mlx4_priv *priv = mlx4_priv(dev); | |
209 | void __iomem *hcr = priv->cmd.hcr; | |
210 | int err = 0; | |
211 | unsigned long end; | |
212 | ||
213 | down(&priv->cmd.poll_sem); | |
214 | ||
215 | err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0, | |
216 | in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0); | |
217 | if (err) | |
218 | goto out; | |
219 | ||
220 | end = msecs_to_jiffies(timeout) + jiffies; | |
221 | while (cmd_pending(dev) && time_before(jiffies, end)) | |
222 | cond_resched(); | |
223 | ||
224 | if (cmd_pending(dev)) { | |
225 | err = -ETIMEDOUT; | |
226 | goto out; | |
227 | } | |
228 | ||
229 | if (out_is_imm) | |
230 | *out_param = | |
231 | (u64) be32_to_cpu((__force __be32) | |
232 | __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 | | |
233 | (u64) be32_to_cpu((__force __be32) | |
234 | __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4)); | |
235 | ||
236 | err = mlx4_status_to_errno(be32_to_cpu((__force __be32) | |
237 | __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24); | |
238 | ||
239 | out: | |
240 | up(&priv->cmd.poll_sem); | |
241 | return err; | |
242 | } | |
243 | ||
244 | void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param) | |
245 | { | |
246 | struct mlx4_priv *priv = mlx4_priv(dev); | |
247 | struct mlx4_cmd_context *context = | |
248 | &priv->cmd.context[token & priv->cmd.token_mask]; | |
249 | ||
250 | /* previously timed out command completing at long last */ | |
251 | if (token != context->token) | |
252 | return; | |
253 | ||
254 | context->result = mlx4_status_to_errno(status); | |
255 | context->out_param = out_param; | |
256 | ||
225c7b1f RD |
257 | complete(&context->done); |
258 | } | |
259 | ||
260 | static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param, | |
261 | int out_is_imm, u32 in_modifier, u8 op_modifier, | |
262 | u16 op, unsigned long timeout) | |
263 | { | |
264 | struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd; | |
265 | struct mlx4_cmd_context *context; | |
266 | int err = 0; | |
267 | ||
268 | down(&cmd->event_sem); | |
269 | ||
270 | spin_lock(&cmd->context_lock); | |
271 | BUG_ON(cmd->free_head < 0); | |
272 | context = &cmd->context[cmd->free_head]; | |
0981582d | 273 | context->token += cmd->token_mask + 1; |
225c7b1f RD |
274 | cmd->free_head = context->next; |
275 | spin_unlock(&cmd->context_lock); | |
276 | ||
277 | init_completion(&context->done); | |
278 | ||
279 | mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0, | |
280 | in_modifier, op_modifier, op, context->token, 1); | |
281 | ||
282 | if (!wait_for_completion_timeout(&context->done, msecs_to_jiffies(timeout))) { | |
283 | err = -EBUSY; | |
284 | goto out; | |
285 | } | |
286 | ||
287 | err = context->result; | |
288 | if (err) | |
289 | goto out; | |
290 | ||
291 | if (out_is_imm) | |
292 | *out_param = context->out_param; | |
293 | ||
294 | out: | |
295 | spin_lock(&cmd->context_lock); | |
296 | context->next = cmd->free_head; | |
297 | cmd->free_head = context - cmd->context; | |
298 | spin_unlock(&cmd->context_lock); | |
299 | ||
300 | up(&cmd->event_sem); | |
301 | return err; | |
302 | } | |
303 | ||
304 | int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param, | |
305 | int out_is_imm, u32 in_modifier, u8 op_modifier, | |
306 | u16 op, unsigned long timeout) | |
307 | { | |
308 | if (mlx4_priv(dev)->cmd.use_events) | |
309 | return mlx4_cmd_wait(dev, in_param, out_param, out_is_imm, | |
310 | in_modifier, op_modifier, op, timeout); | |
311 | else | |
312 | return mlx4_cmd_poll(dev, in_param, out_param, out_is_imm, | |
313 | in_modifier, op_modifier, op, timeout); | |
314 | } | |
315 | EXPORT_SYMBOL_GPL(__mlx4_cmd); | |
316 | ||
317 | int mlx4_cmd_init(struct mlx4_dev *dev) | |
318 | { | |
319 | struct mlx4_priv *priv = mlx4_priv(dev); | |
320 | ||
321 | mutex_init(&priv->cmd.hcr_mutex); | |
322 | sema_init(&priv->cmd.poll_sem, 1); | |
323 | priv->cmd.use_events = 0; | |
324 | priv->cmd.toggle = 1; | |
325 | ||
326 | priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_HCR_BASE, | |
327 | MLX4_HCR_SIZE); | |
328 | if (!priv->cmd.hcr) { | |
329 | mlx4_err(dev, "Couldn't map command register."); | |
330 | return -ENOMEM; | |
331 | } | |
332 | ||
333 | priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev, | |
334 | MLX4_MAILBOX_SIZE, | |
335 | MLX4_MAILBOX_SIZE, 0); | |
336 | if (!priv->cmd.pool) { | |
337 | iounmap(priv->cmd.hcr); | |
338 | return -ENOMEM; | |
339 | } | |
340 | ||
341 | return 0; | |
342 | } | |
343 | ||
344 | void mlx4_cmd_cleanup(struct mlx4_dev *dev) | |
345 | { | |
346 | struct mlx4_priv *priv = mlx4_priv(dev); | |
347 | ||
348 | pci_pool_destroy(priv->cmd.pool); | |
349 | iounmap(priv->cmd.hcr); | |
350 | } | |
351 | ||
352 | /* | |
353 | * Switch to using events to issue FW commands (can only be called | |
354 | * after event queue for command events has been initialized). | |
355 | */ | |
356 | int mlx4_cmd_use_events(struct mlx4_dev *dev) | |
357 | { | |
358 | struct mlx4_priv *priv = mlx4_priv(dev); | |
359 | int i; | |
360 | ||
361 | priv->cmd.context = kmalloc(priv->cmd.max_cmds * | |
362 | sizeof (struct mlx4_cmd_context), | |
363 | GFP_KERNEL); | |
364 | if (!priv->cmd.context) | |
365 | return -ENOMEM; | |
366 | ||
367 | for (i = 0; i < priv->cmd.max_cmds; ++i) { | |
368 | priv->cmd.context[i].token = i; | |
369 | priv->cmd.context[i].next = i + 1; | |
370 | } | |
371 | ||
372 | priv->cmd.context[priv->cmd.max_cmds - 1].next = -1; | |
373 | priv->cmd.free_head = 0; | |
374 | ||
375 | sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds); | |
376 | spin_lock_init(&priv->cmd.context_lock); | |
377 | ||
378 | for (priv->cmd.token_mask = 1; | |
379 | priv->cmd.token_mask < priv->cmd.max_cmds; | |
380 | priv->cmd.token_mask <<= 1) | |
381 | ; /* nothing */ | |
382 | --priv->cmd.token_mask; | |
383 | ||
384 | priv->cmd.use_events = 1; | |
385 | ||
386 | down(&priv->cmd.poll_sem); | |
387 | ||
388 | return 0; | |
389 | } | |
390 | ||
391 | /* | |
392 | * Switch back to polling (used when shutting down the device) | |
393 | */ | |
394 | void mlx4_cmd_use_polling(struct mlx4_dev *dev) | |
395 | { | |
396 | struct mlx4_priv *priv = mlx4_priv(dev); | |
397 | int i; | |
398 | ||
399 | priv->cmd.use_events = 0; | |
400 | ||
401 | for (i = 0; i < priv->cmd.max_cmds; ++i) | |
402 | down(&priv->cmd.event_sem); | |
403 | ||
404 | kfree(priv->cmd.context); | |
405 | ||
406 | up(&priv->cmd.poll_sem); | |
407 | } | |
408 | ||
409 | struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev) | |
410 | { | |
411 | struct mlx4_cmd_mailbox *mailbox; | |
412 | ||
413 | mailbox = kmalloc(sizeof *mailbox, GFP_KERNEL); | |
414 | if (!mailbox) | |
415 | return ERR_PTR(-ENOMEM); | |
416 | ||
417 | mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL, | |
418 | &mailbox->dma); | |
419 | if (!mailbox->buf) { | |
420 | kfree(mailbox); | |
421 | return ERR_PTR(-ENOMEM); | |
422 | } | |
423 | ||
424 | return mailbox; | |
425 | } | |
426 | EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox); | |
427 | ||
428 | void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox) | |
429 | { | |
430 | if (!mailbox) | |
431 | return; | |
432 | ||
433 | pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma); | |
434 | kfree(mailbox); | |
435 | } | |
436 | EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox); |