/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
#ifdef CONFIG_MLX5_CORE_EN
#include "eswitch.h"
#endif

enum {
        MLX5_EQE_SIZE           = sizeof(struct mlx5_eqe),
        MLX5_EQE_OWNER_INIT_VAL = 0x1,
};

enum {
        MLX5_EQ_STATE_ARMED             = 0x9,
        MLX5_EQ_STATE_FIRED             = 0xa,
        MLX5_EQ_STATE_ALWAYS_ARMED      = 0xb,
};

enum {
        MLX5_NUM_SPARE_EQE      = 0x80,
        MLX5_NUM_ASYNC_EQE      = 0x100,
        MLX5_NUM_CMD_EQE        = 32,
};

enum {
        MLX5_EQ_DOORBEL_OFFSET  = 0x40,
};

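/*
 * Added note: default mask of async events delivered to the async EQ.
 * mlx5_start_eqs() below extends this mask at runtime with
 * MLX5_EVENT_TYPE_PAGE_FAULT and MLX5_EVENT_TYPE_NIC_VPORT_CHANGE when
 * the corresponding device capabilities are reported.
 */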
#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG)           | \
                               (1ull << MLX5_EVENT_TYPE_COMM_EST)           | \
                               (1ull << MLX5_EVENT_TYPE_SQ_DRAINED)         | \
                               (1ull << MLX5_EVENT_TYPE_CQ_ERROR)           | \
                               (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR)     | \
                               (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED)    | \
                               (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
                               (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
                               (1ull << MLX5_EVENT_TYPE_PORT_CHANGE)        | \
                               (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
                               (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE)       | \
                               (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))

struct map_eq_in {
        u64     mask;
        u32     reserved;
        u32     unmap_eqn;
};

struct cre_des_eq {
        u8      reserved[15];
        u8      eqn;
};

static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
        struct mlx5_destroy_eq_mbox_in in;
        struct mlx5_destroy_eq_mbox_out out;
        int err;

        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_EQ);
        in.eqn = eqn;
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
        if (!err)
                goto ex;

        if (out.hdr.status)
                err = mlx5_cmd_status_to_err(&out.hdr);

ex:
        return err;
}

static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
{
        return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE);
}

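/*
 * Added note: ownership handshake between hardware and software, as
 * implied by the check in next_eqe_sw() below.  The EQ is a
 * power-of-two ring, and the producer (hardware) flips the polarity of
 * the owner bit on every pass over the ring.  Software tracks its own
 * pass parity in bit log2(nent) of cons_index, so an EQE belongs to
 * software exactly when
 *
 *      (eqe->owner & 1) == !!(eq->cons_index & eq->nent)
 *
 * Entries are seeded with MLX5_EQE_OWNER_INIT_VAL (1) by init_eq_buf(),
 * which makes every not-yet-written entry hardware-owned on the first
 * pass.
 */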
static struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
{
        struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));

        return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
}

static const char *eqe_type_str(u8 type)
{
        switch (type) {
        case MLX5_EVENT_TYPE_COMP:
                return "MLX5_EVENT_TYPE_COMP";
        case MLX5_EVENT_TYPE_PATH_MIG:
                return "MLX5_EVENT_TYPE_PATH_MIG";
        case MLX5_EVENT_TYPE_COMM_EST:
                return "MLX5_EVENT_TYPE_COMM_EST";
        case MLX5_EVENT_TYPE_SQ_DRAINED:
                return "MLX5_EVENT_TYPE_SQ_DRAINED";
        case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
                return "MLX5_EVENT_TYPE_SRQ_LAST_WQE";
        case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
                return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT";
        case MLX5_EVENT_TYPE_CQ_ERROR:
                return "MLX5_EVENT_TYPE_CQ_ERROR";
        case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
                return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR";
        case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
                return "MLX5_EVENT_TYPE_PATH_MIG_FAILED";
        case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
                return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR";
        case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
                return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR";
        case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
                return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR";
        case MLX5_EVENT_TYPE_INTERNAL_ERROR:
                return "MLX5_EVENT_TYPE_INTERNAL_ERROR";
        case MLX5_EVENT_TYPE_PORT_CHANGE:
                return "MLX5_EVENT_TYPE_PORT_CHANGE";
        case MLX5_EVENT_TYPE_GPIO_EVENT:
                return "MLX5_EVENT_TYPE_GPIO_EVENT";
        case MLX5_EVENT_TYPE_REMOTE_CONFIG:
                return "MLX5_EVENT_TYPE_REMOTE_CONFIG";
        case MLX5_EVENT_TYPE_DB_BF_CONGESTION:
                return "MLX5_EVENT_TYPE_DB_BF_CONGESTION";
        case MLX5_EVENT_TYPE_STALL_EVENT:
                return "MLX5_EVENT_TYPE_STALL_EVENT";
        case MLX5_EVENT_TYPE_CMD:
                return "MLX5_EVENT_TYPE_CMD";
        case MLX5_EVENT_TYPE_PAGE_REQUEST:
                return "MLX5_EVENT_TYPE_PAGE_REQUEST";
        case MLX5_EVENT_TYPE_PAGE_FAULT:
                return "MLX5_EVENT_TYPE_PAGE_FAULT";
        default:
                return "Unrecognized event";
        }
}

static enum mlx5_dev_event port_subtype_event(u8 subtype)
{
        switch (subtype) {
        case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
                return MLX5_DEV_EVENT_PORT_DOWN;
        case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
                return MLX5_DEV_EVENT_PORT_UP;
        case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
                return MLX5_DEV_EVENT_PORT_INITIALIZED;
        case MLX5_PORT_CHANGE_SUBTYPE_LID:
                return MLX5_DEV_EVENT_LID_CHANGE;
        case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
                return MLX5_DEV_EVENT_PKEY_CHANGE;
        case MLX5_PORT_CHANGE_SUBTYPE_GUID:
                return MLX5_DEV_EVENT_GUID_CHANGE;
        case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
                return MLX5_DEV_EVENT_CLIENT_REREG;
        }
        return -1;
}

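/*
 * Added note: EQ doorbell layout as used below -- two consecutive
 * 32-bit registers at uar->map + MLX5_EQ_DOORBEL_OFFSET.  Writing the
 * packed value (eqn in bits 31:24, cons_index in bits 23:0) to the
 * first register updates the consumer index and re-arms the EQ; writing
 * it to the register 8 bytes in (doorbell + 2, in __be32 units) updates
 * the consumer index without arming.
 */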
static void eq_update_ci(struct mlx5_eq *eq, int arm)
{
        __be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
        u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);

        __raw_writel((__force u32)cpu_to_be32(val), addr);
        /* We still want ordering, just not swabbing, so add a barrier */
        mb();
}

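/*
 * Added summary: drain all software-owned EQEs, dispatching each by
 * type (completions, QP/SRQ resource events, command completions, port
 * state changes, page requests, plus optional ODP page faults and
 * eswitch vport changes), updating the consumer index every
 * MLX5_NUM_SPARE_EQE entries so the HCA never sees an overflow.  The
 * final eq_update_ci(eq, 1) re-arms the EQ so the next event raises an
 * interrupt.
 */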
static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
        struct mlx5_eqe *eqe;
        int eqes_found = 0;
        int set_ci = 0;
        u32 cqn;
        u32 rsn;
        u8 port;

        while ((eqe = next_eqe_sw(eq))) {
                /*
                 * Make sure we read EQ entry contents after we've
                 * checked the ownership bit.
                 */
                dma_rmb();

                mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n",
                              eq->eqn, eqe_type_str(eqe->type));
                switch (eqe->type) {
                case MLX5_EVENT_TYPE_COMP:
                        cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
                        mlx5_cq_completion(dev, cqn);
                        break;

                case MLX5_EVENT_TYPE_PATH_MIG:
                case MLX5_EVENT_TYPE_COMM_EST:
                case MLX5_EVENT_TYPE_SQ_DRAINED:
                case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
                case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
                case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
                case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
                case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
                        rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
                        rsn |= (eqe->data.qp_srq.type << MLX5_USER_INDEX_LEN);
                        mlx5_core_dbg(dev, "event %s(%d) arrived on resource 0x%x\n",
                                      eqe_type_str(eqe->type), eqe->type, rsn);
                        mlx5_rsc_event(dev, rsn, eqe->type);
                        break;

                case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
                case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
                        rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
                        mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n",
                                      eqe_type_str(eqe->type), eqe->type, rsn);
                        mlx5_srq_event(dev, rsn, eqe->type);
                        break;

                case MLX5_EVENT_TYPE_CMD:
                        mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector));
                        break;

                case MLX5_EVENT_TYPE_PORT_CHANGE:
                        port = (eqe->data.port.port >> 4) & 0xf;
                        switch (eqe->sub_type) {
                        case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
                        case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
                        case MLX5_PORT_CHANGE_SUBTYPE_LID:
                        case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
                        case MLX5_PORT_CHANGE_SUBTYPE_GUID:
                        case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
                        case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
                                if (dev->event)
                                        dev->event(dev, port_subtype_event(eqe->sub_type),
                                                   (unsigned long)port);
                                break;
                        default:
                                mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n",
                                               port, eqe->sub_type);
                        }
                        break;
                case MLX5_EVENT_TYPE_CQ_ERROR:
                        cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
                        mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
                                       cqn, eqe->data.cq_err.syndrome);
                        mlx5_cq_event(dev, cqn, eqe->type);
                        break;

                case MLX5_EVENT_TYPE_PAGE_REQUEST:
                        {
                                u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
                                s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);

                                mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
                                              func_id, npages);
                                mlx5_core_req_pages_handler(dev, func_id, npages);
                        }
                        break;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
                case MLX5_EVENT_TYPE_PAGE_FAULT:
                        mlx5_eq_pagefault(dev, eqe);
                        break;
#endif

#ifdef CONFIG_MLX5_CORE_EN
                case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
                        mlx5_eswitch_vport_event(dev->priv.eswitch, eqe);
                        break;
#endif
                default:
                        mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
                                       eqe->type, eq->eqn);
                        break;
                }

                ++eq->cons_index;
                eqes_found = 1;
                ++set_ci;

                /* The HCA will think the queue has overflowed if we
                 * don't tell it we've been processing events.  We
                 * create our EQs with MLX5_NUM_SPARE_EQE extra
                 * entries, so we must update our consumer index at
                 * least that often.
                 */
                if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
                        eq_update_ci(eq, 0);
                        set_ci = 0;
                }
        }

        eq_update_ci(eq, 1);

        return eqes_found;
}

static irqreturn_t mlx5_msix_handler(int irq, void *eq_ptr)
{
        struct mlx5_eq *eq = eq_ptr;
        struct mlx5_core_dev *dev = eq->dev;

        mlx5_eq_int(dev, eq);

        /* MSI-X vectors always belong to us */
        return IRQ_HANDLED;
}

static void init_eq_buf(struct mlx5_eq *eq)
{
        struct mlx5_eqe *eqe;
        int i;

        for (i = 0; i < eq->nent; i++) {
                eqe = get_eqe(eq, i);
                eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
        }
}

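/*
 * Added note: EQ creation sequence as performed below -- size the ring
 * up to the next power of two with MLX5_NUM_SPARE_EQE of headroom,
 * allocate and seed the buffer, issue MLX5_CMD_OP_CREATE_EQ with the
 * page list and context (log size, UAR index, interrupt vector, log
 * page size, event mask), then hook up the MSI-X handler, register the
 * EQ with debugfs, and arm it.  The error paths unwind in reverse
 * order.
 */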
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
                       int nent, u64 mask, const char *name, struct mlx5_uar *uar)
{
        struct mlx5_priv *priv = &dev->priv;
        struct mlx5_create_eq_mbox_in *in;
        struct mlx5_create_eq_mbox_out out;
        int err;
        int inlen;

        eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
        eq->cons_index = 0;
        err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);
        if (err)
                return err;

        init_eq_buf(eq);

        inlen = sizeof(*in) + sizeof(in->pas[0]) * eq->buf.npages;
        in = mlx5_vzalloc(inlen);
        if (!in) {
                err = -ENOMEM;
                goto err_buf;
        }
        memset(&out, 0, sizeof(out));

        mlx5_fill_page_array(&eq->buf, in->pas);

        in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_EQ);
        in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(eq->nent) << 24 | uar->index);
        in->ctx.intr = vecidx;
        in->ctx.log_page_size = eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
        in->events_mask = cpu_to_be64(mask);

        err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
        if (err)
                goto err_in;

        if (out.hdr.status) {
                err = mlx5_cmd_status_to_err(&out.hdr);
                goto err_in;
        }

        snprintf(priv->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s",
                 name, pci_name(dev->pdev));

        eq->eqn = out.eq_number;
        eq->irqn = priv->msix_arr[vecidx].vector;
        eq->dev = dev;
        eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
        err = request_irq(eq->irqn, mlx5_msix_handler, 0,
                          priv->irq_info[vecidx].name, eq);
        if (err)
                goto err_eq;

        err = mlx5_debug_eq_add(dev, eq);
        if (err)
                goto err_irq;

        /* EQs are created in ARMED state
         */
        eq_update_ci(eq, 1);

        kvfree(in);
        return 0;

err_irq:
        free_irq(priv->msix_arr[vecidx].vector, eq);

err_eq:
        mlx5_cmd_destroy_eq(dev, eq->eqn);

err_in:
        kvfree(in);

err_buf:
        mlx5_buf_free(dev, &eq->buf);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_create_map_eq);

int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
        int err;

        mlx5_debug_eq_remove(dev, eq);
        free_irq(eq->irqn, eq);
        err = mlx5_cmd_destroy_eq(dev, eq->eqn);
        if (err)
                mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
                               eq->eqn);
        synchronize_irq(eq->irqn);
        mlx5_buf_free(dev, &eq->buf);

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_destroy_unmap_eq);

int mlx5_eq_init(struct mlx5_core_dev *dev)
{
        int err;

        spin_lock_init(&dev->priv.eq_table.lock);

        err = mlx5_eq_debugfs_init(dev);

        return err;
}

void mlx5_eq_cleanup(struct mlx5_core_dev *dev)
{
        mlx5_eq_debugfs_cleanup(dev);
}

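/*
 * Added note on bring-up order below: the command EQ is created first,
 * then the command interface is switched from polling to event-driven
 * completions (mlx5_cmd_use_events()) before the async and pages EQs
 * are created.  mlx5_stop_eqs() reverses this, falling back to polling
 * mode before the command EQ is destroyed.
 */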
int mlx5_start_eqs(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = &dev->priv.eq_table;
        u32 async_event_mask = MLX5_ASYNC_EVENT_MASK;
        int err;

        if (MLX5_CAP_GEN(dev, pg))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_PAGE_FAULT);

        if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH &&
            MLX5_CAP_GEN(dev, vport_group_manager) &&
            mlx5_core_is_pf(dev))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);

        err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
                                 MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
                                 "mlx5_cmd_eq", &dev->priv.uuari.uars[0]);
        if (err) {
                mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
                return err;
        }

        mlx5_cmd_use_events(dev);

        err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC,
                                 MLX5_NUM_ASYNC_EQE, async_event_mask,
                                 "mlx5_async_eq", &dev->priv.uuari.uars[0]);
        if (err) {
                mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
                goto err1;
        }

        err = mlx5_create_map_eq(dev, &table->pages_eq,
                                 MLX5_EQ_VEC_PAGES,
                                 /* TODO: sriov max_vf + */ 1,
                                 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq",
                                 &dev->priv.uuari.uars[0]);
        if (err) {
                mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
                goto err2;
        }

        return err;

err2:
        mlx5_destroy_unmap_eq(dev, &table->async_eq);

err1:
        mlx5_cmd_use_polling(dev);
        mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
        return err;
}

int mlx5_stop_eqs(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = &dev->priv.eq_table;
        int err;

        err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
        if (err)
                return err;

        mlx5_destroy_unmap_eq(dev, &table->async_eq);
        mlx5_cmd_use_polling(dev);

        err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
        if (err)
                mlx5_cmd_use_events(dev);

        return err;
}

int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
                       struct mlx5_query_eq_mbox_out *out, int outlen)
{
        struct mlx5_query_eq_mbox_in in;
        int err;

        memset(&in, 0, sizeof(in));
        memset(out, 0, outlen);
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_EQ);
        in.eqn = eq->eqn;
        err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
        if (err)
                return err;

        if (out->hdr.status)
                err = mlx5_cmd_status_to_err(&out->hdr);

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_eq_query);