/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_eq.c 1382 2004-12-24 02:21:02Z roland $
 */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/pci.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_config_reg.h"

enum {
	MTHCA_NUM_ASYNC_EQE = 0x80,
	MTHCA_NUM_CMD_EQE   = 0x80,
	MTHCA_NUM_SPARE_EQE = 0x80,
	MTHCA_EQ_ENTRY_SIZE = 0x20
};
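
/*
 * Each EQE is MTHCA_EQ_ENTRY_SIZE = 32 bytes, so with 4 KB pages one
 * page holds PAGE_SIZE / MTHCA_EQ_ENTRY_SIZE = 128 entries.  The
 * MTHCA_NUM_SPARE_EQE (0x80 = 128) extra entries give the driver room
 * to consume events before it must update the consumer index; see the
 * overflow comment in mthca_eq_int() below.
 */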

/*
 * Must be packed because start is 64 bits but only aligned to 32 bits.
 */
struct mthca_eq_context {
	__be32 flags;
	__be64 start;
	__be32 logsize_usrpage;
	__be32 tavor_pd;	/* reserved for Arbel */
	u8     reserved1[3];
	u8     intr;
	__be32 arbel_pd;	/* lost_count for Tavor */
	__be32 lkey;
	u32    reserved2[2];
	__be32 consumer_index;
	__be32 producer_index;
	u32    reserved3[4];
} __attribute__((packed));

#define MTHCA_EQ_STATUS_OK          ( 0 << 28)
#define MTHCA_EQ_STATUS_OVERFLOW    ( 9 << 28)
#define MTHCA_EQ_STATUS_WRITE_FAIL  (10 << 28)
#define MTHCA_EQ_OWNER_SW           ( 0 << 24)
#define MTHCA_EQ_OWNER_HW           ( 1 << 24)
#define MTHCA_EQ_FLAG_TR            ( 1 << 18)
#define MTHCA_EQ_FLAG_OI            ( 1 << 17)
#define MTHCA_EQ_STATE_ARMED        ( 1 <<  8)
#define MTHCA_EQ_STATE_FIRED        ( 2 <<  8)
#define MTHCA_EQ_STATE_ALWAYS_ARMED ( 3 <<  8)
#define MTHCA_EQ_STATE_ARBEL        ( 8 <<  8)

enum {
	MTHCA_EVENT_TYPE_COMP               = 0x00,
	MTHCA_EVENT_TYPE_PATH_MIG           = 0x01,
	MTHCA_EVENT_TYPE_COMM_EST           = 0x02,
	MTHCA_EVENT_TYPE_SQ_DRAINED         = 0x03,
	MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE    = 0x13,
	MTHCA_EVENT_TYPE_SRQ_LIMIT          = 0x14,
	MTHCA_EVENT_TYPE_CQ_ERROR           = 0x04,
	MTHCA_EVENT_TYPE_WQ_CATAS_ERROR     = 0x05,
	MTHCA_EVENT_TYPE_EEC_CATAS_ERROR    = 0x06,
	MTHCA_EVENT_TYPE_PATH_MIG_FAILED    = 0x07,
	MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
	MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR    = 0x11,
	MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR    = 0x12,
	MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR  = 0x08,
	MTHCA_EVENT_TYPE_PORT_CHANGE        = 0x09,
	MTHCA_EVENT_TYPE_EQ_OVERFLOW        = 0x0f,
	MTHCA_EVENT_TYPE_ECC_DETECT         = 0x0e,
	MTHCA_EVENT_TYPE_CMD                = 0x0a
};

#define MTHCA_ASYNC_EVENT_MASK ((1ULL << MTHCA_EVENT_TYPE_PATH_MIG)           | \
				(1ULL << MTHCA_EVENT_TYPE_COMM_EST)           | \
				(1ULL << MTHCA_EVENT_TYPE_SQ_DRAINED)         | \
				(1ULL << MTHCA_EVENT_TYPE_CQ_ERROR)           | \
				(1ULL << MTHCA_EVENT_TYPE_WQ_CATAS_ERROR)     | \
				(1ULL << MTHCA_EVENT_TYPE_EEC_CATAS_ERROR)    | \
				(1ULL << MTHCA_EVENT_TYPE_PATH_MIG_FAILED)    | \
				(1ULL << MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
				(1ULL << MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
				(1ULL << MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR)  | \
				(1ULL << MTHCA_EVENT_TYPE_PORT_CHANGE)        | \
				(1ULL << MTHCA_EVENT_TYPE_ECC_DETECT))
#define MTHCA_SRQ_EVENT_MASK   ((1ULL << MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
				(1ULL << MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
				(1ULL << MTHCA_EVENT_TYPE_SRQ_LIMIT))
#define MTHCA_CMD_EVENT_MASK    (1ULL << MTHCA_EVENT_TYPE_CMD)

#define MTHCA_EQ_DB_INC_CI     (1 << 24)
#define MTHCA_EQ_DB_REQ_NOT    (2 << 24)
#define MTHCA_EQ_DB_DISARM_CQ  (3 << 24)
#define MTHCA_EQ_DB_SET_CI     (4 << 24)
#define MTHCA_EQ_DB_ALWAYS_ARM (5 << 24)
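
/*
 * EQ doorbells are two big-endian 32-bit words: the first holds one of
 * the MTHCA_EQ_DB_* commands in bits 24-31 and the EQ number in the
 * low bits; the second holds the command's argument, e.g. the consumer
 * index for SET_CI or the CQ number for DISARM_CQ.  See
 * tavor_set_eq_ci() and disarm_cq() below.
 */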

struct mthca_eqe {
	u8 reserved1;
	u8 type;
	u8 reserved2;
	u8 subtype;
	union {
		u32 raw[6];
		struct {
			__be32 cqn;
		} __attribute__((packed)) comp;
		struct {
			u16    reserved1;
			__be16 token;
			u32    reserved2;
			u8     reserved3[3];
			u8     status;
			__be64 out_param;
		} __attribute__((packed)) cmd;
		struct {
			__be32 qpn;
		} __attribute__((packed)) qp;
		struct {
			__be32 srqn;
		} __attribute__((packed)) srq;
		struct {
			__be32 cqn;
			u32    reserved1;
			u8     reserved2[3];
			u8     syndrome;
		} __attribute__((packed)) cq_err;
		struct {
			u32    reserved1[2];
			__be32 port;
		} __attribute__((packed)) port_change;
	} event;
	u8 reserved3[3];
	u8 owner;
} __attribute__((packed));

#define MTHCA_EQ_ENTRY_OWNER_SW      (0 << 7)
#define MTHCA_EQ_ENTRY_OWNER_HW      (1 << 7)
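
/*
 * Ownership of each EQE lives in the top bit of its last byte: the
 * driver gives an entry to the hardware by setting the bit
 * (set_eqe_hw()), and the hardware clears it when it writes an event,
 * which is what next_eqe_sw() polls for.  Since the owner byte is the
 * last byte of the entry, the rest of the entry may only be read after
 * checking ownership, hence the rmb() in mthca_eq_int().
 */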

static inline u64 async_mask(struct mthca_dev *dev)
{
	return dev->mthca_flags & MTHCA_FLAG_SRQ ?
		MTHCA_ASYNC_EVENT_MASK | MTHCA_SRQ_EVENT_MASK :
		MTHCA_ASYNC_EVENT_MASK;
}

static inline void tavor_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
{
	__be32 doorbell[2];

	doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_SET_CI | eq->eqn);
	doorbell[1] = cpu_to_be32(ci & (eq->nent - 1));

	/*
	 * This barrier makes sure that all updates to ownership bits
	 * done by set_eqe_hw() hit memory before the consumer index
	 * is updated.  set_eq_ci() allows the HCA to possibly write
	 * more EQ entries, and we want to avoid the exceedingly
	 * unlikely possibility of the HCA writing an entry and then
	 * having set_eqe_hw() overwrite the owner field.
	 */
	wmb();
	mthca_write64(doorbell,
		      dev->kar + MTHCA_EQ_DOORBELL,
		      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
}

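/*
 * Mem-free (Arbel-mode) HCAs have no CI doorbell; the consumer index
 * is instead written straight to a per-EQ set-CI register, with
 * explicit barriers supplying the ordering a doorbell write would
 * otherwise provide.
 */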
static inline void arbel_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
{
	/* See comment in tavor_set_eq_ci() above. */
	wmb();
	__raw_writel((__force u32) cpu_to_be32(ci),
		     dev->eq_regs.arbel.eq_set_ci_base + eq->eqn * 8);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}

static inline void set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
{
	if (mthca_is_memfree(dev))
		arbel_set_eq_ci(dev, eq, ci);
	else
		tavor_set_eq_ci(dev, eq, ci);
}

static inline void tavor_eq_req_not(struct mthca_dev *dev, int eqn)
{
	__be32 doorbell[2];

	doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_REQ_NOT | eqn);
	doorbell[1] = 0;

	mthca_write64(doorbell,
		      dev->kar + MTHCA_EQ_DOORBELL,
		      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
}

static inline void arbel_eq_req_not(struct mthca_dev *dev, u32 eqn_mask)
{
	writel(eqn_mask, dev->eq_regs.arbel.eq_arm);
}

static inline void disarm_cq(struct mthca_dev *dev, int eqn, int cqn)
{
	if (!mthca_is_memfree(dev)) {
		__be32 doorbell[2];

		doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_DISARM_CQ | eqn);
		doorbell[1] = cpu_to_be32(cqn);

		mthca_write64(doorbell,
			      dev->kar + MTHCA_EQ_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}
}

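/*
 * The EQ buffer is a ring of eq->nent entries spread across separately
 * allocated pages (eq->page_list), so an entry is found by computing a
 * linear byte offset (index masked to wrap, since nent is a power of
 * two, times the 32-byte entry size) and splitting it into a page
 * number and an offset within that page.  For example, with 4 KB pages
 * entry 130 lives at byte 130 * 32 = 4160, i.e. page 1, offset 64.
 */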
static inline struct mthca_eqe *get_eqe(struct mthca_eq *eq, u32 entry)
{
	unsigned long off = (entry & (eq->nent - 1)) * MTHCA_EQ_ENTRY_SIZE;
	return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
}

static inline struct mthca_eqe *next_eqe_sw(struct mthca_eq *eq)
{
	struct mthca_eqe *eqe;
	eqe = get_eqe(eq, eq->cons_index);
	return (MTHCA_EQ_ENTRY_OWNER_HW & eqe->owner) ? NULL : eqe;
}

static inline void set_eqe_hw(struct mthca_eqe *eqe)
{
	eqe->owner = MTHCA_EQ_ENTRY_OWNER_HW;
}

static void port_change(struct mthca_dev *dev, int port, int active)
{
	struct ib_event record;

	mthca_dbg(dev, "Port change to %s for port %d\n",
		  active ? "active" : "down", port);

	record.device = &dev->ib_dev;
	record.event = active ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
	record.element.port_num = port;

	ib_dispatch_event(&record);
}

static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
{
	struct mthca_eqe *eqe;
	int disarm_cqn;
	int eqes_found = 0;
	int set_ci = 0;

	while ((eqe = next_eqe_sw(eq))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		rmb();

		switch (eqe->type) {
		case MTHCA_EVENT_TYPE_COMP:
			disarm_cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
			disarm_cq(dev, eq->eqn, disarm_cqn);
			mthca_cq_completion(dev, disarm_cqn);
			break;

		case MTHCA_EVENT_TYPE_PATH_MIG:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_PATH_MIG);
			break;

		case MTHCA_EVENT_TYPE_COMM_EST:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_COMM_EST);
			break;

		case MTHCA_EVENT_TYPE_SQ_DRAINED:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_SQ_DRAINED);
			break;

		case MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_QP_LAST_WQE_REACHED);
			break;

		case MTHCA_EVENT_TYPE_SRQ_LIMIT:
			mthca_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
					IB_EVENT_SRQ_LIMIT_REACHED);
			break;

		case MTHCA_EVENT_TYPE_WQ_CATAS_ERROR:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_QP_FATAL);
			break;

		case MTHCA_EVENT_TYPE_PATH_MIG_FAILED:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_PATH_MIG_ERR);
			break;

		case MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_QP_REQ_ERR);
			break;

		case MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_QP_ACCESS_ERR);
			break;

		case MTHCA_EVENT_TYPE_CMD:
			mthca_cmd_event(dev,
					be16_to_cpu(eqe->event.cmd.token),
					eqe->event.cmd.status,
					be64_to_cpu(eqe->event.cmd.out_param));
			break;

		case MTHCA_EVENT_TYPE_PORT_CHANGE:
			port_change(dev,
				    (be32_to_cpu(eqe->event.port_change.port) >> 28) & 3,
				    eqe->subtype == 0x4);
			break;

		case MTHCA_EVENT_TYPE_CQ_ERROR:
			mthca_warn(dev, "CQ %s on CQN %06x\n",
				   eqe->event.cq_err.syndrome == 1 ?
				   "overrun" : "access violation",
				   be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
			mthca_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
				       IB_EVENT_CQ_ERR);
			break;

		case MTHCA_EVENT_TYPE_EQ_OVERFLOW:
			mthca_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
			break;

		case MTHCA_EVENT_TYPE_EEC_CATAS_ERROR:
		case MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR:
		case MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR:
		case MTHCA_EVENT_TYPE_ECC_DETECT:
		default:
			mthca_warn(dev, "Unhandled event %02x(%02x) on EQ %d\n",
				   eqe->type, eqe->subtype, eq->eqn);
			break;
		}

		set_eqe_hw(eqe);
		++eq->cons_index;
		eqes_found = 1;
		++set_ci;

		/*
		 * The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MTHCA_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MTHCA_NUM_SPARE_EQE)) {
			/*
			 * Conditional on hca_type is OK here because
			 * this is a rare case, not the fast path.
			 */
			set_eq_ci(dev, eq, eq->cons_index);
			set_ci = 0;
		}
	}

	/*
	 * Rely on caller to set consumer index so that we don't have
	 * to test hca_type in our interrupt handling fast path.
	 */
	return eqes_found;
}

static irqreturn_t mthca_tavor_interrupt(int irq, void *dev_ptr, struct pt_regs *regs)
{
	struct mthca_dev *dev = dev_ptr;
	u32 ecr;
	int i;

	if (dev->eq_table.clr_mask)
		writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);

	ecr = readl(dev->eq_regs.tavor.ecr_base + 4);
	if (!ecr)
		return IRQ_NONE;

	writel(ecr, dev->eq_regs.tavor.ecr_base +
	       MTHCA_ECR_CLR_BASE - MTHCA_ECR_BASE + 4);

	for (i = 0; i < MTHCA_NUM_EQ; ++i)
		if (ecr & dev->eq_table.eq[i].eqn_mask) {
			if (mthca_eq_int(dev, &dev->eq_table.eq[i]))
				tavor_set_eq_ci(dev, &dev->eq_table.eq[i],
						dev->eq_table.eq[i].cons_index);
			tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn);
		}

	return IRQ_HANDLED;
}

static irqreturn_t mthca_tavor_msi_x_interrupt(int irq, void *eq_ptr,
					       struct pt_regs *regs)
{
	struct mthca_eq *eq = eq_ptr;
	struct mthca_dev *dev = eq->dev;

	mthca_eq_int(dev, eq);
	tavor_set_eq_ci(dev, eq, eq->cons_index);
	tavor_eq_req_not(dev, eq->eqn);

	/* MSI-X vectors always belong to us */
	return IRQ_HANDLED;
}

static irqreturn_t mthca_arbel_interrupt(int irq, void *dev_ptr, struct pt_regs *regs)
{
	struct mthca_dev *dev = dev_ptr;
	int work = 0;
	int i;

	if (dev->eq_table.clr_mask)
		writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);

	for (i = 0; i < MTHCA_NUM_EQ; ++i)
		if (mthca_eq_int(dev, &dev->eq_table.eq[i])) {
			work = 1;
			arbel_set_eq_ci(dev, &dev->eq_table.eq[i],
					dev->eq_table.eq[i].cons_index);
		}

	arbel_eq_req_not(dev, dev->eq_table.arm_mask);

	return IRQ_RETVAL(work);
}

static irqreturn_t mthca_arbel_msi_x_interrupt(int irq, void *eq_ptr,
					       struct pt_regs *regs)
{
	struct mthca_eq *eq = eq_ptr;
	struct mthca_dev *dev = eq->dev;

	mthca_eq_int(dev, eq);
	arbel_set_eq_ci(dev, eq, eq->cons_index);
	arbel_eq_req_not(dev, eq->eqn_mask);

	/* MSI-X vectors always belong to us */
	return IRQ_HANDLED;
}

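/*
 * Creating an EQ: allocate the ring as DMA-coherent pages, mark every
 * entry hardware-owned, allocate an EQ number plus a memory region
 * covering the pages so the HCA can DMA into them, then fill in an EQ
 * context (log2 size, PD, interrupt, lkey) and hand the queue to
 * firmware with SW2HW_EQ.
 */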
static int __devinit mthca_create_eq(struct mthca_dev *dev,
				     int nent,
				     u8 intr,
				     struct mthca_eq *eq)
{
	int npages;
	u64 *dma_list = NULL;
	dma_addr_t t;
	struct mthca_mailbox *mailbox;
	struct mthca_eq_context *eq_context;
	int err = -ENOMEM;
	int i;
	u8 status;

	eq->dev  = dev;
	eq->nent = roundup_pow_of_two(max(nent, 2));
	npages = ALIGN(eq->nent * MTHCA_EQ_ENTRY_SIZE, PAGE_SIZE) / PAGE_SIZE;

	eq->page_list = kmalloc(npages * sizeof *eq->page_list,
				GFP_KERNEL);
	if (!eq->page_list)
		goto err_out;

	for (i = 0; i < npages; ++i)
		eq->page_list[i].buf = NULL;

	dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
	if (!dma_list)
		goto err_out_free;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		goto err_out_free;
	eq_context = mailbox->buf;

	for (i = 0; i < npages; ++i) {
		eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
							  PAGE_SIZE, &t, GFP_KERNEL);
		if (!eq->page_list[i].buf)
			goto err_out_free_pages;

		dma_list[i] = t;
		pci_unmap_addr_set(&eq->page_list[i], mapping, t);

		memset(eq->page_list[i].buf, 0, PAGE_SIZE);
	}

	for (i = 0; i < eq->nent; ++i)
		set_eqe_hw(get_eqe(eq, i));

	eq->eqn = mthca_alloc(&dev->eq_table.alloc);
	if (eq->eqn == -1)
		goto err_out_free_pages;

	err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num,
				  dma_list, PAGE_SHIFT, npages,
				  0, npages * PAGE_SIZE,
				  MTHCA_MPT_FLAG_LOCAL_WRITE |
				  MTHCA_MPT_FLAG_LOCAL_READ,
				  &eq->mr);
	if (err)
		goto err_out_free_eq;

	memset(eq_context, 0, sizeof *eq_context);
	eq_context->flags           = cpu_to_be32(MTHCA_EQ_STATUS_OK   |
						  MTHCA_EQ_OWNER_HW    |
						  MTHCA_EQ_STATE_ARMED |
						  MTHCA_EQ_FLAG_TR);
	if (mthca_is_memfree(dev))
		eq_context->flags  |= cpu_to_be32(MTHCA_EQ_STATE_ARBEL);

	eq_context->logsize_usrpage = cpu_to_be32((ffs(eq->nent) - 1) << 24);
	if (mthca_is_memfree(dev)) {
		eq_context->arbel_pd = cpu_to_be32(dev->driver_pd.pd_num);
	} else {
		eq_context->logsize_usrpage |= cpu_to_be32(dev->driver_uar.index);
		eq_context->tavor_pd         = cpu_to_be32(dev->driver_pd.pd_num);
	}
	eq_context->intr            = intr;
	eq_context->lkey            = cpu_to_be32(eq->mr.ibmr.lkey);

	err = mthca_SW2HW_EQ(dev, mailbox, eq->eqn, &status);
	if (err) {
		mthca_warn(dev, "SW2HW_EQ failed (%d)\n", err);
		goto err_out_free_mr;
	}
	if (status) {
		mthca_warn(dev, "SW2HW_EQ returned status 0x%02x\n",
			   status);
		err = -EINVAL;
		goto err_out_free_mr;
	}

	kfree(dma_list);
	mthca_free_mailbox(dev, mailbox);

	eq->eqn_mask   = swab32(1 << eq->eqn);
	eq->cons_index = 0;

	dev->eq_table.arm_mask |= eq->eqn_mask;

	mthca_dbg(dev, "Allocated EQ %d with %d entries\n",
		  eq->eqn, eq->nent);

	return err;

 err_out_free_mr:
	mthca_free_mr(dev, &eq->mr);

 err_out_free_eq:
	mthca_free(&dev->eq_table.alloc, eq->eqn);

 err_out_free_pages:
	for (i = 0; i < npages; ++i)
		if (eq->page_list[i].buf)
			dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
					  eq->page_list[i].buf,
					  pci_unmap_addr(&eq->page_list[i],
							 mapping));

	mthca_free_mailbox(dev, mailbox);

 err_out_free:
	kfree(eq->page_list);
	kfree(dma_list);

 err_out:
	return err;
}

static void mthca_free_eq(struct mthca_dev *dev,
			  struct mthca_eq *eq)
{
	struct mthca_mailbox *mailbox;
	int err;
	u8 status;
	int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) /
		PAGE_SIZE;
	int i;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return;

	err = mthca_HW2SW_EQ(dev, mailbox, eq->eqn, &status);
	if (err)
		mthca_warn(dev, "HW2SW_EQ failed (%d)\n", err);
	if (status)
		mthca_warn(dev, "HW2SW_EQ returned status 0x%02x\n", status);

	dev->eq_table.arm_mask &= ~eq->eqn_mask;

	if (0) {
		mthca_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
		for (i = 0; i < sizeof (struct mthca_eq_context) / 4; ++i) {
			if (i % 4 == 0)
				printk("[%02x] ", i * 4);
			printk(" %08x", be32_to_cpup(mailbox->buf + i * 4));
			if ((i + 1) % 4 == 0)
				printk("\n");
		}
	}

	mthca_free_mr(dev, &eq->mr);
	for (i = 0; i < npages; ++i)
		pci_free_consistent(dev->pdev, PAGE_SIZE,
				    eq->page_list[i].buf,
				    pci_unmap_addr(&eq->page_list[i], mapping));

	kfree(eq->page_list);
	mthca_free_mailbox(dev, mailbox);
}

static void mthca_free_irqs(struct mthca_dev *dev)
{
	int i;

	if (dev->eq_table.have_irq)
		free_irq(dev->pdev->irq, dev);
	for (i = 0; i < MTHCA_NUM_EQ; ++i)
		if (dev->eq_table.eq[i].have_irq)
			free_irq(dev->eq_table.eq[i].msi_x_vector,
				 dev->eq_table.eq + i);
}

static int __devinit mthca_map_reg(struct mthca_dev *dev,
				   unsigned long offset, unsigned long size,
				   void __iomem **map)
{
	unsigned long base = pci_resource_start(dev->pdev, 0);

	if (!request_mem_region(base + offset, size, DRV_NAME))
		return -EBUSY;

	*map = ioremap(base + offset, size);
	if (!*map) {
		release_mem_region(base + offset, size);
		return -ENOMEM;
	}

	return 0;
}

static void mthca_unmap_reg(struct mthca_dev *dev, unsigned long offset,
			    unsigned long size, void __iomem *map)
{
	unsigned long base = pci_resource_start(dev->pdev, 0);

	release_mem_region(base + offset, size);
	iounmap(map);
}

static int __devinit mthca_map_eq_regs(struct mthca_dev *dev)
{
	unsigned long mthca_base;

	mthca_base = pci_resource_start(dev->pdev, 0);

	if (mthca_is_memfree(dev)) {
		/*
		 * We assume that the EQ arm and EQ set CI registers
		 * fall within the first BAR.  We can't trust the
		 * values firmware gives us, since those addresses are
		 * valid on the HCA's side of the PCI bus but not
		 * necessarily the host side.
		 */
		if (mthca_map_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
				  dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
				  &dev->clr_base)) {
			mthca_err(dev, "Couldn't map interrupt clear register, "
				  "aborting.\n");
			return -ENOMEM;
		}

		/*
		 * Add 4 because we limit ourselves to EQs 0 ... 31,
		 * so we only need the low word of the register.
		 */
		if (mthca_map_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) &
					dev->fw.arbel.eq_arm_base) + 4, 4,
				  &dev->eq_regs.arbel.eq_arm)) {
			mthca_err(dev, "Couldn't map EQ arm register, aborting.\n");
			mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
					dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
					dev->clr_base);
			return -ENOMEM;
		}

		if (mthca_map_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
				  dev->fw.arbel.eq_set_ci_base,
				  MTHCA_EQ_SET_CI_SIZE,
				  &dev->eq_regs.arbel.eq_set_ci_base)) {
			mthca_err(dev, "Couldn't map EQ CI register, aborting.\n");
			mthca_unmap_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) &
					      dev->fw.arbel.eq_arm_base) + 4, 4,
					dev->eq_regs.arbel.eq_arm);
			mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
					dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
					dev->clr_base);
			return -ENOMEM;
		}
	} else {
		if (mthca_map_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE,
				  &dev->clr_base)) {
			mthca_err(dev, "Couldn't map interrupt clear register, "
				  "aborting.\n");
			return -ENOMEM;
		}

		if (mthca_map_reg(dev, MTHCA_ECR_BASE,
				  MTHCA_ECR_SIZE + MTHCA_ECR_CLR_SIZE,
				  &dev->eq_regs.tavor.ecr_base)) {
			mthca_err(dev, "Couldn't map ecr register, "
				  "aborting.\n");
			mthca_unmap_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE,
					dev->clr_base);
			return -ENOMEM;
		}
	}

	return 0;
}

static void mthca_unmap_eq_regs(struct mthca_dev *dev)
{
	if (mthca_is_memfree(dev)) {
		mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
				dev->fw.arbel.eq_set_ci_base,
				MTHCA_EQ_SET_CI_SIZE,
				dev->eq_regs.arbel.eq_set_ci_base);
		mthca_unmap_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) &
				      dev->fw.arbel.eq_arm_base) + 4, 4,
				dev->eq_regs.arbel.eq_arm);
		mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
				dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
				dev->clr_base);
	} else {
		mthca_unmap_reg(dev, MTHCA_ECR_BASE,
				MTHCA_ECR_SIZE + MTHCA_ECR_CLR_SIZE,
				dev->eq_regs.tavor.ecr_base);
		mthca_unmap_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE,
				dev->clr_base);
	}
}

int __devinit mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
{
	int ret;
	u8 status;

	/*
	 * We assume that mapping one page is enough for the whole EQ
	 * context table.  This is fine with all current HCAs, because
	 * we only use 32 EQs and each EQ uses 32 bytes of context
	 * memory, or 1 KB total.
	 */
	dev->eq_table.icm_virt = icm_virt;
	dev->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
	if (!dev->eq_table.icm_page)
		return -ENOMEM;
	dev->eq_table.icm_dma  = pci_map_page(dev->pdev, dev->eq_table.icm_page, 0,
					      PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(dev->eq_table.icm_dma)) {
		__free_page(dev->eq_table.icm_page);
		return -ENOMEM;
	}

	ret = mthca_MAP_ICM_page(dev, dev->eq_table.icm_dma, icm_virt, &status);
	if (!ret && status)
		ret = -EINVAL;
	if (ret) {
		pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
			       PCI_DMA_BIDIRECTIONAL);
		__free_page(dev->eq_table.icm_page);
	}

	return ret;
}

void mthca_unmap_eq_icm(struct mthca_dev *dev)
{
	u8 status;

	mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, 1, &status);
	pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
		       PCI_DMA_BIDIRECTIONAL);
	__free_page(dev->eq_table.icm_page);
}

int __devinit mthca_init_eq_table(struct mthca_dev *dev)
{
	int err;
	u8 status;
	u8 intr;
	int i;

	err = mthca_alloc_init(&dev->eq_table.alloc,
			       dev->limits.num_eqs,
			       dev->limits.num_eqs - 1,
			       dev->limits.reserved_eqs);
	if (err)
		return err;

	err = mthca_map_eq_regs(dev);
	if (err)
		goto err_out_free;

	if (dev->mthca_flags & MTHCA_FLAG_MSI ||
	    dev->mthca_flags & MTHCA_FLAG_MSI_X) {
		dev->eq_table.clr_mask = 0;
	} else {
		dev->eq_table.clr_mask =
			swab32(1 << (dev->eq_table.inta_pin & 31));
		dev->eq_table.clr_int  = dev->clr_base +
			(dev->eq_table.inta_pin < 32 ? 4 : 0);
	}

	dev->eq_table.arm_mask = 0;

	intr = (dev->mthca_flags & MTHCA_FLAG_MSI) ?
		128 : dev->eq_table.inta_pin;

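	/*
	 * Three EQs, one per event class: completion, async and
	 * command.  With MSI-X each EQ is given its own interrupt
	 * number in its EQ context (the values 128, 129 and 130
	 * below); otherwise all three use the INTA pin.  The
	 * completion EQ is sized for one EQE per CQ, and every EQ
	 * gets MTHCA_NUM_SPARE_EQE slack entries.
	 */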
	err = mthca_create_eq(dev, dev->limits.num_cqs + MTHCA_NUM_SPARE_EQE,
			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 128 : intr,
			      &dev->eq_table.eq[MTHCA_EQ_COMP]);
	if (err)
		goto err_out_unmap;

	err = mthca_create_eq(dev, MTHCA_NUM_ASYNC_EQE + MTHCA_NUM_SPARE_EQE,
			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 129 : intr,
			      &dev->eq_table.eq[MTHCA_EQ_ASYNC]);
	if (err)
		goto err_out_comp;

	err = mthca_create_eq(dev, MTHCA_NUM_CMD_EQE + MTHCA_NUM_SPARE_EQE,
			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 130 : intr,
			      &dev->eq_table.eq[MTHCA_EQ_CMD]);
	if (err)
		goto err_out_async;

	if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
		static const char *eq_name[] = {
			[MTHCA_EQ_COMP]  = DRV_NAME " (comp)",
			[MTHCA_EQ_ASYNC] = DRV_NAME " (async)",
			[MTHCA_EQ_CMD]   = DRV_NAME " (cmd)"
		};

		for (i = 0; i < MTHCA_NUM_EQ; ++i) {
			err = request_irq(dev->eq_table.eq[i].msi_x_vector,
					  mthca_is_memfree(dev) ?
					  mthca_arbel_msi_x_interrupt :
					  mthca_tavor_msi_x_interrupt,
					  0, eq_name[i], dev->eq_table.eq + i);
			if (err)
				goto err_out_cmd;
			dev->eq_table.eq[i].have_irq = 1;
		}
	} else {
		err = request_irq(dev->pdev->irq,
				  mthca_is_memfree(dev) ?
				  mthca_arbel_interrupt :
				  mthca_tavor_interrupt,
				  SA_SHIRQ, DRV_NAME, dev);
		if (err)
			goto err_out_cmd;
		dev->eq_table.have_irq = 1;
	}

	err = mthca_MAP_EQ(dev, async_mask(dev),
			   0, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status);
	if (err)
		mthca_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
			   dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, err);
	if (status)
		mthca_warn(dev, "MAP_EQ for async EQ %d returned status 0x%02x\n",
			   dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, status);

	err = mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
			   0, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status);
	if (err)
		mthca_warn(dev, "MAP_EQ for cmd EQ %d failed (%d)\n",
			   dev->eq_table.eq[MTHCA_EQ_CMD].eqn, err);
	if (status)
		mthca_warn(dev, "MAP_EQ for cmd EQ %d returned status 0x%02x\n",
			   dev->eq_table.eq[MTHCA_EQ_CMD].eqn, status);

	for (i = 0; i < MTHCA_NUM_EQ; ++i)
		if (mthca_is_memfree(dev))
			arbel_eq_req_not(dev, dev->eq_table.eq[i].eqn_mask);
		else
			tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn);

	return 0;

err_out_cmd:
	mthca_free_irqs(dev);
	mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_CMD]);

err_out_async:
	mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_ASYNC]);

err_out_comp:
	mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_COMP]);

err_out_unmap:
	mthca_unmap_eq_regs(dev);

err_out_free:
	mthca_alloc_cleanup(&dev->eq_table.alloc);
	return err;
}

void mthca_cleanup_eq_table(struct mthca_dev *dev)
{
	u8 status;
	int i;

	mthca_free_irqs(dev);

	mthca_MAP_EQ(dev, async_mask(dev),
		     1, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status);
	mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
		     1, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status);

	for (i = 0; i < MTHCA_NUM_EQ; ++i)
		mthca_free_eq(dev, &dev->eq_table.eq[i]);

	mthca_unmap_eq_regs(dev);

	mthca_alloc_cleanup(&dev->eq_table.alloc);
}