/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <asm/delay.h>

#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/pci.h>

#include "cxio_resource.h"
#include "cxio_hal.h"
#include "cxgb3_offload.h"
#include "sge_defs.h"

static LIST_HEAD(rdev_list);
static cxio_hal_ev_callback_func_t cxio_ev_cb = NULL;

static struct cxio_rdev *cxio_hal_find_rdev_by_name(char *dev_name)
{
        struct cxio_rdev *rdev;

        list_for_each_entry(rdev, &rdev_list, entry)
                if (!strcmp(rdev->dev_name, dev_name))
                        return rdev;
        return NULL;
}

static struct cxio_rdev *cxio_hal_find_rdev_by_t3cdev(struct t3cdev *tdev)
{
        struct cxio_rdev *rdev;

        list_for_each_entry(rdev, &rdev_list, entry)
                if (rdev->t3cdev_p == tdev)
                        return rdev;
        return NULL;
}

int cxio_hal_cq_op(struct cxio_rdev *rdev_p, struct t3_cq *cq,
                   enum t3_cq_opcode op, u32 credit)
{
        int ret;
        struct t3_cqe *cqe;
        u32 rptr;
        struct rdma_cq_op setup;

        setup.id = cq->cqid;
        setup.credits = (op == CQ_CREDIT_UPDATE) ? credit : 0;
        setup.op = op;
        ret = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_OP, &setup);

        if ((ret < 0) || (op == CQ_CREDIT_UPDATE))
                return ret;

        /*
         * If the rearm returned an index other than our current index,
         * then there might be CQEs in flight (being DMA'd).  We must wait
         * here for them to complete or the consumer can miss a notification.
         */
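        /*
         * Illustrative example (not in the original source): if our
         * rptr currently indexes entry 5 and the rearm returns 7, the
         * CQEs destined for indices 5 and 6 were still being DMA'd
         * when HW rearmed; the loop below spins until the last of
         * them (index 6) becomes valid.
         */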
        if (Q_PTR2IDX((cq->rptr), cq->size_log2) != ret) {
                int i = 0;

                rptr = cq->rptr;

                /*
                 * Keep the generation correct by bumping rptr until it
                 * matches the index returned by the rearm - 1.
                 */
                while (Q_PTR2IDX((rptr + 1), cq->size_log2) != ret)
                        rptr++;

                /*
                 * Now rptr is the index for the (last) cqe that was
                 * in-flight at the time the HW rearmed the CQ.  We
                 * spin until that CQE is valid.
                 */
                cqe = cq->queue + Q_PTR2IDX(rptr, cq->size_log2);
                while (!CQ_VLD_ENTRY(rptr, cq->size_log2, cqe)) {
                        udelay(1);
                        if (i++ > 1000000) {
                                printk(KERN_ERR "%s: stalled rnic\n",
                                       rdev_p->dev_name);
                                BUG_ON(1);
                                return -EIO;
                        }
                }
        }
        return 0;
}

static int cxio_hal_clear_cq_ctx(struct cxio_rdev *rdev_p, u32 cqid)
{
        struct rdma_cq_setup setup;

        setup.id = cqid;
        setup.base_addr = 0;    /* NULL address */
        setup.size = 0;         /* disable the CQ */
        setup.credits = 0;
        setup.credit_thres = 0;
        setup.ovfl_mode = 0;
        return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
}

static int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid)
{
        u64 sge_cmd;
        struct t3_modify_qp_wr *wqe;
        struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);

        if (!skb) {
                PDBG("%s alloc_skb failed\n", __FUNCTION__);
                return -ENOMEM;
        }
        wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe));
        memset(wqe, 0, sizeof(*wqe));
        build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD, 3, 1, qpid, 7);
        wqe->flags = cpu_to_be32(MODQP_WRITE_EC);
        sge_cmd = qpid << 8 | 3;
        wqe->sge_cmd = cpu_to_be64(sge_cmd);
        skb->priority = CPL_PRIORITY_CONTROL;
        return (cxgb3_ofld_send(rdev_p->t3cdev_p, skb));
}

int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
{
        struct rdma_cq_setup setup;
        int size = (1UL << (cq->size_log2)) * sizeof(struct t3_cqe);

        cq->cqid = cxio_hal_get_cqid(rdev_p->rscp);
        if (!cq->cqid)
                return -ENOMEM;
        cq->sw_queue = kzalloc(size, GFP_KERNEL);
        if (!cq->sw_queue)
                return -ENOMEM;
        cq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev),
                                       size, &(cq->dma_addr), GFP_KERNEL);
        if (!cq->queue) {
                kfree(cq->sw_queue);
                return -ENOMEM;
        }
        pci_unmap_addr_set(cq, mapping, cq->dma_addr);
        memset(cq->queue, 0, size);
        setup.id = cq->cqid;
        setup.base_addr = (u64) (cq->dma_addr);
        setup.size = 1UL << cq->size_log2;
        setup.credits = 65535;
        setup.credit_thres = 1;
        if (rdev_p->t3cdev_p->type == T3B)
                setup.ovfl_mode = 0;
        else
                setup.ovfl_mode = 1;
        return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
}

int cxio_resize_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
{
        struct rdma_cq_setup setup;

        setup.id = cq->cqid;
        setup.base_addr = (u64) (cq->dma_addr);
        setup.size = 1UL << cq->size_log2;
        setup.credits = setup.size;
        setup.credit_thres = setup.size;        /* TBD: overflow recovery */
        setup.ovfl_mode = 1;
        return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
}

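/*
 * QPIDs are recycled in naturally aligned blocks of (qpmask + 1)
 * because all qpids in such a block share one user doorbell page
 * (see the qpshift/qpmask setup in cxio_rdev_open() below).
 * get_qpid() takes one qpid from the resource pool and caches the
 * remainder of its block on the ucontext free list, so later QPs in
 * the same process land on the already-mapped doorbell page.  For
 * example (illustrative, with qpmask == 15): allocating qpid 32 also
 * queues 33..47 on uctx->qpids.
 */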
static u32 get_qpid(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
{
        struct cxio_qpid_list *entry;
        u32 qpid;
        int i;

        mutex_lock(&uctx->lock);
        if (!list_empty(&uctx->qpids)) {
                entry = list_entry(uctx->qpids.next, struct cxio_qpid_list,
                                   entry);
                list_del(&entry->entry);
                qpid = entry->qpid;
                kfree(entry);
        } else {
                qpid = cxio_hal_get_qpid(rdev_p->rscp);
                if (!qpid)
                        goto out;
                for (i = qpid + 1; i & rdev_p->qpmask; i++) {
                        entry = kmalloc(sizeof *entry, GFP_KERNEL);
                        if (!entry)
                                break;
                        entry->qpid = i;
                        list_add_tail(&entry->entry, &uctx->qpids);
                }
        }
out:
        mutex_unlock(&uctx->lock);
        PDBG("%s qpid 0x%x\n", __FUNCTION__, qpid);
        return qpid;
}

static void put_qpid(struct cxio_rdev *rdev_p, u32 qpid,
                     struct cxio_ucontext *uctx)
{
        struct cxio_qpid_list *entry;

        entry = kmalloc(sizeof *entry, GFP_KERNEL);
        if (!entry)
                return;
        PDBG("%s qpid 0x%x\n", __FUNCTION__, qpid);
        entry->qpid = qpid;
        mutex_lock(&uctx->lock);
        list_add_tail(&entry->entry, &uctx->qpids);
        mutex_unlock(&uctx->lock);
}

void cxio_release_ucontext(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
{
        struct list_head *pos, *nxt;
        struct cxio_qpid_list *entry;

        mutex_lock(&uctx->lock);
        list_for_each_safe(pos, nxt, &uctx->qpids) {
                entry = list_entry(pos, struct cxio_qpid_list, entry);
                list_del_init(&entry->entry);
                if (!(entry->qpid & rdev_p->qpmask))
                        cxio_hal_put_qpid(rdev_p->rscp, entry->qpid);
                kfree(entry);
        }
        mutex_unlock(&uctx->lock);
}

void cxio_init_ucontext(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
{
        INIT_LIST_HEAD(&uctx->qpids);
        mutex_init(&uctx->lock);
}

int cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain,
                   struct t3_wq *wq, struct cxio_ucontext *uctx)
{
        int depth = 1UL << wq->size_log2;
        int rqsize = 1UL << wq->rq_size_log2;

        wq->qpid = get_qpid(rdev_p, uctx);
        if (!wq->qpid)
                return -ENOMEM;

        wq->rq = kzalloc(depth * sizeof(u64), GFP_KERNEL);
        if (!wq->rq)
                goto err1;

        wq->rq_addr = cxio_hal_rqtpool_alloc(rdev_p, rqsize);
        if (!wq->rq_addr)
                goto err2;

        wq->sq = kzalloc(depth * sizeof(struct t3_swsq), GFP_KERNEL);
        if (!wq->sq)
                goto err3;

        wq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev),
                                       depth * sizeof(union t3_wr),
                                       &(wq->dma_addr), GFP_KERNEL);
        if (!wq->queue)
                goto err4;

        memset(wq->queue, 0, depth * sizeof(union t3_wr));
        pci_unmap_addr_set(wq, mapping, wq->dma_addr);
        wq->doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
        if (!kernel_domain)
                wq->udb = (u64)rdev_p->rnic_info.udbell_physbase +
                          (wq->qpid << rdev_p->qpshift);
        PDBG("%s qpid 0x%x doorbell 0x%p udb 0x%llx\n", __FUNCTION__,
             wq->qpid, wq->doorbell, (unsigned long long) wq->udb);
        return 0;
err4:
        kfree(wq->sq);
err3:
        cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, rqsize);
err2:
        kfree(wq->rq);
err1:
        put_qpid(rdev_p, wq->qpid, uctx);
        return -ENOMEM;
}
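
/*
 * Rough usage sketch for the CQ/QP setup API above (illustrative
 * pseudo-caller, not taken from the original driver):
 *
 *      struct t3_cq cq = { .size_log2 = 8 };
 *      struct t3_wq wq = { .size_log2 = 10, .rq_size_log2 = 8 };
 *
 *      if (cxio_create_cq(rdev_p, &cq))
 *              return -ENOMEM;
 *      if (cxio_create_qp(rdev_p, 1, &wq, &rdev_p->uctx)) {
 *              cxio_destroy_cq(rdev_p, &cq);
 *              return -ENOMEM;
 *      }
 *      ...post work requests, poll with cxio_poll_cq()...
 *      cxio_destroy_qp(rdev_p, &wq, &rdev_p->uctx);
 *      cxio_destroy_cq(rdev_p, &cq);
 */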

int cxio_destroy_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
{
        int err;

        err = cxio_hal_clear_cq_ctx(rdev_p, cq->cqid);
        kfree(cq->sw_queue);
        dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
                          (1UL << (cq->size_log2))
                          * sizeof(struct t3_cqe), cq->queue,
                          pci_unmap_addr(cq, mapping));
        cxio_hal_put_cqid(rdev_p->rscp, cq->cqid);
        return err;
}

int cxio_destroy_qp(struct cxio_rdev *rdev_p, struct t3_wq *wq,
                    struct cxio_ucontext *uctx)
{
        dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
                          (1UL << (wq->size_log2))
                          * sizeof(union t3_wr), wq->queue,
                          pci_unmap_addr(wq, mapping));
        kfree(wq->sq);
        cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, (1UL << wq->rq_size_log2));
        kfree(wq->rq);
        put_qpid(rdev_p, wq->qpid, uctx);
        return 0;
}

static void insert_recv_cqe(struct t3_wq *wq, struct t3_cq *cq)
{
        struct t3_cqe cqe;

        PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __FUNCTION__,
             wq, cq, cq->sw_rptr, cq->sw_wptr);
        memset(&cqe, 0, sizeof(cqe));
        cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |
                                 V_CQE_OPCODE(T3_SEND) |
                                 V_CQE_TYPE(0) |
                                 V_CQE_SWCQE(1) |
                                 V_CQE_QPID(wq->qpid) |
                                 V_CQE_GENBIT(Q_GENBIT(cq->sw_wptr,
                                                       cq->size_log2)));
        *(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe;
        cq->sw_wptr++;
}

void cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count)
{
        u32 ptr;

        PDBG("%s wq %p cq %p\n", __FUNCTION__, wq, cq);

        /* flush RQ */
        PDBG("%s rq_rptr %u rq_wptr %u skip count %u\n", __FUNCTION__,
             wq->rq_rptr, wq->rq_wptr, count);
        ptr = wq->rq_rptr + count;
        while (ptr++ != wq->rq_wptr)
                insert_recv_cqe(wq, cq);
}
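
/*
 * Illustrative example for cxio_flush_rq() (not in the original
 * source): with rq_rptr == 10, rq_wptr == 14 and count == 1 (one
 * completion for this RQ already sitting in the CQ, as counted by
 * cxio_count_rcqes()), flush CQEs are inserted for the three
 * still-outstanding receive WRs at pointers 11, 12 and 13.
 */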

static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq,
                          struct t3_swsq *sqp)
{
        struct t3_cqe cqe;

        PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __FUNCTION__,
             wq, cq, cq->sw_rptr, cq->sw_wptr);
        memset(&cqe, 0, sizeof(cqe));
        cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |
                                 V_CQE_OPCODE(sqp->opcode) |
                                 V_CQE_TYPE(1) |
                                 V_CQE_SWCQE(1) |
                                 V_CQE_QPID(wq->qpid) |
                                 V_CQE_GENBIT(Q_GENBIT(cq->sw_wptr,
                                                       cq->size_log2)));
        cqe.u.scqe.wrid_hi = sqp->sq_wptr;

        *(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe;
        cq->sw_wptr++;
}

void cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count)
{
        __u32 ptr;
        struct t3_swsq *sqp = wq->sq + Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2);

        ptr = wq->sq_rptr + count;
        sqp += count;
        while (ptr != wq->sq_wptr) {
                insert_sq_cqe(wq, cq, sqp);
                sqp++;
                ptr++;
        }
}

/*
 * Move all CQEs from the HWCQ into the SWCQ.
 */
void cxio_flush_hw_cq(struct t3_cq *cq)
{
        struct t3_cqe *cqe, *swcqe;

        PDBG("%s cq %p cqid 0x%x\n", __FUNCTION__, cq, cq->cqid);
        cqe = cxio_next_hw_cqe(cq);
        while (cqe) {
                PDBG("%s flushing hwcq rptr 0x%x to swcq wptr 0x%x\n",
                     __FUNCTION__, cq->rptr, cq->sw_wptr);
                swcqe = cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2);
                *swcqe = *cqe;
                swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
                cq->sw_wptr++;
                cq->rptr++;
                cqe = cxio_next_hw_cqe(cq);
        }
}

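/*
 * Decide whether a CQE corresponds to a consumer-visible work request.
 * TERMINATE CQEs are discarded, and the error CQEs that T3A inserts
 * for incoming RDMA write / incoming read request / SEND-with-empty-RQ
 * failures do not map back to a posted WR (see the matching cases in
 * cxio_poll_cq() below), so none of those should be counted.
 */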
static int cqe_completes_wr(struct t3_cqe *cqe, struct t3_wq *wq)
{
        if (CQE_OPCODE(*cqe) == T3_TERMINATE)
                return 0;

        if ((CQE_OPCODE(*cqe) == T3_RDMA_WRITE) && RQ_TYPE(*cqe))
                return 0;

        if ((CQE_OPCODE(*cqe) == T3_READ_RESP) && SQ_TYPE(*cqe))
                return 0;

        if ((CQE_OPCODE(*cqe) == T3_SEND) && RQ_TYPE(*cqe) &&
            Q_EMPTY(wq->rq_rptr, wq->rq_wptr))
                return 0;

        return 1;
}

void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
{
        struct t3_cqe *cqe;
        u32 ptr;

        *count = 0;
        ptr = cq->sw_rptr;
        while (!Q_EMPTY(ptr, cq->sw_wptr)) {
                cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2));
                if ((SQ_TYPE(*cqe) || (CQE_OPCODE(*cqe) == T3_READ_RESP)) &&
                    (CQE_QPID(*cqe) == wq->qpid))
                        (*count)++;
                ptr++;
        }
        PDBG("%s cq %p count %d\n", __FUNCTION__, cq, *count);
}

void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
{
        struct t3_cqe *cqe;
        u32 ptr;

        *count = 0;
        PDBG("%s count zero %d\n", __FUNCTION__, *count);
        ptr = cq->sw_rptr;
        while (!Q_EMPTY(ptr, cq->sw_wptr)) {
                cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2));
                if (RQ_TYPE(*cqe) && (CQE_OPCODE(*cqe) != T3_READ_RESP) &&
                    (CQE_QPID(*cqe) == wq->qpid) && cqe_completes_wr(cqe, wq))
                        (*count)++;
                ptr++;
        }
        PDBG("%s cq %p count %d\n", __FUNCTION__, cq, *count);
}

static int cxio_hal_init_ctrl_cq(struct cxio_rdev *rdev_p)
{
        struct rdma_cq_setup setup;

        setup.id = 0;
        setup.base_addr = 0;    /* NULL address */
        setup.size = 1;         /* enable the CQ */
        setup.credits = 0;

        /* force SGE to redirect to RspQ and interrupt */
        setup.credit_thres = 0;
        setup.ovfl_mode = 1;
        return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
}

static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
{
        int err;
        u64 sge_cmd, ctx0, ctx1;
        u64 base_addr;
        struct t3_modify_qp_wr *wqe;
        struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);

        if (!skb) {
                PDBG("%s alloc_skb failed\n", __FUNCTION__);
                return -ENOMEM;
        }
        err = cxio_hal_init_ctrl_cq(rdev_p);
        if (err) {
                PDBG("%s err %d initializing ctrl_cq\n", __FUNCTION__, err);
                kfree_skb(skb);
                return err;
        }
        rdev_p->ctrl_qp.workq = dma_alloc_coherent(
                                        &(rdev_p->rnic_info.pdev->dev),
                                        (1 << T3_CTRL_QP_SIZE_LOG2) *
                                        sizeof(union t3_wr),
                                        &(rdev_p->ctrl_qp.dma_addr),
                                        GFP_KERNEL);
        if (!rdev_p->ctrl_qp.workq) {
                PDBG("%s dma_alloc_coherent failed\n", __FUNCTION__);
                kfree_skb(skb);
                return -ENOMEM;
        }
        pci_unmap_addr_set(&rdev_p->ctrl_qp, mapping,
                           rdev_p->ctrl_qp.dma_addr);
        rdev_p->ctrl_qp.doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
        memset(rdev_p->ctrl_qp.workq, 0,
               (1 << T3_CTRL_QP_SIZE_LOG2) * sizeof(union t3_wr));

        mutex_init(&rdev_p->ctrl_qp.lock);
        init_waitqueue_head(&rdev_p->ctrl_qp.waitq);

        /* update HW Ctrl QP context */
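        /*
         * Our reading of the packing below (per the V_EC_* macros in
         * the firmware headers): the workq base DMA address is used in
         * 4KB units; EC_BASE_LO takes its bits 12..27, ctx1 carries
         * bits 28..59, and EC_BASE_HI holds bits 60..63.
         */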
        base_addr = rdev_p->ctrl_qp.dma_addr;
        base_addr >>= 12;
        ctx0 = (V_EC_SIZE((1 << T3_CTRL_QP_SIZE_LOG2)) |
                V_EC_BASE_LO((u32) base_addr & 0xffff));
        ctx0 <<= 32;
        ctx0 |= V_EC_CREDITS(FW_WR_NUM);
        base_addr >>= 16;
        ctx1 = (u32) base_addr;
        base_addr >>= 32;
        ctx1 |= ((u64) (V_EC_BASE_HI((u32) base_addr & 0xf) | V_EC_RESPQ(0) |
                        V_EC_TYPE(0) | V_EC_GEN(1) |
                        V_EC_UP_TOKEN(T3_CTL_QP_TID) | F_EC_VALID)) << 32;
        wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe));
        memset(wqe, 0, sizeof(*wqe));
        build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD, 0, 1,
                       T3_CTL_QP_TID, 7);
        wqe->flags = cpu_to_be32(MODQP_WRITE_EC);
        sge_cmd = (3ULL << 56) | FW_RI_SGEEC_START << 8 | 3;
        wqe->sge_cmd = cpu_to_be64(sge_cmd);
        wqe->ctx1 = cpu_to_be64(ctx1);
        wqe->ctx0 = cpu_to_be64(ctx0);
        PDBG("CtrlQP dma_addr 0x%llx workq %p size %d\n",
             (unsigned long long) rdev_p->ctrl_qp.dma_addr,
             rdev_p->ctrl_qp.workq, 1 << T3_CTRL_QP_SIZE_LOG2);
        skb->priority = CPL_PRIORITY_CONTROL;
        return (cxgb3_ofld_send(rdev_p->t3cdev_p, skb));
}

static int cxio_hal_destroy_ctrl_qp(struct cxio_rdev *rdev_p)
{
        dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
                          (1UL << T3_CTRL_QP_SIZE_LOG2)
                          * sizeof(union t3_wr), rdev_p->ctrl_qp.workq,
                          pci_unmap_addr(&rdev_p->ctrl_qp, mapping));
        return cxio_hal_clear_qp_ctx(rdev_p, T3_CTRL_QP_ID);
}

/* write len bytes of data into addr (32B aligned address)
 * If data is NULL, clear len bytes of memory to zero.
 * The caller acquires the ctrl_qp lock before the call.
 */
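/*
 * Illustrative sizing example (not in the original source): len = 200
 * gives nr_wqe = 3.  The first two WQEs each carry 96 bytes of payload
 * with utx_len = 3 (three 32-byte units); the last WQE carries the
 * remaining 8 bytes with utx_len = 1, zero-padded below to the 32-byte
 * boundary.
 */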
static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr,
                                      u32 len, void *data, int completion)
{
        u32 i, nr_wqe, copy_len;
        u8 *copy_data;
        u8 wr_len, utx_len;     /* length in 8-byte flits */
        enum t3_wr_flags flag;
        __be64 *wqe;
        u64 utx_cmd;

        addr &= 0x7FFFFFF;
        nr_wqe = len % 96 ? len / 96 + 1 : len / 96;    /* 96B max per WQE */
        PDBG("%s wptr 0x%x rptr 0x%x len %d, nr_wqe %d data %p addr 0x%0x\n",
             __FUNCTION__, rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, len,
             nr_wqe, data, addr);
        utx_len = 3;            /* in 32B unit */
        for (i = 0; i < nr_wqe; i++) {
                if (Q_FULL(rdev_p->ctrl_qp.rptr, rdev_p->ctrl_qp.wptr,
                           T3_CTRL_QP_SIZE_LOG2)) {
                        PDBG("%s ctrl_qp full wptr 0x%0x rptr 0x%0x, "
                             "wait for more space i %d\n", __FUNCTION__,
                             rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, i);
                        if (wait_event_interruptible(rdev_p->ctrl_qp.waitq,
                                             !Q_FULL(rdev_p->ctrl_qp.rptr,
                                                     rdev_p->ctrl_qp.wptr,
                                                     T3_CTRL_QP_SIZE_LOG2))) {
                                PDBG("%s ctrl_qp workq interrupted\n",
                                     __FUNCTION__);
                                return -ERESTARTSYS;
                        }
                        PDBG("%s ctrl_qp wakeup, continue posting work request "
                             "i %d\n", __FUNCTION__, i);
                }
                wqe = (__be64 *)(rdev_p->ctrl_qp.workq + (rdev_p->ctrl_qp.wptr %
                                                (1 << T3_CTRL_QP_SIZE_LOG2)));
                flag = 0;
                if (i == (nr_wqe - 1)) {
                        /* last WQE */
                        flag = completion ? T3_COMPLETION_FLAG : 0;
                        if (len % 32)
                                utx_len = len / 32 + 1;
                        else
                                utx_len = len / 32;
                }

                /*
                 * Force a CQE to return the credit to the workq in case
                 * we posted more than half the max QP size of WRs
                 */
                if ((i != 0) &&
                    (i % (((1 << T3_CTRL_QP_SIZE_LOG2)) >> 1) == 0)) {
                        flag = T3_COMPLETION_FLAG;
                        PDBG("%s force completion at i %d\n", __FUNCTION__, i);
                }

                /* build the utx mem command */
                wqe += (sizeof(struct t3_bypass_wr) >> 3);
                utx_cmd = (T3_UTX_MEM_WRITE << 28) | (addr + i * 3);
                utx_cmd <<= 32;
                utx_cmd |= (utx_len << 28) | ((utx_len << 2) + 1);
                *wqe = cpu_to_be64(utx_cmd);
                wqe++;
                copy_data = (u8 *) data + i * 96;
                copy_len = len > 96 ? 96 : len;

                /* clear memory content if data is NULL */
                if (data)
                        memcpy(wqe, copy_data, copy_len);
                else
                        memset(wqe, 0, copy_len);
                if (copy_len % 32)
                        memset(((u8 *) wqe) + copy_len, 0,
                               32 - (copy_len % 32));
                wr_len = ((sizeof(struct t3_bypass_wr)) >> 3) + 1 +
                         (utx_len << 2);
                wqe = (__be64 *)(rdev_p->ctrl_qp.workq + (rdev_p->ctrl_qp.wptr %
                                                (1 << T3_CTRL_QP_SIZE_LOG2)));

                /* wptr in the WRID[31:0] */
                ((union t3_wrid *)(wqe + 1))->id0.low = rdev_p->ctrl_qp.wptr;

                /*
                 * This must be the last write with a memory barrier
                 * for the genbit
                 */
                build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_BP, flag,
                               Q_GENBIT(rdev_p->ctrl_qp.wptr,
                                        T3_CTRL_QP_SIZE_LOG2), T3_CTRL_QP_ID,
                               wr_len);
                if (flag == T3_COMPLETION_FLAG)
                        ring_doorbell(rdev_p->ctrl_qp.doorbell, T3_CTRL_QP_ID);
                len -= 96;
                rdev_p->ctrl_qp.wptr++;
        }
        return 0;
}

/* IN: stag key, pdid, perm, zbva, to, len, page_size, pbl, and pbl_size
 * OUT: stag index, actual pbl_size, pbl_addr allocated.
 * TBD: shared memory region support
 */
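/*
 * STag layout as used below: the upper 24 bits are the TPT index and
 * the low 8 bits are the consumer-supplied key, i.e.
 * stag = (stag_idx << 8) | key.
 */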
static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
                         u32 *stag, u8 stag_state, u32 pdid,
                         enum tpt_mem_type type, enum tpt_mem_perm perm,
                         u32 zbva, u64 to, u32 len, u8 page_size, __be64 *pbl,
                         u32 *pbl_size, u32 *pbl_addr)
{
        int err;
        struct tpt_entry tpt;
        u32 stag_idx;
        u32 wptr;
        int rereg = (*stag != T3_STAG_UNSET);

        stag_state = stag_state > 0;
        stag_idx = (*stag) >> 8;

        if (!reset_tpt_entry && *stag == T3_STAG_UNSET) {
                stag_idx = cxio_hal_get_stag(rdev_p->rscp);
                if (!stag_idx)
                        return -ENOMEM;
                *stag = (stag_idx << 8) | ((*stag) & 0xFF);
        }
        PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
             __FUNCTION__, stag_state, type, pdid, stag_idx);

        if (reset_tpt_entry)
                cxio_hal_pblpool_free(rdev_p, *pbl_addr, *pbl_size << 3);
        else if (!rereg) {
                *pbl_addr = cxio_hal_pblpool_alloc(rdev_p, *pbl_size << 3);
                if (!*pbl_addr)
                        return -ENOMEM;
        }

        mutex_lock(&rdev_p->ctrl_qp.lock);

        /* write PBL first if any - update pbl only if pbl list exist */
        if (pbl) {
                PDBG("%s *pbl_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
                     __FUNCTION__, *pbl_addr, rdev_p->rnic_info.pbl_base,
                     *pbl_size);
                err = cxio_hal_ctrl_qp_write_mem(rdev_p,
                                                 (*pbl_addr >> 5),
                                                 (*pbl_size << 3), pbl, 0);
                if (err)
                        goto ret;
        }

        /* write TPT entry */
        if (reset_tpt_entry)
                memset(&tpt, 0, sizeof(tpt));
        else {
                tpt.valid_stag_pdid = cpu_to_be32(F_TPT_VALID |
                                V_TPT_STAG_KEY((*stag) & M_TPT_STAG_KEY) |
                                V_TPT_STAG_STATE(stag_state) |
                                V_TPT_STAG_TYPE(type) | V_TPT_PDID(pdid));
                BUG_ON(page_size >= 28);
                tpt.flags_pagesize_qpid = cpu_to_be32(V_TPT_PERM(perm) |
                                F_TPT_MW_BIND_ENABLE |
                                V_TPT_ADDR_TYPE((zbva ? TPT_ZBTO : TPT_VATO)) |
                                V_TPT_PAGE_SIZE(page_size));
                tpt.rsvd_pbl_addr = reset_tpt_entry ? 0 :
                        cpu_to_be32(V_TPT_PBL_ADDR(PBL_OFF(rdev_p, *pbl_addr) >> 3));
                tpt.len = cpu_to_be32(len);
                tpt.va_hi = cpu_to_be32((u32) (to >> 32));
                tpt.va_low_or_fbo = cpu_to_be32((u32) (to & 0xFFFFFFFFULL));
                tpt.rsvd_bind_cnt_or_pstag = 0;
                tpt.rsvd_pbl_size = reset_tpt_entry ? 0 :
                        cpu_to_be32(V_TPT_PBL_SIZE((*pbl_size) >> 2));
        }
        err = cxio_hal_ctrl_qp_write_mem(rdev_p,
                                         stag_idx +
                                         (rdev_p->rnic_info.tpt_base >> 5),
                                         sizeof(tpt), &tpt, 1);

        /* release the stag index to free pool */
        if (reset_tpt_entry)
                cxio_hal_put_stag(rdev_p->rscp, stag_idx);
ret:
        wptr = rdev_p->ctrl_qp.wptr;
        mutex_unlock(&rdev_p->ctrl_qp.lock);
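        /*
         * Wait for the final (signaled) write to complete:
         * cxio_hal_ev_handler() advances ctrl_qp.rptr from the
         * completion's WRID and wakes this queue.
         */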
        if (!err)
                if (wait_event_interruptible(rdev_p->ctrl_qp.waitq,
                                             SEQ32_GE(rdev_p->ctrl_qp.rptr,
                                                      wptr)))
                        return -ERESTARTSYS;
        return err;
}

int cxio_register_phys_mem(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid,
                           enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
                           u8 page_size, __be64 *pbl, u32 *pbl_size,
                           u32 *pbl_addr)
{
        *stag = T3_STAG_UNSET;
        return __cxio_tpt_op(rdev_p, 0, stag, 1, pdid, TPT_NON_SHARED_MR, perm,
                             zbva, to, len, page_size, pbl, pbl_size, pbl_addr);
}

int cxio_reregister_phys_mem(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid,
                             enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
                             u8 page_size, __be64 *pbl, u32 *pbl_size,
                             u32 *pbl_addr)
{
        return __cxio_tpt_op(rdev_p, 0, stag, 1, pdid, TPT_NON_SHARED_MR, perm,
                             zbva, to, len, page_size, pbl, pbl_size, pbl_addr);
}

int cxio_dereg_mem(struct cxio_rdev *rdev_p, u32 stag, u32 pbl_size,
                   u32 pbl_addr)
{
        return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0, NULL,
                             &pbl_size, &pbl_addr);
}

int cxio_allocate_window(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid)
{
        u32 pbl_size = 0;

        *stag = T3_STAG_UNSET;
        return __cxio_tpt_op(rdev_p, 0, stag, 0, pdid, TPT_MW, 0, 0, 0ULL, 0, 0,
                             NULL, &pbl_size, NULL);
}

int cxio_deallocate_window(struct cxio_rdev *rdev_p, u32 stag)
{
        return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0, NULL,
                             NULL, NULL);
}

int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr)
{
        struct t3_rdma_init_wr *wqe;
        struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_ATOMIC);

        if (!skb)
                return -ENOMEM;
        PDBG("%s rdev_p %p\n", __FUNCTION__, rdev_p);
        wqe = (struct t3_rdma_init_wr *) __skb_put(skb, sizeof(*wqe));
        wqe->wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_INIT));
        wqe->wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(attr->tid) |
                                           V_FW_RIWR_LEN(sizeof(*wqe) >> 3));
        wqe->wrid.id1 = 0;
        wqe->qpid = cpu_to_be32(attr->qpid);
        wqe->pdid = cpu_to_be32(attr->pdid);
        wqe->scqid = cpu_to_be32(attr->scqid);
        wqe->rcqid = cpu_to_be32(attr->rcqid);
        wqe->rq_addr = cpu_to_be32(attr->rq_addr - rdev_p->rnic_info.rqt_base);
        wqe->rq_size = cpu_to_be32(attr->rq_size);
        wqe->mpaattrs = attr->mpaattrs;
        wqe->qpcaps = attr->qpcaps;
        wqe->ulpdu_size = cpu_to_be16(attr->tcp_emss);
        wqe->flags = cpu_to_be32(attr->flags);
        wqe->ord = cpu_to_be32(attr->ord);
        wqe->ird = cpu_to_be32(attr->ird);
        wqe->qp_dma_addr = cpu_to_be64(attr->qp_dma_addr);
        wqe->qp_dma_size = cpu_to_be32(attr->qp_dma_size);
        wqe->rsvd = 0;
        skb->priority = 0;      /* 0=>ToeQ; 1=>CtrlQ */
        return (cxgb3_ofld_send(rdev_p->t3cdev_p, skb));
}

void cxio_register_ev_cb(cxio_hal_ev_callback_func_t ev_cb)
{
        cxio_ev_cb = ev_cb;
}

void cxio_unregister_ev_cb(cxio_hal_ev_callback_func_t ev_cb)
{
        cxio_ev_cb = NULL;
}

static int cxio_hal_ev_handler(struct t3cdev *t3cdev_p, struct sk_buff *skb)
{
        static int cnt;
        struct cxio_rdev *rdev_p = NULL;
        struct respQ_msg_t *rsp_msg = (struct respQ_msg_t *) skb->data;

        PDBG("%d: %s cq_id 0x%x cq_ptr 0x%x genbit %0x overflow %0x an %0x"
             " se %0x notify %0x cqbranch %0x creditth %0x\n",
             cnt, __FUNCTION__, RSPQ_CQID(rsp_msg), RSPQ_CQPTR(rsp_msg),
             RSPQ_GENBIT(rsp_msg), RSPQ_OVERFLOW(rsp_msg), RSPQ_AN(rsp_msg),
             RSPQ_SE(rsp_msg), RSPQ_NOTIFY(rsp_msg), RSPQ_CQBRANCH(rsp_msg),
             RSPQ_CREDIT_THRESH(rsp_msg));
        PDBG("CQE: QPID 0x%0x genbit %0x type 0x%0x status 0x%0x opcode %d "
             "len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
             CQE_QPID(rsp_msg->cqe), CQE_GENBIT(rsp_msg->cqe),
             CQE_TYPE(rsp_msg->cqe), CQE_STATUS(rsp_msg->cqe),
             CQE_OPCODE(rsp_msg->cqe), CQE_LEN(rsp_msg->cqe),
             CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
        rdev_p = (struct cxio_rdev *)t3cdev_p->ulp;
        if (!rdev_p) {
                PDBG("%s called by t3cdev %p with null ulp\n", __FUNCTION__,
                     t3cdev_p);
                return 0;
        }
        if (CQE_QPID(rsp_msg->cqe) == T3_CTRL_QP_ID) {
                rdev_p->ctrl_qp.rptr = CQE_WRID_LOW(rsp_msg->cqe) + 1;
                wake_up_interruptible(&rdev_p->ctrl_qp.waitq);
                dev_kfree_skb_irq(skb);
        } else if (CQE_QPID(rsp_msg->cqe) == 0xfff8)
                dev_kfree_skb_irq(skb);
        else if (cxio_ev_cb)
                (*cxio_ev_cb) (rdev_p, skb);
        else
                dev_kfree_skb_irq(skb);
        cnt++;
        return 0;
}

/* Caller takes care of locking if needed */
int cxio_rdev_open(struct cxio_rdev *rdev_p)
{
        struct net_device *netdev_p = NULL;
        int err = 0;

        if (strlen(rdev_p->dev_name)) {
                if (cxio_hal_find_rdev_by_name(rdev_p->dev_name))
                        return -EBUSY;
                netdev_p = dev_get_by_name(rdev_p->dev_name);
                if (!netdev_p)
                        return -EINVAL;
                dev_put(netdev_p);
        } else if (rdev_p->t3cdev_p) {
                if (cxio_hal_find_rdev_by_t3cdev(rdev_p->t3cdev_p))
                        return -EBUSY;
                netdev_p = rdev_p->t3cdev_p->lldev;
                strncpy(rdev_p->dev_name, rdev_p->t3cdev_p->name,
                        T3_MAX_DEV_NAME_LEN);
        } else {
                PDBG("%s t3cdev_p or dev_name must be set\n", __FUNCTION__);
                return -EINVAL;
        }

        list_add_tail(&rdev_p->entry, &rdev_list);

        PDBG("%s opening rnic dev %s\n", __FUNCTION__, rdev_p->dev_name);
        memset(&rdev_p->ctrl_qp, 0, sizeof(rdev_p->ctrl_qp));
        if (!rdev_p->t3cdev_p)
                rdev_p->t3cdev_p = T3CDEV(netdev_p);
        rdev_p->t3cdev_p->ulp = (void *) rdev_p;
        err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_GET_PARAMS,
                                    &(rdev_p->rnic_info));
        if (err) {
                printk(KERN_ERR "%s t3cdev_p(%p)->ctl returned error %d.\n",
                       __FUNCTION__, rdev_p->t3cdev_p, err);
                goto err1;
        }
        err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, GET_PORTS,
                                    &(rdev_p->port_info));
        if (err) {
                printk(KERN_ERR "%s t3cdev_p(%p)->ctl returned error %d.\n",
                       __FUNCTION__, rdev_p->t3cdev_p, err);
                goto err1;
        }

        /*
         * qpshift is the number of bits to shift the qpid left in order
         * to get the correct address of the doorbell for that qp.
         */
        cxio_init_ucontext(rdev_p, &rdev_p->uctx);
        rdev_p->qpshift = PAGE_SHIFT -
                          ilog2(65536 >>
                                ilog2(rdev_p->rnic_info.udbell_len >>
                                      PAGE_SHIFT));
        rdev_p->qpnr = rdev_p->rnic_info.udbell_len >> PAGE_SHIFT;
        rdev_p->qpmask = (65536 >> ilog2(rdev_p->qpnr)) - 1;
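
        /*
         * Worked example (illustrative, assuming 4KB pages): with
         * udbell_len = 16MB, qpnr = 4096, so qpmask = 15 and
         * qpshift = 8.  Sixteen 256-byte user doorbells then share
         * each page, which is why get_qpid() hands qpids out in
         * blocks of qpmask + 1.
         */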
        PDBG("%s rnic %s info: tpt_base 0x%0x tpt_top 0x%0x num stags %d "
             "pbl_base 0x%0x pbl_top 0x%0x rqt_base 0x%0x, rqt_top 0x%0x\n",
             __FUNCTION__, rdev_p->dev_name, rdev_p->rnic_info.tpt_base,
             rdev_p->rnic_info.tpt_top, cxio_num_stags(rdev_p),
             rdev_p->rnic_info.pbl_base,
             rdev_p->rnic_info.pbl_top, rdev_p->rnic_info.rqt_base,
             rdev_p->rnic_info.rqt_top);
        PDBG("udbell_len 0x%0x udbell_physbase 0x%lx kdb_addr %p qpshift %lu "
             "qpnr %d qpmask 0x%x\n",
             rdev_p->rnic_info.udbell_len,
             rdev_p->rnic_info.udbell_physbase, rdev_p->rnic_info.kdb_addr,
             rdev_p->qpshift, rdev_p->qpnr, rdev_p->qpmask);

        err = cxio_hal_init_ctrl_qp(rdev_p);
        if (err) {
                printk(KERN_ERR "%s error %d initializing ctrl_qp.\n",
                       __FUNCTION__, err);
                goto err1;
        }
        err = cxio_hal_init_resource(rdev_p, cxio_num_stags(rdev_p), 0,
                                     0, T3_MAX_NUM_QP, T3_MAX_NUM_CQ,
                                     T3_MAX_NUM_PD);
        if (err) {
                printk(KERN_ERR "%s error %d initializing hal resources.\n",
                       __FUNCTION__, err);
                goto err2;
        }
        err = cxio_hal_pblpool_create(rdev_p);
        if (err) {
                printk(KERN_ERR "%s error %d initializing pbl mem pool.\n",
                       __FUNCTION__, err);
                goto err3;
        }
        err = cxio_hal_rqtpool_create(rdev_p);
        if (err) {
                printk(KERN_ERR "%s error %d initializing rqt mem pool.\n",
                       __FUNCTION__, err);
                goto err4;
        }
        return 0;
err4:
        cxio_hal_pblpool_destroy(rdev_p);
err3:
        cxio_hal_destroy_resource(rdev_p->rscp);
err2:
        cxio_hal_destroy_ctrl_qp(rdev_p);
err1:
        list_del(&rdev_p->entry);
        return err;
}

void cxio_rdev_close(struct cxio_rdev *rdev_p)
{
        if (rdev_p) {
                cxio_hal_pblpool_destroy(rdev_p);
                cxio_hal_rqtpool_destroy(rdev_p);
                list_del(&rdev_p->entry);
                rdev_p->t3cdev_p->ulp = NULL;
                cxio_hal_destroy_ctrl_qp(rdev_p);
                cxio_hal_destroy_resource(rdev_p->rscp);
        }
}

int __init cxio_hal_init(void)
{
        if (cxio_hal_init_rhdl_resource(T3_MAX_NUM_RI))
                return -ENOMEM;
        t3_register_cpl_handler(CPL_ASYNC_NOTIF, cxio_hal_ev_handler);
        return 0;
}

void __exit cxio_hal_exit(void)
{
        struct cxio_rdev *rdev, *tmp;

        t3_register_cpl_handler(CPL_ASYNC_NOTIF, NULL);
        list_for_each_entry_safe(rdev, tmp, &rdev_list, entry)
                cxio_rdev_close(rdev);
        cxio_hal_destroy_rhdl_resource();
}

static void flush_completed_wrs(struct t3_wq *wq, struct t3_cq *cq)
{
        struct t3_swsq *sqp;
        __u32 ptr = wq->sq_rptr;
        int count = Q_COUNT(wq->sq_rptr, wq->sq_wptr);

        sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
        while (count--)
                if (!sqp->signaled) {
                        ptr++;
                        sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
                } else if (sqp->complete) {

                        /*
                         * Insert this completed cqe into the swcq.
                         */
                        PDBG("%s moving cqe into swcq sq idx %ld cq idx %ld\n",
                             __FUNCTION__, Q_PTR2IDX(ptr, wq->sq_size_log2),
                             Q_PTR2IDX(cq->sw_wptr, cq->size_log2));
                        sqp->cqe.header |= htonl(V_CQE_SWCQE(1));
                        *(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2))
                                = sqp->cqe;
                        cq->sw_wptr++;
                        sqp->signaled = 0;
                        break;
                } else
                        break;
}

static void create_read_req_cqe(struct t3_wq *wq, struct t3_cqe *hw_cqe,
                                struct t3_cqe *read_cqe)
{
        read_cqe->u.scqe.wrid_hi = wq->oldest_read->sq_wptr;
        read_cqe->len = wq->oldest_read->read_len;
        read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(*hw_cqe)) |
                                 V_CQE_SWCQE(SW_CQE(*hw_cqe)) |
                                 V_CQE_OPCODE(T3_READ_REQ) |
                                 V_CQE_TYPE(1));
}

/*
 * Advance wq->oldest_read to the next read WR in the SWSQ,
 * or set it to NULL if there is none.
 */
static void advance_oldest_read(struct t3_wq *wq)
{
        u32 rptr = wq->oldest_read - wq->sq + 1;
        u32 wptr = Q_PTR2IDX(wq->sq_wptr, wq->sq_size_log2);

        while (Q_PTR2IDX(rptr, wq->sq_size_log2) != wptr) {
                wq->oldest_read = wq->sq + Q_PTR2IDX(rptr, wq->sq_size_log2);

                if (wq->oldest_read->opcode == T3_READ_REQ)
                        return;
                rptr++;
        }
        wq->oldest_read = NULL;
}

/*
 * cxio_poll_cq
 *
 * Caller must:
 *      check the validity of the first CQE,
 *      supply the wq associated with the qpid.
 *
 * credit: cq credit to return to sge.
 * cqe_flushed: 1 iff the CQE is flushed.
 * cqe: copy of the polled CQE.
 *
 * return value:
 *      0            CQE returned,
 *      -1           CQE skipped, try again.
 */
int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
                 u8 *cqe_flushed, u64 *cookie, u32 *credit)
{
        int ret = 0;
        struct t3_cqe *hw_cqe, read_cqe;

        *cqe_flushed = 0;
        *credit = 0;
        hw_cqe = cxio_next_cqe(cq);

        PDBG("%s CQE OOO %d qpid 0x%0x genbit %d type %d status 0x%0x"
             " opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
             __FUNCTION__, CQE_OOO(*hw_cqe), CQE_QPID(*hw_cqe),
             CQE_GENBIT(*hw_cqe), CQE_TYPE(*hw_cqe), CQE_STATUS(*hw_cqe),
             CQE_OPCODE(*hw_cqe), CQE_LEN(*hw_cqe), CQE_WRID_HI(*hw_cqe),
             CQE_WRID_LOW(*hw_cqe));

        /*
         * Skip CQEs not affiliated with a QP.
         */
        if (wq == NULL) {
                ret = -1;
                goto skip_cqe;
        }

        /*
         * Gotta tweak READ completions:
         *      1) the cqe doesn't contain the sq_wptr from the wr.
         *      2) opcode not reflected from the wr.
         *      3) read_len not reflected from the wr.
         *      4) cq_type is RQ_TYPE not SQ_TYPE.
         */
        if (RQ_TYPE(*hw_cqe) && (CQE_OPCODE(*hw_cqe) == T3_READ_RESP)) {

                /*
                 * Don't write to the HWCQ, so create a new read req CQE
                 * in local memory.
                 */
                create_read_req_cqe(wq, hw_cqe, &read_cqe);
                hw_cqe = &read_cqe;
                advance_oldest_read(wq);
        }

        /*
         * T3A: Discard TERMINATE CQEs.
         */
        if (CQE_OPCODE(*hw_cqe) == T3_TERMINATE) {
                ret = -1;
                wq->error = 1;
                goto skip_cqe;
        }

        if (CQE_STATUS(*hw_cqe) || wq->error) {
                *cqe_flushed = wq->error;
                wq->error = 1;

                /*
                 * T3A inserts errors into the CQE.  We cannot return
                 * these as work completions.
                 */
                /* incoming write failures */
                if ((CQE_OPCODE(*hw_cqe) == T3_RDMA_WRITE)
                    && RQ_TYPE(*hw_cqe)) {
                        ret = -1;
                        goto skip_cqe;
                }
                /* incoming read request failures */
                if ((CQE_OPCODE(*hw_cqe) == T3_READ_RESP) && SQ_TYPE(*hw_cqe)) {
                        ret = -1;
                        goto skip_cqe;
                }

                /* incoming SEND with no receive posted failures */
                if ((CQE_OPCODE(*hw_cqe) == T3_SEND) && RQ_TYPE(*hw_cqe) &&
                    Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) {
                        ret = -1;
                        goto skip_cqe;
                }
                goto proc_cqe;
        }

        /*
         * RECV completion.
         */
        if (RQ_TYPE(*hw_cqe)) {

                /*
                 * HW only validates 4 bits of MSN.  So we must validate that
                 * the MSN in the SEND is the next expected MSN.  If it's not,
                 * then we complete this with TPT_ERR_MSN and mark the wq in
                 * error.
                 */
                if (unlikely((CQE_WRID_MSN(*hw_cqe) != (wq->rq_rptr + 1)))) {
                        wq->error = 1;
                        hw_cqe->header |= htonl(V_CQE_STATUS(TPT_ERR_MSN));
                        goto proc_cqe;
                }
                goto proc_cqe;
        }
1194 | /* | |
1195 | * If we get here its a send completion. | |
1196 | * | |
1197 | * Handle out of order completion. These get stuffed | |
1198 | * in the SW SQ. Then the SW SQ is walked to move any | |
1199 | * now in-order completions into the SW CQ. This handles | |
1200 | * 2 cases: | |
1201 | * 1) reaping unsignaled WRs when the first subsequent | |
1202 | * signaled WR is completed. | |
1203 | * 2) out of order read completions. | |
1204 | */ | |
1205 | if (!SW_CQE(*hw_cqe) && (CQE_WRID_SQ_WPTR(*hw_cqe) != wq->sq_rptr)) { | |
1206 | struct t3_swsq *sqp; | |
1207 | ||
1208 | PDBG("%s out of order completion going in swsq at idx %ld\n", | |
1209 | __FUNCTION__, | |
1210 | Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe), wq->sq_size_log2)); | |
1211 | sqp = wq->sq + | |
1212 | Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe), wq->sq_size_log2); | |
1213 | sqp->cqe = *hw_cqe; | |
1214 | sqp->complete = 1; | |
1215 | ret = -1; | |
1216 | goto flush_wq; | |
1217 | } | |
1218 | ||
1219 | proc_cqe: | |
1220 | *cqe = *hw_cqe; | |
1221 | ||
1222 | /* | |
1223 | * Reap the associated WR(s) that are freed up with this | |
1224 | * completion. | |
1225 | */ | |
1226 | if (SQ_TYPE(*hw_cqe)) { | |
1227 | wq->sq_rptr = CQE_WRID_SQ_WPTR(*hw_cqe); | |
1228 | PDBG("%s completing sq idx %ld\n", __FUNCTION__, | |
1229 | Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2)); | |
1230 | *cookie = (wq->sq + | |
1231 | Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2))->wr_id; | |
1232 | wq->sq_rptr++; | |
1233 | } else { | |
1234 | PDBG("%s completing rq idx %ld\n", __FUNCTION__, | |
1235 | Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2)); | |
1236 | *cookie = *(wq->rq + Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2)); | |
1237 | wq->rq_rptr++; | |
1238 | } | |
1239 | ||
1240 | flush_wq: | |
1241 | /* | |
1242 | * Flush any completed cqes that are now in-order. | |
1243 | */ | |
1244 | flush_completed_wrs(wq, cq); | |
1245 | ||
1246 | skip_cqe: | |
1247 | if (SW_CQE(*hw_cqe)) { | |
1248 | PDBG("%s cq %p cqid 0x%x skip sw cqe sw_rptr 0x%x\n", | |
1249 | __FUNCTION__, cq, cq->cqid, cq->sw_rptr); | |
1250 | ++cq->sw_rptr; | |
1251 | } else { | |
1252 | PDBG("%s cq %p cqid 0x%x skip hw cqe rptr 0x%x\n", | |
1253 | __FUNCTION__, cq, cq->cqid, cq->rptr); | |
1254 | ++cq->rptr; | |
1255 | ||
1256 | /* | |
1257 | * T3A: compute credits. | |
1258 | */ | |
1259 | if (((cq->rptr - cq->wptr) > (1 << (cq->size_log2 - 1))) | |
1260 | || ((cq->rptr - cq->wptr) >= 128)) { | |
1261 | *credit = cq->rptr - cq->wptr; | |
1262 | cq->wptr = cq->rptr; | |
1263 | } | |
1264 | } | |
1265 | return ret; | |
1266 | } |