/*
 *  linux/drivers/net/ehea/ehea_qmr.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
31 #include "ehea_phyp.h"
34 static void *hw_qpageit_get_inc(struct hw_queue
*queue
)
36 void *retvalue
= hw_qeit_get(queue
);
38 queue
->current_q_offset
+= queue
->pagesize
;
39 if (queue
->current_q_offset
> queue
->queue_length
) {
40 queue
->current_q_offset
-= queue
->pagesize
;
42 } else if (((u64
) retvalue
) & (EHEA_PAGESIZE
-1)) {
43 ehea_error("not on pageboundary");
49 static int hw_queue_ctor(struct hw_queue
*queue
, const u32 nr_of_pages
,
50 const u32 pagesize
, const u32 qe_size
)
52 int pages_per_kpage
= PAGE_SIZE
/ pagesize
;
55 if ((pagesize
> PAGE_SIZE
) || (!pages_per_kpage
)) {
56 ehea_error("pagesize conflict! kernel pagesize=%d, "
57 "ehea pagesize=%d", (int)PAGE_SIZE
, (int)pagesize
);
61 queue
->queue_length
= nr_of_pages
* pagesize
;
62 queue
->queue_pages
= kmalloc(nr_of_pages
* sizeof(void*), GFP_KERNEL
);
63 if (!queue
->queue_pages
) {
64 ehea_error("no mem for queue_pages");
69 * allocate pages for queue:
70 * outer loop allocates whole kernel pages (page aligned) and
71 * inner loop divides a kernel page into smaller hea queue pages
74 while (i
< nr_of_pages
) {
75 u8
*kpage
= (u8
*)get_zeroed_page(GFP_KERNEL
);
78 for (k
= 0; k
< pages_per_kpage
&& i
< nr_of_pages
; k
++) {
79 (queue
->queue_pages
)[i
] = (struct ehea_page
*)kpage
;
85 queue
->current_q_offset
= 0;
86 queue
->qe_size
= qe_size
;
87 queue
->pagesize
= pagesize
;
88 queue
->toggle_state
= 1;
92 for (i
= 0; i
< nr_of_pages
; i
+= pages_per_kpage
) {
93 if (!(queue
->queue_pages
)[i
])
95 free_page((unsigned long)(queue
->queue_pages
)[i
]);
100 static void hw_queue_dtor(struct hw_queue
*queue
)
102 int pages_per_kpage
= PAGE_SIZE
/ queue
->pagesize
;
105 if (!queue
|| !queue
->queue_pages
)
108 nr_pages
= queue
->queue_length
/ queue
->pagesize
;
110 for (i
= 0; i
< nr_pages
; i
+= pages_per_kpage
)
111 free_page((unsigned long)(queue
->queue_pages
)[i
]);
113 kfree(queue
->queue_pages
);
116 struct ehea_cq
*ehea_create_cq(struct ehea_adapter
*adapter
,
117 int nr_of_cqe
, u64 eq_handle
, u32 cq_token
)
121 u64
*cq_handle_ref
, hret
, rpage
;
122 u32 act_nr_of_entries
, act_pages
, counter
;
126 cq
= kzalloc(sizeof(*cq
), GFP_KERNEL
);
128 ehea_error("no mem for cq");
132 cq
->attr
.max_nr_of_cqes
= nr_of_cqe
;
133 cq
->attr
.cq_token
= cq_token
;
134 cq
->attr
.eq_handle
= eq_handle
;
136 cq
->adapter
= adapter
;
138 cq_handle_ref
= &cq
->fw_handle
;
139 act_nr_of_entries
= 0;
142 hret
= ehea_h_alloc_resource_cq(adapter
->handle
, &cq
->attr
,
143 &cq
->fw_handle
, &cq
->epas
);
144 if (hret
!= H_SUCCESS
) {
145 ehea_error("alloc_resource_cq failed");
149 ret
= hw_queue_ctor(&cq
->hw_queue
, cq
->attr
.nr_pages
,
150 EHEA_PAGESIZE
, sizeof(struct ehea_cqe
));
154 for (counter
= 0; counter
< cq
->attr
.nr_pages
; counter
++) {
155 vpage
= hw_qpageit_get_inc(&cq
->hw_queue
);
157 ehea_error("hw_qpageit_get_inc failed");
161 rpage
= virt_to_abs(vpage
);
162 hret
= ehea_h_register_rpage(adapter
->handle
,
163 0, EHEA_CQ_REGISTER_ORIG
,
164 cq
->fw_handle
, rpage
, 1);
165 if (hret
< H_SUCCESS
) {
166 ehea_error("register_rpage_cq failed ehea_cq=%p "
167 "hret=%lx counter=%i act_pages=%i",
168 cq
, hret
, counter
, cq
->attr
.nr_pages
);
172 if (counter
== (cq
->attr
.nr_pages
- 1)) {
173 vpage
= hw_qpageit_get_inc(&cq
->hw_queue
);
175 if ((hret
!= H_SUCCESS
) || (vpage
)) {
176 ehea_error("registration of pages not "
177 "complete hret=%lx\n", hret
);
181 if ((hret
!= H_PAGE_REGISTERED
) || (!vpage
)) {
182 ehea_error("CQ: registration of page failed "
189 hw_qeit_reset(&cq
->hw_queue
);
190 epa
= cq
->epas
.kernel
;
191 ehea_reset_cq_ep(cq
);
192 ehea_reset_cq_n1(cq
);
197 hw_queue_dtor(&cq
->hw_queue
);
200 ehea_h_free_resource(adapter
->handle
, cq
->fw_handle
);
209 int ehea_destroy_cq(struct ehea_cq
*cq
)
211 u64 adapter_handle
, hret
;
216 adapter_handle
= cq
->adapter
->handle
;
218 /* deregister all previous registered pages */
219 hret
= ehea_h_free_resource(adapter_handle
, cq
->fw_handle
);
220 if (hret
!= H_SUCCESS
) {
221 ehea_error("destroy CQ failed");
225 hw_queue_dtor(&cq
->hw_queue
);
231 struct ehea_eq
*ehea_create_eq(struct ehea_adapter
*adapter
,
232 const enum ehea_eq_type type
,
233 const u32 max_nr_of_eqes
, const u8 eqe_gen
)
240 eq
= kzalloc(sizeof(*eq
), GFP_KERNEL
);
242 ehea_error("no mem for eq");
246 eq
->adapter
= adapter
;
247 eq
->attr
.type
= type
;
248 eq
->attr
.max_nr_of_eqes
= max_nr_of_eqes
;
249 eq
->attr
.eqe_gen
= eqe_gen
;
250 spin_lock_init(&eq
->spinlock
);
252 hret
= ehea_h_alloc_resource_eq(adapter
->handle
,
253 &eq
->attr
, &eq
->fw_handle
);
254 if (hret
!= H_SUCCESS
) {
255 ehea_error("alloc_resource_eq failed");
259 ret
= hw_queue_ctor(&eq
->hw_queue
, eq
->attr
.nr_pages
,
260 EHEA_PAGESIZE
, sizeof(struct ehea_eqe
));
262 ehea_error("can't allocate eq pages");
266 for (i
= 0; i
< eq
->attr
.nr_pages
; i
++) {
267 vpage
= hw_qpageit_get_inc(&eq
->hw_queue
);
269 ehea_error("hw_qpageit_get_inc failed");
274 rpage
= virt_to_abs(vpage
);
276 hret
= ehea_h_register_rpage(adapter
->handle
, 0,
277 EHEA_EQ_REGISTER_ORIG
,
278 eq
->fw_handle
, rpage
, 1);
280 if (i
== (eq
->attr
.nr_pages
- 1)) {
282 vpage
= hw_qpageit_get_inc(&eq
->hw_queue
);
283 if ((hret
!= H_SUCCESS
) || (vpage
)) {
287 if ((hret
!= H_PAGE_REGISTERED
) || (!vpage
)) {
293 hw_qeit_reset(&eq
->hw_queue
);
297 hw_queue_dtor(&eq
->hw_queue
);
300 ehea_h_free_resource(adapter
->handle
, eq
->fw_handle
);
307 struct ehea_eqe
*ehea_poll_eq(struct ehea_eq
*eq
)
309 struct ehea_eqe
*eqe
;
312 spin_lock_irqsave(&eq
->spinlock
, flags
);
313 eqe
= (struct ehea_eqe
*)hw_eqit_eq_get_inc_valid(&eq
->hw_queue
);
314 spin_unlock_irqrestore(&eq
->spinlock
, flags
);
319 int ehea_destroy_eq(struct ehea_eq
*eq
)
327 spin_lock_irqsave(&eq
->spinlock
, flags
);
329 hret
= ehea_h_free_resource(eq
->adapter
->handle
, eq
->fw_handle
);
330 spin_unlock_irqrestore(&eq
->spinlock
, flags
);
332 if (hret
!= H_SUCCESS
) {
333 ehea_error("destroy_eq failed");
337 hw_queue_dtor(&eq
->hw_queue
);
344 * allocates memory for a queue and registers pages in phyp
346 int ehea_qp_alloc_register(struct ehea_qp
*qp
, struct hw_queue
*hw_queue
,
347 int nr_pages
, int wqe_size
, int act_nr_sges
,
348 struct ehea_adapter
*adapter
, int h_call_q_selector
)
354 ret
= hw_queue_ctor(hw_queue
, nr_pages
, EHEA_PAGESIZE
, wqe_size
);
358 for (cnt
= 0; cnt
< nr_pages
; cnt
++) {
359 vpage
= hw_qpageit_get_inc(hw_queue
);
361 ehea_error("hw_qpageit_get_inc failed");
364 rpage
= virt_to_abs(vpage
);
365 hret
= ehea_h_register_rpage(adapter
->handle
,
366 0, h_call_q_selector
,
367 qp
->fw_handle
, rpage
, 1);
368 if (hret
< H_SUCCESS
) {
369 ehea_error("register_rpage_qp failed");
373 hw_qeit_reset(hw_queue
);
377 hw_queue_dtor(hw_queue
);
381 static inline u32
map_wqe_size(u8 wqe_enc_size
)
383 return 128 << wqe_enc_size
;
386 struct ehea_qp
*ehea_create_qp(struct ehea_adapter
*adapter
,
387 u32 pd
, struct ehea_qp_init_attr
*init_attr
)
392 u32 wqe_size_in_bytes_sq
, wqe_size_in_bytes_rq1
;
393 u32 wqe_size_in_bytes_rq2
, wqe_size_in_bytes_rq3
;
396 qp
= kzalloc(sizeof(*qp
), GFP_KERNEL
);
398 ehea_error("no mem for qp");
402 qp
->adapter
= adapter
;
404 hret
= ehea_h_alloc_resource_qp(adapter
->handle
, init_attr
, pd
,
405 &qp
->fw_handle
, &qp
->epas
);
406 if (hret
!= H_SUCCESS
) {
407 ehea_error("ehea_h_alloc_resource_qp failed");
411 wqe_size_in_bytes_sq
= map_wqe_size(init_attr
->act_wqe_size_enc_sq
);
412 wqe_size_in_bytes_rq1
= map_wqe_size(init_attr
->act_wqe_size_enc_rq1
);
413 wqe_size_in_bytes_rq2
= map_wqe_size(init_attr
->act_wqe_size_enc_rq2
);
414 wqe_size_in_bytes_rq3
= map_wqe_size(init_attr
->act_wqe_size_enc_rq3
);
416 ret
= ehea_qp_alloc_register(qp
, &qp
->hw_squeue
, init_attr
->nr_sq_pages
,
417 wqe_size_in_bytes_sq
,
418 init_attr
->act_wqe_size_enc_sq
, adapter
,
421 ehea_error("can't register for sq ret=%x", ret
);
425 ret
= ehea_qp_alloc_register(qp
, &qp
->hw_rqueue1
,
426 init_attr
->nr_rq1_pages
,
427 wqe_size_in_bytes_rq1
,
428 init_attr
->act_wqe_size_enc_rq1
,
431 ehea_error("can't register for rq1 ret=%x", ret
);
435 if (init_attr
->rq_count
> 1) {
436 ret
= ehea_qp_alloc_register(qp
, &qp
->hw_rqueue2
,
437 init_attr
->nr_rq2_pages
,
438 wqe_size_in_bytes_rq2
,
439 init_attr
->act_wqe_size_enc_rq2
,
442 ehea_error("can't register for rq2 ret=%x", ret
);
447 if (init_attr
->rq_count
> 2) {
448 ret
= ehea_qp_alloc_register(qp
, &qp
->hw_rqueue3
,
449 init_attr
->nr_rq3_pages
,
450 wqe_size_in_bytes_rq3
,
451 init_attr
->act_wqe_size_enc_rq3
,
454 ehea_error("can't register for rq3 ret=%x", ret
);
459 qp
->init_attr
= *init_attr
;
464 hw_queue_dtor(&qp
->hw_rqueue2
);
467 hw_queue_dtor(&qp
->hw_rqueue1
);
470 hw_queue_dtor(&qp
->hw_squeue
);
473 ehea_h_disable_and_get_hea(adapter
->handle
, qp
->fw_handle
);
474 ehea_h_free_resource(adapter
->handle
, qp
->fw_handle
);
481 int ehea_destroy_qp(struct ehea_qp
*qp
)
484 struct ehea_qp_init_attr
*qp_attr
= &qp
->init_attr
;
489 hret
= ehea_h_free_resource(qp
->adapter
->handle
, qp
->fw_handle
);
490 if (hret
!= H_SUCCESS
) {
491 ehea_error("destroy_qp failed");
495 hw_queue_dtor(&qp
->hw_squeue
);
496 hw_queue_dtor(&qp
->hw_rqueue1
);
498 if (qp_attr
->rq_count
> 1)
499 hw_queue_dtor(&qp
->hw_rqueue2
);
500 if (qp_attr
->rq_count
> 2)
501 hw_queue_dtor(&qp
->hw_rqueue3
);
507 int ehea_reg_mr_adapter(struct ehea_adapter
*adapter
)
510 u64 hret
, pt_abs
, start
, end
, nr_pages
;
511 u32 acc_ctrl
= EHEA_MR_ACC_CTRL
;
515 end
= (u64
)high_memory
;
516 nr_pages
= (end
- start
) / EHEA_PAGESIZE
;
518 pt
= kzalloc(PAGE_SIZE
, GFP_KERNEL
);
520 ehea_error("no mem");
524 pt_abs
= virt_to_abs(pt
);
526 hret
= ehea_h_alloc_resource_mr(adapter
->handle
, start
, end
- start
,
527 acc_ctrl
, adapter
->pd
,
528 &adapter
->mr
.handle
, &adapter
->mr
.lkey
);
529 if (hret
!= H_SUCCESS
) {
530 ehea_error("alloc_resource_mr failed");
535 adapter
->mr
.vaddr
= KERNELBASE
;
538 while (nr_pages
> 0) {
540 u64 num_pages
= min(nr_pages
, (u64
)512);
541 for (i
= 0; i
< num_pages
; i
++)
542 pt
[i
] = virt_to_abs((void*)(((u64
)start
) +
546 hret
= ehea_h_register_rpage_mr(adapter
->handle
,
547 adapter
->mr
.handle
, 0,
550 nr_pages
-= num_pages
;
552 u64 abs_adr
= virt_to_abs((void*)(((u64
)start
) +
553 (k
* EHEA_PAGESIZE
)));
555 hret
= ehea_h_register_rpage_mr(adapter
->handle
,
556 adapter
->mr
.handle
, 0,
561 if ((hret
!= H_SUCCESS
) && (hret
!= H_PAGE_REGISTERED
)) {
562 ehea_h_free_resource(adapter
->handle
,
564 ehea_error("register_rpage_mr failed: hret = %lX",
571 if (hret
!= H_SUCCESS
) {
572 ehea_h_free_resource(adapter
->handle
, adapter
->mr
.handle
);
573 ehea_error("register_rpage failed for last page: hret = %lX",