2 * cxgb3i_ddp.c: Chelsio S3xx iSCSI DDP Manager.
4 * Copyright (c) 2008 Chelsio Communications, Inc.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
10 * Written by: Karen Xie (kxie@chelsio.com)
13 #include <linux/skbuff.h>
14 #include <linux/scatterlist.h>
20 #include "cxgb3_ctl_defs.h"
21 #include "cxgb3_offload.h"
22 #include "firmware_exports.h"
24 #include "cxgb3i_ddp.h"
/* logging helpers; ddp_log_debug compiles to nothing unless the driver is
 * built with __DEBUG_CXGB3I_DDP__ defined.
 */
#define ddp_log_error(fmt...) printk(KERN_ERR "cxgb3i_ddp: ERR! " fmt)
#define ddp_log_warn(fmt...)  printk(KERN_WARNING "cxgb3i_ddp: WARN! " fmt)
#define ddp_log_info(fmt...)  printk(KERN_INFO "cxgb3i_ddp: " fmt)

#ifdef __DEBUG_CXGB3I_DDP__
#define ddp_log_debug(fmt, args...) \
	printk(KERN_INFO "cxgb3i_ddp: %s - " fmt, __func__ , ## args)
#else
#define ddp_log_debug(fmt...)
#endif
38 * iSCSI Direct Data Placement
40 * T3 h/w can directly place the iSCSI Data-In or Data-Out PDU's payload into
41 * pre-posted final destination host-memory buffers based on the Initiator
42 * Task Tag (ITT) in Data-In or Target Task Tag (TTT) in Data-Out PDUs.
44 * The host memory address is programmed into h/w in the format of pagepod
46 * The location of the pagepod entry is encoded into ddp tag which is used or
47 * is the base for ITT/TTT.
#define DDP_PGIDX_MAX		4
#define DDP_THRESHOLD		2048

/* the four ddp page sizes the h/w supports: 4K, 8K, 16K, 64K
 * (1UL << ddp_page_shift[i] == PAGE_SIZE << ddp_page_order[i] for 4K pages)
 */
static unsigned char ddp_page_order[DDP_PGIDX_MAX] = {0, 1, 2, 4};
static unsigned char ddp_page_shift[DDP_PGIDX_MAX] = {12, 13, 14, 16};

/* index of the host PAGE_SIZE in the tables above; DDP_PGIDX_MAX means
 * "not resolved yet" (resolved once in cxgb3i_ddp_init()).
 */
static unsigned char page_idx = DDP_PGIDX_MAX;
57 * functions to program the pagepod in h/w
59 static inline void ulp_mem_io_set_hdr(struct sk_buff
*skb
, unsigned int addr
)
61 struct ulp_mem_io
*req
= (struct ulp_mem_io
*)skb
->head
;
64 req
->wr
.wr_hi
= htonl(V_WR_OP(FW_WROPCODE_BYPASS
));
65 req
->cmd_lock_addr
= htonl(V_ULP_MEMIO_ADDR(addr
>> 5) |
66 V_ULPTX_CMD(ULP_MEM_WRITE
));
67 req
->len
= htonl(V_ULP_MEMIO_DATA_LEN(PPOD_SIZE
>> 5) |
68 V_ULPTX_NFLITS((PPOD_SIZE
>> 3) + 1));
71 static int set_ddp_map(struct cxgb3i_ddp_info
*ddp
, struct pagepod_hdr
*hdr
,
72 unsigned int idx
, unsigned int npods
,
73 struct cxgb3i_gather_list
*gl
)
75 unsigned int pm_addr
= (idx
<< PPOD_SIZE_SHIFT
) + ddp
->llimit
;
78 for (i
= 0; i
< npods
; i
++, idx
++, pm_addr
+= PPOD_SIZE
) {
79 struct sk_buff
*skb
= ddp
->gl_skb
[idx
];
83 /* hold on to the skb until we clear the ddp mapping */
86 ulp_mem_io_set_hdr(skb
, pm_addr
);
87 ppod
= (struct pagepod
*)
88 (skb
->head
+ sizeof(struct ulp_mem_io
));
89 memcpy(&(ppod
->hdr
), hdr
, sizeof(struct pagepod
));
90 for (pidx
= 4 * i
, j
= 0; j
< 5; ++j
, ++pidx
)
91 ppod
->addr
[j
] = pidx
< gl
->nelem
?
92 cpu_to_be64(gl
->phys_addr
[pidx
]) : 0UL;
94 skb
->priority
= CPL_PRIORITY_CONTROL
;
95 cxgb3_ofld_send(ddp
->tdev
, skb
);
100 static void clear_ddp_map(struct cxgb3i_ddp_info
*ddp
, unsigned int tag
,
101 unsigned int idx
, unsigned int npods
)
103 unsigned int pm_addr
= (idx
<< PPOD_SIZE_SHIFT
) + ddp
->llimit
;
106 for (i
= 0; i
< npods
; i
++, idx
++, pm_addr
+= PPOD_SIZE
) {
107 struct sk_buff
*skb
= ddp
->gl_skb
[idx
];
110 ddp_log_error("ddp tag 0x%x, 0x%x, %d/%u, skb NULL.\n",
114 ddp
->gl_skb
[idx
] = NULL
;
115 memset((skb
->head
+ sizeof(struct ulp_mem_io
)), 0, PPOD_SIZE
);
116 ulp_mem_io_set_hdr(skb
, pm_addr
);
117 skb
->priority
= CPL_PRIORITY_CONTROL
;
118 cxgb3_ofld_send(ddp
->tdev
, skb
);
122 static inline int ddp_find_unused_entries(struct cxgb3i_ddp_info
*ddp
,
123 int start
, int max
, int count
,
124 struct cxgb3i_gather_list
*gl
)
128 spin_lock(&ddp
->map_lock
);
129 for (i
= start
; i
<= max
;) {
130 for (j
= 0; j
< count
; j
++) {
131 if (ddp
->gl_map
[i
+ j
])
135 for (j
= 0; j
< count
; j
++)
136 ddp
->gl_map
[i
+ j
] = gl
;
137 spin_unlock(&ddp
->map_lock
);
142 spin_unlock(&ddp
->map_lock
);
146 static inline void ddp_unmark_entries(struct cxgb3i_ddp_info
*ddp
,
147 int start
, int count
)
149 spin_lock(&ddp
->map_lock
);
150 memset(&ddp
->gl_map
[start
], 0,
151 count
* sizeof(struct cxgb3i_gather_list
*));
152 spin_unlock(&ddp
->map_lock
);
155 static inline void ddp_free_gl_skb(struct cxgb3i_ddp_info
*ddp
,
160 for (i
= 0; i
< count
; i
++, idx
++)
161 if (ddp
->gl_skb
[idx
]) {
162 kfree_skb(ddp
->gl_skb
[idx
]);
163 ddp
->gl_skb
[idx
] = NULL
;
167 static inline int ddp_alloc_gl_skb(struct cxgb3i_ddp_info
*ddp
, int idx
,
168 int count
, gfp_t gfp
)
172 for (i
= 0; i
< count
; i
++) {
173 struct sk_buff
*skb
= alloc_skb(sizeof(struct ulp_mem_io
) +
176 ddp
->gl_skb
[idx
+ i
] = skb
;
177 skb_put(skb
, sizeof(struct ulp_mem_io
) + PPOD_SIZE
);
179 ddp_free_gl_skb(ddp
, idx
, i
);
187 * cxgb3i_ddp_find_page_index - return ddp page index for a given page size
189 * return the ddp page index, if no match is found return DDP_PGIDX_MAX.
191 int cxgb3i_ddp_find_page_index(unsigned long pgsz
)
195 for (i
= 0; i
< DDP_PGIDX_MAX
; i
++) {
196 if (pgsz
== (1UL << ddp_page_shift
[i
]))
199 ddp_log_debug("ddp page size 0x%lx not supported.\n", pgsz
);
200 return DDP_PGIDX_MAX
;
203 static inline void ddp_gl_unmap(struct pci_dev
*pdev
,
204 struct cxgb3i_gather_list
*gl
)
208 for (i
= 0; i
< gl
->nelem
; i
++)
209 pci_unmap_page(pdev
, gl
->phys_addr
[i
], PAGE_SIZE
,
213 static inline int ddp_gl_map(struct pci_dev
*pdev
,
214 struct cxgb3i_gather_list
*gl
)
218 for (i
= 0; i
< gl
->nelem
; i
++) {
219 gl
->phys_addr
[i
] = pci_map_page(pdev
, gl
->pages
[i
], 0,
222 if (unlikely(pci_dma_mapping_error(pdev
, gl
->phys_addr
[i
])))
230 unsigned int nelem
= gl
->nelem
;
233 ddp_gl_unmap(pdev
, gl
);
240 * cxgb3i_ddp_make_gl - build ddp page buffer list
241 * @xferlen: total buffer length
242 * @sgl: page buffer scatter-gather list
243 * @sgcnt: # of page buffers
244 * @pdev: pci_dev, used for pci map
245 * @gfp: allocation mode
247 * construct a ddp page buffer list from the scsi scattergather list.
248 * coalesce buffers as much as possible, and obtain dma addresses for
251 * Return the cxgb3i_gather_list constructed from the page buffers if the
252 * memory can be used for ddp. Return NULL otherwise.
254 struct cxgb3i_gather_list
*cxgb3i_ddp_make_gl(unsigned int xferlen
,
255 struct scatterlist
*sgl
,
257 struct pci_dev
*pdev
,
260 struct cxgb3i_gather_list
*gl
;
261 struct scatterlist
*sg
= sgl
;
262 struct page
*sgpage
= sg_page(sg
);
263 unsigned int sglen
= sg
->length
;
264 unsigned int sgoffset
= sg
->offset
;
265 unsigned int npages
= (xferlen
+ sgoffset
+ PAGE_SIZE
- 1) >>
269 if (xferlen
< DDP_THRESHOLD
) {
270 ddp_log_debug("xfer %u < threshold %u, no ddp.\n",
271 xferlen
, DDP_THRESHOLD
);
275 gl
= kzalloc(sizeof(struct cxgb3i_gather_list
) +
276 npages
* (sizeof(dma_addr_t
) + sizeof(struct page
*)),
281 gl
->pages
= (struct page
**)&gl
->phys_addr
[npages
];
282 gl
->length
= xferlen
;
283 gl
->offset
= sgoffset
;
284 gl
->pages
[0] = sgpage
;
288 struct page
*page
= sg_page(sg
);
290 if (sgpage
== page
&& sg
->offset
== sgoffset
+ sglen
)
293 /* make sure the sgl is fit for ddp:
294 * each has the same page size, and
295 * all of the middle pages are used completely
297 if ((j
&& sgoffset
) ||
299 ((sglen
+ sgoffset
) & ~PAGE_MASK
)))
303 if (j
== gl
->nelem
|| sg
->offset
)
307 sgoffset
= sg
->offset
;
315 if (ddp_gl_map(pdev
, gl
) < 0)
/**
 * cxgb3i_ddp_release_gl - release a page buffer list
 * @gl: a ddp page buffer list
 * @pdev: pci_dev used for pci_unmap
 *
 * free a ddp page buffer list resulted from cxgb3i_ddp_make_gl().
 */
void cxgb3i_ddp_release_gl(struct cxgb3i_gather_list *gl,
			   struct pci_dev *pdev)
{
	ddp_gl_unmap(pdev, gl);
	kfree(gl);
}
339 * cxgb3i_ddp_tag_reserve - set up ddp for a data transfer
340 * @tdev: t3cdev adapter
341 * @tid: connection id
342 * @tformat: tag format
343 * @tagp: contains s/w tag initially, will be updated with ddp/hw tag
344 * @gl: the page momory list
345 * @gfp: allocation mode
347 * ddp setup for a given page buffer list and construct the ddp tag.
348 * return 0 if success, < 0 otherwise.
350 int cxgb3i_ddp_tag_reserve(struct t3cdev
*tdev
, unsigned int tid
,
351 struct cxgb3i_tag_format
*tformat
, u32
*tagp
,
352 struct cxgb3i_gather_list
*gl
, gfp_t gfp
)
354 struct cxgb3i_ddp_info
*ddp
= tdev
->ulp_iscsi
;
355 struct pagepod_hdr hdr
;
357 int idx
= -1, idx_max
;
362 if (page_idx
>= DDP_PGIDX_MAX
|| !ddp
|| !gl
|| !gl
->nelem
||
363 gl
->length
< DDP_THRESHOLD
) {
364 ddp_log_debug("pgidx %u, xfer %u/%u, NO ddp.\n",
365 page_idx
, gl
->length
, DDP_THRESHOLD
);
369 npods
= (gl
->nelem
+ PPOD_PAGES_MAX
- 1) >> PPOD_PAGES_SHIFT
;
370 idx_max
= ddp
->nppods
- npods
+ 1;
372 if (ddp
->idx_last
== ddp
->nppods
)
373 idx
= ddp_find_unused_entries(ddp
, 0, idx_max
, npods
, gl
);
375 idx
= ddp_find_unused_entries(ddp
, ddp
->idx_last
+ 1,
377 if (idx
< 0 && ddp
->idx_last
>= npods
)
378 idx
= ddp_find_unused_entries(ddp
, 0,
379 ddp
->idx_last
- npods
+ 1,
383 ddp_log_debug("xferlen %u, gl %u, npods %u NO DDP.\n",
384 gl
->length
, gl
->nelem
, npods
);
388 err
= ddp_alloc_gl_skb(ddp
, idx
, npods
, gfp
);
392 tag
= cxgb3i_ddp_tag_base(tformat
, sw_tag
);
393 tag
|= idx
<< PPOD_IDX_SHIFT
;
396 hdr
.vld_tid
= htonl(F_PPOD_VALID
| V_PPOD_TID(tid
));
397 hdr
.pgsz_tag_clr
= htonl(tag
& ddp
->rsvd_tag_mask
);
398 hdr
.maxoffset
= htonl(gl
->length
);
399 hdr
.pgoffset
= htonl(gl
->offset
);
401 err
= set_ddp_map(ddp
, &hdr
, idx
, npods
, gl
);
406 ddp_log_debug("xfer %u, gl %u,%u, tid 0x%x, 0x%x -> 0x%x(%u,%u).\n",
407 gl
->length
, gl
->nelem
, gl
->offset
, tid
, sw_tag
, tag
,
413 ddp_free_gl_skb(ddp
, idx
, npods
);
415 ddp_unmark_entries(ddp
, idx
, npods
);
420 * cxgb3i_ddp_tag_release - release a ddp tag
421 * @tdev: t3cdev adapter
423 * ddp cleanup for a given ddp tag and release all the resources held
425 void cxgb3i_ddp_tag_release(struct t3cdev
*tdev
, u32 tag
)
427 struct cxgb3i_ddp_info
*ddp
= tdev
->ulp_iscsi
;
431 ddp_log_error("release ddp tag 0x%x, ddp NULL.\n", tag
);
435 idx
= (tag
>> PPOD_IDX_SHIFT
) & ddp
->idx_mask
;
436 if (idx
< ddp
->nppods
) {
437 struct cxgb3i_gather_list
*gl
= ddp
->gl_map
[idx
];
440 if (!gl
|| !gl
->nelem
) {
441 ddp_log_error("release 0x%x, idx 0x%x, gl 0x%p, %u.\n",
442 tag
, idx
, gl
, gl
? gl
->nelem
: 0);
445 npods
= (gl
->nelem
+ PPOD_PAGES_MAX
- 1) >> PPOD_PAGES_SHIFT
;
446 ddp_log_debug("ddp tag 0x%x, release idx 0x%x, npods %u.\n",
448 clear_ddp_map(ddp
, tag
, idx
, npods
);
449 ddp_unmark_entries(ddp
, idx
, npods
);
450 cxgb3i_ddp_release_gl(gl
, ddp
->pdev
);
452 ddp_log_error("ddp tag 0x%x, idx 0x%x > max 0x%x.\n",
453 tag
, idx
, ddp
->nppods
);
456 static int setup_conn_pgidx(struct t3cdev
*tdev
, unsigned int tid
, int pg_idx
,
459 struct sk_buff
*skb
= alloc_skb(sizeof(struct cpl_set_tcb_field
),
461 struct cpl_set_tcb_field
*req
;
462 u64 val
= pg_idx
< DDP_PGIDX_MAX
? pg_idx
: 0;
467 /* set up ulp submode and page size */
468 req
= (struct cpl_set_tcb_field
*)skb_put(skb
, sizeof(*req
));
469 req
->wr
.wr_hi
= htonl(V_WR_OP(FW_WROPCODE_FORWARD
));
470 OPCODE_TID(req
) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD
, tid
));
471 req
->reply
= V_NO_REPLY(reply
? 0 : 1);
473 req
->word
= htons(31);
474 req
->mask
= cpu_to_be64(0xF0000000);
475 req
->val
= cpu_to_be64(val
<< 28);
476 skb
->priority
= CPL_PRIORITY_CONTROL
;
478 cxgb3_ofld_send(tdev
, skb
);
483 * cxgb3i_setup_conn_host_pagesize - setup the conn.'s ddp page size
484 * @tdev: t3cdev adapter
485 * @tid: connection id
486 * @reply: request reply from h/w
487 * set up the ddp page size based on the host PAGE_SIZE for a connection
490 int cxgb3i_setup_conn_host_pagesize(struct t3cdev
*tdev
, unsigned int tid
,
493 return setup_conn_pgidx(tdev
, tid
, page_idx
, reply
);
/**
 * cxgb3i_setup_conn_pagesize - setup the conn.'s ddp page size
 * @tdev: t3cdev adapter
 * @tid: connection id
 * @reply: request reply from h/w
 * @pgsz: ddp page size
 *
 * set up the ddp page size for a connection identified by tid
 */
int cxgb3i_setup_conn_pagesize(struct t3cdev *tdev, unsigned int tid,
			       int reply, unsigned long pgsz)
{
	int pgidx = cxgb3i_ddp_find_page_index(pgsz);

	return setup_conn_pgidx(tdev, tid, pgidx, reply);
}
513 * cxgb3i_setup_conn_digest - setup conn. digest setting
514 * @tdev: t3cdev adapter
515 * @tid: connection id
516 * @hcrc: header digest enabled
517 * @dcrc: data digest enabled
518 * @reply: request reply from h/w
519 * set up the iscsi digest settings for a connection identified by tid
521 int cxgb3i_setup_conn_digest(struct t3cdev
*tdev
, unsigned int tid
,
522 int hcrc
, int dcrc
, int reply
)
524 struct sk_buff
*skb
= alloc_skb(sizeof(struct cpl_set_tcb_field
),
526 struct cpl_set_tcb_field
*req
;
527 u64 val
= (hcrc
? 1 : 0) | (dcrc
? 2 : 0);
532 /* set up ulp submode and page size */
533 req
= (struct cpl_set_tcb_field
*)skb_put(skb
, sizeof(*req
));
534 req
->wr
.wr_hi
= htonl(V_WR_OP(FW_WROPCODE_FORWARD
));
535 OPCODE_TID(req
) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD
, tid
));
536 req
->reply
= V_NO_REPLY(reply
? 0 : 1);
538 req
->word
= htons(31);
539 req
->mask
= cpu_to_be64(0x0F000000);
540 req
->val
= cpu_to_be64(val
<< 24);
541 skb
->priority
= CPL_PRIORITY_CONTROL
;
543 cxgb3_ofld_send(tdev
, skb
);
549 * cxgb3i_adapter_ddp_info - read the adapter's ddp information
550 * @tdev: t3cdev adapter
551 * @tformat: tag format
552 * @txsz: max tx pdu payload size, filled in by this func.
553 * @rxsz: max rx pdu payload size, filled in by this func.
554 * setup the tag format for a given iscsi entity
556 int cxgb3i_adapter_ddp_info(struct t3cdev
*tdev
,
557 struct cxgb3i_tag_format
*tformat
,
558 unsigned int *txsz
, unsigned int *rxsz
)
560 struct cxgb3i_ddp_info
*ddp
;
561 unsigned char idx_bits
;
566 if (!tdev
->ulp_iscsi
)
569 ddp
= (struct cxgb3i_ddp_info
*)tdev
->ulp_iscsi
;
571 idx_bits
= 32 - tformat
->sw_bits
;
572 tformat
->rsvd_bits
= ddp
->idx_bits
;
573 tformat
->rsvd_shift
= PPOD_IDX_SHIFT
;
574 tformat
->rsvd_mask
= (1 << tformat
->rsvd_bits
) - 1;
576 ddp_log_info("tag format: sw %u, rsvd %u,%u, mask 0x%x.\n",
577 tformat
->sw_bits
, tformat
->rsvd_bits
,
578 tformat
->rsvd_shift
, tformat
->rsvd_mask
);
580 *txsz
= min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD
,
581 ddp
->max_txsz
- ISCSI_PDU_NONPAYLOAD_LEN
);
582 *rxsz
= min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD
,
583 ddp
->max_rxsz
- ISCSI_PDU_NONPAYLOAD_LEN
);
584 ddp_log_info("max payload size: %u/%u, %u/%u.\n",
585 *txsz
, ddp
->max_txsz
, *rxsz
, ddp
->max_rxsz
);
590 * cxgb3i_ddp_cleanup - release the cxgb3 adapter's ddp resource
591 * @tdev: t3cdev adapter
592 * release all the resource held by the ddp pagepod manager for a given
595 void cxgb3i_ddp_cleanup(struct t3cdev
*tdev
)
598 struct cxgb3i_ddp_info
*ddp
= (struct cxgb3i_ddp_info
*)tdev
->ulp_iscsi
;
600 ddp_log_info("t3dev 0x%p, release ddp 0x%p.\n", tdev
, ddp
);
603 tdev
->ulp_iscsi
= NULL
;
604 while (i
< ddp
->nppods
) {
605 struct cxgb3i_gather_list
*gl
= ddp
->gl_map
[i
];
607 int npods
= (gl
->nelem
+ PPOD_PAGES_MAX
- 1)
609 ddp_log_info("t3dev 0x%p, ddp %d + %d.\n",
612 ddp_free_gl_skb(ddp
, i
, npods
);
617 cxgb3i_free_big_mem(ddp
);
622 * ddp_init - initialize the cxgb3 adapter's ddp resource
623 * @tdev: t3cdev adapter
624 * initialize the ddp pagepod manager for a given adapter
626 static void ddp_init(struct t3cdev
*tdev
)
628 struct cxgb3i_ddp_info
*ddp
;
629 struct ulp_iscsi_info uinfo
;
630 unsigned int ppmax
, bits
;
633 if (tdev
->ulp_iscsi
) {
634 ddp_log_warn("t3dev 0x%p, ddp 0x%p already set up.\n",
635 tdev
, tdev
->ulp_iscsi
);
639 err
= tdev
->ctl(tdev
, ULP_ISCSI_GET_PARAMS
, &uinfo
);
641 ddp_log_error("%s, failed to get iscsi param err=%d.\n",
646 ppmax
= (uinfo
.ulimit
- uinfo
.llimit
+ 1) >> PPOD_SIZE_SHIFT
;
647 bits
= __ilog2_u32(ppmax
) + 1;
648 if (bits
> PPOD_IDX_MAX_SIZE
)
649 bits
= PPOD_IDX_MAX_SIZE
;
650 ppmax
= (1 << (bits
- 1)) - 1;
652 ddp
= cxgb3i_alloc_big_mem(sizeof(struct cxgb3i_ddp_info
) +
654 (sizeof(struct cxgb3i_gather_list
*) +
655 sizeof(struct sk_buff
*)),
658 ddp_log_warn("%s unable to alloc ddp 0x%d, ddp disabled.\n",
662 ddp
->gl_map
= (struct cxgb3i_gather_list
**)(ddp
+ 1);
663 ddp
->gl_skb
= (struct sk_buff
**)(((char *)ddp
->gl_map
) +
665 sizeof(struct cxgb3i_gather_list
*));
666 spin_lock_init(&ddp
->map_lock
);
669 ddp
->pdev
= uinfo
.pdev
;
670 ddp
->max_txsz
= min_t(unsigned int, uinfo
.max_txsz
, ULP2_MAX_PKT_SIZE
);
671 ddp
->max_rxsz
= min_t(unsigned int, uinfo
.max_rxsz
, ULP2_MAX_PKT_SIZE
);
672 ddp
->llimit
= uinfo
.llimit
;
673 ddp
->ulimit
= uinfo
.ulimit
;
675 ddp
->idx_last
= ppmax
;
676 ddp
->idx_bits
= bits
;
677 ddp
->idx_mask
= (1 << bits
) - 1;
678 ddp
->rsvd_tag_mask
= (1 << (bits
+ PPOD_IDX_SHIFT
)) - 1;
680 uinfo
.tagmask
= ddp
->idx_mask
<< PPOD_IDX_SHIFT
;
681 for (i
= 0; i
< DDP_PGIDX_MAX
; i
++)
682 uinfo
.pgsz_factor
[i
] = ddp_page_order
[i
];
683 uinfo
.ulimit
= uinfo
.llimit
+ (ppmax
<< PPOD_SIZE_SHIFT
);
685 err
= tdev
->ctl(tdev
, ULP_ISCSI_SET_PARAMS
, &uinfo
);
687 ddp_log_warn("%s unable to set iscsi param err=%d, "
688 "ddp disabled.\n", tdev
->name
, err
);
692 tdev
->ulp_iscsi
= ddp
;
694 ddp_log_info("tdev 0x%p, nppods %u, bits %u, mask 0x%x,0x%x pkt %u/%u,"
696 tdev
, ppmax
, ddp
->idx_bits
, ddp
->idx_mask
,
697 ddp
->rsvd_tag_mask
, ddp
->max_txsz
, uinfo
.max_txsz
,
698 ddp
->max_rxsz
, uinfo
.max_rxsz
);
702 cxgb3i_free_big_mem(ddp
);
706 * cxgb3i_ddp_init - initialize ddp functions
708 void cxgb3i_ddp_init(struct t3cdev
*tdev
)
710 if (page_idx
== DDP_PGIDX_MAX
) {
711 page_idx
= cxgb3i_ddp_find_page_index(PAGE_SIZE
);
712 ddp_log_info("system PAGE_SIZE %lu, ddp idx %u.\n",
713 PAGE_SIZE
, page_idx
);
This page took 0.062007 seconds and 6 git commands to generate.