svcrdma: Add a separate "max data segs" macro for svcrdma
[deliverable/linux.git] include/linux/sunrpc/svc_rdma.h
/*
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following
 * disclaimer in the documentation and/or other materials provided
 * with the distribution.
 *
 * Neither the name of the Network Appliance, Inc. nor the names of
 * its contributors may be used to endorse or promote products
 * derived from this software without specific prior written
 * permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#ifndef SVC_RDMA_H
#define SVC_RDMA_H
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#define SVCRDMA_DEBUG

/* RPC/RDMA parameters and stats */
extern unsigned int svcrdma_ord;
extern unsigned int svcrdma_max_requests;
extern unsigned int svcrdma_max_req_size;

extern atomic_t rdma_stat_recv;
extern atomic_t rdma_stat_read;
extern atomic_t rdma_stat_write;
extern atomic_t rdma_stat_sq_starve;
extern atomic_t rdma_stat_rq_starve;
extern atomic_t rdma_stat_rq_poll;
extern atomic_t rdma_stat_rq_prod;
extern atomic_t rdma_stat_sq_poll;
extern atomic_t rdma_stat_sq_prod;

/*
 * Contexts are built when an RDMA request is created and are a
 * record of the resources that can be recovered when the request
 * completes.
 */
struct svc_rdma_op_ctxt {
	struct svc_rdma_op_ctxt *read_hdr;
	struct svc_rdma_fastreg_mr *frmr;
	int hdr_count;
	struct xdr_buf arg;
	struct list_head dto_q;
	enum ib_wr_opcode wr_op;
	enum ib_wc_status wc_status;
	u32 byte_len;
	u32 position;
	struct svcxprt_rdma *xprt;
	unsigned long flags;
	enum dma_data_direction direction;
	int count;
	struct ib_sge sge[RPCSVC_MAXPAGES];
	struct page *pages[RPCSVC_MAXPAGES];
};
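
/*
 * Illustrative sketch (not part of this header): a plausible op-context
 * lifecycle built only from the declarations in this file.  The error
 * path and the meaning of the second svc_rdma_put_context() argument
 * (assumed here to release the pages attached to the context) are
 * assumptions, not a description of the transport code.
 *
 *	struct svc_rdma_op_ctxt *ctxt;
 *
 *	ctxt = svc_rdma_get_context(rdma_xprt);
 *	ctxt->direction = DMA_FROM_DEVICE;
 *	ctxt->count = 1;
 *	// ... fill ctxt->sge[]/ctxt->pages[] and post the work request ...
 *	if (post_failed)
 *		svc_rdma_put_context(ctxt, 1);	// drop context and its pages
 */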

/*
 * NFS requests are mapped on the client side by the chunk lists in
 * the RPCRDMA header. During the fetching of the RPC from the client
 * and the writing of the reply to the client, the memory in the
 * client and the memory in the server must be mapped as contiguous
 * vaddr/len for access by the hardware. These data structures keep
 * these mappings.
 *
 * For an RDMA_WRITE, the 'sge' maps the RPC REPLY. For RDMA_READ, the
 * 'sge' in the svc_rdma_req_map maps the server side RPC reply and the
 * 'ch' field maps the read-list of the RPCRDMA header to the 'sge'
 * mapping of the reply.
 */
struct svc_rdma_chunk_sge {
	int start;	/* sge no for this chunk */
	int count;	/* sge count for this chunk */
};
struct svc_rdma_fastreg_mr {
	struct ib_mr *mr;
	void *kva;
	struct ib_fast_reg_page_list *page_list;
	int page_list_len;
	unsigned long access_flags;
	unsigned long map_len;
	enum dma_data_direction direction;
	struct list_head frmr_list;
};
struct svc_rdma_req_map {
	unsigned long count;
	union {
		struct kvec sge[RPCSVC_MAXPAGES];
		struct svc_rdma_chunk_sge ch[RPCSVC_MAXPAGES];
		unsigned long lkey[RPCSVC_MAXPAGES];
	};
};
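
/*
 * Illustrative sketch (not part of this header): how a req_map might
 * bracket the construction of a chunk list, using only
 * svc_rdma_get_req_map()/svc_rdma_put_req_map() declared below.  The
 * middle step is a placeholder, not the actual sendto/recvfrom logic.
 *
 *	struct svc_rdma_req_map *vec;
 *
 *	vec = svc_rdma_get_req_map();
 *	// ... fill vec->sge[] (or vec->ch[]) and set vec->count ...
 *	svc_rdma_put_req_map(vec);
 */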

#define RDMACTXT_F_LAST_CTXT	2

#define SVCRDMA_DEVCAP_FAST_REG		1	/* fast mr registration */
#define SVCRDMA_DEVCAP_READ_W_INV	2	/* read w/ invalidate */

struct svcxprt_rdma {
	struct svc_xprt sc_xprt;		/* SVC transport structure */
	struct rdma_cm_id *sc_cm_id;		/* RDMA connection id */
	struct list_head sc_accept_q;		/* Conn. waiting accept */
	int sc_ord;				/* RDMA read limit */
	int sc_max_sge;

	int sc_sq_depth;			/* Depth of SQ */
	atomic_t sc_sq_count;			/* Number of SQ WR on queue */

	int sc_max_requests;			/* Depth of RQ */
	int sc_max_req_size;			/* Size of each RQ WR buf */

	struct ib_pd *sc_pd;

	atomic_t sc_dma_used;
	atomic_t sc_ctxt_used;
	struct list_head sc_rq_dto_q;
	spinlock_t sc_rq_dto_lock;
	struct ib_qp *sc_qp;
	struct ib_cq *sc_rq_cq;
	struct ib_cq *sc_sq_cq;
	struct ib_mr *sc_phys_mr;		/* MR for server memory */
	int (*sc_reader)(struct svcxprt_rdma *,
			 struct svc_rqst *,
			 struct svc_rdma_op_ctxt *,
			 int *, u32 *, u32, u32, u64, bool);
	u32 sc_dev_caps;			/* distilled device caps */
	u32 sc_dma_lkey;			/* local dma key */
	unsigned int sc_frmr_pg_list_len;
	struct list_head sc_frmr_q;
	spinlock_t sc_frmr_q_lock;

	spinlock_t sc_lock;			/* transport lock */

	wait_queue_head_t sc_send_wait;		/* SQ exhaustion waitlist */
	unsigned long sc_flags;
	struct list_head sc_dto_q;		/* DTO tasklet I/O pending Q */
	struct list_head sc_read_complete_q;
	struct work_struct sc_work;
};
/* sc_flags */
#define RDMAXPRT_RQ_PENDING	1
#define RDMAXPRT_SQ_PENDING	2
#define RDMAXPRT_CONN_PENDING	3

#define RPCRDMA_MAX_SVC_SEGS	(64)	/* server max scatter/gather */
#if RPCSVC_MAXPAYLOAD < (RPCRDMA_MAX_SVC_SEGS << PAGE_SHIFT)
#define RPCRDMA_MAXPAYLOAD	RPCSVC_MAXPAYLOAD
#else
#define RPCRDMA_MAXPAYLOAD	(RPCRDMA_MAX_SVC_SEGS << PAGE_SHIFT)
#endif
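/*
 * Worked example (assuming a 4KB PAGE_SIZE, i.e. PAGE_SHIFT == 12):
 * 64 << 12 = 256KB, so RPCRDMA_MAXPAYLOAD is the smaller of
 * RPCSVC_MAXPAYLOAD and 256KB.
 */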

#define RPCRDMA_LISTEN_BACKLOG	10
/* The default ORD value is based on two outstanding full-size writes with a
 * page size of 4k, or 32k * 2 ops / 4k = 16 outstanding RDMA_READ. */
#define RPCRDMA_ORD		(64/4)
#define RPCRDMA_SQ_DEPTH_MULT	8
#define RPCRDMA_MAX_REQUESTS	32
#define RPCRDMA_MAX_REQ_SIZE	4096

/* svc_rdma_marshal.c */
extern int svc_rdma_xdr_decode_req(struct rpcrdma_msg **, struct svc_rqst *);
extern int svc_rdma_xdr_encode_error(struct svcxprt_rdma *,
				     struct rpcrdma_msg *,
				     enum rpcrdma_errcode, __be32 *);
extern void svc_rdma_xdr_encode_write_list(struct rpcrdma_msg *, int);
extern void svc_rdma_xdr_encode_reply_array(struct rpcrdma_write_array *, int);
extern void svc_rdma_xdr_encode_array_chunk(struct rpcrdma_write_array *, int,
					    __be32, __be64, u32);
extern void svc_rdma_xdr_encode_reply_header(struct svcxprt_rdma *,
					     struct rpcrdma_msg *,
					     struct rpcrdma_msg *,
					     enum rpcrdma_proc);
extern int svc_rdma_xdr_get_reply_hdr_len(struct rpcrdma_msg *);

/* svc_rdma_recvfrom.c */
extern int svc_rdma_recvfrom(struct svc_rqst *);
extern int rdma_read_chunk_lcl(struct svcxprt_rdma *, struct svc_rqst *,
			       struct svc_rdma_op_ctxt *, int *, u32 *,
			       u32, u32, u64, bool);
extern int rdma_read_chunk_frmr(struct svcxprt_rdma *, struct svc_rqst *,
				struct svc_rdma_op_ctxt *, int *, u32 *,
				u32, u32, u64, bool);

/* svc_rdma_sendto.c */
extern int svc_rdma_sendto(struct svc_rqst *);

/* svc_rdma_transport.c */
extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *);
extern void svc_rdma_send_error(struct svcxprt_rdma *, struct rpcrdma_msg *,
				enum rpcrdma_errcode);
extern int svc_rdma_post_recv(struct svcxprt_rdma *);
extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *);
extern struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *);
extern void svc_rdma_put_context(struct svc_rdma_op_ctxt *, int);
extern void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt);
extern struct svc_rdma_req_map *svc_rdma_get_req_map(void);
extern void svc_rdma_put_req_map(struct svc_rdma_req_map *);
extern int svc_rdma_fastreg(struct svcxprt_rdma *, struct svc_rdma_fastreg_mr *);
extern struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *);
extern void svc_rdma_put_frmr(struct svcxprt_rdma *,
			      struct svc_rdma_fastreg_mr *);
extern void svc_sq_reap(struct svcxprt_rdma *);
extern void svc_rq_reap(struct svcxprt_rdma *);
extern struct svc_xprt_class svc_rdma_class;
extern void svc_rdma_prep_reply_hdr(struct svc_rqst *);

/* svc_rdma.c */
extern int svc_rdma_init(void);
extern void svc_rdma_cleanup(void);

/*
 * Returns the address of the first read chunk or NULL if no read chunk is
 * present
 */
static inline struct rpcrdma_read_chunk *
svc_rdma_get_read_chunk(struct rpcrdma_msg *rmsgp)
{
	struct rpcrdma_read_chunk *ch =
		(struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];

	if (ch->rc_discrim == 0)
		return NULL;

	return ch;
}
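
/*
 * Illustrative sketch (not part of this header): the read list is a
 * discriminated array, so a caller can walk it until rc_discrim is
 * clear, just as svc_rdma_get_reply_array() below does.  The
 * handle_read_chunk() helper is hypothetical.
 *
 *	struct rpcrdma_read_chunk *ch = svc_rdma_get_read_chunk(rmsgp);
 *
 *	for (; ch && ch->rc_discrim; ch++)
 *		handle_read_chunk(ch);	// one entry of the read list
 */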

/*
 * Returns the address of the first write array element or NULL if no
 * write array list is present
 */
static inline struct rpcrdma_write_array *
svc_rdma_get_write_array(struct rpcrdma_msg *rmsgp)
{
	if (rmsgp->rm_body.rm_chunks[0] != 0
	    || rmsgp->rm_body.rm_chunks[1] == 0)
		return NULL;

	return (struct rpcrdma_write_array *)&rmsgp->rm_body.rm_chunks[1];
}

/*
 * Returns the address of the first reply array element or NULL if no
 * reply array is present
 */
static inline struct rpcrdma_write_array *
svc_rdma_get_reply_array(struct rpcrdma_msg *rmsgp)
{
	struct rpcrdma_read_chunk *rch;
	struct rpcrdma_write_array *wr_ary;
	struct rpcrdma_write_array *rp_ary;

	/* XXX: Need to fix when reply list may occur with read-list and/or
	 * write list */
	if (rmsgp->rm_body.rm_chunks[0] != 0 ||
	    rmsgp->rm_body.rm_chunks[1] != 0)
		return NULL;

	rch = svc_rdma_get_read_chunk(rmsgp);
	if (rch) {
		while (rch->rc_discrim)
			rch++;

		/* The reply list follows an empty write array located
		 * at 'rc_position' here. The reply array is at rc_target.
		 */
		rp_ary = (struct rpcrdma_write_array *)&rch->rc_target;

		goto found_it;
	}

	wr_ary = svc_rdma_get_write_array(rmsgp);
	if (wr_ary) {
		rp_ary = (struct rpcrdma_write_array *)
			&wr_ary->wc_array[ntohl(wr_ary->wc_nchunks)].wc_target.rs_length;

		goto found_it;
	}

	/* No read list, no write list */
	rp_ary = (struct rpcrdma_write_array *)
		&rmsgp->rm_body.rm_chunks[2];

 found_it:
	if (rp_ary->wc_discrim == 0)
		return NULL;

	return rp_ary;
}
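
/*
 * Illustrative sketch (not part of this header): a reply path could
 * consult the helpers above to decide how a reply must be returned.
 * The send_* helpers named here are hypothetical, not the actual
 * sendto logic.
 *
 *	if (svc_rdma_get_reply_array(rmsgp))
 *		return send_reply_chunks(rmsgp);	// RDMA_WRITE whole reply
 *	else if (svc_rdma_get_write_array(rmsgp))
 *		return send_write_chunks(rmsgp);	// bulk data via RDMA_WRITE
 *	return send_reply_inline(rmsgp);		// no chunks: reply inline
 */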
#endif