Commit | Line | Data |
---|---|---|
a0ce85f5 CL |
1 | /* |
2 | * Copyright (c) 2015 Oracle. All rights reserved. | |
3 | * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved. | |
4 | */ | |
5 | ||
6 | /* No-op chunk preparation. All client memory is pre-registered. | |
7 | * Sometimes referred to as ALLPHYSICAL mode. | |
8 | * | |
9 | * Physical registration is simple because all client memory is | |
10 | * pre-registered and never deregistered. This mode is good for | |
11 | * adapter bring up, but is considered not safe: the server is | |
12 | * trusted not to abuse its access to client memory not involved | |
13 | * in RDMA I/O. | |
14 | */ | |
15 | ||
16 | #include "xprt_rdma.h" | |
17 | ||
18 | #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) | |
19 | # define RPCDBG_FACILITY RPCDBG_TRANS | |
20 | #endif | |
21 | ||
3968cb58 CL |
22 | static int |
23 | physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, | |
24 | struct rpcrdma_create_data_internal *cdata) | |
25 | { | |
d1ed857e CL |
26 | struct ib_mr *mr; |
27 | ||
28 | /* Obtain an rkey to use for RPC data payloads. | |
29 | */ | |
30 | mr = ib_get_dma_mr(ia->ri_pd, | |
31 | IB_ACCESS_LOCAL_WRITE | | |
32 | IB_ACCESS_REMOTE_WRITE | | |
33 | IB_ACCESS_REMOTE_READ); | |
34 | if (IS_ERR(mr)) { | |
35 | pr_err("%s: ib_get_dma_mr for failed with %lX\n", | |
36 | __func__, PTR_ERR(mr)); | |
37 | return -ENOMEM; | |
38 | } | |
bb6c96d7 | 39 | ia->ri_dma_mr = mr; |
302d3deb CL |
40 | |
41 | rpcrdma_set_max_header_sizes(ia, cdata, min_t(unsigned int, | |
42 | RPCRDMA_MAX_DATA_SEGS, | |
43 | RPCRDMA_MAX_HDR_SEGS)); | |
3968cb58 CL |
44 | return 0; |
45 | } | |
46 | ||
1c9351ee CL |
47 | /* PHYSICAL memory registration conveys one page per chunk segment. |
48 | */ | |
49 | static size_t | |
50 | physical_op_maxpages(struct rpcrdma_xprt *r_xprt) | |
51 | { | |
52 | return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS, | |
94931746 | 53 | RPCRDMA_MAX_HDR_SEGS); |
1c9351ee CL |
54 | } |
55 | ||
/* PHYSICAL mode keeps no per-transport registration state, so
 * there is nothing to set up; always reports success.
 */
static int
physical_op_init(struct rpcrdma_xprt *r_xprt)
{
	return 0;
}
61 | ||
9c1b4d77 CL |
62 | /* The client's physical memory is already exposed for |
63 | * remote access via RDMA READ or RDMA WRITE. | |
64 | */ | |
65 | static int | |
66 | physical_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg, | |
67 | int nsegs, bool writing) | |
68 | { | |
69 | struct rpcrdma_ia *ia = &r_xprt->rx_ia; | |
70 | ||
89e0d112 | 71 | rpcrdma_map_one(ia->ri_device, seg, rpcrdma_data_dir(writing)); |
d1ed857e | 72 | seg->mr_rkey = ia->ri_dma_mr->rkey; |
9c1b4d77 | 73 | seg->mr_base = seg->mr_dma; |
9c1b4d77 CL |
74 | return 1; |
75 | } | |
76 | ||
73eee9b2 CL |
77 | /* DMA unmap all memory regions that were mapped for "req". |
78 | */ | |
79 | static void | |
80 | physical_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) | |
81 | { | |
82 | struct ib_device *device = r_xprt->rx_ia.ri_device; | |
83 | unsigned int i; | |
84 | ||
85 | for (i = 0; req->rl_nchunks; --req->rl_nchunks) | |
86 | rpcrdma_unmap_one(device, &req->rl_segments[i++]); | |
87 | } | |
88 | ||
ead3f26e CL |
89 | /* Use a slow, safe mechanism to invalidate all memory regions |
90 | * that were registered for "req". | |
91 | * | |
92 | * For physical memory registration, there is no good way to | |
93 | * fence a single MR that has been advertised to the server. The | |
94 | * client has already handed the server an R_key that cannot be | |
95 | * invalidated and is shared by all MRs on this connection. | |
96 | * Tearing down the PD might be the only safe choice, but it's | |
97 | * not clear that a freshly acquired DMA R_key would be different | |
98 | * than the one used by the PD that was just destroyed. | |
99 | * FIXME. | |
100 | */ | |
101 | static void | |
102 | physical_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req, | |
103 | bool sync) | |
104 | { | |
105 | physical_op_unmap_sync(r_xprt, req); | |
106 | } | |
107 | ||
/* Nothing to release: PHYSICAL mode allocates no per-buffer
 * registration resources.
 */
static void
physical_op_destroy(struct rpcrdma_buffer *buf)
{
}
112 | ||
a0ce85f5 | 113 | const struct rpcrdma_memreg_ops rpcrdma_physical_memreg_ops = { |
9c1b4d77 | 114 | .ro_map = physical_op_map, |
73eee9b2 | 115 | .ro_unmap_sync = physical_op_unmap_sync, |
ead3f26e | 116 | .ro_unmap_safe = physical_op_unmap_safe, |
3968cb58 | 117 | .ro_open = physical_op_open, |
1c9351ee | 118 | .ro_maxpages = physical_op_maxpages, |
91e70e70 | 119 | .ro_init = physical_op_init, |
4561f347 | 120 | .ro_destroy = physical_op_destroy, |
a0ce85f5 CL |
121 | .ro_displayname = "physical", |
122 | }; |