#ifndef _RDS_IB_H
#define _RDS_IB_H

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "rds.h"
#include "rdma_transport.h"

#define RDS_FMR_SIZE 256
#define RDS_FMR_POOL_SIZE 4096

#define RDS_IB_MAX_SGE 8
#define RDS_IB_RECV_SGE 2

#define RDS_IB_DEFAULT_RECV_WR 1024
#define RDS_IB_DEFAULT_SEND_WR 256

#define RDS_IB_DEFAULT_RETRY_COUNT 2

#define RDS_IB_SUPPORTED_PROTOCOLS 0x00000003 /* minor versions supported */

extern struct list_head rds_ib_devices;

/*
 * IB posts RDS_FRAG_SIZE-sized fragments of pages to the receive queues
 * to try to minimize the amount of memory tied up in both the device and
 * socket receive queues.
 */
/* page offset of the final full frag that fits in the page */
#define RDS_PAGE_LAST_OFF (((PAGE_SIZE / RDS_FRAG_SIZE) - 1) * RDS_FRAG_SIZE)
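/*
 * Illustrative arithmetic only (RDS_FRAG_SIZE comes from rds.h; the value
 * below is assumed for the example): with PAGE_SIZE = 4096 and
 * RDS_FRAG_SIZE = 2048, a page holds two full frags and
 * RDS_PAGE_LAST_OFF = ((4096 / 2048) - 1) * 2048 = 2048, i.e. the last
 * full frag starts at offset 2048 within the page.
 */
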
struct rds_page_frag {
	struct list_head f_item;
	struct page *f_page;
	unsigned long f_offset;
	dma_addr_t f_mapped;
};

struct rds_ib_incoming {
	struct list_head ii_frags;
	struct rds_incoming ii_inc;
};

struct rds_ib_connect_private {
	/* Add new fields at the end, and don't permute existing fields. */
	__be32 dp_saddr;
	__be32 dp_daddr;
	u8 dp_protocol_major;
	u8 dp_protocol_minor;
	__be16 dp_protocol_minor_mask; /* bitmask */
	__be32 dp_reserved1;
	__be64 dp_ack_seq;
	__be32 dp_credit; /* non-zero enables flow ctl */
};

struct rds_ib_send_work {
	void *s_op;
	struct ib_send_wr s_wr;
	struct ib_sge s_sge[RDS_IB_MAX_SGE];
	unsigned long s_queued;
};

struct rds_ib_recv_work {
	struct rds_ib_incoming *r_ibinc;
	struct rds_page_frag *r_frag;
	struct ib_recv_wr r_wr;
	struct ib_sge r_sge[2];
};

struct rds_ib_work_ring {
	u32 w_nr;
	u32 w_alloc_ptr;
	u32 w_alloc_ctr;
	u32 w_free_ptr;
	atomic_t w_free_ctr;
};

struct rds_ib_device;

struct rds_ib_connection {

	struct list_head ib_node;
	struct rds_ib_device *rds_ibdev;
	struct rds_connection *conn;

	/* alphabet soup, IBTA style */
	struct rdma_cm_id *i_cm_id;
	struct ib_pd *i_pd;
	struct ib_mr *i_mr;
	struct ib_cq *i_send_cq;
	struct ib_cq *i_recv_cq;

	/* tx */
	struct rds_ib_work_ring i_send_ring;
	struct rm_data_op *i_data_op;
	struct rds_header *i_send_hdrs;
	u64 i_send_hdrs_dma;
	struct rds_ib_send_work *i_sends;

	/* rx */
	struct tasklet_struct i_recv_tasklet;
	struct mutex i_recv_mutex;
	struct rds_ib_work_ring i_recv_ring;
	struct rds_ib_incoming *i_ibinc;
	u32 i_recv_data_rem;
	struct rds_header *i_recv_hdrs;
	u64 i_recv_hdrs_dma;
	struct rds_ib_recv_work *i_recvs;
	struct rds_page_frag i_frag;
	u64 i_ack_recv; /* last ACK received */

	/* sending acks */
	unsigned long i_ack_flags;
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_t i_ack_next; /* next ACK to send */
#else
	spinlock_t i_ack_lock; /* protect i_ack_next */
	u64 i_ack_next; /* next ACK to send */
#endif
	struct rds_header *i_ack;
	struct ib_send_wr i_ack_wr;
	struct ib_sge i_ack_sge;
	u64 i_ack_dma;
	unsigned long i_ack_queued;

	/* Flow control related information
	 *
	 * Our algorithm uses a pair of variables that we need to access
	 * atomically - one for the send credits, and one for the posted
	 * recv credits we need to transfer to the remote.
	 * Rather than protect them using a slow spinlock, we put both into
	 * a single atomic_t and update it using cmpxchg.
	 */
	atomic_t i_credits;

	/* Protocol version specific information */
	unsigned int i_flowctl:1; /* enable/disable flow ctl */

	/* Batched completions */
	unsigned int i_unsignaled_wrs;
};

/* This assumes that atomic_t is at least 32 bits */
#define IB_GET_SEND_CREDITS(v) ((v) & 0xffff)
#define IB_GET_POST_CREDITS(v) ((v) >> 16)
#define IB_SET_SEND_CREDITS(v) ((v) & 0xffff)
#define IB_SET_POST_CREDITS(v) ((v) << 16)
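
/*
 * Worked example (illustrative only, not part of RDS): 16 send credits and
 * 4 posted recv credits pack into a single value as
 * IB_SET_SEND_CREDITS(16) | IB_SET_POST_CREDITS(4) == 0x00040010, and the
 * IB_GET_*() macros recover 16 and 4 from it.  The helper below is a sketch
 * of the lock-free cmpxchg update described in struct rds_ib_connection;
 * its name is made up for the example.
 */
static inline int rds_ib_example_take_send_credit(atomic_t *credits)
{
	int oldval, newval;

	do {
		oldval = atomic_read(credits);
		if (IB_GET_SEND_CREDITS(oldval) == 0)
			return 0;	/* no send credit available */
		newval = oldval - 1;	/* send credits are the low 16 bits */
	} while (atomic_cmpxchg(credits, oldval, newval) != oldval);

	return 1;
}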

struct rds_ib_ipaddr {
	struct list_head list;
	__be32 ipaddr;
};

struct rds_ib_device {
	struct list_head list;
	struct list_head ipaddr_list;
	struct list_head conn_list;
	struct ib_device *dev;
	struct ib_pd *pd;
	struct ib_mr *mr;
	struct rds_ib_mr_pool *mr_pool;
	unsigned int fmr_max_remaps;
	unsigned int max_fmrs;
	int max_sge;
	unsigned int max_wrs;
	unsigned int max_initiator_depth;
	unsigned int max_responder_resources;
	spinlock_t spinlock; /* protect the above */
};

#define pcidev_to_node(pcidev) pcibus_to_node(pcidev->bus)
#define ibdev_to_node(ibdev) pcidev_to_node(to_pci_dev(ibdev->dma_device))
#define rdsibdev_to_node(rdsibdev) ibdev_to_node(rdsibdev->dev)

/* bits for i_ack_flags */
#define IB_ACK_IN_FLIGHT 0
#define IB_ACK_REQUESTED 1

/* Magic WR_ID for ACKs */
#define RDS_IB_ACK_WR_ID (~(u64) 0)
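
/*
 * Illustrative use (a sketch, not code quoted from ib_send.c): a send
 * completion handler can tell an ACK completion apart from ring entries
 * by its wr_id:
 *
 *	if (wc->wr_id == RDS_IB_ACK_WR_ID)
 *		rds_ib_ack_send_complete(ic);
 *	else
 *		(complete the send ring entry identified by wc->wr_id)
 */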

struct rds_ib_statistics {
	uint64_t s_ib_connect_raced;
	uint64_t s_ib_listen_closed_stale;
	uint64_t s_ib_tx_cq_call;
	uint64_t s_ib_tx_cq_event;
	uint64_t s_ib_tx_ring_full;
	uint64_t s_ib_tx_throttle;
	uint64_t s_ib_tx_sg_mapping_failure;
	uint64_t s_ib_tx_stalled;
	uint64_t s_ib_tx_credit_updates;
	uint64_t s_ib_rx_cq_call;
	uint64_t s_ib_rx_cq_event;
	uint64_t s_ib_rx_ring_empty;
	uint64_t s_ib_rx_refill_from_cq;
	uint64_t s_ib_rx_refill_from_thread;
	uint64_t s_ib_rx_alloc_limit;
	uint64_t s_ib_rx_credit_updates;
	uint64_t s_ib_ack_sent;
	uint64_t s_ib_ack_send_failure;
	uint64_t s_ib_ack_send_delayed;
	uint64_t s_ib_ack_send_piggybacked;
	uint64_t s_ib_ack_received;
	uint64_t s_ib_rdma_mr_alloc;
	uint64_t s_ib_rdma_mr_free;
	uint64_t s_ib_rdma_mr_used;
	uint64_t s_ib_rdma_mr_pool_flush;
	uint64_t s_ib_rdma_mr_pool_wait;
	uint64_t s_ib_rdma_mr_pool_depleted;
	uint64_t s_ib_atomic_cswp;
	uint64_t s_ib_atomic_fadd;
};

extern struct workqueue_struct *rds_ib_wq;

/*
 * Fake ib_dma_sync_sg_for_{cpu,device} as long as ib_verbs.h
 * doesn't define them.
 */
static inline void rds_ib_dma_sync_sg_for_cpu(struct ib_device *dev,
		struct scatterlist *sg, unsigned int sg_dma_len, int direction)
{
	unsigned int i;

	for (i = 0; i < sg_dma_len; ++i) {
		ib_dma_sync_single_for_cpu(dev,
				ib_sg_dma_address(dev, &sg[i]),
				ib_sg_dma_len(dev, &sg[i]),
				direction);
	}
}
#define ib_dma_sync_sg_for_cpu rds_ib_dma_sync_sg_for_cpu

static inline void rds_ib_dma_sync_sg_for_device(struct ib_device *dev,
		struct scatterlist *sg, unsigned int sg_dma_len, int direction)
{
	unsigned int i;

	for (i = 0; i < sg_dma_len; ++i) {
		ib_dma_sync_single_for_device(dev,
				ib_sg_dma_address(dev, &sg[i]),
				ib_sg_dma_len(dev, &sg[i]),
				direction);
	}
}
#define ib_dma_sync_sg_for_device rds_ib_dma_sync_sg_for_device
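
/*
 * Usage sketch (illustrative): for a scatterlist previously mapped with
 * ib_dma_map_sg(), sync it toward the CPU before reading the data, then
 * hand ownership back to the device:
 *
 *	ib_dma_sync_sg_for_cpu(dev, sg, sg_dma_len, DMA_FROM_DEVICE);
 *	(copy the received data out)
 *	ib_dma_sync_sg_for_device(dev, sg, sg_dma_len, DMA_FROM_DEVICE);
 */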


/* ib.c */
extern struct rds_transport rds_ib_transport;
extern void rds_ib_add_one(struct ib_device *device);
extern void rds_ib_remove_one(struct ib_device *device);
extern struct ib_client rds_ib_client;

extern unsigned int fmr_pool_size;
extern unsigned int fmr_message_size;
extern unsigned int rds_ib_retry_count;

extern spinlock_t ib_nodev_conns_lock;
extern struct list_head ib_nodev_conns;

/* ib_cm.c */
int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp);
void rds_ib_conn_free(void *arg);
int rds_ib_conn_connect(struct rds_connection *conn);
void rds_ib_conn_shutdown(struct rds_connection *conn);
void rds_ib_state_change(struct sock *sk);
int __init rds_ib_listen_init(void);
void rds_ib_listen_stop(void);
void __rds_ib_conn_error(struct rds_connection *conn, const char *, ...);
int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
			     struct rdma_cm_event *event);
int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id);
void rds_ib_cm_connect_complete(struct rds_connection *conn,
				struct rdma_cm_event *event);


#define rds_ib_conn_error(conn, fmt...) \
	__rds_ib_conn_error(conn, KERN_WARNING "RDS/IB: " fmt)
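
/*
 * Example call (illustrative): rds_ib_conn_error(conn, "send failed (%d)\n", ret)
 * logs the message with the "RDS/IB: " prefix via the out-of-line helper,
 * which also tears the connection down.
 */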

/* ib_rdma.c */
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr);
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void __rds_ib_destroy_conns(struct list_head *list, spinlock_t *list_lock);
static inline void rds_ib_destroy_nodev_conns(void)
{
	__rds_ib_destroy_conns(&ib_nodev_conns, &ib_nodev_conns_lock);
}
static inline void rds_ib_destroy_conns(struct rds_ib_device *rds_ibdev)
{
	__rds_ib_destroy_conns(&rds_ibdev->conn_list, &rds_ibdev->spinlock);
}
struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *);
void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo);
void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *);
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret);
void rds_ib_sync_mr(void *trans_private, int dir);
void rds_ib_free_mr(void *trans_private, int invalidate);
void rds_ib_flush_mrs(void);

/* ib_recv.c */
int __init rds_ib_recv_init(void);
void rds_ib_recv_exit(void);
int rds_ib_recv(struct rds_connection *conn);
int rds_ib_recv_refill(struct rds_connection *conn, int prefill);
void rds_ib_inc_free(struct rds_incoming *inc);
int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
			    size_t size);
void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context);
void rds_ib_recv_tasklet_fn(unsigned long data);
void rds_ib_recv_init_ring(struct rds_ib_connection *ic);
void rds_ib_recv_clear_ring(struct rds_ib_connection *ic);
void rds_ib_recv_init_ack(struct rds_ib_connection *ic);
void rds_ib_attempt_ack(struct rds_ib_connection *ic);
void rds_ib_ack_send_complete(struct rds_ib_connection *ic);
u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic);

/* ib_ring.c */
void rds_ib_ring_init(struct rds_ib_work_ring *ring, u32 nr);
void rds_ib_ring_resize(struct rds_ib_work_ring *ring, u32 nr);
u32 rds_ib_ring_alloc(struct rds_ib_work_ring *ring, u32 val, u32 *pos);
void rds_ib_ring_free(struct rds_ib_work_ring *ring, u32 val);
void rds_ib_ring_unalloc(struct rds_ib_work_ring *ring, u32 val);
int rds_ib_ring_empty(struct rds_ib_work_ring *ring);
int rds_ib_ring_low(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_oldest(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest);
extern wait_queue_head_t rds_ib_ring_empty_wait;
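
/*
 * Typical ring lifecycle (a sketch of the calling convention, not code
 * copied from ib_send.c): the caller allocates descriptors, posts work
 * requests for them, returns any unused slots, and frees entries as
 * completions arrive:
 *
 *	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, wanted, &pos);
 *	(post work_alloc WRs built from ic->i_sends[pos] onward)
 *	rds_ib_ring_unalloc(&ic->i_send_ring, unused);
 *	(then, in the completion handler)
 *	rds_ib_ring_free(&ic->i_send_ring, completed);
 */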

/* ib_send.c */
void rds_ib_xmit_complete(struct rds_connection *conn);
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off);
void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context);
void rds_ib_send_init_ring(struct rds_ib_connection *ic);
void rds_ib_send_clear_ring(struct rds_ib_connection *ic);
int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op);
void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits);
void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted);
int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted,
			     u32 *adv_credits, int need_posted, int max_posted);
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op);

/* ib_stats.c */
DECLARE_PER_CPU(struct rds_ib_statistics, rds_ib_stats);
#define rds_ib_stats_inc(member) rds_stats_inc_which(rds_ib_stats, member)
unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter,
				    unsigned int avail);
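
/*
 * Example (illustrative): rds_ib_stats_inc(s_ib_tx_ring_full) bumps the
 * current CPU's copy of that counter via rds_stats_inc_which() from rds.h.
 */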

/* ib_sysctl.c */
int __init rds_ib_sysctl_init(void);
void rds_ib_sysctl_exit(void);
extern unsigned long rds_ib_sysctl_max_send_wr;
extern unsigned long rds_ib_sysctl_max_recv_wr;
extern unsigned long rds_ib_sysctl_max_unsig_wrs;
extern unsigned long rds_ib_sysctl_max_unsig_bytes;
extern unsigned long rds_ib_sysctl_max_recv_allocation;
extern unsigned int rds_ib_sysctl_flow_control;
extern ctl_table rds_ib_sysctl_table[];

#endif