staging: delete non-required instances of include <linux/init.h>
drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
1/*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19 *
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
22 * have any questions.
23 *
24 * GPL HEADER END
25 */
26/*
27 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
29 *
30 * Copyright (c) 2011, 2012, Intel Corporation.
31 */
32/*
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
35 *
36 * lnet/klnds/o2iblnd/o2iblnd.h
37 *
38 * Author: Eric Barton <eric@bartonsoftware.com>
39 */
40
41#include <linux/module.h>
42#include <linux/kernel.h>
43#include <linux/mm.h>
44#include <linux/string.h>
45#include <linux/stat.h>
46#include <linux/errno.h>
47#include <linux/unistd.h>
48#include <linux/uio.h>
49
50#include <asm/uaccess.h>
51#include <asm/io.h>
52
53#include <linux/fs.h>
54#include <linux/file.h>
55#include <linux/list.h>
56#include <linux/kmod.h>
57#include <linux/sysctl.h>
58#include <linux/pci.h>
59
60#include <net/sock.h>
61#include <linux/in.h>
62
63#define DEBUG_SUBSYSTEM S_LND
64
65#include <linux/libcfs/libcfs.h>
66#include <linux/lnet/lnet.h>
67#include <linux/lnet/lib-lnet.h>
68#include <linux/lnet/lnet-sysctl.h>
69
70#include <rdma/rdma_cm.h>
71#include <rdma/ib_cm.h>
72#include <rdma/ib_verbs.h>
73#include <rdma/ib_fmr_pool.h>
74
75#define IBLND_PEER_HASH_SIZE 101 /* # peer lists */
76/* # scheduler loops before reschedule */
77#define IBLND_RESCHED 100
78
79#define IBLND_N_SCHED 2
80#define IBLND_N_SCHED_HIGH 4
81
82typedef struct
83{
84 int *kib_dev_failover; /* HCA failover */
85 unsigned int *kib_service; /* IB service number */
86 int *kib_min_reconnect_interval; /* first failed connection retry... */
87 int *kib_max_reconnect_interval; /* ...exponentially increasing to this */
88 int *kib_cksum; /* checksum kib_msg_t? */
89 int *kib_timeout; /* comms timeout (seconds) */
90 int *kib_keepalive; /* keepalive timeout (seconds) */
91 int *kib_ntx; /* # tx descs */
92 int *kib_credits; /* # concurrent sends */
93 int *kib_peertxcredits; /* # concurrent sends to 1 peer */
94 int *kib_peerrtrcredits; /* # per-peer router buffer credits */
95 int *kib_peercredits_hiw; /* # when eagerly to return credits */
96 int *kib_peertimeout; /* seconds to consider peer dead */
97 char **kib_default_ipif; /* default IPoIB interface */
98 int *kib_retry_count;
99 int *kib_rnr_retry_count;
100 int *kib_concurrent_sends; /* send work queue sizing */
101 int *kib_ib_mtu; /* IB MTU */
102 int *kib_map_on_demand; /* map-on-demand if RD has more fragments
103 * than this value, 0 disable map-on-demand */
104 int *kib_pmr_pool_size; /* # physical MR in pool */
105 int *kib_fmr_pool_size; /* # FMRs in pool */
106 int *kib_fmr_flush_trigger; /* When to trigger FMR flush */
107 int *kib_fmr_cache; /* enable FMR pool cache? */
108 int *kib_require_priv_port;/* accept only privileged ports */
109 int *kib_use_priv_port; /* use privileged port for active connect */
110 /* # threads on each CPT */
111 int *kib_nscheds;
112} kib_tunables_t;
113
114extern kib_tunables_t kiblnd_tunables;
115
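/*
 * Usage sketch (illustration only, not part of the original header):
 * every field of kib_tunables_t is a pointer to its module parameter,
 * so tunables are always read through one level of indirection, e.g.:
 *
 *	int timeout   = *kiblnd_tunables.kib_timeout;
 *	int ntx_descs = *kiblnd_tunables.kib_ntx;
 */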
116#define IBLND_MSG_QUEUE_SIZE_V1 8 /* V1 only : # messages/RDMAs in-flight */
117#define IBLND_CREDIT_HIGHWATER_V1 7 /* V1 only : when eagerly to return credits */
118
119#define IBLND_CREDITS_DEFAULT 8 /* default # of peer credits */
120#define IBLND_CREDITS_MAX ((typeof(((kib_msg_t*) 0)->ibm_credits)) - 1) /* Max # of peer credits */
121
122#define IBLND_MSG_QUEUE_SIZE(v) ((v) == IBLND_MSG_VERSION_1 ? \
123 IBLND_MSG_QUEUE_SIZE_V1 : \
124 *kiblnd_tunables.kib_peertxcredits) /* # messages/RDMAs in-flight */
125#define IBLND_CREDITS_HIGHWATER(v) ((v) == IBLND_MSG_VERSION_1 ? \
126 IBLND_CREDIT_HIGHWATER_V1 : \
127 *kiblnd_tunables.kib_peercredits_hiw) /* when eagerly to return credits */
128
129#define kiblnd_rdma_create_id(cb, dev, ps, qpt) rdma_create_id(cb, dev, ps, qpt)
130
131static inline int
132kiblnd_concurrent_sends_v1(void)
133{
134 if (*kiblnd_tunables.kib_concurrent_sends > IBLND_MSG_QUEUE_SIZE_V1 * 2)
135 return IBLND_MSG_QUEUE_SIZE_V1 * 2;
136
137 if (*kiblnd_tunables.kib_concurrent_sends < IBLND_MSG_QUEUE_SIZE_V1 / 2)
138 return IBLND_MSG_QUEUE_SIZE_V1 / 2;
139
140 return *kiblnd_tunables.kib_concurrent_sends;
141}
142
143#define IBLND_CONCURRENT_SENDS(v) ((v) == IBLND_MSG_VERSION_1 ? \
144 kiblnd_concurrent_sends_v1() : \
145 *kiblnd_tunables.kib_concurrent_sends)
146/* 2 OOB shall suffice for 1 keepalive and 1 returning credits */
147#define IBLND_OOB_CAPABLE(v) ((v) != IBLND_MSG_VERSION_1)
148#define IBLND_OOB_MSGS(v) (IBLND_OOB_CAPABLE(v) ? 2 : 0)
149
150#define IBLND_MSG_SIZE (4<<10) /* max size of queued messages (inc hdr) */
151#define IBLND_MAX_RDMA_FRAGS LNET_MAX_IOV /* max # of fragments supported */
152#define IBLND_CFG_RDMA_FRAGS (*kiblnd_tunables.kib_map_on_demand != 0 ? \
153 *kiblnd_tunables.kib_map_on_demand : \
154 IBLND_MAX_RDMA_FRAGS) /* max # of fragments configured by user */
155#define IBLND_RDMA_FRAGS(v) ((v) == IBLND_MSG_VERSION_1 ? \
156 IBLND_MAX_RDMA_FRAGS : IBLND_CFG_RDMA_FRAGS)
157
158/************************/
159/* derived constants... */
160/* Pools (shared by connections on each CPT) */
161/* These pools can grow at runtime, so don't need give a very large value */
162#define IBLND_TX_POOL 256
163#define IBLND_PMR_POOL 256
164#define IBLND_FMR_POOL 256
165#define IBLND_FMR_POOL_FLUSH 192
166
167/* TX messages (shared by all connections) */
168#define IBLND_TX_MSGS() (*kiblnd_tunables.kib_ntx)
169
170/* RX messages (per connection) */
171#define IBLND_RX_MSGS(v) (IBLND_MSG_QUEUE_SIZE(v) * 2 + IBLND_OOB_MSGS(v))
172#define IBLND_RX_MSG_BYTES(v) (IBLND_RX_MSGS(v) * IBLND_MSG_SIZE)
173#define IBLND_RX_MSG_PAGES(v) ((IBLND_RX_MSG_BYTES(v) + PAGE_SIZE - 1) / PAGE_SIZE)
174
175/* WRs and CQEs (per connection) */
176#define IBLND_RECV_WRS(v) IBLND_RX_MSGS(v)
177#define IBLND_SEND_WRS(v) ((IBLND_RDMA_FRAGS(v) + 1) * IBLND_CONCURRENT_SENDS(v))
178#define IBLND_CQ_ENTRIES(v) (IBLND_RECV_WRS(v) + IBLND_SEND_WRS(v))
179
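/*
 * Worked example (illustration only, assuming protocol version 2, the
 * default peer tx credit count of 8 and map-on-demand disabled):
 *
 *	IBLND_MSG_QUEUE_SIZE(v) = 8
 *	IBLND_RX_MSGS(v)        = 8 * 2 + 2 OOB msgs = 18
 *	IBLND_RECV_WRS(v)       = 18
 *	IBLND_SEND_WRS(v)       = (IBLND_MAX_RDMA_FRAGS + 1) *
 *				  *kiblnd_tunables.kib_concurrent_sends
 *	IBLND_CQ_ENTRIES(v)     = IBLND_RECV_WRS(v) + IBLND_SEND_WRS(v)
 */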
180struct kib_hca_dev;
181
182/* o2iblnd can run over aliased interface */
183#ifdef IFALIASZ
184#define KIB_IFNAME_SIZE IFALIASZ
185#else
186#define KIB_IFNAME_SIZE 256
187#endif
188
189typedef struct
190{
191 struct list_head ibd_list; /* chain on kib_devs */
192 struct list_head ibd_fail_list; /* chain on kib_failed_devs */
193 __u32 ibd_ifip; /* IPoIB interface IP */
194 /** IPoIB interface name */
195 char ibd_ifname[KIB_IFNAME_SIZE];
196 int ibd_nnets; /* # nets extant */
197
198 cfs_time_t ibd_next_failover;
199 int ibd_failed_failover; /* # failover failures */
200 unsigned int ibd_failover; /* failover in progress */
201 unsigned int ibd_can_failover; /* IPoIB interface is a bonding master */
202 struct list_head ibd_nets;
203 struct kib_hca_dev *ibd_hdev;
204} kib_dev_t;
205
206typedef struct kib_hca_dev
207{
208 struct rdma_cm_id *ibh_cmid; /* listener cmid */
209 struct ib_device *ibh_ibdev; /* IB device */
210 int ibh_page_shift; /* page shift of current HCA */
211 int ibh_page_size; /* page size of current HCA */
212 __u64 ibh_page_mask; /* page mask of current HCA */
213 int ibh_mr_shift; /* bits shift of max MR size */
214 __u64 ibh_mr_size; /* size of MR */
215 int ibh_nmrs; /* # of global MRs */
216 struct ib_mr **ibh_mrs; /* global MR */
217 struct ib_pd *ibh_pd; /* PD */
218 kib_dev_t *ibh_dev; /* owner */
219 atomic_t ibh_ref; /* refcount */
220} kib_hca_dev_t;
221
222/** # of seconds to keep pool alive */
223#define IBLND_POOL_DEADLINE 300
224/** # of seconds to retry if allocation failed */
225#define IBLND_POOL_RETRY 1
226
227typedef struct
228{
229 int ibp_npages; /* # pages */
230 struct page *ibp_pages[0]; /* page array */
231} kib_pages_t;
232
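/*
 * Allocation sketch (illustration only): ibp_pages[] is a zero-length
 * array, so the descriptor and its page pointers come from a single
 * allocation sized with offsetof(), much as kiblnd_alloc_pages()
 * (declared below) does per CPT:
 *
 *	kib_pages_t *p = kzalloc(offsetof(kib_pages_t,
 *					  ibp_pages[npages]), GFP_KERNEL);
 *	if (p != NULL)
 *		p->ibp_npages = npages;
 */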
233struct kib_pmr_pool;
234
235typedef struct {
236 struct list_head pmr_list; /* chain node */
237 struct ib_phys_buf *pmr_ipb; /* physical buffer */
238 struct ib_mr *pmr_mr; /* IB MR */
239 struct kib_pmr_pool *pmr_pool; /* owner of this MR */
240 __u64 pmr_iova; /* Virtual I/O address */
241 int pmr_refcount; /* reference count */
242} kib_phys_mr_t;
243
244struct kib_pool;
245struct kib_poolset;
246
247typedef int (*kib_ps_pool_create_t)(struct kib_poolset *ps,
248 int inc, struct kib_pool **pp_po);
249typedef void (*kib_ps_pool_destroy_t)(struct kib_pool *po);
250typedef void (*kib_ps_node_init_t)(struct kib_pool *po, struct list_head *node);
251typedef void (*kib_ps_node_fini_t)(struct kib_pool *po, struct list_head *node);
252
253struct kib_net;
254
255#define IBLND_POOL_NAME_LEN 32
256
257typedef struct kib_poolset
258{
259 spinlock_t ps_lock; /* serialize */
260 struct kib_net *ps_net; /* network it belongs to */
261 char ps_name[IBLND_POOL_NAME_LEN]; /* pool set name */
262 struct list_head ps_pool_list; /* list of pools */
263 struct list_head ps_failed_pool_list; /* failed pool list */
264 cfs_time_t ps_next_retry; /* time stamp for retry if failed to allocate */
265 int ps_increasing; /* is allocating new pool */
266 int ps_pool_size; /* new pool size */
267 int ps_cpt; /* CPT id */
268
269 kib_ps_pool_create_t ps_pool_create; /* create a new pool */
270 kib_ps_pool_destroy_t ps_pool_destroy; /* destroy a pool */
271 kib_ps_node_init_t ps_node_init; /* initialize new allocated node */
272 kib_ps_node_fini_t ps_node_fini; /* finalize node */
273} kib_poolset_t;
274
275typedef struct kib_pool
276{
277 struct list_head po_list; /* chain on pool list */
278 struct list_head po_free_list; /* pre-allocated node */
279 kib_poolset_t *po_owner; /* pool_set of this pool */
280 cfs_time_t po_deadline; /* deadline of this pool */
281 int po_allocated; /* # of elements in use */
282 int po_failed; /* pool is created on failed HCA */
283 int po_size; /* # of pre-allocated elements */
284} kib_pool_t;
285
286typedef struct {
287 kib_poolset_t tps_poolset; /* pool-set */
288 __u64 tps_next_tx_cookie; /* cookie of TX */
289} kib_tx_poolset_t;
290
291typedef struct {
292 kib_pool_t tpo_pool; /* pool */
293 struct kib_hca_dev *tpo_hdev; /* device for this pool */
294 struct kib_tx *tpo_tx_descs; /* all the tx descriptors */
295 kib_pages_t *tpo_tx_pages; /* premapped tx msg pages */
296} kib_tx_pool_t;
297
298typedef struct {
299 kib_poolset_t pps_poolset; /* pool-set */
300} kib_pmr_poolset_t;
301
302typedef struct kib_pmr_pool {
303 struct kib_hca_dev *ppo_hdev; /* device for this pool */
304 kib_pool_t ppo_pool; /* pool */
305} kib_pmr_pool_t;
306
307typedef struct
308{
309 spinlock_t fps_lock; /* serialize */
310 struct kib_net *fps_net; /* IB network */
311 struct list_head fps_pool_list; /* FMR pool list */
312 struct list_head fps_failed_pool_list; /* FMR pool list */
313 __u64 fps_version; /* validity stamp */
314 int fps_cpt; /* CPT id */
315 int fps_pool_size;
316 int fps_flush_trigger;
317 /* is allocating new pool */
318 int fps_increasing;
319 /* time stamp for retry if failed to allocate */
320 cfs_time_t fps_next_retry;
321} kib_fmr_poolset_t;
322
323typedef struct
324{
325 struct list_head fpo_list; /* chain on pool list */
326 struct kib_hca_dev *fpo_hdev; /* device for this pool */
327 kib_fmr_poolset_t *fpo_owner; /* owner of this pool */
328 struct ib_fmr_pool *fpo_fmr_pool; /* IB FMR pool */
329 cfs_time_t fpo_deadline; /* deadline of this pool */
330 int fpo_failed; /* fmr pool is failed */
331 int fpo_map_count; /* # of mapped FMR */
332} kib_fmr_pool_t;
333
334typedef struct {
335 struct ib_pool_fmr *fmr_pfmr; /* IB pool fmr */
336 kib_fmr_pool_t *fmr_pool; /* pool of FMR */
337} kib_fmr_t;
338
339typedef struct kib_net
340{
341 struct list_head ibn_list; /* chain on kib_dev_t::ibd_nets */
342 __u64 ibn_incarnation; /* my epoch */
343 int ibn_init; /* initialisation state */
344 int ibn_shutdown; /* shutting down? */
345
346 atomic_t ibn_npeers; /* # peers extant */
347 atomic_t ibn_nconns; /* # connections extant */
348
349 kib_tx_poolset_t **ibn_tx_ps; /* tx pool-set */
350 kib_fmr_poolset_t **ibn_fmr_ps; /* fmr pool-set */
351 kib_pmr_poolset_t **ibn_pmr_ps; /* pmr pool-set */
352
353 kib_dev_t *ibn_dev; /* underlying IB device */
354} kib_net_t;
355
356#define KIB_THREAD_SHIFT 16
357#define KIB_THREAD_ID(cpt, tid) ((cpt) << KIB_THREAD_SHIFT | (tid))
358#define KIB_THREAD_CPT(id) ((id) >> KIB_THREAD_SHIFT)
359#define KIB_THREAD_TID(id) ((id) & ((1UL << KIB_THREAD_SHIFT) - 1))
360
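/*
 * Round-trip sketch (illustration only, not part of the original
 * header): a scheduler thread id keeps its CPT in the high bits and
 * its per-CPT index in the low KIB_THREAD_SHIFT bits.
 */
static inline int
kiblnd_thread_id_example(void)
{
	long id = KIB_THREAD_ID(2, 5);	/* CPT 2, thread 5 */

	return KIB_THREAD_CPT(id) == 2 && KIB_THREAD_TID(id) == 5;
}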
361struct kib_sched_info {
362 /* serialise */
363 spinlock_t ibs_lock;
364 /* schedulers sleep here */
365 wait_queue_head_t ibs_waitq;
366 /* conns to check for rx completions */
367 struct list_head ibs_conns;
368 /* number of scheduler threads */
369 int ibs_nthreads;
370 /* max allowed scheduler threads */
371 int ibs_nthreads_max;
372 int ibs_cpt; /* CPT id */
373};
374
375typedef struct
376{
377 int kib_init; /* initialisation state */
378 int kib_shutdown; /* shut down? */
379 struct list_head kib_devs; /* IB devices extant */
380 /* list head of failed devices */
381 struct list_head kib_failed_devs;
382 /* schedulers sleep here */
383 wait_queue_head_t kib_failover_waitq;
384 atomic_t kib_nthreads; /* # live threads */
385 /* stabilize net/dev/peer/conn ops */
386 rwlock_t kib_global_lock;
387 /* hash table of all my known peers */
388 struct list_head *kib_peers;
389 /* size of kib_peers */
390 int kib_peer_hash_size;
391 /* the connd task (serialisation assertions) */
392 void *kib_connd;
393 /* connections to setup/teardown */
394 struct list_head kib_connd_conns;
395 /* connections with zero refcount */
396 struct list_head kib_connd_zombies;
397 /* connection daemon sleeps here */
398 wait_queue_head_t kib_connd_waitq;
399 spinlock_t kib_connd_lock; /* serialise */
400 struct ib_qp_attr kib_error_qpa; /* QP->ERROR */
401 /* percpt data for schedulers */
402 struct kib_sched_info **kib_scheds;
403} kib_data_t;
404
405#define IBLND_INIT_NOTHING 0
406#define IBLND_INIT_DATA 1
407#define IBLND_INIT_ALL 2
408
409/************************************************************************
410 * IB Wire message format.
411 * These are sent in sender's byte order (i.e. receiver flips).
412 */
413
414typedef struct kib_connparams
415{
416 __u16 ibcp_queue_depth;
417 __u16 ibcp_max_frags;
418 __u32 ibcp_max_msg_size;
419} WIRE_ATTR kib_connparams_t;
420
421typedef struct
422{
423 lnet_hdr_t ibim_hdr; /* portals header */
424 char ibim_payload[0]; /* piggy-backed payload */
425} WIRE_ATTR kib_immediate_msg_t;
426
427typedef struct
428{
429 __u32 rf_nob; /* # bytes this frag */
430 __u64 rf_addr; /* CAVEAT EMPTOR: misaligned!! */
431} WIRE_ATTR kib_rdma_frag_t;
432
433typedef struct
434{
435 __u32 rd_key; /* local/remote key */
436 __u32 rd_nfrags; /* # fragments */
437 kib_rdma_frag_t rd_frags[0]; /* buffer frags */
438} WIRE_ATTR kib_rdma_desc_t;
439
440typedef struct
441{
442 lnet_hdr_t ibprm_hdr; /* portals header */
443 __u64 ibprm_cookie; /* opaque completion cookie */
444} WIRE_ATTR kib_putreq_msg_t;
445
446typedef struct
447{
448 __u64 ibpam_src_cookie; /* reflected completion cookie */
449 __u64 ibpam_dst_cookie; /* opaque completion cookie */
450 kib_rdma_desc_t ibpam_rd; /* sender's sink buffer */
451} WIRE_ATTR kib_putack_msg_t;
452
453typedef struct
454{
455 lnet_hdr_t ibgm_hdr; /* portals header */
456 __u64 ibgm_cookie; /* opaque completion cookie */
457 kib_rdma_desc_t ibgm_rd; /* rdma descriptor */
458} WIRE_ATTR kib_get_msg_t;
459
460typedef struct
461{
462 __u64 ibcm_cookie; /* opaque completion cookie */
463 __s32 ibcm_status; /* < 0 failure: >= 0 length */
464} WIRE_ATTR kib_completion_msg_t;
465
466typedef struct
467{
468 /* First 2 fields fixed FOR ALL TIME */
469 __u32 ibm_magic; /* I'm an ibnal message */
470 __u16 ibm_version; /* this is my version number */
471
472 __u8 ibm_type; /* msg type */
473 __u8 ibm_credits; /* returned credits */
474 __u32 ibm_nob; /* # bytes in whole message */
475 __u32 ibm_cksum; /* checksum (0 == no checksum) */
476 __u64 ibm_srcnid; /* sender's NID */
477 __u64 ibm_srcstamp; /* sender's incarnation */
478 __u64 ibm_dstnid; /* destination's NID */
479 __u64 ibm_dststamp; /* destination's incarnation */
480
481 union {
482 kib_connparams_t connparams;
483 kib_immediate_msg_t immediate;
484 kib_putreq_msg_t putreq;
485 kib_putack_msg_t putack;
486 kib_get_msg_t get;
487 kib_completion_msg_t completion;
488 } WIRE_ATTR ibm_u;
489} WIRE_ATTR kib_msg_t;
490
491#define IBLND_MSG_MAGIC LNET_PROTO_IB_MAGIC /* unique magic */
492
493#define IBLND_MSG_VERSION_1 0x11
494#define IBLND_MSG_VERSION_2 0x12
495#define IBLND_MSG_VERSION IBLND_MSG_VERSION_2
496
497#define IBLND_MSG_CONNREQ 0xc0 /* connection request */
498#define IBLND_MSG_CONNACK 0xc1 /* connection acknowledge */
499#define IBLND_MSG_NOOP 0xd0 /* nothing (just credits) */
500#define IBLND_MSG_IMMEDIATE 0xd1 /* immediate */
501#define IBLND_MSG_PUT_REQ 0xd2 /* putreq (src->sink) */
502#define IBLND_MSG_PUT_NAK 0xd3 /* completion (sink->src) */
503#define IBLND_MSG_PUT_ACK 0xd4 /* putack (sink->src) */
504#define IBLND_MSG_PUT_DONE 0xd5 /* completion (src->sink) */
505#define IBLND_MSG_GET_REQ 0xd6 /* getreq (sink->src) */
506#define IBLND_MSG_GET_DONE 0xd7 /* completion (src->sink: all OK) */
507
508typedef struct {
509 __u32 ibr_magic; /* sender's magic */
510 __u16 ibr_version; /* sender's version */
511 __u8 ibr_why; /* reject reason */
512 __u8 ibr_padding; /* padding */
513 __u64 ibr_incarnation; /* incarnation of peer */
514 kib_connparams_t ibr_cp; /* connection parameters */
515} WIRE_ATTR kib_rej_t;
516
517/* connection rejection reasons */
518#define IBLND_REJECT_CONN_RACE 1 /* You lost connection race */
519#define IBLND_REJECT_NO_RESOURCES 2 /* Out of memory/conns etc */
520#define IBLND_REJECT_FATAL 3 /* Anything else */
521
522#define IBLND_REJECT_CONN_UNCOMPAT 4 /* incompatible version peer */
523#define IBLND_REJECT_CONN_STALE 5 /* stale peer */
524
525#define IBLND_REJECT_RDMA_FRAGS 6 /* Fatal: peer's rdma frags can't match mine */
526#define IBLND_REJECT_MSG_QUEUE_SIZE 7 /* Fatal: peer's msg queue size can't match mine */
527
528/***********************************************************************/
529
530typedef struct kib_rx /* receive message */
531{
532 struct list_head rx_list; /* queue for attention */
533 struct kib_conn *rx_conn; /* owning conn */
534 int rx_nob; /* # bytes received (-1 while posted) */
535 enum ib_wc_status rx_status; /* completion status */
536 kib_msg_t *rx_msg; /* message buffer (host vaddr) */
537 __u64 rx_msgaddr; /* message buffer (I/O addr) */
538 DECLARE_PCI_UNMAP_ADDR (rx_msgunmap); /* for dma_unmap_single() */
539 struct ib_recv_wr rx_wrq; /* receive work item... */
540 struct ib_sge rx_sge; /* ...and its memory */
541} kib_rx_t;
542
543#define IBLND_POSTRX_DONT_POST 0 /* don't post */
544#define IBLND_POSTRX_NO_CREDIT 1 /* post: no credits */
545#define IBLND_POSTRX_PEER_CREDIT 2 /* post: give peer back 1 credit */
546#define IBLND_POSTRX_RSRVD_CREDIT 3 /* post: give myself back 1 reserved credit */
547
548typedef struct kib_tx /* transmit message */
549{
550 struct list_head tx_list; /* queue on idle_txs ibc_tx_queue etc. */
551 kib_tx_pool_t *tx_pool; /* pool I'm from */
552 struct kib_conn *tx_conn; /* owning conn */
553 short tx_sending; /* # tx callbacks outstanding */
554 short tx_queued; /* queued for sending */
555 short tx_waiting; /* waiting for peer */
556 int tx_status; /* LNET completion status */
557 unsigned long tx_deadline; /* completion deadline */
558 __u64 tx_cookie; /* completion cookie */
559 lnet_msg_t *tx_lntmsg[2]; /* lnet msgs to finalize on completion */
560 kib_msg_t *tx_msg; /* message buffer (host vaddr) */
561 __u64 tx_msgaddr; /* message buffer (I/O addr) */
562 DECLARE_PCI_UNMAP_ADDR (tx_msgunmap); /* for dma_unmap_single() */
563 int tx_nwrq; /* # send work items */
564 struct ib_send_wr *tx_wrq; /* send work items... */
565 struct ib_sge *tx_sge; /* ...and their memory */
566 kib_rdma_desc_t *tx_rd; /* rdma descriptor */
567 int tx_nfrags; /* # entries in... */
568 struct scatterlist *tx_frags; /* dma_map_sg descriptor */
569 __u64 *tx_pages; /* rdma phys page addrs */
570 union {
571 kib_phys_mr_t *pmr; /* MR for physical buffer */
572 kib_fmr_t fmr; /* FMR */
573 } tx_u;
574 int tx_dmadir; /* dma direction */
575} kib_tx_t;
576
577typedef struct kib_connvars
578{
579 /* connection-in-progress variables */
580 kib_msg_t cv_msg;
581} kib_connvars_t;
582
583typedef struct kib_conn
584{
585 struct kib_sched_info *ibc_sched; /* scheduler information */
586 struct kib_peer *ibc_peer; /* owning peer */
587 kib_hca_dev_t *ibc_hdev; /* HCA bound on */
588 struct list_head ibc_list; /* stash on peer's conn list */
589 struct list_head ibc_sched_list; /* schedule for attention */
590 __u16 ibc_version; /* version of connection */
591 __u64 ibc_incarnation; /* which instance of the peer */
592 atomic_t ibc_refcount; /* # users */
593 int ibc_state; /* what's happening */
594 int ibc_nsends_posted; /* # uncompleted sends */
595 int ibc_noops_posted; /* # uncompleted NOOPs */
596 int ibc_credits; /* # credits I have */
597 int ibc_outstanding_credits; /* # credits to return */
598 int ibc_reserved_credits;/* # ACK/DONE msg credits */
599 int ibc_comms_error; /* set on comms error */
600 unsigned int ibc_nrx:16; /* receive buffers owned */
601 unsigned int ibc_scheduled:1; /* scheduled for attention */
602 unsigned int ibc_ready:1; /* CQ callback fired */
603 /* time of last send */
604 unsigned long ibc_last_send;
605 /** link chain for kiblnd_check_conns only */
606 struct list_head ibc_connd_list;
607 /** rxs completed before ESTABLISHED */
608 struct list_head ibc_early_rxs;
609 /** IBLND_MSG_NOOPs for IBLND_MSG_VERSION_1 */
610 struct list_head ibc_tx_noops;
611 struct list_head ibc_tx_queue; /* sends that need a credit */
612 struct list_head ibc_tx_queue_nocred;/* sends that don't need a credit */
613 struct list_head ibc_tx_queue_rsrvd; /* sends that need to reserve an ACK/DONE msg */
614 struct list_head ibc_active_txs; /* active tx awaiting completion */
615 spinlock_t ibc_lock; /* serialise */
616 kib_rx_t *ibc_rxs; /* the rx descs */
617 kib_pages_t *ibc_rx_pages; /* premapped rx msg pages */
618
619 struct rdma_cm_id *ibc_cmid; /* CM id */
620 struct ib_cq *ibc_cq; /* completion queue */
621
622 kib_connvars_t *ibc_connvars; /* in-progress connection state */
623} kib_conn_t;
624
625#define IBLND_CONN_INIT 0 /* being initialised */
626#define IBLND_CONN_ACTIVE_CONNECT 1 /* active sending req */
627#define IBLND_CONN_PASSIVE_WAIT 2 /* passive waiting for rtu */
628#define IBLND_CONN_ESTABLISHED 3 /* connection established */
629#define IBLND_CONN_CLOSING 4 /* being closed */
630#define IBLND_CONN_DISCONNECTED 5 /* disconnected */
631
632typedef struct kib_peer
633{
634 struct list_head ibp_list; /* stash on global peer list */
635 lnet_nid_t ibp_nid; /* who's on the other end(s) */
636 lnet_ni_t *ibp_ni; /* LNet interface */
637 atomic_t ibp_refcount; /* # users */
638 struct list_head ibp_conns; /* all active connections */
639 struct list_head ibp_tx_queue; /* msgs waiting for a conn */
640 __u16 ibp_version; /* version of peer */
641 __u64 ibp_incarnation; /* incarnation of peer */
642 int ibp_connecting; /* current active connection attempts */
643 int ibp_accepting; /* current passive connection attempts */
644 int ibp_error; /* errno on closing this peer */
645 cfs_time_t ibp_last_alive; /* when (in jiffies) I was last alive */
646} kib_peer_t;
647
648extern kib_data_t kiblnd_data;
649
650extern void kiblnd_hdev_destroy(kib_hca_dev_t *hdev);
651
652static inline void
653kiblnd_hdev_addref_locked(kib_hca_dev_t *hdev)
654{
655 LASSERT (atomic_read(&hdev->ibh_ref) > 0);
656 atomic_inc(&hdev->ibh_ref);
657}
658
659static inline void
660kiblnd_hdev_decref(kib_hca_dev_t *hdev)
661{
662 LASSERT (atomic_read(&hdev->ibh_ref) > 0);
663 if (atomic_dec_and_test(&hdev->ibh_ref))
664 kiblnd_hdev_destroy(hdev);
665}
666
667static inline int
668kiblnd_dev_can_failover(kib_dev_t *dev)
669{
670 if (!list_empty(&dev->ibd_fail_list)) /* already scheduled */
671 return 0;
672
673 if (*kiblnd_tunables.kib_dev_failover == 0) /* disabled */
674 return 0;
675
676 if (*kiblnd_tunables.kib_dev_failover > 1) /* force failover */
677 return 1;
678
679 return dev->ibd_can_failover;
680}
681
682#define kiblnd_conn_addref(conn) \
683do { \
684 CDEBUG(D_NET, "conn[%p] (%d)++\n", \
685 (conn), atomic_read(&(conn)->ibc_refcount)); \
686 atomic_inc(&(conn)->ibc_refcount); \
687} while (0)
688
689#define kiblnd_conn_decref(conn) \
690do { \
691 unsigned long flags; \
692 \
693 CDEBUG(D_NET, "conn[%p] (%d)--\n", \
694 (conn), atomic_read(&(conn)->ibc_refcount)); \
695 LASSERT_ATOMIC_POS(&(conn)->ibc_refcount); \
696 if (atomic_dec_and_test(&(conn)->ibc_refcount)) { \
697 spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); \
698 list_add_tail(&(conn)->ibc_list, \
699 &kiblnd_data.kib_connd_zombies); \
700 wake_up(&kiblnd_data.kib_connd_waitq); \
701 spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);\
702 } \
703} while (0)
704
705#define kiblnd_peer_addref(peer) \
706do { \
707 CDEBUG(D_NET, "peer[%p] -> %s (%d)++\n", \
708 (peer), libcfs_nid2str((peer)->ibp_nid), \
709 atomic_read (&(peer)->ibp_refcount)); \
710 atomic_inc(&(peer)->ibp_refcount); \
711} while (0)
712
713#define kiblnd_peer_decref(peer) \
714do { \
715 CDEBUG(D_NET, "peer[%p] -> %s (%d)--\n", \
716 (peer), libcfs_nid2str((peer)->ibp_nid), \
717 atomic_read (&(peer)->ibp_refcount)); \
718 LASSERT_ATOMIC_POS(&(peer)->ibp_refcount); \
719 if (atomic_dec_and_test(&(peer)->ibp_refcount)) \
720 kiblnd_destroy_peer(peer); \
721} while (0)
722
723static inline struct list_head *
724kiblnd_nid2peerlist (lnet_nid_t nid)
725{
726 unsigned int hash =
727 ((unsigned int)nid) % kiblnd_data.kib_peer_hash_size;
728
729 return (&kiblnd_data.kib_peers [hash]);
730}
731
732static inline int
733kiblnd_peer_active (kib_peer_t *peer)
734{
735 /* Am I in the peer hash table? */
736 return (!list_empty(&peer->ibp_list));
737}
738
739static inline kib_conn_t *
740kiblnd_get_conn_locked (kib_peer_t *peer)
741{
742 LASSERT (!list_empty(&peer->ibp_conns));
743
744 /* just return the first connection */
745 return list_entry(peer->ibp_conns.next, kib_conn_t, ibc_list);
746}
747
748static inline int
749kiblnd_send_keepalive(kib_conn_t *conn)
750{
751 return (*kiblnd_tunables.kib_keepalive > 0) &&
752 cfs_time_after(jiffies, conn->ibc_last_send +
753 *kiblnd_tunables.kib_keepalive*HZ);
754}
755
756static inline int
757kiblnd_need_noop(kib_conn_t *conn)
758{
759 LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);
760
761 if (conn->ibc_outstanding_credits <
762 IBLND_CREDITS_HIGHWATER(conn->ibc_version) &&
763 !kiblnd_send_keepalive(conn))
764 return 0; /* No need to send NOOP */
765
766 if (IBLND_OOB_CAPABLE(conn->ibc_version)) {
767 if (!list_empty(&conn->ibc_tx_queue_nocred))
768 return 0; /* NOOP can be piggybacked */
769
770 /* No tx to piggyback NOOP onto or no credit to send a tx */
771 return (list_empty(&conn->ibc_tx_queue) ||
772 conn->ibc_credits == 0);
773 }
774
775 if (!list_empty(&conn->ibc_tx_noops) || /* NOOP already queued */
776 !list_empty(&conn->ibc_tx_queue_nocred) || /* piggyback NOOP */
777 conn->ibc_credits == 0) /* no credit */
778 return 0;
779
780 if (conn->ibc_credits == 1 && /* last credit reserved for */
781 conn->ibc_outstanding_credits == 0) /* giving back credits */
782 return 0;
783
784 /* No tx to piggyback NOOP onto or no credit to send a tx */
785 return (list_empty(&conn->ibc_tx_queue) || conn->ibc_credits == 1);
786}
787
788static inline void
789kiblnd_abort_receives(kib_conn_t *conn)
790{
791 ib_modify_qp(conn->ibc_cmid->qp,
792 &kiblnd_data.kib_error_qpa, IB_QP_STATE);
793}
794
795static inline const char *
796kiblnd_queue2str (kib_conn_t *conn, struct list_head *q)
797{
798 if (q == &conn->ibc_tx_queue)
799 return "tx_queue";
800
801 if (q == &conn->ibc_tx_queue_rsrvd)
802 return "tx_queue_rsrvd";
803
804 if (q == &conn->ibc_tx_queue_nocred)
805 return "tx_queue_nocred";
806
807 if (q == &conn->ibc_active_txs)
808 return "active_txs";
809
810 LBUG();
811 return NULL;
812}
813
814/* CAVEAT EMPTOR: We rely on descriptor alignment to allow us to use the
815 * lowest bits of the work request id to stash the work item type. */
816
817#define IBLND_WID_TX 0
818#define IBLND_WID_RDMA 1
819#define IBLND_WID_RX 2
820#define IBLND_WID_MASK 3UL
821
822static inline __u64
823kiblnd_ptr2wreqid (void *ptr, int type)
824{
825 unsigned long lptr = (unsigned long)ptr;
826
827 LASSERT ((lptr & IBLND_WID_MASK) == 0);
828 LASSERT ((type & ~IBLND_WID_MASK) == 0);
829 return (__u64)(lptr | type);
830}
831
832static inline void *
833kiblnd_wreqid2ptr (__u64 wreqid)
834{
835 return (void *)(((unsigned long)wreqid) & ~IBLND_WID_MASK);
836}
837
838static inline int
839kiblnd_wreqid2type (__u64 wreqid)
840{
841 return (wreqid & IBLND_WID_MASK);
842}
843
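/*
 * Round-trip sketch (illustration only): a work request id is the
 * descriptor address with the work item type stashed in its two low
 * bits, so the completion handler can recover both.
 */
static inline int
kiblnd_wreqid_example(kib_tx_t *tx)
{
	__u64 wreqid = kiblnd_ptr2wreqid(tx, IBLND_WID_TX);

	return kiblnd_wreqid2ptr(wreqid) == (void *)tx &&
	       kiblnd_wreqid2type(wreqid) == IBLND_WID_TX;
}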
844static inline void
845kiblnd_set_conn_state (kib_conn_t *conn, int state)
846{
847 conn->ibc_state = state;
848 mb();
849}
850
851static inline void
852kiblnd_init_msg (kib_msg_t *msg, int type, int body_nob)
853{
854 msg->ibm_type = type;
855 msg->ibm_nob = offsetof(kib_msg_t, ibm_u) + body_nob;
856}
857
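/*
 * Sizing sketch (illustration only): kiblnd_init_msg() records the
 * common header plus whichever union member follows it, so a
 * credit-only NOOP is just the header:
 *
 *	kiblnd_init_msg(msg, IBLND_MSG_NOOP, 0);
 *	-> msg->ibm_nob == offsetof(kib_msg_t, ibm_u)
 */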
858static inline int
859kiblnd_rd_size (kib_rdma_desc_t *rd)
860{
861 int i;
862 int size;
863
864 for (i = size = 0; i < rd->rd_nfrags; i++)
865 size += rd->rd_frags[i].rf_nob;
866
867 return size;
868}
869
870static inline __u64
871kiblnd_rd_frag_addr(kib_rdma_desc_t *rd, int index)
872{
873 return rd->rd_frags[index].rf_addr;
874}
875
876static inline __u32
877kiblnd_rd_frag_size(kib_rdma_desc_t *rd, int index)
878{
879 return rd->rd_frags[index].rf_nob;
880}
881
882static inline __u32
883kiblnd_rd_frag_key(kib_rdma_desc_t *rd, int index)
884{
885 return rd->rd_key;
886}
887
888static inline int
889kiblnd_rd_consume_frag(kib_rdma_desc_t *rd, int index, __u32 nob)
890{
891 if (nob < rd->rd_frags[index].rf_nob) {
892 rd->rd_frags[index].rf_addr += nob;
893 rd->rd_frags[index].rf_nob -= nob;
894 } else {
895 index ++;
896 }
897
898 return index;
899}
900
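/*
 * Walk sketch (illustration only): RDMA setup consumes the descriptor
 * fragment by fragment; a transfer shorter than the current fragment
 * just advances it and keeps the same index, e.g.:
 *
 *	rd->rd_frags[i] = { .rf_addr = A, .rf_nob = 4096 }
 *	kiblnd_rd_consume_frag(rd, i, 1024)
 *	-> rd->rd_frags[i] = { .rf_addr = A + 1024, .rf_nob = 3072 },
 *	   returns i unchanged
 */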
901static inline int
902kiblnd_rd_msg_size(kib_rdma_desc_t *rd, int msgtype, int n)
903{
904 LASSERT (msgtype == IBLND_MSG_GET_REQ ||
905 msgtype == IBLND_MSG_PUT_ACK);
906
907 return msgtype == IBLND_MSG_GET_REQ ?
908 offsetof(kib_get_msg_t, ibgm_rd.rd_frags[n]) :
909 offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[n]);
910}
911
912
913static inline __u64
914kiblnd_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
915{
916 return ib_dma_mapping_error(dev, dma_addr);
917}
918
919static inline __u64 kiblnd_dma_map_single(struct ib_device *dev,
920 void *msg, size_t size,
921 enum dma_data_direction direction)
922{
923 return ib_dma_map_single(dev, msg, size, direction);
924}
925
926static inline void kiblnd_dma_unmap_single(struct ib_device *dev,
927 __u64 addr, size_t size,
928 enum dma_data_direction direction)
929{
930 ib_dma_unmap_single(dev, addr, size, direction);
931}
932
933#define KIBLND_UNMAP_ADDR_SET(p, m, a) do {} while (0)
934#define KIBLND_UNMAP_ADDR(p, m, a) (a)
935
936static inline int kiblnd_dma_map_sg(struct ib_device *dev,
937 struct scatterlist *sg, int nents,
938 enum dma_data_direction direction)
939{
940 return ib_dma_map_sg(dev, sg, nents, direction);
941}
942
943static inline void kiblnd_dma_unmap_sg(struct ib_device *dev,
944 struct scatterlist *sg, int nents,
945 enum dma_data_direction direction)
946{
947 ib_dma_unmap_sg(dev, sg, nents, direction);
948}
949
950static inline __u64 kiblnd_sg_dma_address(struct ib_device *dev,
951 struct scatterlist *sg)
952{
953 return ib_sg_dma_address(dev, sg);
954}
955
956static inline unsigned int kiblnd_sg_dma_len(struct ib_device *dev,
957 struct scatterlist *sg)
958{
959 return ib_sg_dma_len(dev, sg);
960}
961
962/* XXX We use KIBLND_CONN_PARAM(e) as writable buffer, it's not strictly
963 * right because OFED1.2 defines it as const, to use it we have to add
964 * (void *) cast to overcome "const" */
965
966#define KIBLND_CONN_PARAM(e) ((e)->param.conn.private_data)
967#define KIBLND_CONN_PARAM_LEN(e) ((e)->param.conn.private_data_len)
968
969
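/*
 * Usage sketch (illustration only): the CM event's private data is
 * treated as a wire message by casting away the const noted above,
 * roughly:
 *
 *	kib_msg_t *msg = (kib_msg_t *)KIBLND_CONN_PARAM(event);
 *	int	   nob = KIBLND_CONN_PARAM_LEN(event);
 *	int	   rc  = kiblnd_unpack_msg(msg, nob);
 */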
970struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev,
971 kib_rdma_desc_t *rd);
972struct ib_mr *kiblnd_find_dma_mr(kib_hca_dev_t *hdev,
973 __u64 addr, __u64 size);
974void kiblnd_map_rx_descs(kib_conn_t *conn);
975void kiblnd_unmap_rx_descs(kib_conn_t *conn);
976int kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx,
977 kib_rdma_desc_t *rd, int nfrags);
978void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx);
979void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node);
980struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps);
981
982int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages,
983 int npages, __u64 iov, kib_fmr_t *fmr);
984void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status);
985
986int kiblnd_pmr_pool_map(kib_pmr_poolset_t *pps, kib_hca_dev_t *hdev,
987 kib_rdma_desc_t *rd, __u64 *iova, kib_phys_mr_t **pp_pmr);
988void kiblnd_pmr_pool_unmap(kib_phys_mr_t *pmr);
989
990int kiblnd_startup (lnet_ni_t *ni);
991void kiblnd_shutdown (lnet_ni_t *ni);
992int kiblnd_ctl (lnet_ni_t *ni, unsigned int cmd, void *arg);
993void kiblnd_query (struct lnet_ni *ni, lnet_nid_t nid, cfs_time_t *when);
994
995int kiblnd_tunables_init(void);
996void kiblnd_tunables_fini(void);
997
998int kiblnd_connd (void *arg);
999int kiblnd_scheduler(void *arg);
1000int kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name);
1001int kiblnd_failover_thread (void *arg);
1002
1003int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages);
1004void kiblnd_free_pages (kib_pages_t *p);
1005
1006int kiblnd_cm_callback(struct rdma_cm_id *cmid,
1007 struct rdma_cm_event *event);
1008int kiblnd_translate_mtu(int value);
1009
1010int kiblnd_dev_failover(kib_dev_t *dev);
1011int kiblnd_create_peer (lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid);
1012void kiblnd_destroy_peer (kib_peer_t *peer);
1013void kiblnd_destroy_dev (kib_dev_t *dev);
1014void kiblnd_unlink_peer_locked (kib_peer_t *peer);
1015void kiblnd_peer_alive (kib_peer_t *peer);
1016kib_peer_t *kiblnd_find_peer_locked (lnet_nid_t nid);
1017void kiblnd_peer_connect_failed (kib_peer_t *peer, int active, int error);
1018int kiblnd_close_stale_conns_locked (kib_peer_t *peer,
1019 int version, __u64 incarnation);
1020int kiblnd_close_peer_conns_locked (kib_peer_t *peer, int why);
1021
1022void kiblnd_connreq_done(kib_conn_t *conn, int status);
1023kib_conn_t *kiblnd_create_conn (kib_peer_t *peer, struct rdma_cm_id *cmid,
1024 int state, int version);
1025void kiblnd_destroy_conn (kib_conn_t *conn);
1026void kiblnd_close_conn (kib_conn_t *conn, int error);
1027void kiblnd_close_conn_locked (kib_conn_t *conn, int error);
1028
1029int kiblnd_init_rdma (kib_conn_t *conn, kib_tx_t *tx, int type,
1030 int nob, kib_rdma_desc_t *dstrd, __u64 dstcookie);
1031
1032void kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid);
1033void kiblnd_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn);
1034void kiblnd_queue_tx (kib_tx_t *tx, kib_conn_t *conn);
1035void kiblnd_init_tx_msg (lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob);
1036void kiblnd_txlist_done (lnet_ni_t *ni, struct list_head *txlist,
1037 int status);
1038void kiblnd_check_sends (kib_conn_t *conn);
1039
1040void kiblnd_qp_event(struct ib_event *event, void *arg);
1041void kiblnd_cq_event(struct ib_event *event, void *arg);
1042void kiblnd_cq_completion(struct ib_cq *cq, void *arg);
1043
1044void kiblnd_pack_msg (lnet_ni_t *ni, kib_msg_t *msg, int version,
1045 int credits, lnet_nid_t dstnid, __u64 dststamp);
1046int kiblnd_unpack_msg(kib_msg_t *msg, int nob);
1047int kiblnd_post_rx (kib_rx_t *rx, int credit);
1048
1049int kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg);
1050int kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
1051 unsigned int niov, struct iovec *iov, lnet_kiov_t *kiov,
1052 unsigned int offset, unsigned int mlen, unsigned int rlen);