/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/klnds/o2iblnd/o2iblnd.h
 *
 * Author: Eric Barton <eric@bartonsoftware.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/uio.h>
#include <linux/uaccess.h>

#include <asm/io.h>

#include <linux/fs.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/kmod.h>
#include <linux/sysctl.h>
#include <linux/pci.h>

#include <net/sock.h>
#include <linux/in.h>

#define DEBUG_SUBSYSTEM S_LND

#include "../../../include/linux/libcfs/libcfs.h"
#include "../../../include/linux/lnet/lnet.h"
#include "../../../include/linux/lnet/lib-lnet.h"
#include "../../../include/linux/lnet/lnet-sysctl.h"

#include <rdma/rdma_cm.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_fmr_pool.h>

#define IBLND_PEER_HASH_SIZE 101 /* # peer lists */
/* # scheduler loops before reschedule */
#define IBLND_RESCHED        100

#define IBLND_N_SCHED        2
#define IBLND_N_SCHED_HIGH   4

typedef struct {
        int *kib_dev_failover;           /* HCA failover */
        unsigned int *kib_service;       /* IB service number */
        int *kib_min_reconnect_interval; /* first failed connection retry... */
        int *kib_max_reconnect_interval; /* ...exponentially increasing to this */
        int *kib_cksum;                  /* checksum kib_msg_t? */
        int *kib_timeout;                /* comms timeout (seconds) */
        int *kib_keepalive;              /* keepalive timeout (seconds) */
        int *kib_ntx;                    /* # tx descs */
        int *kib_credits;                /* # concurrent sends */
        int *kib_peertxcredits;          /* # concurrent sends to 1 peer */
        int *kib_peerrtrcredits;         /* # per-peer router buffer credits */
        int *kib_peercredits_hiw;        /* high-water mark to eagerly return credits */
        int *kib_peertimeout;            /* seconds to consider peer dead */
        char **kib_default_ipif;         /* default IPoIB interface */
        int *kib_retry_count;
        int *kib_rnr_retry_count;
        int *kib_concurrent_sends;       /* send work queue sizing */
        int *kib_ib_mtu;                 /* IB MTU */
        int *kib_map_on_demand;          /* map-on-demand if RD has more fragments
                                          * than this value; 0 disables map-on-demand */
        int *kib_pmr_pool_size;          /* # physical MRs in pool */
        int *kib_fmr_pool_size;          /* # FMRs in pool */
        int *kib_fmr_flush_trigger;      /* when to trigger FMR flush */
        int *kib_fmr_cache;              /* enable FMR pool cache? */
        int *kib_require_priv_port;      /* accept only privileged ports */
        int *kib_use_priv_port;          /* use privileged port for active connect */
        int *kib_nscheds;                /* # threads on each CPT */
} kib_tunables_t;

extern kib_tunables_t kiblnd_tunables;

#define IBLND_MSG_QUEUE_SIZE_V1   8 /* V1 only: # messages/RDMAs in-flight */
#define IBLND_CREDIT_HIGHWATER_V1 7 /* V1 only: when to eagerly return credits */

#define IBLND_CREDITS_DEFAULT     8 /* default # of peer credits */
#define IBLND_CREDITS_MAX         ((typeof(((kib_msg_t *) 0)->ibm_credits)) - 1) /* max # of peer credits */

#define IBLND_MSG_QUEUE_SIZE(v)    ((v) == IBLND_MSG_VERSION_1 ? \
                                    IBLND_MSG_QUEUE_SIZE_V1 :    \
                                    *kiblnd_tunables.kib_peertxcredits) /* # messages/RDMAs in-flight */
#define IBLND_CREDITS_HIGHWATER(v) ((v) == IBLND_MSG_VERSION_1 ? \
                                    IBLND_CREDIT_HIGHWATER_V1 :  \
                                    *kiblnd_tunables.kib_peercredits_hiw) /* when to eagerly return credits */

#define kiblnd_rdma_create_id(cb, dev, ps, qpt) rdma_create_id(cb, dev, ps, qpt)

static inline int
kiblnd_concurrent_sends_v1(void)
{
        if (*kiblnd_tunables.kib_concurrent_sends > IBLND_MSG_QUEUE_SIZE_V1 * 2)
                return IBLND_MSG_QUEUE_SIZE_V1 * 2;

        if (*kiblnd_tunables.kib_concurrent_sends < IBLND_MSG_QUEUE_SIZE_V1 / 2)
                return IBLND_MSG_QUEUE_SIZE_V1 / 2;

        return *kiblnd_tunables.kib_concurrent_sends;
}

#define IBLND_CONCURRENT_SENDS(v) ((v) == IBLND_MSG_VERSION_1 ?  \
                                   kiblnd_concurrent_sends_v1() : \
                                   *kiblnd_tunables.kib_concurrent_sends)
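
/*
 * Worked example (not in the original header): for version 1 the clamp
 * above keeps concurrent sends within [IBLND_MSG_QUEUE_SIZE_V1 / 2,
 * IBLND_MSG_QUEUE_SIZE_V1 * 2], i.e. [4, 16]; a tunable of 32 is clamped
 * down to 16 and a tunable of 2 is raised to 4.
 */
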
/* 2 OOB messages suffice: 1 for a keepalive and 1 for returning credits */
#define IBLND_OOB_CAPABLE(v) ((v) != IBLND_MSG_VERSION_1)
#define IBLND_OOB_MSGS(v)    (IBLND_OOB_CAPABLE(v) ? 2 : 0)

#define IBLND_MSG_SIZE       (4 << 10)    /* max size of queued messages (inc hdr) */
#define IBLND_MAX_RDMA_FRAGS LNET_MAX_IOV /* max # of fragments supported */
#define IBLND_CFG_RDMA_FRAGS (*kiblnd_tunables.kib_map_on_demand != 0 ? \
                              *kiblnd_tunables.kib_map_on_demand :      \
                              IBLND_MAX_RDMA_FRAGS) /* max # of fragments configured by user */
#define IBLND_RDMA_FRAGS(v)  ((v) == IBLND_MSG_VERSION_1 ? \
                              IBLND_MAX_RDMA_FRAGS : IBLND_CFG_RDMA_FRAGS)

/************************/
/* derived constants... */
/* Pools (shared by connections on each CPT) */
/* These pools can grow at runtime, so they don't need a very large initial value */
#define IBLND_TX_POOL        256
#define IBLND_PMR_POOL       256
#define IBLND_FMR_POOL       256
#define IBLND_FMR_POOL_FLUSH 192

/* TX messages (shared by all connections) */
#define IBLND_TX_MSGS()      (*kiblnd_tunables.kib_ntx)

/* RX messages (per connection) */
#define IBLND_RX_MSGS(v)      (IBLND_MSG_QUEUE_SIZE(v) * 2 + IBLND_OOB_MSGS(v))
#define IBLND_RX_MSG_BYTES(v) (IBLND_RX_MSGS(v) * IBLND_MSG_SIZE)
#define IBLND_RX_MSG_PAGES(v) ((IBLND_RX_MSG_BYTES(v) + PAGE_SIZE - 1) / PAGE_SIZE)

/* WRs and CQEs (per connection) */
#define IBLND_RECV_WRS(v)   IBLND_RX_MSGS(v)
#define IBLND_SEND_WRS(v)   ((IBLND_RDMA_FRAGS(v) + 1) * IBLND_CONCURRENT_SENDS(v))
#define IBLND_CQ_ENTRIES(v) (IBLND_RECV_WRS(v) + IBLND_SEND_WRS(v))
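
/*
 * Worked derivation (not in the original header): with queue depth
 * Q = IBLND_MSG_QUEUE_SIZE(v), fragment limit F = IBLND_RDMA_FRAGS(v) and
 * concurrent sends C = IBLND_CONCURRENT_SENDS(v), each connection posts
 * 2Q + IBLND_OOB_MSGS(v) receive WRs and at most (F + 1) * C send WRs
 * (one WR per RDMA fragment plus one for the message itself), so the CQ
 * is sized for the sum of the two.
 */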

struct kib_hca_dev;

/* o2iblnd can run over aliased interfaces */
#ifdef IFALIASZ
#define KIB_IFNAME_SIZE IFALIASZ
#else
#define KIB_IFNAME_SIZE 256
#endif

typedef struct {
        struct list_head ibd_list;        /* chain on kib_devs */
        struct list_head ibd_fail_list;   /* chain on kib_failed_devs */
        __u32 ibd_ifip;                   /* IPoIB interface IP */
        char ibd_ifname[KIB_IFNAME_SIZE]; /* IPoIB interface name */
        int ibd_nnets;                    /* # nets extant */

        unsigned long ibd_next_failover;  /* jiffies of next failover attempt */
        int ibd_failed_failover;          /* # failover failures */
        unsigned int ibd_failover;        /* failover in progress */
        unsigned int ibd_can_failover;    /* IPoIB interface is a bonding master */
        struct list_head ibd_nets;
        struct kib_hca_dev *ibd_hdev;
} kib_dev_t;

typedef struct kib_hca_dev {
        struct rdma_cm_id *ibh_cmid; /* listener cmid */
        struct ib_device *ibh_ibdev; /* IB device */
        int ibh_page_shift;          /* page shift of current HCA */
        int ibh_page_size;           /* page size of current HCA */
        __u64 ibh_page_mask;         /* page mask of current HCA */
        int ibh_mr_shift;            /* bit shift of max MR size */
        __u64 ibh_mr_size;           /* size of MR */
        int ibh_nmrs;                /* # of global MRs */
        struct ib_mr **ibh_mrs;      /* global MR */
        struct ib_pd *ibh_pd;        /* PD */
        kib_dev_t *ibh_dev;          /* owner */
        atomic_t ibh_ref;            /* refcount */
} kib_hca_dev_t;

/** # of seconds to keep pool alive */
#define IBLND_POOL_DEADLINE 300
/** # of seconds to retry if allocation failed */
#define IBLND_POOL_RETRY    1

typedef struct {
        int ibp_npages;            /* # pages */
        struct page *ibp_pages[0]; /* page array */
} kib_pages_t;

struct kib_pmr_pool;

typedef struct {
        struct list_head pmr_list;     /* chain node */
        struct ib_phys_buf *pmr_ipb;   /* physical buffer */
        struct ib_mr *pmr_mr;          /* IB MR */
        struct kib_pmr_pool *pmr_pool; /* owner of this MR */
        __u64 pmr_iova;                /* virtual I/O address */
        int pmr_refcount;              /* reference count */
} kib_phys_mr_t;

struct kib_pool;
struct kib_poolset;

typedef int (*kib_ps_pool_create_t)(struct kib_poolset *ps,
                                    int inc, struct kib_pool **pp_po);
typedef void (*kib_ps_pool_destroy_t)(struct kib_pool *po);
typedef void (*kib_ps_node_init_t)(struct kib_pool *po, struct list_head *node);
typedef void (*kib_ps_node_fini_t)(struct kib_pool *po, struct list_head *node);

struct kib_net;

#define IBLND_POOL_NAME_LEN 32

typedef struct kib_poolset {
        spinlock_t ps_lock;                    /* serialize */
        struct kib_net *ps_net;                /* network it belongs to */
        char ps_name[IBLND_POOL_NAME_LEN];     /* pool set name */
        struct list_head ps_pool_list;         /* list of pools */
        struct list_head ps_failed_pool_list;  /* failed pool list */
        unsigned long ps_next_retry;           /* time stamp for retry if allocation failed */
        int ps_increasing;                     /* is allocating new pool */
        int ps_pool_size;                      /* new pool size */
        int ps_cpt;                            /* CPT id */

        kib_ps_pool_create_t ps_pool_create;   /* create a new pool */
        kib_ps_pool_destroy_t ps_pool_destroy; /* destroy a pool */
        kib_ps_node_init_t ps_node_init;       /* initialize newly allocated node */
        kib_ps_node_fini_t ps_node_fini;       /* finalize node */
} kib_poolset_t;

typedef struct kib_pool {
        struct list_head po_list;      /* chain on pool list */
        struct list_head po_free_list; /* pre-allocated nodes */
        kib_poolset_t *po_owner;       /* pool_set of this pool */
        unsigned long po_deadline;     /* deadline of this pool */
        int po_allocated;              /* # of elements in use */
        int po_failed;                 /* pool is created on failed HCA */
        int po_size;                   /* # of pre-allocated elements */
} kib_pool_t;

typedef struct {
        kib_poolset_t tps_poolset; /* pool-set */
        __u64 tps_next_tx_cookie;  /* cookie of TX */
} kib_tx_poolset_t;

typedef struct {
        kib_pool_t tpo_pool;          /* pool */
        struct kib_hca_dev *tpo_hdev; /* device for this pool */
        struct kib_tx *tpo_tx_descs;  /* all the tx descriptors */
        kib_pages_t *tpo_tx_pages;    /* premapped tx msg pages */
} kib_tx_pool_t;

typedef struct {
        kib_poolset_t pps_poolset; /* pool-set */
} kib_pmr_poolset_t;

typedef struct kib_pmr_pool {
        struct kib_hca_dev *ppo_hdev; /* device for this pool */
        kib_pool_t ppo_pool;          /* pool */
} kib_pmr_pool_t;

typedef struct {
        spinlock_t fps_lock;                   /* serialize */
        struct kib_net *fps_net;               /* IB network */
        struct list_head fps_pool_list;        /* FMR pool list */
        struct list_head fps_failed_pool_list; /* failed FMR pool list */
        __u64 fps_version;                     /* validity stamp */
        int fps_cpt;                           /* CPT id */
        int fps_pool_size;
        int fps_flush_trigger;
        int fps_increasing;                    /* is allocating new pool */
        unsigned long fps_next_retry;          /* time stamp for retry if allocation failed */
} kib_fmr_poolset_t;

typedef struct {
        struct list_head fpo_list;        /* chain on pool list */
        struct kib_hca_dev *fpo_hdev;     /* device for this pool */
        kib_fmr_poolset_t *fpo_owner;     /* owner of this pool */
        struct ib_fmr_pool *fpo_fmr_pool; /* IB FMR pool */
        unsigned long fpo_deadline;       /* deadline of this pool */
        int fpo_failed;                   /* fmr pool is failed */
        int fpo_map_count;                /* # of mapped FMRs */
} kib_fmr_pool_t;

typedef struct {
        struct ib_pool_fmr *fmr_pfmr; /* IB pool fmr */
        kib_fmr_pool_t *fmr_pool;     /* pool of FMR */
} kib_fmr_t;

typedef struct kib_net {
        struct list_head ibn_list;      /* chain on kib_dev_t::ibd_nets */
        __u64 ibn_incarnation;          /* my epoch */
        int ibn_init;                   /* initialisation state */
        int ibn_shutdown;               /* shutting down? */

        atomic_t ibn_npeers;            /* # peers extant */
        atomic_t ibn_nconns;            /* # connections extant */

        kib_tx_poolset_t **ibn_tx_ps;   /* tx pool-set */
        kib_fmr_poolset_t **ibn_fmr_ps; /* fmr pool-set */
        kib_pmr_poolset_t **ibn_pmr_ps; /* pmr pool-set */

        kib_dev_t *ibn_dev;             /* underlying IB device */
} kib_net_t;

#define KIB_THREAD_SHIFT        16
#define KIB_THREAD_ID(cpt, tid) ((cpt) << KIB_THREAD_SHIFT | (tid))
#define KIB_THREAD_CPT(id)      ((id) >> KIB_THREAD_SHIFT)
#define KIB_THREAD_TID(id)      ((id) & ((1UL << KIB_THREAD_SHIFT) - 1))
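
/*
 * Worked example (not in the original header): a scheduler thread with
 * local id 5 on CPT 3 gets KIB_THREAD_ID(3, 5) == (3 << 16) | 5 ==
 * 0x30005; KIB_THREAD_CPT() and KIB_THREAD_TID() recover 3 and 5.
 */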

struct kib_sched_info {
        spinlock_t ibs_lock;         /* serialise */
        wait_queue_head_t ibs_waitq; /* schedulers sleep here */
        struct list_head ibs_conns;  /* conns to check for rx completions */
        int ibs_nthreads;            /* number of scheduler threads */
        int ibs_nthreads_max;        /* max allowed scheduler threads */
        int ibs_cpt;                 /* CPT id */
};

typedef struct {
        int kib_init;                         /* initialisation state */
        int kib_shutdown;                     /* shut down? */
        struct list_head kib_devs;            /* IB devices extant */
        struct list_head kib_failed_devs;     /* list head of failed devices */
        wait_queue_head_t kib_failover_waitq; /* failover thread sleeps here */
        atomic_t kib_nthreads;                /* # live threads */
        rwlock_t kib_global_lock;             /* stabilize net/dev/peer/conn ops */
        struct list_head *kib_peers;          /* hash table of all my known peers */
        int kib_peer_hash_size;               /* size of kib_peers */
        void *kib_connd;                      /* the connd task (serialisation assertions) */
        struct list_head kib_connd_conns;     /* connections to setup/teardown */
        struct list_head kib_connd_zombies;   /* connections with zero refcount */
        wait_queue_head_t kib_connd_waitq;    /* connection daemon sleeps here */
        spinlock_t kib_connd_lock;            /* serialise */
        struct ib_qp_attr kib_error_qpa;      /* QP->ERROR */
        struct kib_sched_info **kib_scheds;   /* percpt data for schedulers */
} kib_data_t;

#define IBLND_INIT_NOTHING 0
#define IBLND_INIT_DATA    1
#define IBLND_INIT_ALL     2

/************************************************************************
 * IB Wire message format.
 * These are sent in sender's byte order (i.e. receiver flips).
 */

typedef struct kib_connparams {
        __u16 ibcp_queue_depth;
        __u16 ibcp_max_frags;
        __u32 ibcp_max_msg_size;
} WIRE_ATTR kib_connparams_t;

typedef struct {
        lnet_hdr_t ibim_hdr;      /* portals header */
        char ibim_payload[0];     /* piggy-backed payload */
} WIRE_ATTR kib_immediate_msg_t;

typedef struct {
        __u32 rf_nob;             /* # bytes this frag */
        __u64 rf_addr;            /* CAVEAT EMPTOR: misaligned!! */
} WIRE_ATTR kib_rdma_frag_t;

typedef struct {
        __u32 rd_key;                /* local/remote key */
        __u32 rd_nfrags;             /* # fragments */
        kib_rdma_frag_t rd_frags[0]; /* buffer frags */
} WIRE_ATTR kib_rdma_desc_t;

typedef struct {
        lnet_hdr_t ibprm_hdr;     /* portals header */
        __u64 ibprm_cookie;       /* opaque completion cookie */
} WIRE_ATTR kib_putreq_msg_t;

typedef struct {
        __u64 ibpam_src_cookie;   /* reflected completion cookie */
        __u64 ibpam_dst_cookie;   /* opaque completion cookie */
        kib_rdma_desc_t ibpam_rd; /* sender's sink buffer */
} WIRE_ATTR kib_putack_msg_t;

typedef struct {
        lnet_hdr_t ibgm_hdr;      /* portals header */
        __u64 ibgm_cookie;        /* opaque completion cookie */
        kib_rdma_desc_t ibgm_rd;  /* rdma descriptor */
} WIRE_ATTR kib_get_msg_t;

typedef struct {
        __u64 ibcm_cookie;        /* opaque completion cookie */
        __s32 ibcm_status;        /* < 0: failure; >= 0: length */
} WIRE_ATTR kib_completion_msg_t;

typedef struct {
        /* First 2 fields fixed FOR ALL TIME */
        __u32 ibm_magic;          /* I'm an ibnal message */
        __u16 ibm_version;        /* this is my version number */

        __u8 ibm_type;            /* msg type */
        __u8 ibm_credits;         /* returned credits */
        __u32 ibm_nob;            /* # bytes in whole message */
        __u32 ibm_cksum;          /* checksum (0 == no checksum) */
        __u64 ibm_srcnid;         /* sender's NID */
        __u64 ibm_srcstamp;       /* sender's incarnation */
        __u64 ibm_dstnid;         /* destination's NID */
        __u64 ibm_dststamp;       /* destination's incarnation */

        union {
                kib_connparams_t connparams;
                kib_immediate_msg_t immediate;
                kib_putreq_msg_t putreq;
                kib_putack_msg_t putack;
                kib_get_msg_t get;
                kib_completion_msg_t completion;
        } WIRE_ATTR ibm_u;
} WIRE_ATTR kib_msg_t;

#define IBLND_MSG_MAGIC     LNET_PROTO_IB_MAGIC /* unique magic */

#define IBLND_MSG_VERSION_1 0x11
#define IBLND_MSG_VERSION_2 0x12
#define IBLND_MSG_VERSION   IBLND_MSG_VERSION_2

#define IBLND_MSG_CONNREQ   0xc0 /* connection request */
#define IBLND_MSG_CONNACK   0xc1 /* connection acknowledge */
#define IBLND_MSG_NOOP      0xd0 /* nothing (just credits) */
#define IBLND_MSG_IMMEDIATE 0xd1 /* immediate */
#define IBLND_MSG_PUT_REQ   0xd2 /* putreq (src->sink) */
#define IBLND_MSG_PUT_NAK   0xd3 /* completion (sink->src) */
#define IBLND_MSG_PUT_ACK   0xd4 /* putack (sink->src) */
#define IBLND_MSG_PUT_DONE  0xd5 /* completion (src->sink) */
#define IBLND_MSG_GET_REQ   0xd6 /* getreq (sink->src) */
#define IBLND_MSG_GET_DONE  0xd7 /* completion (src->sink: all OK) */

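/*
 * Illustrative message flow (derived from the per-message comments above;
 * not part of the original header):
 *
 *   PUT:  src sends PUT_REQ to sink;
 *         sink replies with PUT_ACK carrying its sink buffer descriptor
 *         (or PUT_NAK on failure);
 *         src RDMAs the payload, then sends PUT_DONE to sink.
 *
 *   GET:  sink sends GET_REQ carrying its buffer descriptor to src;
 *         src RDMAs the reply data, then sends GET_DONE to sink.
 */
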
typedef struct {
        __u32 ibr_magic;         /* sender's magic */
        __u16 ibr_version;       /* sender's version */
        __u8 ibr_why;            /* reject reason */
        __u8 ibr_padding;        /* padding */
        __u64 ibr_incarnation;   /* incarnation of peer */
        kib_connparams_t ibr_cp; /* connection parameters */
} WIRE_ATTR kib_rej_t;

/* connection rejection reasons */
#define IBLND_REJECT_CONN_RACE      1 /* you lost the connection race */
#define IBLND_REJECT_NO_RESOURCES   2 /* out of memory/conns etc. */
#define IBLND_REJECT_FATAL          3 /* anything else */

#define IBLND_REJECT_CONN_UNCOMPAT  4 /* incompatible version peer */
#define IBLND_REJECT_CONN_STALE     5 /* stale peer */

#define IBLND_REJECT_RDMA_FRAGS     6 /* Fatal: peer's rdma frags can't match mine */
#define IBLND_REJECT_MSG_QUEUE_SIZE 7 /* Fatal: peer's msg queue size can't match mine */

/***********************************************************************/

typedef struct kib_rx /* receive message */
{
        struct list_head rx_list;            /* queue for attention */
        struct kib_conn *rx_conn;            /* owning conn */
        int rx_nob;                          /* # bytes received (-1 while posted) */
        enum ib_wc_status rx_status;         /* completion status */
        kib_msg_t *rx_msg;                   /* message buffer (host vaddr) */
        __u64 rx_msgaddr;                    /* message buffer (I/O addr) */
        DECLARE_PCI_UNMAP_ADDR(rx_msgunmap); /* for dma_unmap_single() */
        struct ib_recv_wr rx_wrq;            /* receive work item... */
        struct ib_sge rx_sge;                /* ...and its memory */
} kib_rx_t;

#define IBLND_POSTRX_DONT_POST    0 /* don't post */
#define IBLND_POSTRX_NO_CREDIT    1 /* post: no credits */
#define IBLND_POSTRX_PEER_CREDIT  2 /* post: give peer back 1 credit */
#define IBLND_POSTRX_RSRVD_CREDIT 3 /* post: give myself back 1 reserved credit */

typedef struct kib_tx /* transmit message */
{
        struct list_head tx_list;            /* queue on idle_txs, ibc_tx_queue, etc. */
        kib_tx_pool_t *tx_pool;              /* pool I'm from */
        struct kib_conn *tx_conn;            /* owning conn */
        short tx_sending;                    /* # tx callbacks outstanding */
        short tx_queued;                     /* queued for sending */
        short tx_waiting;                    /* waiting for peer */
        int tx_status;                       /* LNET completion status */
        unsigned long tx_deadline;           /* completion deadline */
        __u64 tx_cookie;                     /* completion cookie */
        lnet_msg_t *tx_lntmsg[2];            /* lnet msgs to finalize on completion */
        kib_msg_t *tx_msg;                   /* message buffer (host vaddr) */
        __u64 tx_msgaddr;                    /* message buffer (I/O addr) */
        DECLARE_PCI_UNMAP_ADDR(tx_msgunmap); /* for dma_unmap_single() */
        int tx_nwrq;                         /* # send work items */
        struct ib_send_wr *tx_wrq;           /* send work items... */
        struct ib_sge *tx_sge;               /* ...and their memory */
        kib_rdma_desc_t *tx_rd;              /* rdma descriptor */
        int tx_nfrags;                       /* # entries in... */
        struct scatterlist *tx_frags;        /* ...dma_map_sg descriptor */
        __u64 *tx_pages;                     /* rdma phys page addrs */
        union {
                kib_phys_mr_t *pmr;          /* MR for physical buffer */
                kib_fmr_t fmr;               /* FMR */
        } tx_u;
        int tx_dmadir;                       /* dma direction */
} kib_tx_t;

typedef struct kib_connvars {
        /* connection-in-progress variables */
        kib_msg_t cv_msg;
} kib_connvars_t;

typedef struct kib_conn {
        struct kib_sched_info *ibc_sched;     /* scheduler information */
        struct kib_peer *ibc_peer;            /* owning peer */
        kib_hca_dev_t *ibc_hdev;              /* HCA bound on */
        struct list_head ibc_list;            /* stash on peer's conn list */
        struct list_head ibc_sched_list;      /* schedule for attention */
        __u16 ibc_version;                    /* version of connection */
        __u64 ibc_incarnation;                /* which instance of the peer */
        atomic_t ibc_refcount;                /* # users */
        int ibc_state;                        /* what's happening */
        int ibc_nsends_posted;                /* # uncompleted sends */
        int ibc_noops_posted;                 /* # uncompleted NOOPs */
        int ibc_credits;                      /* # credits I have */
        int ibc_outstanding_credits;          /* # credits to return */
        int ibc_reserved_credits;             /* # ACK/DONE msg credits */
        int ibc_comms_error;                  /* set on comms error */
        unsigned int ibc_nrx:16;              /* receive buffers owned */
        unsigned int ibc_scheduled:1;         /* scheduled for attention */
        unsigned int ibc_ready:1;             /* CQ callback fired */
        unsigned long ibc_last_send;          /* time of last send */
        struct list_head ibc_connd_list;      /* link chain for kiblnd_check_conns only */
        struct list_head ibc_early_rxs;       /* rxs completed before ESTABLISHED */
        struct list_head ibc_tx_noops;        /* IBLND_MSG_NOOPs for IBLND_MSG_VERSION_1 */
        struct list_head ibc_tx_queue;        /* sends that need a credit */
        struct list_head ibc_tx_queue_nocred; /* sends that don't need a credit */
        struct list_head ibc_tx_queue_rsrvd;  /* sends that need to reserve an ACK/DONE msg */
        struct list_head ibc_active_txs;      /* active tx awaiting completion */
        spinlock_t ibc_lock;                  /* serialise */
        kib_rx_t *ibc_rxs;                    /* the rx descs */
        kib_pages_t *ibc_rx_pages;            /* premapped rx msg pages */

        struct rdma_cm_id *ibc_cmid;          /* CM id */
        struct ib_cq *ibc_cq;                 /* completion queue */

        kib_connvars_t *ibc_connvars;         /* in-progress connection state */
} kib_conn_t;

#define IBLND_CONN_INIT           0 /* being initialised */
#define IBLND_CONN_ACTIVE_CONNECT 1 /* active sending req */
#define IBLND_CONN_PASSIVE_WAIT   2 /* passive waiting for rtu */
#define IBLND_CONN_ESTABLISHED    3 /* connection established */
#define IBLND_CONN_CLOSING        4 /* being closed */
#define IBLND_CONN_DISCONNECTED   5 /* disconnected */

typedef struct kib_peer {
        struct list_head ibp_list;     /* stash on global peer list */
        lnet_nid_t ibp_nid;            /* who's on the other end(s) */
        lnet_ni_t *ibp_ni;             /* LNet interface */
        atomic_t ibp_refcount;         /* # users */
        struct list_head ibp_conns;    /* all active connections */
        struct list_head ibp_tx_queue; /* msgs waiting for a conn */
        __u16 ibp_version;             /* version of peer */
        __u64 ibp_incarnation;         /* incarnation of peer */
        int ibp_connecting;            /* current active connection attempts */
        int ibp_accepting;             /* current passive connection attempts */
        int ibp_error;                 /* errno on closing this peer */
        unsigned long ibp_last_alive;  /* when (in jiffies) I was last alive */
} kib_peer_t;

extern kib_data_t kiblnd_data;

extern void kiblnd_hdev_destroy(kib_hca_dev_t *hdev);

static inline void
kiblnd_hdev_addref_locked(kib_hca_dev_t *hdev)
{
        LASSERT(atomic_read(&hdev->ibh_ref) > 0);
        atomic_inc(&hdev->ibh_ref);
}

static inline void
kiblnd_hdev_decref(kib_hca_dev_t *hdev)
{
        LASSERT(atomic_read(&hdev->ibh_ref) > 0);
        if (atomic_dec_and_test(&hdev->ibh_ref))
                kiblnd_hdev_destroy(hdev);
}

static inline int
kiblnd_dev_can_failover(kib_dev_t *dev)
{
        if (!list_empty(&dev->ibd_fail_list)) /* already scheduled */
                return 0;

        if (*kiblnd_tunables.kib_dev_failover == 0) /* disabled */
                return 0;

        if (*kiblnd_tunables.kib_dev_failover > 1) /* force failover */
                return 1;

        return dev->ibd_can_failover;
}

#define kiblnd_conn_addref(conn)                                \
do {                                                            \
        CDEBUG(D_NET, "conn[%p] (%d)++\n",                      \
               (conn), atomic_read(&(conn)->ibc_refcount));     \
        atomic_inc(&(conn)->ibc_refcount);                      \
} while (0)

#define kiblnd_conn_decref(conn)                                        \
do {                                                                    \
        unsigned long flags;                                            \
                                                                        \
        CDEBUG(D_NET, "conn[%p] (%d)--\n",                              \
               (conn), atomic_read(&(conn)->ibc_refcount));             \
        LASSERT_ATOMIC_POS(&(conn)->ibc_refcount);                      \
        if (atomic_dec_and_test(&(conn)->ibc_refcount)) {               \
                spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);  \
                list_add_tail(&(conn)->ibc_list,                        \
                              &kiblnd_data.kib_connd_zombies);          \
                wake_up(&kiblnd_data.kib_connd_waitq);                  \
                spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags); \
        }                                                               \
} while (0)

#define kiblnd_peer_addref(peer)                                \
do {                                                            \
        CDEBUG(D_NET, "peer[%p] -> %s (%d)++\n",                \
               (peer), libcfs_nid2str((peer)->ibp_nid),         \
               atomic_read(&(peer)->ibp_refcount));             \
        atomic_inc(&(peer)->ibp_refcount);                      \
} while (0)

#define kiblnd_peer_decref(peer)                                \
do {                                                            \
        CDEBUG(D_NET, "peer[%p] -> %s (%d)--\n",                \
               (peer), libcfs_nid2str((peer)->ibp_nid),         \
               atomic_read(&(peer)->ibp_refcount));             \
        LASSERT_ATOMIC_POS(&(peer)->ibp_refcount);              \
        if (atomic_dec_and_test(&(peer)->ibp_refcount))         \
                kiblnd_destroy_peer(peer);                      \
} while (0)

static inline struct list_head *
kiblnd_nid2peerlist(lnet_nid_t nid)
{
        unsigned int hash =
                ((unsigned int)nid) % kiblnd_data.kib_peer_hash_size;

        return &kiblnd_data.kib_peers[hash];
}
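
/*
 * Illustrative sketch (not part of the original header): how the hash
 * chains above are typically walked.  kiblnd_peer_lookup_example() is a
 * hypothetical helper; the real lookup is kiblnd_find_peer_locked(),
 * declared below.  The caller must hold kiblnd_data.kib_global_lock.
 */
static inline kib_peer_t *
kiblnd_peer_lookup_example(lnet_nid_t nid)
{
        kib_peer_t *peer;

        list_for_each_entry(peer, kiblnd_nid2peerlist(nid), ibp_list) {
                if (peer->ibp_nid == nid)
                        return peer; /* caller takes its own ref as needed */
        }
        return NULL;
}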

static inline int
kiblnd_peer_active(kib_peer_t *peer)
{
        /* Am I in the peer hash table? */
        return !list_empty(&peer->ibp_list);
}

static inline kib_conn_t *
kiblnd_get_conn_locked(kib_peer_t *peer)
{
        LASSERT(!list_empty(&peer->ibp_conns));

        /* just return the first connection */
        return list_entry(peer->ibp_conns.next, kib_conn_t, ibc_list);
}

static inline int
kiblnd_send_keepalive(kib_conn_t *conn)
{
        return (*kiblnd_tunables.kib_keepalive > 0) &&
                cfs_time_after(jiffies, conn->ibc_last_send +
                               *kiblnd_tunables.kib_keepalive * HZ);
}

static inline int
kiblnd_need_noop(kib_conn_t *conn)
{
        LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);

        if (conn->ibc_outstanding_credits <
            IBLND_CREDITS_HIGHWATER(conn->ibc_version) &&
            !kiblnd_send_keepalive(conn))
                return 0; /* No need to send NOOP */

        if (IBLND_OOB_CAPABLE(conn->ibc_version)) {
                if (!list_empty(&conn->ibc_tx_queue_nocred))
                        return 0; /* NOOP can be piggybacked */

                /* No tx to piggyback NOOP onto or no credit to send a tx */
                return (list_empty(&conn->ibc_tx_queue) ||
                        conn->ibc_credits == 0);
        }

        if (!list_empty(&conn->ibc_tx_noops) ||        /* NOOP already queued */
            !list_empty(&conn->ibc_tx_queue_nocred) || /* piggyback NOOP */
            conn->ibc_credits == 0)                    /* no credit */
                return 0;

        if (conn->ibc_credits == 1 &&           /* last credit reserved for */
            conn->ibc_outstanding_credits == 0) /* giving back credits */
                return 0;

        /* No tx to piggyback NOOP onto or no credit to send a tx */
        return (list_empty(&conn->ibc_tx_queue) || conn->ibc_credits == 1);
}

static inline void
kiblnd_abort_receives(kib_conn_t *conn)
{
        ib_modify_qp(conn->ibc_cmid->qp,
                     &kiblnd_data.kib_error_qpa, IB_QP_STATE);
}

static inline const char *
kiblnd_queue2str(kib_conn_t *conn, struct list_head *q)
{
        if (q == &conn->ibc_tx_queue)
                return "tx_queue";

        if (q == &conn->ibc_tx_queue_rsrvd)
                return "tx_queue_rsrvd";

        if (q == &conn->ibc_tx_queue_nocred)
                return "tx_queue_nocred";

        if (q == &conn->ibc_active_txs)
                return "active_txs";

        LBUG();
        return NULL;
}

/* CAVEAT EMPTOR: We rely on descriptor alignment to allow us to use the
 * lowest bits of the work request id to stash the work item type. */

#define IBLND_WID_TX   0
#define IBLND_WID_RDMA 1
#define IBLND_WID_RX   2
#define IBLND_WID_MASK 3UL

static inline __u64
kiblnd_ptr2wreqid(void *ptr, int type)
{
        unsigned long lptr = (unsigned long)ptr;

        LASSERT((lptr & IBLND_WID_MASK) == 0);
        LASSERT((type & ~IBLND_WID_MASK) == 0);
        return (__u64)(lptr | type);
}

static inline void *
kiblnd_wreqid2ptr(__u64 wreqid)
{
        return (void *)(((unsigned long)wreqid) & ~IBLND_WID_MASK);
}

static inline int
kiblnd_wreqid2type(__u64 wreqid)
{
        return wreqid & IBLND_WID_MASK;
}
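
/*
 * Illustrative sketch (not part of the original header): round-tripping a
 * work request id.  Given any 4-byte-aligned descriptor (the CAVEAT above),
 * both the pointer and the stashed type survive the encoding.
 */
static inline void
kiblnd_wreqid_example(kib_tx_t *tx)
{
        __u64 wreqid = kiblnd_ptr2wreqid(tx, IBLND_WID_TX);

        LASSERT(kiblnd_wreqid2ptr(wreqid) == tx);
        LASSERT(kiblnd_wreqid2type(wreqid) == IBLND_WID_TX);
}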

static inline void
kiblnd_set_conn_state(kib_conn_t *conn, int state)
{
        conn->ibc_state = state;
        mb();
}

static inline void
kiblnd_init_msg(kib_msg_t *msg, int type, int body_nob)
{
        msg->ibm_type = type;
        msg->ibm_nob = offsetof(kib_msg_t, ibm_u) + body_nob;
}
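
/*
 * Illustrative sketch (not part of the original header): a NOOP carries no
 * body, so its length is just the common header; an IMMEDIATE's length
 * grows with its piggy-backed payload.  kiblnd_init_msg_example() is a
 * hypothetical helper showing both cases.
 */
static inline void
kiblnd_init_msg_example(kib_msg_t *msg, int payload_nob)
{
        kiblnd_init_msg(msg, IBLND_MSG_NOOP, 0);
        /* now msg->ibm_nob == offsetof(kib_msg_t, ibm_u) */

        kiblnd_init_msg(msg, IBLND_MSG_IMMEDIATE,
                        offsetof(kib_immediate_msg_t,
                                 ibim_payload[payload_nob]));
}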

static inline int
kiblnd_rd_size(kib_rdma_desc_t *rd)
{
        int i;
        int size;

        for (i = size = 0; i < rd->rd_nfrags; i++)
                size += rd->rd_frags[i].rf_nob;

        return size;
}

static inline __u64
kiblnd_rd_frag_addr(kib_rdma_desc_t *rd, int index)
{
        return rd->rd_frags[index].rf_addr;
}

static inline __u32
kiblnd_rd_frag_size(kib_rdma_desc_t *rd, int index)
{
        return rd->rd_frags[index].rf_nob;
}

static inline __u32
kiblnd_rd_frag_key(kib_rdma_desc_t *rd, int index)
{
        return rd->rd_key;
}

static inline int
kiblnd_rd_consume_frag(kib_rdma_desc_t *rd, int index, __u32 nob)
{
        if (nob < rd->rd_frags[index].rf_nob) {
                rd->rd_frags[index].rf_addr += nob;
                rd->rd_frags[index].rf_nob -= nob;
        } else {
                index++;
        }

        return index;
}
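
/*
 * Illustrative sketch (not part of the original header):
 * kiblnd_rd_advance_example() is a hypothetical helper showing how
 * kiblnd_rd_consume_frag() steps through a descriptor: consuming less than
 * a fragment shrinks it in place; consuming it exactly moves to the next
 * index.
 */
static inline int
kiblnd_rd_advance_example(kib_rdma_desc_t *rd, int index, __u32 nob)
{
        while (nob > 0 && index < rd->rd_nfrags) {
                __u32 frag = kiblnd_rd_frag_size(rd, index);
                __u32 len = frag < nob ? frag : nob;

                index = kiblnd_rd_consume_frag(rd, index, len);
                nob -= len;
        }
        return index;
}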

static inline int
kiblnd_rd_msg_size(kib_rdma_desc_t *rd, int msgtype, int n)
{
        LASSERT(msgtype == IBLND_MSG_GET_REQ ||
                msgtype == IBLND_MSG_PUT_ACK);

        return msgtype == IBLND_MSG_GET_REQ ?
               offsetof(kib_get_msg_t, ibgm_rd.rd_frags[n]) :
               offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[n]);
}

static inline __u64
kiblnd_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
        return ib_dma_mapping_error(dev, dma_addr);
}

static inline __u64 kiblnd_dma_map_single(struct ib_device *dev,
                                          void *msg, size_t size,
                                          enum dma_data_direction direction)
{
        return ib_dma_map_single(dev, msg, size, direction);
}

static inline void kiblnd_dma_unmap_single(struct ib_device *dev,
                                           __u64 addr, size_t size,
                                           enum dma_data_direction direction)
{
        ib_dma_unmap_single(dev, addr, size, direction);
}

#define KIBLND_UNMAP_ADDR_SET(p, m, a) do {} while (0)
#define KIBLND_UNMAP_ADDR(p, m, a)     (a)

static inline int kiblnd_dma_map_sg(struct ib_device *dev,
                                    struct scatterlist *sg, int nents,
                                    enum dma_data_direction direction)
{
        return ib_dma_map_sg(dev, sg, nents, direction);
}

static inline void kiblnd_dma_unmap_sg(struct ib_device *dev,
                                       struct scatterlist *sg, int nents,
                                       enum dma_data_direction direction)
{
        ib_dma_unmap_sg(dev, sg, nents, direction);
}

static inline __u64 kiblnd_sg_dma_address(struct ib_device *dev,
                                          struct scatterlist *sg)
{
        return ib_sg_dma_address(dev, sg);
}

static inline unsigned int kiblnd_sg_dma_len(struct ib_device *dev,
                                             struct scatterlist *sg)
{
        return ib_sg_dma_len(dev, sg);
}
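
/*
 * Illustrative sketch (not part of the original header):
 * kiblnd_map_msg_example() is a hypothetical helper showing the usual
 * pattern for the wrappers above: map a message buffer, then check the
 * mapping before posting it.  Here 0 is returned to signal failure.
 */
static inline __u64
kiblnd_map_msg_example(struct ib_device *dev, kib_msg_t *msg)
{
        __u64 addr = kiblnd_dma_map_single(dev, msg, IBLND_MSG_SIZE,
                                           DMA_TO_DEVICE);

        if (kiblnd_dma_mapping_error(dev, addr))
                return 0; /* caller must treat 0 as a failed mapping */

        return addr;
}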

/* XXX We use KIBLND_CONN_PARAM(e) as a writable buffer.  That is not
 * strictly correct, because OFED 1.2 declares it const, so we must cast
 * away the "const" with (void *) to use it. */

#define KIBLND_CONN_PARAM(e)     ((e)->param.conn.private_data)
#define KIBLND_CONN_PARAM_LEN(e) ((e)->param.conn.private_data_len)

struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev,
                                    kib_rdma_desc_t *rd);
struct ib_mr *kiblnd_find_dma_mr(kib_hca_dev_t *hdev,
                                 __u64 addr, __u64 size);
void kiblnd_map_rx_descs(kib_conn_t *conn);
void kiblnd_unmap_rx_descs(kib_conn_t *conn);
int kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx,
                  kib_rdma_desc_t *rd, int nfrags);
void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx);
void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node);
struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps);

int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages,
                        int npages, __u64 iov, kib_fmr_t *fmr);
void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status);

int kiblnd_pmr_pool_map(kib_pmr_poolset_t *pps, kib_hca_dev_t *hdev,
                        kib_rdma_desc_t *rd, __u64 *iova, kib_phys_mr_t **pp_pmr);
void kiblnd_pmr_pool_unmap(kib_phys_mr_t *pmr);

int kiblnd_startup(lnet_ni_t *ni);
void kiblnd_shutdown(lnet_ni_t *ni);
int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg);
void kiblnd_query(struct lnet_ni *ni, lnet_nid_t nid, unsigned long *when);

int kiblnd_tunables_init(void);
void kiblnd_tunables_fini(void);

int kiblnd_connd(void *arg);
int kiblnd_scheduler(void *arg);
int kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name);
int kiblnd_failover_thread(void *arg);

int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages);
void kiblnd_free_pages(kib_pages_t *p);

int kiblnd_cm_callback(struct rdma_cm_id *cmid,
                       struct rdma_cm_event *event);
int kiblnd_translate_mtu(int value);

int kiblnd_dev_failover(kib_dev_t *dev);
int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid);
void kiblnd_destroy_peer(kib_peer_t *peer);
void kiblnd_destroy_dev(kib_dev_t *dev);
void kiblnd_unlink_peer_locked(kib_peer_t *peer);
void kiblnd_peer_alive(kib_peer_t *peer);
kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid);
void kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error);
int kiblnd_close_stale_conns_locked(kib_peer_t *peer,
                                    int version, __u64 incarnation);
int kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why);

void kiblnd_connreq_done(kib_conn_t *conn, int status);
kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
                               int state, int version);
void kiblnd_destroy_conn(kib_conn_t *conn);
void kiblnd_close_conn(kib_conn_t *conn, int error);
void kiblnd_close_conn_locked(kib_conn_t *conn, int error);

int kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
                     int nob, kib_rdma_desc_t *dstrd, __u64 dstcookie);

void kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid);
void kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn);
void kiblnd_queue_tx(kib_tx_t *tx, kib_conn_t *conn);
void kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob);
void kiblnd_txlist_done(lnet_ni_t *ni, struct list_head *txlist,
                        int status);
void kiblnd_check_sends(kib_conn_t *conn);

void kiblnd_qp_event(struct ib_event *event, void *arg);
void kiblnd_cq_event(struct ib_event *event, void *arg);
void kiblnd_cq_completion(struct ib_cq *cq, void *arg);

void kiblnd_pack_msg(lnet_ni_t *ni, kib_msg_t *msg, int version,
                     int credits, lnet_nid_t dstnid, __u64 dststamp);
int kiblnd_unpack_msg(kib_msg_t *msg, int nob);
int kiblnd_post_rx(kib_rx_t *rx, int credit);

int kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg);
int kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
                unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
                unsigned int offset, unsigned int mlen, unsigned int rlen);