staging: lustre: align all code properly for LNet core
1 /*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19 *
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
22 * have any questions.
23 *
24 * GPL HEADER END
25 */
26 /*
27 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
29 *
30 * Copyright (c) 2011, 2015, Intel Corporation.
31 */
32 /*
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
35 *
36 * lnet/klnds/o2iblnd/o2iblnd.c
37 *
38 * Author: Eric Barton <eric@bartonsoftware.com>
39 */
40
41 #include <asm/div64.h>
42 #include <asm/page.h>
43 #include "o2iblnd.h"
44
45 static lnd_t the_o2iblnd = {
46 .lnd_type = O2IBLND,
47 .lnd_startup = kiblnd_startup,
48 .lnd_shutdown = kiblnd_shutdown,
49 .lnd_ctl = kiblnd_ctl,
50 .lnd_query = kiblnd_query,
51 .lnd_send = kiblnd_send,
52 .lnd_recv = kiblnd_recv,
53 };
54
55 kib_data_t kiblnd_data;
56
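/*
 * Simple rotating 32-bit checksum over the raw message bytes. Returns 1
 * instead of 0 so that a zero ibm_cksum on the wire always means
 * "no checksum".
 */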
57 static __u32 kiblnd_cksum(void *ptr, int nob)
58 {
59 char *c = ptr;
60 __u32 sum = 0;
61
62 while (nob-- > 0)
63 sum = ((sum << 1) | (sum >> 31)) + *c++;
64
65 /* ensure I don't return 0 (== no checksum) */
66 return (sum == 0) ? 1 : sum;
67 }
68
69 static char *kiblnd_msgtype2str(int type)
70 {
71 switch (type) {
72 case IBLND_MSG_CONNREQ:
73 return "CONNREQ";
74
75 case IBLND_MSG_CONNACK:
76 return "CONNACK";
77
78 case IBLND_MSG_NOOP:
79 return "NOOP";
80
81 case IBLND_MSG_IMMEDIATE:
82 return "IMMEDIATE";
83
84 case IBLND_MSG_PUT_REQ:
85 return "PUT_REQ";
86
87 case IBLND_MSG_PUT_NAK:
88 return "PUT_NAK";
89
90 case IBLND_MSG_PUT_ACK:
91 return "PUT_ACK";
92
93 case IBLND_MSG_PUT_DONE:
94 return "PUT_DONE";
95
96 case IBLND_MSG_GET_REQ:
97 return "GET_REQ";
98
99 case IBLND_MSG_GET_DONE:
100 return "GET_DONE";
101
102 default:
103 return "???";
104 }
105 }
106
107 static int kiblnd_msgtype2size(int type)
108 {
109 const int hdr_size = offsetof(kib_msg_t, ibm_u);
110
111 switch (type) {
112 case IBLND_MSG_CONNREQ:
113 case IBLND_MSG_CONNACK:
114 return hdr_size + sizeof(kib_connparams_t);
115
116 case IBLND_MSG_NOOP:
117 return hdr_size;
118
119 case IBLND_MSG_IMMEDIATE:
120 return offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[0]);
121
122 case IBLND_MSG_PUT_REQ:
123 return hdr_size + sizeof(kib_putreq_msg_t);
124
125 case IBLND_MSG_PUT_ACK:
126 return hdr_size + sizeof(kib_putack_msg_t);
127
128 case IBLND_MSG_GET_REQ:
129 return hdr_size + sizeof(kib_get_msg_t);
130
131 case IBLND_MSG_PUT_NAK:
132 case IBLND_MSG_PUT_DONE:
133 case IBLND_MSG_GET_DONE:
134 return hdr_size + sizeof(kib_completion_msg_t);
135 default:
136 return -1;
137 }
138 }
139
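/*
 * Validate (and byte-swap if the sender has opposite endianness) the RDMA
 * descriptor carried by a GET_REQ or PUT_ACK message. Returns non-zero if
 * the fragment count or message size is bogus.
 */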
140 static int kiblnd_unpack_rd(kib_msg_t *msg, int flip)
141 {
142 kib_rdma_desc_t *rd;
143 int nob;
144 int n;
145 int i;
146
147 LASSERT(msg->ibm_type == IBLND_MSG_GET_REQ ||
148 msg->ibm_type == IBLND_MSG_PUT_ACK);
149
150 rd = msg->ibm_type == IBLND_MSG_GET_REQ ?
151 &msg->ibm_u.get.ibgm_rd :
152 &msg->ibm_u.putack.ibpam_rd;
153
154 if (flip) {
155 __swab32s(&rd->rd_key);
156 __swab32s(&rd->rd_nfrags);
157 }
158
159 n = rd->rd_nfrags;
160
161 if (n <= 0 || n > IBLND_MAX_RDMA_FRAGS) {
162 CERROR("Bad nfrags: %d, should be 0 < n <= %d\n",
163 n, IBLND_MAX_RDMA_FRAGS);
164 return 1;
165 }
166
167 nob = offsetof(kib_msg_t, ibm_u) +
168 kiblnd_rd_msg_size(rd, msg->ibm_type, n);
169
170 if (msg->ibm_nob < nob) {
171 CERROR("Short %s: %d(%d)\n",
172 kiblnd_msgtype2str(msg->ibm_type), msg->ibm_nob, nob);
173 return 1;
174 }
175
176 if (!flip)
177 return 0;
178
179 for (i = 0; i < n; i++) {
180 __swab32s(&rd->rd_frags[i].rf_nob);
181 __swab64s(&rd->rd_frags[i].rf_addr);
182 }
183
184 return 0;
185 }
186
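/*
 * Fill in the common message header (magic, version, credits, NIDs and
 * stamps) and compute the checksum over the whole message if checksumming
 * is enabled. Type-specific fields and ibm_nob must already be set.
 */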
187 void kiblnd_pack_msg(lnet_ni_t *ni, kib_msg_t *msg, int version,
188 int credits, lnet_nid_t dstnid, __u64 dststamp)
189 {
190 kib_net_t *net = ni->ni_data;
191
192 /*
193 * CAVEAT EMPTOR! all message fields not set here should have been
194 * initialised previously.
195 */
196 msg->ibm_magic = IBLND_MSG_MAGIC;
197 msg->ibm_version = version;
198 /* ibm_type */
199 msg->ibm_credits = credits;
200 /* ibm_nob */
201 msg->ibm_cksum = 0;
202 msg->ibm_srcnid = ni->ni_nid;
203 msg->ibm_srcstamp = net->ibn_incarnation;
204 msg->ibm_dstnid = dstnid;
205 msg->ibm_dststamp = dststamp;
206
207 if (*kiblnd_tunables.kib_cksum) {
208 /* NB ibm_cksum zero while computing cksum */
209 msg->ibm_cksum = kiblnd_cksum(msg, msg->ibm_nob);
210 }
211 }
212
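/*
 * Sanity-check an incoming message: verify magic, version, size and
 * checksum, byte-swap the header for a peer of opposite endianness, then
 * do the per-type payload fixups. Returns -EPROTO on any inconsistency.
 */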
213 int kiblnd_unpack_msg(kib_msg_t *msg, int nob)
214 {
215 const int hdr_size = offsetof(kib_msg_t, ibm_u);
216 __u32 msg_cksum;
217 __u16 version;
218 int msg_nob;
219 int flip;
220
221 /* 6 bytes are enough to have received magic + version */
222 if (nob < 6) {
223 CERROR("Short message: %d\n", nob);
224 return -EPROTO;
225 }
226
227 if (msg->ibm_magic == IBLND_MSG_MAGIC) {
228 flip = 0;
229 } else if (msg->ibm_magic == __swab32(IBLND_MSG_MAGIC)) {
230 flip = 1;
231 } else {
232 CERROR("Bad magic: %08x\n", msg->ibm_magic);
233 return -EPROTO;
234 }
235
236 version = flip ? __swab16(msg->ibm_version) : msg->ibm_version;
237 if (version != IBLND_MSG_VERSION &&
238 version != IBLND_MSG_VERSION_1) {
239 CERROR("Bad version: %x\n", version);
240 return -EPROTO;
241 }
242
243 if (nob < hdr_size) {
244 CERROR("Short message: %d\n", nob);
245 return -EPROTO;
246 }
247
248 msg_nob = flip ? __swab32(msg->ibm_nob) : msg->ibm_nob;
249 if (msg_nob > nob) {
250 CERROR("Short message: got %d, wanted %d\n", nob, msg_nob);
251 return -EPROTO;
252 }
253
254 /*
255 * checksum must be computed with ibm_cksum zero and BEFORE anything
256 * gets flipped
257 */
258 msg_cksum = flip ? __swab32(msg->ibm_cksum) : msg->ibm_cksum;
259 msg->ibm_cksum = 0;
260 if (msg_cksum != 0 &&
261 msg_cksum != kiblnd_cksum(msg, msg_nob)) {
262 CERROR("Bad checksum\n");
263 return -EPROTO;
264 }
265
266 msg->ibm_cksum = msg_cksum;
267
268 if (flip) {
269 /* leave magic unflipped as a clue to peer endianness */
270 msg->ibm_version = version;
271 CLASSERT(sizeof(msg->ibm_type) == 1);
272 CLASSERT(sizeof(msg->ibm_credits) == 1);
273 msg->ibm_nob = msg_nob;
274 __swab64s(&msg->ibm_srcnid);
275 __swab64s(&msg->ibm_srcstamp);
276 __swab64s(&msg->ibm_dstnid);
277 __swab64s(&msg->ibm_dststamp);
278 }
279
280 if (msg->ibm_srcnid == LNET_NID_ANY) {
281 CERROR("Bad src nid: %s\n", libcfs_nid2str(msg->ibm_srcnid));
282 return -EPROTO;
283 }
284
285 if (msg_nob < kiblnd_msgtype2size(msg->ibm_type)) {
286 CERROR("Short %s: %d(%d)\n", kiblnd_msgtype2str(msg->ibm_type),
287 msg_nob, kiblnd_msgtype2size(msg->ibm_type));
288 return -EPROTO;
289 }
290
291 switch (msg->ibm_type) {
292 default:
293 CERROR("Unknown message type %x\n", msg->ibm_type);
294 return -EPROTO;
295
296 case IBLND_MSG_NOOP:
297 case IBLND_MSG_IMMEDIATE:
298 case IBLND_MSG_PUT_REQ:
299 break;
300
301 case IBLND_MSG_PUT_ACK:
302 case IBLND_MSG_GET_REQ:
303 if (kiblnd_unpack_rd(msg, flip))
304 return -EPROTO;
305 break;
306
307 case IBLND_MSG_PUT_NAK:
308 case IBLND_MSG_PUT_DONE:
309 case IBLND_MSG_GET_DONE:
310 if (flip)
311 __swab32s(&msg->ibm_u.completion.ibcm_status);
312 break;
313
314 case IBLND_MSG_CONNREQ:
315 case IBLND_MSG_CONNACK:
316 if (flip) {
317 __swab16s(&msg->ibm_u.connparams.ibcp_queue_depth);
318 __swab16s(&msg->ibm_u.connparams.ibcp_max_frags);
319 __swab32s(&msg->ibm_u.connparams.ibcp_max_msg_size);
320 }
321 break;
322 }
323 return 0;
324 }
325
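/*
 * Allocate and initialise a peer on the CPT that nid hashes to. The new
 * peer starts with a single reference owned by the caller and is not yet
 * in the peer table.
 */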
326 int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid)
327 {
328 kib_peer_t *peer;
329 kib_net_t *net = ni->ni_data;
330 int cpt = lnet_cpt_of_nid(nid);
331 unsigned long flags;
332
333 LASSERT(net != NULL);
334 LASSERT(nid != LNET_NID_ANY);
335
336 LIBCFS_CPT_ALLOC(peer, lnet_cpt_table(), cpt, sizeof(*peer));
337 if (peer == NULL) {
338 CERROR("Cannot allocate peer\n");
339 return -ENOMEM;
340 }
341
342 memset(peer, 0, sizeof(*peer)); /* zero flags etc */
343
344 peer->ibp_ni = ni;
345 peer->ibp_nid = nid;
346 peer->ibp_error = 0;
347 peer->ibp_last_alive = 0;
348 atomic_set(&peer->ibp_refcount, 1); /* 1 ref for caller */
349
350 INIT_LIST_HEAD(&peer->ibp_list); /* not in the peer table yet */
351 INIT_LIST_HEAD(&peer->ibp_conns);
352 INIT_LIST_HEAD(&peer->ibp_tx_queue);
353
354 write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
355
356 /* always called with a ref on ni, which prevents ni being shutdown */
357 LASSERT(net->ibn_shutdown == 0);
358
359 /* npeers only grows with the global lock held */
360 atomic_inc(&net->ibn_npeers);
361
362 write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
363
364 *peerp = peer;
365 return 0;
366 }
367
368 void kiblnd_destroy_peer(kib_peer_t *peer)
369 {
370 kib_net_t *net = peer->ibp_ni->ni_data;
371
372 LASSERT(net != NULL);
373 LASSERT(atomic_read(&peer->ibp_refcount) == 0);
374 LASSERT(!kiblnd_peer_active(peer));
375 LASSERT(peer->ibp_connecting == 0);
376 LASSERT(peer->ibp_accepting == 0);
377 LASSERT(list_empty(&peer->ibp_conns));
378 LASSERT(list_empty(&peer->ibp_tx_queue));
379
380 LIBCFS_FREE(peer, sizeof(*peer));
381
382 /*
383 * NB a peer's connections keep a reference on their peer until
384 * they are destroyed, so we can be assured that _all_ state to do
385 * with this peer has been cleaned up when its refcount drops to
386 * zero.
387 */
388 atomic_dec(&net->ibn_npeers);
389 }
390
391 kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid)
392 {
393 /*
394 * the caller is responsible for accounting the additional reference
395 * that this creates
396 */
397 struct list_head *peer_list = kiblnd_nid2peerlist(nid);
398 struct list_head *tmp;
399 kib_peer_t *peer;
400
401 list_for_each(tmp, peer_list) {
402
403 peer = list_entry(tmp, kib_peer_t, ibp_list);
404
405 LASSERT(peer->ibp_connecting > 0 || /* creating conns */
406 peer->ibp_accepting > 0 ||
407 !list_empty(&peer->ibp_conns)); /* active conn */
408
409 if (peer->ibp_nid != nid)
410 continue;
411
412 CDEBUG(D_NET, "got peer [%p] -> %s (%d) version: %x\n",
413 peer, libcfs_nid2str(nid),
414 atomic_read(&peer->ibp_refcount),
415 peer->ibp_version);
416 return peer;
417 }
418 return NULL;
419 }
420
421 void kiblnd_unlink_peer_locked(kib_peer_t *peer)
422 {
423 LASSERT(list_empty(&peer->ibp_conns));
424
425 LASSERT(kiblnd_peer_active(peer));
426 list_del_init(&peer->ibp_list);
427 /* lose peerlist's ref */
428 kiblnd_peer_decref(peer);
429 }
430
431 static int kiblnd_get_peer_info(lnet_ni_t *ni, int index,
432 lnet_nid_t *nidp, int *count)
433 {
434 kib_peer_t *peer;
435 struct list_head *ptmp;
436 int i;
437 unsigned long flags;
438
439 read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
440
441 for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
442
443 list_for_each(ptmp, &kiblnd_data.kib_peers[i]) {
444
445 peer = list_entry(ptmp, kib_peer_t, ibp_list);
446 LASSERT(peer->ibp_connecting > 0 ||
447 peer->ibp_accepting > 0 ||
448 !list_empty(&peer->ibp_conns));
449
450 if (peer->ibp_ni != ni)
451 continue;
452
453 if (index-- > 0)
454 continue;
455
456 *nidp = peer->ibp_nid;
457 *count = atomic_read(&peer->ibp_refcount);
458
459 read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
460 flags);
461 return 0;
462 }
463 }
464
465 read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
466 return -ENOENT;
467 }
468
469 static void kiblnd_del_peer_locked(kib_peer_t *peer)
470 {
471 struct list_head *ctmp;
472 struct list_head *cnxt;
473 kib_conn_t *conn;
474
475 if (list_empty(&peer->ibp_conns)) {
476 kiblnd_unlink_peer_locked(peer);
477 } else {
478 list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
479 conn = list_entry(ctmp, kib_conn_t, ibc_list);
480
481 kiblnd_close_conn_locked(conn, 0);
482 }
483 /* NB closing peer's last conn unlinked it. */
484 }
485 /*
486 * NB peer now unlinked; might even be freed if the peer table had the
487 * last ref on it.
488 */
489 }
490
491 static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid)
492 {
493 LIST_HEAD(zombies);
494 struct list_head *ptmp;
495 struct list_head *pnxt;
496 kib_peer_t *peer;
497 int lo;
498 int hi;
499 int i;
500 unsigned long flags;
501 int rc = -ENOENT;
502
503 write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
504
505 if (nid != LNET_NID_ANY) {
506 lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
507 } else {
508 lo = 0;
509 hi = kiblnd_data.kib_peer_hash_size - 1;
510 }
511
512 for (i = lo; i <= hi; i++) {
513 list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
514 peer = list_entry(ptmp, kib_peer_t, ibp_list);
515 LASSERT(peer->ibp_connecting > 0 ||
516 peer->ibp_accepting > 0 ||
517 !list_empty(&peer->ibp_conns));
518
519 if (peer->ibp_ni != ni)
520 continue;
521
522 if (!(nid == LNET_NID_ANY || peer->ibp_nid == nid))
523 continue;
524
525 if (!list_empty(&peer->ibp_tx_queue)) {
526 LASSERT(list_empty(&peer->ibp_conns));
527
528 list_splice_init(&peer->ibp_tx_queue,
529 &zombies);
530 }
531
532 kiblnd_del_peer_locked(peer);
533 rc = 0; /* matched something */
534 }
535 }
536
537 write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
538
539 kiblnd_txlist_done(ni, &zombies, -EIO);
540
541 return rc;
542 }
543
544 static kib_conn_t *kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index)
545 {
546 kib_peer_t *peer;
547 struct list_head *ptmp;
548 kib_conn_t *conn;
549 struct list_head *ctmp;
550 int i;
551 unsigned long flags;
552
553 read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
554
555 for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
556 list_for_each(ptmp, &kiblnd_data.kib_peers[i]) {
557
558 peer = list_entry(ptmp, kib_peer_t, ibp_list);
559 LASSERT(peer->ibp_connecting > 0 ||
560 peer->ibp_accepting > 0 ||
561 !list_empty(&peer->ibp_conns));
562
563 if (peer->ibp_ni != ni)
564 continue;
565
566 list_for_each(ctmp, &peer->ibp_conns) {
567 if (index-- > 0)
568 continue;
569
570 conn = list_entry(ctmp, kib_conn_t,
571 ibc_list);
572 kiblnd_conn_addref(conn);
573 read_unlock_irqrestore(
574 &kiblnd_data.kib_global_lock,
575 flags);
576 return conn;
577 }
578 }
579 }
580
581 read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
582 return NULL;
583 }
584
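/*
 * Translate an MTU in bytes into the corresponding IB_MTU_* enum value.
 * Returns 0 for "use the default" and -1 for an unsupported value.
 */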
585 int kiblnd_translate_mtu(int value)
586 {
587 switch (value) {
588 default:
589 return -1;
590 case 0:
591 return 0;
592 case 256:
593 return IB_MTU_256;
594 case 512:
595 return IB_MTU_512;
596 case 1024:
597 return IB_MTU_1024;
598 case 2048:
599 return IB_MTU_2048;
600 case 4096:
601 return IB_MTU_4096;
602 }
603 }
604
605 static void kiblnd_setup_mtu_locked(struct rdma_cm_id *cmid)
606 {
607 int mtu;
608
609 /* XXX There is no path record for iWARP, set by netdev->change_mtu? */
610 if (cmid->route.path_rec == NULL)
611 return;
612
613 mtu = kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu);
614 LASSERT(mtu >= 0);
615 if (mtu != 0)
616 cmid->route.path_rec->mtu = mtu;
617 }
618
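/*
 * Pick a CQ completion vector for this connection by hashing the peer NID
 * onto one of the CPUs in the connection's CPT, spreading completions
 * across the available vectors.
 */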
619 static int kiblnd_get_completion_vector(kib_conn_t *conn, int cpt)
620 {
621 cpumask_t *mask;
622 int vectors;
623 int off;
624 int i;
625 lnet_nid_t nid = conn->ibc_peer->ibp_nid;
626
627 vectors = conn->ibc_cmid->device->num_comp_vectors;
628 if (vectors <= 1)
629 return 0;
630
631 mask = cfs_cpt_cpumask(lnet_cpt_table(), cpt);
632 if (mask == NULL)
633 return 0;
634
635 /* hash NID to CPU id in this partition... */
636 off = do_div(nid, cpumask_weight(mask));
637 for_each_cpu(i, mask) {
638 if (off-- == 0)
639 return i % vectors;
640 }
641
642 LBUG();
643 return 1;
644 }
645
646 kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
647 int state, int version)
648 {
649 /*
650 * CAVEAT EMPTOR:
651 * If the new conn is created successfully it takes over the caller's
652 * ref on 'peer'. It also "owns" 'cmid' and destroys it when it itself
653 * is destroyed. On failure, the caller's ref on 'peer' remains and
654 * she must dispose of 'cmid'. (Actually I'd block forever if I tried
655 * to destroy 'cmid' here since I'm called from the CM which still has
656 * its ref on 'cmid').
657 */
658 rwlock_t *glock = &kiblnd_data.kib_global_lock;
659 kib_net_t *net = peer->ibp_ni->ni_data;
660 kib_dev_t *dev;
661 struct ib_qp_init_attr *init_qp_attr;
662 struct kib_sched_info *sched;
663 struct ib_cq_init_attr cq_attr = {};
664 kib_conn_t *conn;
665 struct ib_cq *cq;
666 unsigned long flags;
667 int cpt;
668 int rc;
669 int i;
670
671 LASSERT(net != NULL);
672 LASSERT(!in_interrupt());
673
674 dev = net->ibn_dev;
675
676 cpt = lnet_cpt_of_nid(peer->ibp_nid);
677 sched = kiblnd_data.kib_scheds[cpt];
678
679 LASSERT(sched->ibs_nthreads > 0);
680
681 LIBCFS_CPT_ALLOC(init_qp_attr, lnet_cpt_table(), cpt,
682 sizeof(*init_qp_attr));
683 if (init_qp_attr == NULL) {
684 CERROR("Can't allocate qp_attr for %s\n",
685 libcfs_nid2str(peer->ibp_nid));
686 goto failed_0;
687 }
688
689 LIBCFS_CPT_ALLOC(conn, lnet_cpt_table(), cpt, sizeof(*conn));
690 if (conn == NULL) {
691 CERROR("Can't allocate connection for %s\n",
692 libcfs_nid2str(peer->ibp_nid));
693 goto failed_1;
694 }
695
696 conn->ibc_state = IBLND_CONN_INIT;
697 conn->ibc_version = version;
698 conn->ibc_peer = peer; /* I take the caller's ref */
699 cmid->context = conn; /* for future CM callbacks */
700 conn->ibc_cmid = cmid;
701
702 INIT_LIST_HEAD(&conn->ibc_early_rxs);
703 INIT_LIST_HEAD(&conn->ibc_tx_noops);
704 INIT_LIST_HEAD(&conn->ibc_tx_queue);
705 INIT_LIST_HEAD(&conn->ibc_tx_queue_rsrvd);
706 INIT_LIST_HEAD(&conn->ibc_tx_queue_nocred);
707 INIT_LIST_HEAD(&conn->ibc_active_txs);
708 spin_lock_init(&conn->ibc_lock);
709
710 LIBCFS_CPT_ALLOC(conn->ibc_connvars, lnet_cpt_table(), cpt,
711 sizeof(*conn->ibc_connvars));
712 if (conn->ibc_connvars == NULL) {
713 CERROR("Can't allocate in-progress connection state\n");
714 goto failed_2;
715 }
716
717 write_lock_irqsave(glock, flags);
718 if (dev->ibd_failover) {
719 write_unlock_irqrestore(glock, flags);
720 CERROR("%s: failover in progress\n", dev->ibd_ifname);
721 goto failed_2;
722 }
723
724 if (dev->ibd_hdev->ibh_ibdev != cmid->device) {
725 /* wakeup failover thread and teardown connection */
726 if (kiblnd_dev_can_failover(dev)) {
727 list_add_tail(&dev->ibd_fail_list,
728 &kiblnd_data.kib_failed_devs);
729 wake_up(&kiblnd_data.kib_failover_waitq);
730 }
731
732 write_unlock_irqrestore(glock, flags);
733 CERROR("cmid HCA(%s), kib_dev(%s) need failover\n",
734 cmid->device->name, dev->ibd_ifname);
735 goto failed_2;
736 }
737
738 kiblnd_hdev_addref_locked(dev->ibd_hdev);
739 conn->ibc_hdev = dev->ibd_hdev;
740
741 kiblnd_setup_mtu_locked(cmid);
742
743 write_unlock_irqrestore(glock, flags);
744
745 LIBCFS_CPT_ALLOC(conn->ibc_rxs, lnet_cpt_table(), cpt,
746 IBLND_RX_MSGS(version) * sizeof(kib_rx_t));
747 if (conn->ibc_rxs == NULL) {
748 CERROR("Cannot allocate RX buffers\n");
749 goto failed_2;
750 }
751
752 rc = kiblnd_alloc_pages(&conn->ibc_rx_pages, cpt,
753 IBLND_RX_MSG_PAGES(version));
754 if (rc != 0)
755 goto failed_2;
756
757 kiblnd_map_rx_descs(conn);
758
759 cq_attr.cqe = IBLND_CQ_ENTRIES(version);
760 cq_attr.comp_vector = kiblnd_get_completion_vector(conn, cpt);
761 cq = ib_create_cq(cmid->device,
762 kiblnd_cq_completion, kiblnd_cq_event, conn,
763 &cq_attr);
764 if (IS_ERR(cq)) {
765 CERROR("Can't create CQ: %ld, cqe: %d\n",
766 PTR_ERR(cq), IBLND_CQ_ENTRIES(version));
767 goto failed_2;
768 }
769
770 conn->ibc_cq = cq;
771
772 rc = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
773 if (rc != 0) {
774 CERROR("Can't request completion notificiation: %d\n", rc);
775 goto failed_2;
776 }
777
778 init_qp_attr->event_handler = kiblnd_qp_event;
779 init_qp_attr->qp_context = conn;
780 init_qp_attr->cap.max_send_wr = IBLND_SEND_WRS(version);
781 init_qp_attr->cap.max_recv_wr = IBLND_RECV_WRS(version);
782 init_qp_attr->cap.max_send_sge = 1;
783 init_qp_attr->cap.max_recv_sge = 1;
784 init_qp_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
785 init_qp_attr->qp_type = IB_QPT_RC;
786 init_qp_attr->send_cq = cq;
787 init_qp_attr->recv_cq = cq;
788
789 conn->ibc_sched = sched;
790
791 rc = rdma_create_qp(cmid, conn->ibc_hdev->ibh_pd, init_qp_attr);
792 if (rc != 0) {
793 CERROR("Can't create QP: %d, send_wr: %d, recv_wr: %d\n",
794 rc, init_qp_attr->cap.max_send_wr,
795 init_qp_attr->cap.max_recv_wr);
796 goto failed_2;
797 }
798
799 LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));
800
801 /* 1 ref for caller and each rxmsg */
802 atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS(version));
803 conn->ibc_nrx = IBLND_RX_MSGS(version);
804
805 /* post receives */
806 for (i = 0; i < IBLND_RX_MSGS(version); i++) {
807 rc = kiblnd_post_rx(&conn->ibc_rxs[i],
808 IBLND_POSTRX_NO_CREDIT);
809 if (rc != 0) {
810 CERROR("Can't post rxmsg: %d\n", rc);
811
812 /* Make posted receives complete */
813 kiblnd_abort_receives(conn);
814
815 /*
816 * correct # of posted buffers
817 * NB locking needed now I'm racing with completion
818 */
819 spin_lock_irqsave(&sched->ibs_lock, flags);
820 conn->ibc_nrx -= IBLND_RX_MSGS(version) - i;
821 spin_unlock_irqrestore(&sched->ibs_lock, flags);
822
823 /*
824 * cmid will be destroyed by CM(ofed) after cm_callback
825 * returned, so we can't refer to it anymore
826 * (by kiblnd_connd()->kiblnd_destroy_conn)
827 */
828 rdma_destroy_qp(conn->ibc_cmid);
829 conn->ibc_cmid = NULL;
830
831 /* Drop my own and unused rxbuffer refcounts */
832 while (i++ <= IBLND_RX_MSGS(version))
833 kiblnd_conn_decref(conn);
834
835 return NULL;
836 }
837 }
838
839 /* Init successful! */
840 LASSERT(state == IBLND_CONN_ACTIVE_CONNECT ||
841 state == IBLND_CONN_PASSIVE_WAIT);
842 conn->ibc_state = state;
843
844 /* 1 more conn */
845 atomic_inc(&net->ibn_nconns);
846 return conn;
847
848 failed_2:
849 kiblnd_destroy_conn(conn);
850 failed_1:
851 LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));
852 failed_0:
853 return NULL;
854 }
855
856 void kiblnd_destroy_conn(kib_conn_t *conn)
857 {
858 struct rdma_cm_id *cmid = conn->ibc_cmid;
859 kib_peer_t *peer = conn->ibc_peer;
860 int rc;
861
862 LASSERT(!in_interrupt());
863 LASSERT(atomic_read(&conn->ibc_refcount) == 0);
864 LASSERT(list_empty(&conn->ibc_early_rxs));
865 LASSERT(list_empty(&conn->ibc_tx_noops));
866 LASSERT(list_empty(&conn->ibc_tx_queue));
867 LASSERT(list_empty(&conn->ibc_tx_queue_rsrvd));
868 LASSERT(list_empty(&conn->ibc_tx_queue_nocred));
869 LASSERT(list_empty(&conn->ibc_active_txs));
870 LASSERT(conn->ibc_noops_posted == 0);
871 LASSERT(conn->ibc_nsends_posted == 0);
872
873 switch (conn->ibc_state) {
874 default:
875 /* conn must be completely disengaged from the network */
876 LBUG();
877
878 case IBLND_CONN_DISCONNECTED:
879 /* connvars should have been freed already */
880 LASSERT(conn->ibc_connvars == NULL);
881 break;
882
883 case IBLND_CONN_INIT:
884 break;
885 }
886
887 /* conn->ibc_cmid might be destroyed by CM already */
888 if (cmid != NULL && cmid->qp != NULL)
889 rdma_destroy_qp(cmid);
890
891 if (conn->ibc_cq != NULL) {
892 rc = ib_destroy_cq(conn->ibc_cq);
893 if (rc != 0)
894 CWARN("Error destroying CQ: %d\n", rc);
895 }
896
897 if (conn->ibc_rx_pages != NULL)
898 kiblnd_unmap_rx_descs(conn);
899
900 if (conn->ibc_rxs != NULL) {
901 LIBCFS_FREE(conn->ibc_rxs,
902 IBLND_RX_MSGS(conn->ibc_version)
903 * sizeof(kib_rx_t));
904 }
905
906 if (conn->ibc_connvars != NULL)
907 LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
908
909 if (conn->ibc_hdev != NULL)
910 kiblnd_hdev_decref(conn->ibc_hdev);
911
912 /* See CAVEAT EMPTOR above in kiblnd_create_conn */
913 if (conn->ibc_state != IBLND_CONN_INIT) {
914 kib_net_t *net = peer->ibp_ni->ni_data;
915
916 kiblnd_peer_decref(peer);
917 rdma_destroy_id(cmid);
918 atomic_dec(&net->ibn_nconns);
919 }
920
921 LIBCFS_FREE(conn, sizeof(*conn));
922 }
923
924 int kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why)
925 {
926 kib_conn_t *conn;
927 struct list_head *ctmp;
928 struct list_head *cnxt;
929 int count = 0;
930
931 list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
932 conn = list_entry(ctmp, kib_conn_t, ibc_list);
933
934 CDEBUG(D_NET, "Closing conn -> %s, version: %x, reason: %d\n",
935 libcfs_nid2str(peer->ibp_nid),
936 conn->ibc_version, why);
937
938 kiblnd_close_conn_locked(conn, why);
939 count++;
940 }
941
942 return count;
943 }
944
945 int kiblnd_close_stale_conns_locked(kib_peer_t *peer,
946 int version, __u64 incarnation)
947 {
948 kib_conn_t *conn;
949 struct list_head *ctmp;
950 struct list_head *cnxt;
951 int count = 0;
952
953 list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
954 conn = list_entry(ctmp, kib_conn_t, ibc_list);
955
956 if (conn->ibc_version == version &&
957 conn->ibc_incarnation == incarnation)
958 continue;
959
960 CDEBUG(D_NET,
961 "Closing stale conn -> %s version: %x, incarnation:%#llx(%x, %#llx)\n",
962 libcfs_nid2str(peer->ibp_nid),
963 conn->ibc_version, conn->ibc_incarnation,
964 version, incarnation);
965
966 kiblnd_close_conn_locked(conn, -ESTALE);
967 count++;
968 }
969
970 return count;
971 }
972
973 static int kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid)
974 {
975 kib_peer_t *peer;
976 struct list_head *ptmp;
977 struct list_head *pnxt;
978 int lo;
979 int hi;
980 int i;
981 unsigned long flags;
982 int count = 0;
983
984 write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
985
986 if (nid != LNET_NID_ANY)
987 lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
988 else {
989 lo = 0;
990 hi = kiblnd_data.kib_peer_hash_size - 1;
991 }
992
993 for (i = lo; i <= hi; i++) {
994 list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
995
996 peer = list_entry(ptmp, kib_peer_t, ibp_list);
997 LASSERT(peer->ibp_connecting > 0 ||
998 peer->ibp_accepting > 0 ||
999 !list_empty(&peer->ibp_conns));
1000
1001 if (peer->ibp_ni != ni)
1002 continue;
1003
1004 if (!(nid == LNET_NID_ANY || nid == peer->ibp_nid))
1005 continue;
1006
1007 count += kiblnd_close_peer_conns_locked(peer, 0);
1008 }
1009 }
1010
1011 write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
1012
1013 /* wildcards always succeed */
1014 if (nid == LNET_NID_ANY)
1015 return 0;
1016
1017 return (count == 0) ? -ENOENT : 0;
1018 }
1019
1020 int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
1021 {
1022 struct libcfs_ioctl_data *data = arg;
1023 int rc = -EINVAL;
1024
1025 switch (cmd) {
1026 case IOC_LIBCFS_GET_PEER: {
1027 lnet_nid_t nid = 0;
1028 int count = 0;
1029
1030 rc = kiblnd_get_peer_info(ni, data->ioc_count,
1031 &nid, &count);
1032 data->ioc_nid = nid;
1033 data->ioc_count = count;
1034 break;
1035 }
1036
1037 case IOC_LIBCFS_DEL_PEER: {
1038 rc = kiblnd_del_peer(ni, data->ioc_nid);
1039 break;
1040 }
1041 case IOC_LIBCFS_GET_CONN: {
1042 kib_conn_t *conn;
1043
1044 rc = 0;
1045 conn = kiblnd_get_conn_by_idx(ni, data->ioc_count);
1046 if (conn == NULL) {
1047 rc = -ENOENT;
1048 break;
1049 }
1050
1051 LASSERT(conn->ibc_cmid != NULL);
1052 data->ioc_nid = conn->ibc_peer->ibp_nid;
1053 if (conn->ibc_cmid->route.path_rec == NULL)
1054 data->ioc_u32[0] = 0; /* iWarp has no path MTU */
1055 else
1056 data->ioc_u32[0] =
1057 ib_mtu_enum_to_int(conn->ibc_cmid->route.path_rec->mtu);
1058 kiblnd_conn_decref(conn);
1059 break;
1060 }
1061 case IOC_LIBCFS_CLOSE_CONNECTION: {
1062 rc = kiblnd_close_matching_conns(ni, data->ioc_nid);
1063 break;
1064 }
1065
1066 default:
1067 break;
1068 }
1069
1070 return rc;
1071 }
1072
1073 void kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
1074 {
1075 unsigned long last_alive = 0;
1076 unsigned long now = cfs_time_current();
1077 rwlock_t *glock = &kiblnd_data.kib_global_lock;
1078 kib_peer_t *peer;
1079 unsigned long flags;
1080
1081 read_lock_irqsave(glock, flags);
1082
1083 peer = kiblnd_find_peer_locked(nid);
1084 if (peer != NULL) {
1085 LASSERT(peer->ibp_connecting > 0 || /* creating conns */
1086 peer->ibp_accepting > 0 ||
1087 !list_empty(&peer->ibp_conns)); /* active conn */
1088 last_alive = peer->ibp_last_alive;
1089 }
1090
1091 read_unlock_irqrestore(glock, flags);
1092
1093 if (last_alive != 0)
1094 *when = last_alive;
1095
1096 /*
1097 * peer is not persistent in hash, trigger peer creation
1098 * and connection establishment with a NULL tx
1099 */
1100 if (peer == NULL)
1101 kiblnd_launch_tx(ni, NULL, nid);
1102
1103 CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago\n",
1104 libcfs_nid2str(nid), peer,
1105 last_alive ? cfs_duration_sec(now - last_alive) : -1);
1106 }
1107
1108 void kiblnd_free_pages(kib_pages_t *p)
1109 {
1110 int npages = p->ibp_npages;
1111 int i;
1112
1113 for (i = 0; i < npages; i++) {
1114 if (p->ibp_pages[i] != NULL)
1115 __free_page(p->ibp_pages[i]);
1116 }
1117
1118 LIBCFS_FREE(p, offsetof(kib_pages_t, ibp_pages[npages]));
1119 }
1120
1121 int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages)
1122 {
1123 kib_pages_t *p;
1124 int i;
1125
1126 LIBCFS_CPT_ALLOC(p, lnet_cpt_table(), cpt,
1127 offsetof(kib_pages_t, ibp_pages[npages]));
1128 if (p == NULL) {
1129 CERROR("Can't allocate descriptor for %d pages\n", npages);
1130 return -ENOMEM;
1131 }
1132
1133 memset(p, 0, offsetof(kib_pages_t, ibp_pages[npages]));
1134 p->ibp_npages = npages;
1135
1136 for (i = 0; i < npages; i++) {
1137 p->ibp_pages[i] = alloc_pages_node(
1138 cfs_cpt_spread_node(lnet_cpt_table(), cpt),
1139 GFP_NOFS, 0);
1140 if (p->ibp_pages[i] == NULL) {
1141 CERROR("Can't allocate page %d of %d\n", i, npages);
1142 kiblnd_free_pages(p);
1143 return -ENOMEM;
1144 }
1145 }
1146
1147 *pp = p;
1148 return 0;
1149 }
1150
1151 void kiblnd_unmap_rx_descs(kib_conn_t *conn)
1152 {
1153 kib_rx_t *rx;
1154 int i;
1155
1156 LASSERT(conn->ibc_rxs != NULL);
1157 LASSERT(conn->ibc_hdev != NULL);
1158
1159 for (i = 0; i < IBLND_RX_MSGS(conn->ibc_version); i++) {
1160 rx = &conn->ibc_rxs[i];
1161
1162 LASSERT(rx->rx_nob >= 0); /* not posted */
1163
1164 kiblnd_dma_unmap_single(conn->ibc_hdev->ibh_ibdev,
1165 KIBLND_UNMAP_ADDR(rx, rx_msgunmap,
1166 rx->rx_msgaddr),
1167 IBLND_MSG_SIZE, DMA_FROM_DEVICE);
1168 }
1169
1170 kiblnd_free_pages(conn->ibc_rx_pages);
1171
1172 conn->ibc_rx_pages = NULL;
1173 }
1174
1175 void kiblnd_map_rx_descs(kib_conn_t *conn)
1176 {
1177 kib_rx_t *rx;
1178 struct page *pg;
1179 int pg_off;
1180 int ipg;
1181 int i;
1182
1183 for (pg_off = ipg = i = 0; i < IBLND_RX_MSGS(conn->ibc_version); i++) {
1184 pg = conn->ibc_rx_pages->ibp_pages[ipg];
1185 rx = &conn->ibc_rxs[i];
1186
1187 rx->rx_conn = conn;
1188 rx->rx_msg = (kib_msg_t *)(((char *)page_address(pg)) + pg_off);
1189
1190 rx->rx_msgaddr = kiblnd_dma_map_single(conn->ibc_hdev->ibh_ibdev,
1191 rx->rx_msg,
1192 IBLND_MSG_SIZE,
1193 DMA_FROM_DEVICE);
1194 LASSERT(!kiblnd_dma_mapping_error(conn->ibc_hdev->ibh_ibdev,
1195 rx->rx_msgaddr));
1196 KIBLND_UNMAP_ADDR_SET(rx, rx_msgunmap, rx->rx_msgaddr);
1197
1198 CDEBUG(D_NET, "rx %d: %p %#llx(%#llx)\n",
1199 i, rx->rx_msg, rx->rx_msgaddr,
1200 (__u64)(page_to_phys(pg) + pg_off));
1201
1202 pg_off += IBLND_MSG_SIZE;
1203 LASSERT(pg_off <= PAGE_SIZE);
1204
1205 if (pg_off == PAGE_SIZE) {
1206 pg_off = 0;
1207 ipg++;
1208 LASSERT(ipg <= IBLND_RX_MSG_PAGES(conn->ibc_version));
1209 }
1210 }
1211 }
1212
1213 static void kiblnd_unmap_tx_pool(kib_tx_pool_t *tpo)
1214 {
1215 kib_hca_dev_t *hdev = tpo->tpo_hdev;
1216 kib_tx_t *tx;
1217 int i;
1218
1219 LASSERT(tpo->tpo_pool.po_allocated == 0);
1220
1221 if (hdev == NULL)
1222 return;
1223
1224 for (i = 0; i < tpo->tpo_pool.po_size; i++) {
1225 tx = &tpo->tpo_tx_descs[i];
1226 kiblnd_dma_unmap_single(hdev->ibh_ibdev,
1227 KIBLND_UNMAP_ADDR(tx, tx_msgunmap,
1228 tx->tx_msgaddr),
1229 IBLND_MSG_SIZE, DMA_TO_DEVICE);
1230 }
1231
1232 kiblnd_hdev_decref(hdev);
1233 tpo->tpo_hdev = NULL;
1234 }
1235
1236 static kib_hca_dev_t *kiblnd_current_hdev(kib_dev_t *dev)
1237 {
1238 kib_hca_dev_t *hdev;
1239 unsigned long flags;
1240 int i = 0;
1241
1242 read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
1243 while (dev->ibd_failover) {
1244 read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
1245 if (i++ % 50 == 0)
1246 CDEBUG(D_NET, "%s: Wait for failover\n",
1247 dev->ibd_ifname);
1248 schedule_timeout(cfs_time_seconds(1) / 100);
1249
1250 read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
1251 }
1252
1253 kiblnd_hdev_addref_locked(dev->ibd_hdev);
1254 hdev = dev->ibd_hdev;
1255
1256 read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
1257
1258 return hdev;
1259 }
1260
1261 static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo)
1262 {
1263 kib_pages_t *txpgs = tpo->tpo_tx_pages;
1264 kib_pool_t *pool = &tpo->tpo_pool;
1265 kib_net_t *net = pool->po_owner->ps_net;
1266 kib_dev_t *dev;
1267 struct page *page;
1268 kib_tx_t *tx;
1269 int page_offset;
1270 int ipage;
1271 int i;
1272
1273 LASSERT(net != NULL);
1274
1275 dev = net->ibn_dev;
1276
1277 /* pre-mapped messages are not bigger than 1 page */
1278 CLASSERT(IBLND_MSG_SIZE <= PAGE_SIZE);
1279
1280 /* No fancy arithmetic when we do the buffer calculations */
1281 CLASSERT(PAGE_SIZE % IBLND_MSG_SIZE == 0);
1282
1283 tpo->tpo_hdev = kiblnd_current_hdev(dev);
1284
1285 for (ipage = page_offset = i = 0; i < pool->po_size; i++) {
1286 page = txpgs->ibp_pages[ipage];
1287 tx = &tpo->tpo_tx_descs[i];
1288
1289 tx->tx_msg = (kib_msg_t *)(((char *)page_address(page)) +
1290 page_offset);
1291
1292 tx->tx_msgaddr = kiblnd_dma_map_single(
1293 tpo->tpo_hdev->ibh_ibdev, tx->tx_msg,
1294 IBLND_MSG_SIZE, DMA_TO_DEVICE);
1295 LASSERT(!kiblnd_dma_mapping_error(tpo->tpo_hdev->ibh_ibdev,
1296 tx->tx_msgaddr));
1297 KIBLND_UNMAP_ADDR_SET(tx, tx_msgunmap, tx->tx_msgaddr);
1298
1299 list_add(&tx->tx_list, &pool->po_free_list);
1300
1301 page_offset += IBLND_MSG_SIZE;
1302 LASSERT(page_offset <= PAGE_SIZE);
1303
1304 if (page_offset == PAGE_SIZE) {
1305 page_offset = 0;
1306 ipage++;
1307 LASSERT(ipage <= txpgs->ibp_npages);
1308 }
1309 }
1310 }
1311
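/*
 * Return the pre-registered DMA MR covering [addr, addr + size), or NULL
 * if the range straddles an MR boundary.
 */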
1312 struct ib_mr *kiblnd_find_dma_mr(kib_hca_dev_t *hdev, __u64 addr, __u64 size)
1313 {
1314 __u64 index;
1315
1316 LASSERT(hdev->ibh_mrs[0] != NULL);
1317
1318 if (hdev->ibh_nmrs == 1)
1319 return hdev->ibh_mrs[0];
1320
1321 index = addr >> hdev->ibh_mr_shift;
1322
1323 if (index < hdev->ibh_nmrs &&
1324 index == ((addr + size - 1) >> hdev->ibh_mr_shift))
1325 return hdev->ibh_mrs[index];
1326
1327 return NULL;
1328 }
1329
1330 struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev, kib_rdma_desc_t *rd)
1331 {
1332 struct ib_mr *prev_mr;
1333 struct ib_mr *mr;
1334 int i;
1335
1336 LASSERT(hdev->ibh_mrs[0] != NULL);
1337
1338 if (*kiblnd_tunables.kib_map_on_demand > 0 &&
1339 *kiblnd_tunables.kib_map_on_demand <= rd->rd_nfrags)
1340 return NULL;
1341
1342 if (hdev->ibh_nmrs == 1)
1343 return hdev->ibh_mrs[0];
1344
1345 for (i = 0, mr = prev_mr = NULL;
1346 i < rd->rd_nfrags; i++) {
1347 mr = kiblnd_find_dma_mr(hdev,
1348 rd->rd_frags[i].rf_addr,
1349 rd->rd_frags[i].rf_nob);
1350 if (prev_mr == NULL)
1351 prev_mr = mr;
1352
1353 if (mr == NULL || prev_mr != mr) {
1354 /* Can't be covered by a single MR */
1355 mr = NULL;
1356 break;
1357 }
1358 }
1359
1360 return mr;
1361 }
1362
1363 static void kiblnd_destroy_fmr_pool(kib_fmr_pool_t *pool)
1364 {
1365 LASSERT(pool->fpo_map_count == 0);
1366
1367 if (pool->fpo_fmr_pool != NULL)
1368 ib_destroy_fmr_pool(pool->fpo_fmr_pool);
1369
1370 if (pool->fpo_hdev != NULL)
1371 kiblnd_hdev_decref(pool->fpo_hdev);
1372
1373 LIBCFS_FREE(pool, sizeof(*pool));
1374 }
1375
1376 static void kiblnd_destroy_fmr_pool_list(struct list_head *head)
1377 {
1378 kib_fmr_pool_t *pool;
1379
1380 while (!list_empty(head)) {
1381 pool = list_entry(head->next, kib_fmr_pool_t, fpo_list);
1382 list_del(&pool->fpo_list);
1383 kiblnd_destroy_fmr_pool(pool);
1384 }
1385 }
1386
1387 static int kiblnd_fmr_pool_size(int ncpts)
1388 {
1389 int size = *kiblnd_tunables.kib_fmr_pool_size / ncpts;
1390
1391 return max(IBLND_FMR_POOL, size);
1392 }
1393
1394 static int kiblnd_fmr_flush_trigger(int ncpts)
1395 {
1396 int size = *kiblnd_tunables.kib_fmr_flush_trigger / ncpts;
1397
1398 return max(IBLND_FMR_POOL_FLUSH, size);
1399 }
1400
1401 static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps,
1402 kib_fmr_pool_t **pp_fpo)
1403 {
1404 /* FMR pool for RDMA */
1405 kib_dev_t *dev = fps->fps_net->ibn_dev;
1406 kib_fmr_pool_t *fpo;
1407 struct ib_fmr_pool_param param = {
1408 .max_pages_per_fmr = LNET_MAX_PAYLOAD/PAGE_SIZE,
1409 .page_shift = PAGE_SHIFT,
1410 .access = (IB_ACCESS_LOCAL_WRITE |
1411 IB_ACCESS_REMOTE_WRITE),
1412 .pool_size = fps->fps_pool_size,
1413 .dirty_watermark = fps->fps_flush_trigger,
1414 .flush_function = NULL,
1415 .flush_arg = NULL,
1416 .cache = !!*kiblnd_tunables.kib_fmr_cache};
1417 int rc;
1418
1419 LIBCFS_CPT_ALLOC(fpo, lnet_cpt_table(), fps->fps_cpt, sizeof(*fpo));
1420 if (fpo == NULL)
1421 return -ENOMEM;
1422
1423 fpo->fpo_hdev = kiblnd_current_hdev(dev);
1424
1425 fpo->fpo_fmr_pool = ib_create_fmr_pool(fpo->fpo_hdev->ibh_pd, &param);
1426 if (IS_ERR(fpo->fpo_fmr_pool)) {
1427 rc = PTR_ERR(fpo->fpo_fmr_pool);
1428 CERROR("Failed to create FMR pool: %d\n", rc);
1429
1430 kiblnd_hdev_decref(fpo->fpo_hdev);
1431 LIBCFS_FREE(fpo, sizeof(*fpo));
1432 return rc;
1433 }
1434
1435 fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
1436 fpo->fpo_owner = fps;
1437 *pp_fpo = fpo;
1438
1439 return 0;
1440 }
1441
1442 static void kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps,
1443 struct list_head *zombies)
1444 {
1445 if (fps->fps_net == NULL) /* initialized? */
1446 return;
1447
1448 spin_lock(&fps->fps_lock);
1449
1450 while (!list_empty(&fps->fps_pool_list)) {
1451 kib_fmr_pool_t *fpo = list_entry(fps->fps_pool_list.next,
1452 kib_fmr_pool_t, fpo_list);
1453 fpo->fpo_failed = 1;
1454 list_del(&fpo->fpo_list);
1455 if (fpo->fpo_map_count == 0)
1456 list_add(&fpo->fpo_list, zombies);
1457 else
1458 list_add(&fpo->fpo_list, &fps->fps_failed_pool_list);
1459 }
1460
1461 spin_unlock(&fps->fps_lock);
1462 }
1463
1464 static void kiblnd_fini_fmr_poolset(kib_fmr_poolset_t *fps)
1465 {
1466 if (fps->fps_net != NULL) { /* initialized? */
1467 kiblnd_destroy_fmr_pool_list(&fps->fps_failed_pool_list);
1468 kiblnd_destroy_fmr_pool_list(&fps->fps_pool_list);
1469 }
1470 }
1471
1472 static int kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt,
1473 kib_net_t *net, int pool_size,
1474 int flush_trigger)
1475 {
1476 kib_fmr_pool_t *fpo;
1477 int rc;
1478
1479 memset(fps, 0, sizeof(*fps));
1480
1481 fps->fps_net = net;
1482 fps->fps_cpt = cpt;
1483 fps->fps_pool_size = pool_size;
1484 fps->fps_flush_trigger = flush_trigger;
1485 spin_lock_init(&fps->fps_lock);
1486 INIT_LIST_HEAD(&fps->fps_pool_list);
1487 INIT_LIST_HEAD(&fps->fps_failed_pool_list);
1488
1489 rc = kiblnd_create_fmr_pool(fps, &fpo);
1490 if (rc == 0)
1491 list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
1492
1493 return rc;
1494 }
1495
1496 static int kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, unsigned long now)
1497 {
1498 if (fpo->fpo_map_count != 0) /* still in use */
1499 return 0;
1500 if (fpo->fpo_failed)
1501 return 1;
1502 return cfs_time_aftereq(now, fpo->fpo_deadline);
1503 }
1504
1505 void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status)
1506 {
1507 LIST_HEAD(zombies);
1508 kib_fmr_pool_t *fpo = fmr->fmr_pool;
1509 kib_fmr_poolset_t *fps = fpo->fpo_owner;
1510 unsigned long now = cfs_time_current();
1511 kib_fmr_pool_t *tmp;
1512 int rc;
1513
1514 rc = ib_fmr_pool_unmap(fmr->fmr_pfmr);
1515 LASSERT(rc == 0);
1516
1517 if (status != 0) {
1518 rc = ib_flush_fmr_pool(fpo->fpo_fmr_pool);
1519 LASSERT(rc == 0);
1520 }
1521
1522 fmr->fmr_pool = NULL;
1523 fmr->fmr_pfmr = NULL;
1524
1525 spin_lock(&fps->fps_lock);
1526 fpo->fpo_map_count--; /* decref the pool */
1527
1528 list_for_each_entry_safe(fpo, tmp, &fps->fps_pool_list, fpo_list) {
1529 /* the first pool is persistent */
1530 if (fps->fps_pool_list.next == &fpo->fpo_list)
1531 continue;
1532
1533 if (kiblnd_fmr_pool_is_idle(fpo, now)) {
1534 list_move(&fpo->fpo_list, &zombies);
1535 fps->fps_version++;
1536 }
1537 }
1538 spin_unlock(&fps->fps_lock);
1539
1540 if (!list_empty(&zombies))
1541 kiblnd_destroy_fmr_pool_list(&zombies);
1542 }
1543
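/*
 * Map an array of physical pages through one of the FMR pools of this
 * poolset, creating a new pool if every existing one is exhausted. May
 * sleep while another thread grows the poolset.
 */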
1544 int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages,
1545 __u64 iov, kib_fmr_t *fmr)
1546 {
1547 struct ib_pool_fmr *pfmr;
1548 kib_fmr_pool_t *fpo;
1549 __u64 version;
1550 int rc;
1551
1552 again:
1553 spin_lock(&fps->fps_lock);
1554 version = fps->fps_version;
1555 list_for_each_entry(fpo, &fps->fps_pool_list, fpo_list) {
1556 fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
1557 fpo->fpo_map_count++;
1558 spin_unlock(&fps->fps_lock);
1559
1560 pfmr = ib_fmr_pool_map_phys(fpo->fpo_fmr_pool,
1561 pages, npages, iov);
1562 if (likely(!IS_ERR(pfmr))) {
1563 fmr->fmr_pool = fpo;
1564 fmr->fmr_pfmr = pfmr;
1565 return 0;
1566 }
1567
1568 spin_lock(&fps->fps_lock);
1569 fpo->fpo_map_count--;
1570 if (PTR_ERR(pfmr) != -EAGAIN) {
1571 spin_unlock(&fps->fps_lock);
1572 return PTR_ERR(pfmr);
1573 }
1574
1575 /* EAGAIN and ... */
1576 if (version != fps->fps_version) {
1577 spin_unlock(&fps->fps_lock);
1578 goto again;
1579 }
1580 }
1581
1582 if (fps->fps_increasing) {
1583 spin_unlock(&fps->fps_lock);
1584 CDEBUG(D_NET, "Another thread is allocating new FMR pool, waiting for her to complete\n");
1585 schedule();
1586 goto again;
1587
1588 }
1589
1590 if (time_before(cfs_time_current(), fps->fps_next_retry)) {
1591 /* someone failed recently */
1592 spin_unlock(&fps->fps_lock);
1593 return -EAGAIN;
1594 }
1595
1596 fps->fps_increasing = 1;
1597 spin_unlock(&fps->fps_lock);
1598
1599 CDEBUG(D_NET, "Allocate new FMR pool\n");
1600 rc = kiblnd_create_fmr_pool(fps, &fpo);
1601 spin_lock(&fps->fps_lock);
1602 fps->fps_increasing = 0;
1603 if (rc == 0) {
1604 fps->fps_version++;
1605 list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
1606 } else {
1607 fps->fps_next_retry = cfs_time_shift(IBLND_POOL_RETRY);
1608 }
1609 spin_unlock(&fps->fps_lock);
1610
1611 goto again;
1612 }
1613
1614 static void kiblnd_fini_pool(kib_pool_t *pool)
1615 {
1616 LASSERT(list_empty(&pool->po_free_list));
1617 LASSERT(pool->po_allocated == 0);
1618
1619 CDEBUG(D_NET, "Finalize %s pool\n", pool->po_owner->ps_name);
1620 }
1621
1622 static void kiblnd_init_pool(kib_poolset_t *ps, kib_pool_t *pool, int size)
1623 {
1624 CDEBUG(D_NET, "Initialize %s pool\n", ps->ps_name);
1625
1626 memset(pool, 0, sizeof(*pool));
1627 INIT_LIST_HEAD(&pool->po_free_list);
1628 pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
1629 pool->po_owner = ps;
1630 pool->po_size = size;
1631 }
1632
1633 static void kiblnd_destroy_pool_list(struct list_head *head)
1634 {
1635 kib_pool_t *pool;
1636
1637 while (!list_empty(head)) {
1638 pool = list_entry(head->next, kib_pool_t, po_list);
1639 list_del(&pool->po_list);
1640
1641 LASSERT(pool->po_owner != NULL);
1642 pool->po_owner->ps_pool_destroy(pool);
1643 }
1644 }
1645
1646 static void kiblnd_fail_poolset(kib_poolset_t *ps, struct list_head *zombies)
1647 {
1648 if (ps->ps_net == NULL) /* initialized? */
1649 return;
1650
1651 spin_lock(&ps->ps_lock);
1652 while (!list_empty(&ps->ps_pool_list)) {
1653 kib_pool_t *po = list_entry(ps->ps_pool_list.next,
1654 kib_pool_t, po_list);
1655 po->po_failed = 1;
1656 list_del(&po->po_list);
1657 if (po->po_allocated == 0)
1658 list_add(&po->po_list, zombies);
1659 else
1660 list_add(&po->po_list, &ps->ps_failed_pool_list);
1661 }
1662 spin_unlock(&ps->ps_lock);
1663 }
1664
1665 static void kiblnd_fini_poolset(kib_poolset_t *ps)
1666 {
1667 if (ps->ps_net != NULL) { /* initialized? */
1668 kiblnd_destroy_pool_list(&ps->ps_failed_pool_list);
1669 kiblnd_destroy_pool_list(&ps->ps_pool_list);
1670 }
1671 }
1672
1673 static int kiblnd_init_poolset(kib_poolset_t *ps, int cpt,
1674 kib_net_t *net, char *name, int size,
1675 kib_ps_pool_create_t po_create,
1676 kib_ps_pool_destroy_t po_destroy,
1677 kib_ps_node_init_t nd_init,
1678 kib_ps_node_fini_t nd_fini)
1679 {
1680 kib_pool_t *pool;
1681 int rc;
1682
1683 memset(ps, 0, sizeof(*ps));
1684
1685 ps->ps_cpt = cpt;
1686 ps->ps_net = net;
1687 ps->ps_pool_create = po_create;
1688 ps->ps_pool_destroy = po_destroy;
1689 ps->ps_node_init = nd_init;
1690 ps->ps_node_fini = nd_fini;
1691 ps->ps_pool_size = size;
1692 if (strlcpy(ps->ps_name, name, sizeof(ps->ps_name))
1693 >= sizeof(ps->ps_name))
1694 return -E2BIG;
1695 spin_lock_init(&ps->ps_lock);
1696 INIT_LIST_HEAD(&ps->ps_pool_list);
1697 INIT_LIST_HEAD(&ps->ps_failed_pool_list);
1698
1699 rc = ps->ps_pool_create(ps, size, &pool);
1700 if (rc == 0)
1701 list_add(&pool->po_list, &ps->ps_pool_list);
1702 else
1703 CERROR("Failed to create the first pool for %s\n", ps->ps_name);
1704
1705 return rc;
1706 }
1707
1708 static int kiblnd_pool_is_idle(kib_pool_t *pool, unsigned long now)
1709 {
1710 if (pool->po_allocated != 0) /* still in use */
1711 return 0;
1712 if (pool->po_failed)
1713 return 1;
1714 return cfs_time_aftereq(now, pool->po_deadline);
1715 }
1716
1717 void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node)
1718 {
1719 LIST_HEAD(zombies);
1720 kib_poolset_t *ps = pool->po_owner;
1721 kib_pool_t *tmp;
1722 unsigned long now = cfs_time_current();
1723
1724 spin_lock(&ps->ps_lock);
1725
1726 if (ps->ps_node_fini != NULL)
1727 ps->ps_node_fini(pool, node);
1728
1729 LASSERT(pool->po_allocated > 0);
1730 list_add(node, &pool->po_free_list);
1731 pool->po_allocated--;
1732
1733 list_for_each_entry_safe(pool, tmp, &ps->ps_pool_list, po_list) {
1734 /* the first pool is persistent */
1735 if (ps->ps_pool_list.next == &pool->po_list)
1736 continue;
1737
1738 if (kiblnd_pool_is_idle(pool, now))
1739 list_move(&pool->po_list, &zombies);
1740 }
1741 spin_unlock(&ps->ps_lock);
1742
1743 if (!list_empty(&zombies))
1744 kiblnd_destroy_pool_list(&zombies);
1745 }
1746
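/*
 * Take a free node from the first pool that has one, growing the poolset
 * when all pools are exhausted. Returns NULL only if a recent attempt to
 * create a new pool failed and the retry interval has not yet expired.
 */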
1747 struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps)
1748 {
1749 struct list_head *node;
1750 kib_pool_t *pool;
1751 int rc;
1752
1753 again:
1754 spin_lock(&ps->ps_lock);
1755 list_for_each_entry(pool, &ps->ps_pool_list, po_list) {
1756 if (list_empty(&pool->po_free_list))
1757 continue;
1758
1759 pool->po_allocated++;
1760 pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
1761 node = pool->po_free_list.next;
1762 list_del(node);
1763
1764 if (ps->ps_node_init != NULL) {
1765 /* still hold the lock */
1766 ps->ps_node_init(pool, node);
1767 }
1768 spin_unlock(&ps->ps_lock);
1769 return node;
1770 }
1771
1772 /* no available tx pool and ... */
1773 if (ps->ps_increasing) {
1774 /* another thread is allocating a new pool */
1775 spin_unlock(&ps->ps_lock);
1776 CDEBUG(D_NET, "Another thread is allocating new %s pool, waiting for her to complete\n",
1777 ps->ps_name);
1778 schedule();
1779 goto again;
1780 }
1781
1782 if (time_before(cfs_time_current(), ps->ps_next_retry)) {
1783 /* someone failed recently */
1784 spin_unlock(&ps->ps_lock);
1785 return NULL;
1786 }
1787
1788 ps->ps_increasing = 1;
1789 spin_unlock(&ps->ps_lock);
1790
1791 CDEBUG(D_NET, "%s pool exhausted, allocate new pool\n", ps->ps_name);
1792
1793 rc = ps->ps_pool_create(ps, ps->ps_pool_size, &pool);
1794
1795 spin_lock(&ps->ps_lock);
1796 ps->ps_increasing = 0;
1797 if (rc == 0) {
1798 list_add_tail(&pool->po_list, &ps->ps_pool_list);
1799 } else {
1800 ps->ps_next_retry = cfs_time_shift(IBLND_POOL_RETRY);
1801 CERROR("Can't allocate new %s pool because out of memory\n",
1802 ps->ps_name);
1803 }
1804 spin_unlock(&ps->ps_lock);
1805
1806 goto again;
1807 }
1808
1809 static void kiblnd_destroy_tx_pool(kib_pool_t *pool)
1810 {
1811 kib_tx_pool_t *tpo = container_of(pool, kib_tx_pool_t, tpo_pool);
1812 int i;
1813
1814 LASSERT(pool->po_allocated == 0);
1815
1816 if (tpo->tpo_tx_pages != NULL) {
1817 kiblnd_unmap_tx_pool(tpo);
1818 kiblnd_free_pages(tpo->tpo_tx_pages);
1819 }
1820
1821 if (tpo->tpo_tx_descs == NULL)
1822 goto out;
1823
1824 for (i = 0; i < pool->po_size; i++) {
1825 kib_tx_t *tx = &tpo->tpo_tx_descs[i];
1826
1827 list_del(&tx->tx_list);
1828 if (tx->tx_pages != NULL)
1829 LIBCFS_FREE(tx->tx_pages,
1830 LNET_MAX_IOV *
1831 sizeof(*tx->tx_pages));
1832 if (tx->tx_frags != NULL)
1833 LIBCFS_FREE(tx->tx_frags,
1834 IBLND_MAX_RDMA_FRAGS *
1835 sizeof(*tx->tx_frags));
1836 if (tx->tx_wrq != NULL)
1837 LIBCFS_FREE(tx->tx_wrq,
1838 (1 + IBLND_MAX_RDMA_FRAGS) *
1839 sizeof(*tx->tx_wrq));
1840 if (tx->tx_sge != NULL)
1841 LIBCFS_FREE(tx->tx_sge,
1842 (1 + IBLND_MAX_RDMA_FRAGS) *
1843 sizeof(*tx->tx_sge));
1844 if (tx->tx_rd != NULL)
1845 LIBCFS_FREE(tx->tx_rd,
1846 offsetof(kib_rdma_desc_t,
1847 rd_frags[IBLND_MAX_RDMA_FRAGS]));
1848 }
1849
1850 LIBCFS_FREE(tpo->tpo_tx_descs,
1851 pool->po_size * sizeof(kib_tx_t));
1852 out:
1853 kiblnd_fini_pool(pool);
1854 LIBCFS_FREE(tpo, sizeof(*tpo));
1855 }
1856
1857 static int kiblnd_tx_pool_size(int ncpts)
1858 {
1859 int ntx = *kiblnd_tunables.kib_ntx / ncpts;
1860
1861 return max(IBLND_TX_POOL, ntx);
1862 }
1863
1864 static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size,
1865 kib_pool_t **pp_po)
1866 {
1867 int i;
1868 int npg;
1869 kib_pool_t *pool;
1870 kib_tx_pool_t *tpo;
1871
1872 LIBCFS_CPT_ALLOC(tpo, lnet_cpt_table(), ps->ps_cpt, sizeof(*tpo));
1873 if (tpo == NULL) {
1874 CERROR("Failed to allocate TX pool\n");
1875 return -ENOMEM;
1876 }
1877
1878 pool = &tpo->tpo_pool;
1879 kiblnd_init_pool(ps, pool, size);
1880 tpo->tpo_tx_descs = NULL;
1881 tpo->tpo_tx_pages = NULL;
1882
1883 npg = (size * IBLND_MSG_SIZE + PAGE_SIZE - 1) / PAGE_SIZE;
1884 if (kiblnd_alloc_pages(&tpo->tpo_tx_pages, ps->ps_cpt, npg) != 0) {
1885 CERROR("Can't allocate tx pages: %d\n", npg);
1886 LIBCFS_FREE(tpo, sizeof(*tpo));
1887 return -ENOMEM;
1888 }
1889
1890 LIBCFS_CPT_ALLOC(tpo->tpo_tx_descs, lnet_cpt_table(), ps->ps_cpt,
1891 size * sizeof(kib_tx_t));
1892 if (tpo->tpo_tx_descs == NULL) {
1893 CERROR("Can't allocate %d tx descriptors\n", size);
1894 ps->ps_pool_destroy(pool);
1895 return -ENOMEM;
1896 }
1897
1898 memset(tpo->tpo_tx_descs, 0, size * sizeof(kib_tx_t));
1899
1900 for (i = 0; i < size; i++) {
1901 kib_tx_t *tx = &tpo->tpo_tx_descs[i];
1902
1903 tx->tx_pool = tpo;
1904 if (ps->ps_net->ibn_fmr_ps != NULL) {
1905 LIBCFS_CPT_ALLOC(tx->tx_pages,
1906 lnet_cpt_table(), ps->ps_cpt,
1907 LNET_MAX_IOV * sizeof(*tx->tx_pages));
1908 if (tx->tx_pages == NULL)
1909 break;
1910 }
1911
1912 LIBCFS_CPT_ALLOC(tx->tx_frags, lnet_cpt_table(), ps->ps_cpt,
1913 IBLND_MAX_RDMA_FRAGS * sizeof(*tx->tx_frags));
1914 if (tx->tx_frags == NULL)
1915 break;
1916
1917 sg_init_table(tx->tx_frags, IBLND_MAX_RDMA_FRAGS);
1918
1919 LIBCFS_CPT_ALLOC(tx->tx_wrq, lnet_cpt_table(), ps->ps_cpt,
1920 (1 + IBLND_MAX_RDMA_FRAGS) *
1921 sizeof(*tx->tx_wrq));
1922 if (tx->tx_wrq == NULL)
1923 break;
1924
1925 LIBCFS_CPT_ALLOC(tx->tx_sge, lnet_cpt_table(), ps->ps_cpt,
1926 (1 + IBLND_MAX_RDMA_FRAGS) *
1927 sizeof(*tx->tx_sge));
1928 if (tx->tx_sge == NULL)
1929 break;
1930
1931 LIBCFS_CPT_ALLOC(tx->tx_rd, lnet_cpt_table(), ps->ps_cpt,
1932 offsetof(kib_rdma_desc_t,
1933 rd_frags[IBLND_MAX_RDMA_FRAGS]));
1934 if (tx->tx_rd == NULL)
1935 break;
1936 }
1937
1938 if (i == size) {
1939 kiblnd_map_tx_pool(tpo);
1940 *pp_po = pool;
1941 return 0;
1942 }
1943
1944 ps->ps_pool_destroy(pool);
1945 return -ENOMEM;
1946 }
1947
1948 static void kiblnd_tx_init(kib_pool_t *pool, struct list_head *node)
1949 {
1950 kib_tx_poolset_t *tps = container_of(pool->po_owner, kib_tx_poolset_t,
1951 tps_poolset);
1952 kib_tx_t *tx = list_entry(node, kib_tx_t, tx_list);
1953
1954 tx->tx_cookie = tps->tps_next_tx_cookie++;
1955 }
1956
1957 static void kiblnd_net_fini_pools(kib_net_t *net)
1958 {
1959 int i;
1960
1961 cfs_cpt_for_each(i, lnet_cpt_table()) {
1962 kib_tx_poolset_t *tps;
1963 kib_fmr_poolset_t *fps;
1964
1965 if (net->ibn_tx_ps != NULL) {
1966 tps = net->ibn_tx_ps[i];
1967 kiblnd_fini_poolset(&tps->tps_poolset);
1968 }
1969
1970 if (net->ibn_fmr_ps != NULL) {
1971 fps = net->ibn_fmr_ps[i];
1972 kiblnd_fini_fmr_poolset(fps);
1973 }
1974 }
1975
1976 if (net->ibn_tx_ps != NULL) {
1977 cfs_percpt_free(net->ibn_tx_ps);
1978 net->ibn_tx_ps = NULL;
1979 }
1980
1981 if (net->ibn_fmr_ps != NULL) {
1982 cfs_percpt_free(net->ibn_fmr_ps);
1983 net->ibn_fmr_ps = NULL;
1984 }
1985 }
1986
1987 static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts)
1988 {
1989 unsigned long flags;
1990 int cpt;
1991 int rc = 0;
1992 int i;
1993
1994 read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
1995 if (*kiblnd_tunables.kib_map_on_demand == 0 &&
1996 net->ibn_dev->ibd_hdev->ibh_nmrs == 1) {
1997 read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
1998 goto create_tx_pool;
1999 }
2000
2001 read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2002
2003 if (*kiblnd_tunables.kib_fmr_pool_size <
2004 *kiblnd_tunables.kib_ntx / 4) {
2005 CERROR("Can't set fmr pool size (%d) < ntx / 4(%d)\n",
2006 *kiblnd_tunables.kib_fmr_pool_size,
2007 *kiblnd_tunables.kib_ntx / 4);
2008 rc = -EINVAL;
2009 goto failed;
2010 }
2011
2012 /*
2013 * TX pool must be created later than FMR, see LU-2268
2014 * for details
2015 */
2016 LASSERT(net->ibn_tx_ps == NULL);
2017
2018 /*
2019 * premapping can fail if ibd_nmr > 1, so we always create an
2020 * FMR pool and fall back to map-on-demand if premapping failed
2021 */
2022
2023 net->ibn_fmr_ps = cfs_percpt_alloc(lnet_cpt_table(),
2024 sizeof(kib_fmr_poolset_t));
2025 if (net->ibn_fmr_ps == NULL) {
2026 CERROR("Failed to allocate FMR pool array\n");
2027 rc = -ENOMEM;
2028 goto failed;
2029 }
2030
2031 for (i = 0; i < ncpts; i++) {
2032 cpt = (cpts == NULL) ? i : cpts[i];
2033 rc = kiblnd_init_fmr_poolset(net->ibn_fmr_ps[cpt], cpt, net,
2034 kiblnd_fmr_pool_size(ncpts),
2035 kiblnd_fmr_flush_trigger(ncpts));
2036 if (rc == -ENOSYS && i == 0) /* no FMR */
2037 break;
2038
2039 if (rc != 0) { /* a real error */
2040 CERROR("Can't initialize FMR pool for CPT %d: %d\n",
2041 cpt, rc);
2042 goto failed;
2043 }
2044 }
2045
2046 if (i > 0) {
2047 LASSERT(i == ncpts);
2048 goto create_tx_pool;
2049 }
2050
2051 cfs_percpt_free(net->ibn_fmr_ps);
2052 net->ibn_fmr_ps = NULL;
2053
2054 CWARN("Device does not support FMR\n");
2055 goto failed;
2056
2057 create_tx_pool:
2058 net->ibn_tx_ps = cfs_percpt_alloc(lnet_cpt_table(),
2059 sizeof(kib_tx_poolset_t));
2060 if (net->ibn_tx_ps == NULL) {
2061 CERROR("Failed to allocate tx pool array\n");
2062 rc = -ENOMEM;
2063 goto failed;
2064 }
2065
2066 for (i = 0; i < ncpts; i++) {
2067 cpt = (cpts == NULL) ? i : cpts[i];
2068 rc = kiblnd_init_poolset(&net->ibn_tx_ps[cpt]->tps_poolset,
2069 cpt, net, "TX",
2070 kiblnd_tx_pool_size(ncpts),
2071 kiblnd_create_tx_pool,
2072 kiblnd_destroy_tx_pool,
2073 kiblnd_tx_init, NULL);
2074 if (rc != 0) {
2075 CERROR("Can't initialize TX pool for CPT %d: %d\n",
2076 cpt, rc);
2077 goto failed;
2078 }
2079 }
2080
2081 return 0;
2082 failed:
2083 kiblnd_net_fini_pools(net);
2084 LASSERT(rc != 0);
2085 return rc;
2086 }
2087
2088 static int kiblnd_hdev_get_attr(kib_hca_dev_t *hdev)
2089 {
2090 /*
2091 * It's safe to assume a HCA can handle a page size
2092 * matching that of the native system
2093 */
2094 hdev->ibh_page_shift = PAGE_SHIFT;
2095 hdev->ibh_page_size = 1 << PAGE_SHIFT;
2096 hdev->ibh_page_mask = ~((__u64)hdev->ibh_page_size - 1);
2097
2098 hdev->ibh_mr_size = hdev->ibh_ibdev->attrs.max_mr_size;
2099 if (hdev->ibh_mr_size == ~0ULL) {
2100 hdev->ibh_mr_shift = 64;
2101 return 0;
2102 }
2103
2104 for (hdev->ibh_mr_shift = 0;
2105 hdev->ibh_mr_shift < 64; hdev->ibh_mr_shift++) {
2106 if (hdev->ibh_mr_size == (1ULL << hdev->ibh_mr_shift) ||
2107 hdev->ibh_mr_size == (1ULL << hdev->ibh_mr_shift) - 1)
2108 return 0;
2109 }
2110
2111 CERROR("Invalid mr size: %#llx\n", hdev->ibh_mr_size);
2112 return -EINVAL;
2113 }
2114
2115 static void kiblnd_hdev_cleanup_mrs(kib_hca_dev_t *hdev)
2116 {
2117 int i;
2118
2119 if (hdev->ibh_nmrs == 0 || hdev->ibh_mrs == NULL)
2120 return;
2121
2122 for (i = 0; i < hdev->ibh_nmrs; i++) {
2123 if (hdev->ibh_mrs[i] == NULL)
2124 break;
2125
2126 ib_dereg_mr(hdev->ibh_mrs[i]);
2127 }
2128
2129 LIBCFS_FREE(hdev->ibh_mrs, sizeof(*hdev->ibh_mrs) * hdev->ibh_nmrs);
2130 hdev->ibh_mrs = NULL;
2131 hdev->ibh_nmrs = 0;
2132 }
2133
2134 void kiblnd_hdev_destroy(kib_hca_dev_t *hdev)
2135 {
2136 kiblnd_hdev_cleanup_mrs(hdev);
2137
2138 if (hdev->ibh_pd != NULL)
2139 ib_dealloc_pd(hdev->ibh_pd);
2140
2141 if (hdev->ibh_cmid != NULL)
2142 rdma_destroy_id(hdev->ibh_cmid);
2143
2144 LIBCFS_FREE(hdev, sizeof(*hdev));
2145 }
2146
2147 static int kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev)
2148 {
2149 struct ib_mr *mr;
2150 int rc;
2151 int acflags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE;
2152
2153 rc = kiblnd_hdev_get_attr(hdev);
2154 if (rc != 0)
2155 return rc;
2156
2157 LIBCFS_ALLOC(hdev->ibh_mrs, 1 * sizeof(*hdev->ibh_mrs));
2158 if (hdev->ibh_mrs == NULL) {
2159 CERROR("Failed to allocate MRs table\n");
2160 return -ENOMEM;
2161 }
2162
2163 hdev->ibh_mrs[0] = NULL;
2164 hdev->ibh_nmrs = 1;
2165
2166 mr = ib_get_dma_mr(hdev->ibh_pd, acflags);
2167 if (IS_ERR(mr)) {
2168 		CERROR("Failed ib_get_dma_mr: %ld\n", PTR_ERR(mr));
2169 kiblnd_hdev_cleanup_mrs(hdev);
2170 return PTR_ERR(mr);
2171 }
2172
2173 hdev->ibh_mrs[0] = mr;
2174
2175 return 0;
2176 }
2177
2178 /* dummy CM event callback, used only by the probe cmid in kiblnd_dev_need_failover() */
2179 static int kiblnd_dummy_callback(struct rdma_cm_id *cmid,
2180 struct rdma_cm_event *event)
2181 {
2182 return 0;
2183 }
2184
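/*
 * Return 1 if the device should fail over (no hdev yet, the listener cmid
 * is gone, or the local IP now resolves to a different ib_device), 0 if
 * not, or a negative errno if the probe itself fails.
 */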
2185 static int kiblnd_dev_need_failover(kib_dev_t *dev)
2186 {
2187 struct rdma_cm_id *cmid;
2188 struct sockaddr_in srcaddr;
2189 struct sockaddr_in dstaddr;
2190 int rc;
2191
2192 if (dev->ibd_hdev == NULL || /* initializing */
2193 dev->ibd_hdev->ibh_cmid == NULL || /* listener is dead */
2194 *kiblnd_tunables.kib_dev_failover > 1) /* debugging */
2195 return 1;
2196
2197 	/*
2198 	 * XXX: it's ugly, but there is no better way to detect an
2199 	 * ib-bonding HCA failover, because:
2200 	 *
2201 	 * a. there is no reliable CM event for HCA failover...
2202 	 * b. there is no OFED API to get the ib_device for the current net_device...
2203 	 *
2204 	 * We have only two choices at this point:
2205 	 *
2206 	 * a. rdma_bind_addr(), which would conflict with the listener cmid
2207 	 * b. rdma_resolve_addr() to the zero address
2208 	 */
2209 cmid = kiblnd_rdma_create_id(kiblnd_dummy_callback, dev, RDMA_PS_TCP,
2210 IB_QPT_RC);
2211 if (IS_ERR(cmid)) {
2212 rc = PTR_ERR(cmid);
2213 CERROR("Failed to create cmid for failover: %d\n", rc);
2214 return rc;
2215 }
2216
2217 memset(&srcaddr, 0, sizeof(srcaddr));
2218 srcaddr.sin_family = AF_INET;
2219 srcaddr.sin_addr.s_addr = (__force u32)htonl(dev->ibd_ifip);
2220
2221 memset(&dstaddr, 0, sizeof(dstaddr));
2222 dstaddr.sin_family = AF_INET;
2223 rc = rdma_resolve_addr(cmid, (struct sockaddr *)&srcaddr,
2224 (struct sockaddr *)&dstaddr, 1);
2225 if (rc != 0 || cmid->device == NULL) {
2226 CERROR("Failed to bind %s:%pI4h to device(%p): %d\n",
2227 dev->ibd_ifname, &dev->ibd_ifip,
2228 cmid->device, rc);
2229 rdma_destroy_id(cmid);
2230 return rc;
2231 }
2232
2233 rc = dev->ibd_hdev->ibh_ibdev != cmid->device; /* true for failover */
2234 rdma_destroy_id(cmid);
2235
2236 return rc;
2237 }
2238
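/*
 * Rebind the device to whichever HCA currently owns its IP: drop the old
 * listener, create and bind a new cmid, allocate a fresh PD and MRs, swap
 * the new kib_hca_dev_t into dev->ibd_hdev, then fail the existing TX and
 * FMR pools so they are recreated against the new HCA.
 */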
2239 int kiblnd_dev_failover(kib_dev_t *dev)
2240 {
2241 LIST_HEAD(zombie_tpo);
2242 LIST_HEAD(zombie_ppo);
2243 LIST_HEAD(zombie_fpo);
2244 struct rdma_cm_id *cmid = NULL;
2245 kib_hca_dev_t *hdev = NULL;
2246 struct ib_pd *pd;
2247 kib_net_t *net;
2248 struct sockaddr_in addr;
2249 unsigned long flags;
2250 int rc = 0;
2251 int i;
2252
2253 LASSERT(*kiblnd_tunables.kib_dev_failover > 1 ||
2254 dev->ibd_can_failover || dev->ibd_hdev == NULL);
2255
2256 rc = kiblnd_dev_need_failover(dev);
2257 if (rc <= 0)
2258 goto out;
2259
2260 if (dev->ibd_hdev != NULL &&
2261 dev->ibd_hdev->ibh_cmid != NULL) {
2262 		/*
2263 		 * XXX it's not ideal to close the old listener here, because
2264 		 * creating the new listener can still fail. But we have to
2265 		 * close it now, otherwise rdma_bind_addr() will return
2266 		 * EADDRINUSE.
2267 		 */
2268 write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2269
2270 cmid = dev->ibd_hdev->ibh_cmid;
2271 		/*
2272 		 * make the next call to kiblnd_dev_need_failover()
2273 		 * return 1 for this device
2274 		 */
2275 dev->ibd_hdev->ibh_cmid = NULL;
2276 write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2277
2278 rdma_destroy_id(cmid);
2279 }
2280
2281 cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, dev, RDMA_PS_TCP,
2282 IB_QPT_RC);
2283 if (IS_ERR(cmid)) {
2284 rc = PTR_ERR(cmid);
2285 CERROR("Failed to create cmid for failover: %d\n", rc);
2286 goto out;
2287 }
2288
2289 memset(&addr, 0, sizeof(addr));
2290 addr.sin_family = AF_INET;
2291 addr.sin_addr.s_addr = (__force u32)htonl(dev->ibd_ifip);
2292 addr.sin_port = htons(*kiblnd_tunables.kib_service);
2293
2294 /* Bind to failover device or port */
2295 rc = rdma_bind_addr(cmid, (struct sockaddr *)&addr);
2296 if (rc != 0 || cmid->device == NULL) {
2297 CERROR("Failed to bind %s:%pI4h to device(%p): %d\n",
2298 dev->ibd_ifname, &dev->ibd_ifip,
2299 cmid->device, rc);
2300 rdma_destroy_id(cmid);
2301 goto out;
2302 }
2303
2304 LIBCFS_ALLOC(hdev, sizeof(*hdev));
2305 if (hdev == NULL) {
2306 CERROR("Failed to allocate kib_hca_dev\n");
2307 rdma_destroy_id(cmid);
2308 rc = -ENOMEM;
2309 goto out;
2310 }
2311
2312 atomic_set(&hdev->ibh_ref, 1);
2313 hdev->ibh_dev = dev;
2314 hdev->ibh_cmid = cmid;
2315 hdev->ibh_ibdev = cmid->device;
2316
2317 pd = ib_alloc_pd(cmid->device);
2318 if (IS_ERR(pd)) {
2319 rc = PTR_ERR(pd);
2320 CERROR("Can't allocate PD: %d\n", rc);
2321 goto out;
2322 }
2323
2324 hdev->ibh_pd = pd;
2325
2326 rc = rdma_listen(cmid, 0);
2327 if (rc != 0) {
2328 CERROR("Can't start new listener: %d\n", rc);
2329 goto out;
2330 }
2331
2332 rc = kiblnd_hdev_setup_mrs(hdev);
2333 if (rc != 0) {
2334 CERROR("Can't setup device: %d\n", rc);
2335 goto out;
2336 }
2337
2338 write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2339
2340 swap(dev->ibd_hdev, hdev); /* take over the refcount */
2341
2342 list_for_each_entry(net, &dev->ibd_nets, ibn_list) {
2343 cfs_cpt_for_each(i, lnet_cpt_table()) {
2344 kiblnd_fail_poolset(&net->ibn_tx_ps[i]->tps_poolset,
2345 &zombie_tpo);
2346
2347 if (net->ibn_fmr_ps)
2348 kiblnd_fail_fmr_poolset(net->ibn_fmr_ps[i],
2349 &zombie_fpo);
2350 }
2351 }
2352
2353 write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2354 out:
2355 if (!list_empty(&zombie_tpo))
2356 kiblnd_destroy_pool_list(&zombie_tpo);
2357 if (!list_empty(&zombie_ppo))
2358 kiblnd_destroy_pool_list(&zombie_ppo);
2359 if (!list_empty(&zombie_fpo))
2360 kiblnd_destroy_fmr_pool_list(&zombie_fpo);
2361 if (hdev != NULL)
2362 kiblnd_hdev_decref(hdev);
2363
2364 if (rc != 0)
2365 dev->ibd_failed_failover++;
2366 else
2367 dev->ibd_failed_failover = 0;
2368
2369 return rc;
2370 }
2371
2372 void kiblnd_destroy_dev(kib_dev_t *dev)
2373 {
2374 LASSERT(dev->ibd_nnets == 0);
2375 LASSERT(list_empty(&dev->ibd_nets));
2376
2377 list_del(&dev->ibd_fail_list);
2378 list_del(&dev->ibd_list);
2379
2380 if (dev->ibd_hdev != NULL)
2381 kiblnd_hdev_decref(dev->ibd_hdev);
2382
2383 LIBCFS_FREE(dev, sizeof(*dev));
2384 }
2385
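/*
 * Create a kib_dev_t for the named IPoIB interface. The interface must be
 * up; its IP is recorded, failover is allowed only when the netdev is a
 * bonding master (IFF_MASTER), and kiblnd_dev_failover() performs the
 * initial HCA binding (listener, PD, MRs).
 */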
2386 static kib_dev_t *kiblnd_create_dev(char *ifname)
2387 {
2388 struct net_device *netdev;
2389 kib_dev_t *dev;
2390 __u32 netmask;
2391 __u32 ip;
2392 int up;
2393 int rc;
2394
2395 rc = lnet_ipif_query(ifname, &up, &ip, &netmask);
2396 if (rc != 0) {
2397 CERROR("Can't query IPoIB interface %s: %d\n",
2398 ifname, rc);
2399 return NULL;
2400 }
2401
2402 if (!up) {
2403 CERROR("Can't query IPoIB interface %s: it's down\n", ifname);
2404 return NULL;
2405 }
2406
2407 LIBCFS_ALLOC(dev, sizeof(*dev));
2408 if (dev == NULL)
2409 return NULL;
2410
2411 netdev = dev_get_by_name(&init_net, ifname);
2412 if (netdev == NULL) {
2413 dev->ibd_can_failover = 0;
2414 } else {
2415 dev->ibd_can_failover = !!(netdev->flags & IFF_MASTER);
2416 dev_put(netdev);
2417 }
2418
2419 INIT_LIST_HEAD(&dev->ibd_nets);
2420 INIT_LIST_HEAD(&dev->ibd_list); /* not yet in kib_devs */
2421 INIT_LIST_HEAD(&dev->ibd_fail_list);
2422 dev->ibd_ifip = ip;
2423 strcpy(&dev->ibd_ifname[0], ifname);
2424
2425 /* initialize the device */
2426 rc = kiblnd_dev_failover(dev);
2427 if (rc != 0) {
2428 CERROR("Can't initialize device: %d\n", rc);
2429 LIBCFS_FREE(dev, sizeof(*dev));
2430 return NULL;
2431 }
2432
2433 list_add_tail(&dev->ibd_list, &kiblnd_data.kib_devs);
2434 return dev;
2435 }
2436
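/*
 * Tear down module-wide state: flag shutdown, wake the connd, failover and
 * scheduler threads, wait for kib_nthreads to reach zero, then free the
 * peer hash table and the per-CPT scheduler array.
 */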
2437 static void kiblnd_base_shutdown(void)
2438 {
2439 struct kib_sched_info *sched;
2440 int i;
2441
2442 LASSERT(list_empty(&kiblnd_data.kib_devs));
2443
2444 switch (kiblnd_data.kib_init) {
2445 default:
2446 LBUG();
2447
2448 case IBLND_INIT_ALL:
2449 case IBLND_INIT_DATA:
2450 LASSERT(kiblnd_data.kib_peers != NULL);
2451 for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++)
2452 LASSERT(list_empty(&kiblnd_data.kib_peers[i]));
2453 LASSERT(list_empty(&kiblnd_data.kib_connd_zombies));
2454 LASSERT(list_empty(&kiblnd_data.kib_connd_conns));
2455
2456 /* flag threads to terminate; wake and wait for them to die */
2457 kiblnd_data.kib_shutdown = 1;
2458
2459 		/*
2460 		 * NB: we really want to stop scheduler threads net by net
2461 		 * instead of for the whole module; this should be improved
2462 		 * once LNet supports dynamic configuration
2463 		 */
2464 cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds)
2465 wake_up_all(&sched->ibs_waitq);
2466
2467 wake_up_all(&kiblnd_data.kib_connd_waitq);
2468 wake_up_all(&kiblnd_data.kib_failover_waitq);
2469
2470 i = 2;
2471 while (atomic_read(&kiblnd_data.kib_nthreads) != 0) {
2472 i++;
2473 /* power of 2 ? */
2474 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
2475 "Waiting for %d threads to terminate\n",
2476 atomic_read(&kiblnd_data.kib_nthreads));
2477 set_current_state(TASK_UNINTERRUPTIBLE);
2478 schedule_timeout(cfs_time_seconds(1));
2479 }
2480
2481 /* fall through */
2482
2483 case IBLND_INIT_NOTHING:
2484 break;
2485 }
2486
2487 if (kiblnd_data.kib_peers != NULL) {
2488 LIBCFS_FREE(kiblnd_data.kib_peers,
2489 sizeof(struct list_head) *
2490 kiblnd_data.kib_peer_hash_size);
2491 }
2492
2493 if (kiblnd_data.kib_scheds != NULL)
2494 cfs_percpt_free(kiblnd_data.kib_scheds);
2495
2496 kiblnd_data.kib_init = IBLND_INIT_NOTHING;
2497 module_put(THIS_MODULE);
2498 }
2499
2500 void kiblnd_shutdown(lnet_ni_t *ni)
2501 {
2502 kib_net_t *net = ni->ni_data;
2503 rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
2504 int i;
2505 unsigned long flags;
2506
2507 LASSERT(kiblnd_data.kib_init == IBLND_INIT_ALL);
2508
2509 if (net == NULL)
2510 goto out;
2511
2512 write_lock_irqsave(g_lock, flags);
2513 net->ibn_shutdown = 1;
2514 write_unlock_irqrestore(g_lock, flags);
2515
2516 switch (net->ibn_init) {
2517 default:
2518 LBUG();
2519
2520 case IBLND_INIT_ALL:
2521 /* nuke all existing peers within this net */
2522 kiblnd_del_peer(ni, LNET_NID_ANY);
2523
2524 /* Wait for all peer state to clean up */
2525 i = 2;
2526 while (atomic_read(&net->ibn_npeers) != 0) {
2527 i++;
2528 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n? */
2529 "%s: waiting for %d peers to disconnect\n",
2530 libcfs_nid2str(ni->ni_nid),
2531 atomic_read(&net->ibn_npeers));
2532 set_current_state(TASK_UNINTERRUPTIBLE);
2533 schedule_timeout(cfs_time_seconds(1));
2534 }
2535
2536 kiblnd_net_fini_pools(net);
2537
2538 write_lock_irqsave(g_lock, flags);
2539 LASSERT(net->ibn_dev->ibd_nnets > 0);
2540 net->ibn_dev->ibd_nnets--;
2541 list_del(&net->ibn_list);
2542 write_unlock_irqrestore(g_lock, flags);
2543
2544 /* fall through */
2545
2546 case IBLND_INIT_NOTHING:
2547 LASSERT(atomic_read(&net->ibn_nconns) == 0);
2548
2549 if (net->ibn_dev != NULL &&
2550 net->ibn_dev->ibd_nnets == 0)
2551 kiblnd_destroy_dev(net->ibn_dev);
2552
2553 break;
2554 }
2555
2556 net->ibn_init = IBLND_INIT_NOTHING;
2557 ni->ni_data = NULL;
2558
2559 LIBCFS_FREE(net, sizeof(*net));
2560
2561 out:
2562 if (list_empty(&kiblnd_data.kib_devs))
2563 kiblnd_base_shutdown();
2564 }
2565
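/*
 * One-time module-wide initialisation: global lock, peer hash table, connd
 * lists and waitqueues, per-CPT scheduler info, then the connd thread and,
 * if dev_failover is enabled, the failover thread.
 */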
2566 static int kiblnd_base_startup(void)
2567 {
2568 struct kib_sched_info *sched;
2569 int rc;
2570 int i;
2571
2572 LASSERT(kiblnd_data.kib_init == IBLND_INIT_NOTHING);
2573
2574 try_module_get(THIS_MODULE);
2575 /* zero pointers, flags etc */
2576 memset(&kiblnd_data, 0, sizeof(kiblnd_data));
2577
2578 rwlock_init(&kiblnd_data.kib_global_lock);
2579
2580 INIT_LIST_HEAD(&kiblnd_data.kib_devs);
2581 INIT_LIST_HEAD(&kiblnd_data.kib_failed_devs);
2582
2583 kiblnd_data.kib_peer_hash_size = IBLND_PEER_HASH_SIZE;
2584 LIBCFS_ALLOC(kiblnd_data.kib_peers,
2585 sizeof(struct list_head) * kiblnd_data.kib_peer_hash_size);
2586 if (kiblnd_data.kib_peers == NULL)
2587 goto failed;
2588 for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++)
2589 INIT_LIST_HEAD(&kiblnd_data.kib_peers[i]);
2590
2591 spin_lock_init(&kiblnd_data.kib_connd_lock);
2592 INIT_LIST_HEAD(&kiblnd_data.kib_connd_conns);
2593 INIT_LIST_HEAD(&kiblnd_data.kib_connd_zombies);
2594 init_waitqueue_head(&kiblnd_data.kib_connd_waitq);
2595 init_waitqueue_head(&kiblnd_data.kib_failover_waitq);
2596
2597 kiblnd_data.kib_scheds = cfs_percpt_alloc(lnet_cpt_table(),
2598 sizeof(*sched));
2599 if (kiblnd_data.kib_scheds == NULL)
2600 goto failed;
2601
2602 cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds) {
2603 int nthrs;
2604
2605 spin_lock_init(&sched->ibs_lock);
2606 INIT_LIST_HEAD(&sched->ibs_conns);
2607 init_waitqueue_head(&sched->ibs_waitq);
2608
2609 nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
2610 if (*kiblnd_tunables.kib_nscheds > 0) {
2611 nthrs = min(nthrs, *kiblnd_tunables.kib_nscheds);
2612 } else {
2613 			/*
2614 			 * use at most half of the CPUs; the other half is
2615 			 * reserved for upper-layer modules
2616 			 */
2617 nthrs = min(max(IBLND_N_SCHED, nthrs >> 1), nthrs);
2618 }
2619
2620 sched->ibs_nthreads_max = nthrs;
2621 sched->ibs_cpt = i;
2622 }
2623
2624 kiblnd_data.kib_error_qpa.qp_state = IB_QPS_ERR;
2625
2626 /* lists/ptrs/locks initialised */
2627 kiblnd_data.kib_init = IBLND_INIT_DATA;
2628 /*****************************************************/
2629
2630 rc = kiblnd_thread_start(kiblnd_connd, NULL, "kiblnd_connd");
2631 if (rc != 0) {
2632 CERROR("Can't spawn o2iblnd connd: %d\n", rc);
2633 goto failed;
2634 }
2635
2636 if (*kiblnd_tunables.kib_dev_failover != 0)
2637 rc = kiblnd_thread_start(kiblnd_failover_thread, NULL,
2638 "kiblnd_failover");
2639
2640 if (rc != 0) {
2641 CERROR("Can't spawn o2iblnd failover thread: %d\n", rc);
2642 goto failed;
2643 }
2644
2645 /* flag everything initialised */
2646 kiblnd_data.kib_init = IBLND_INIT_ALL;
2647 /*****************************************************/
2648
2649 return 0;
2650
2651 failed:
2652 kiblnd_base_shutdown();
2653 return -ENETDOWN;
2654 }
2655
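/*
 * Start scheduler threads for one CPT: on first use the count comes from
 * the nscheds tunable or, by default, roughly half of the CPT's CPUs
 * capped at IBLND_N_SCHED_HIGH; later calls add at most one thread per
 * newly added interface.
 */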
2656 static int kiblnd_start_schedulers(struct kib_sched_info *sched)
2657 {
2658 int rc = 0;
2659 int nthrs;
2660 int i;
2661
2662 if (sched->ibs_nthreads == 0) {
2663 if (*kiblnd_tunables.kib_nscheds > 0) {
2664 nthrs = sched->ibs_nthreads_max;
2665 } else {
2666 nthrs = cfs_cpt_weight(lnet_cpt_table(),
2667 sched->ibs_cpt);
2668 nthrs = min(max(IBLND_N_SCHED, nthrs >> 1), nthrs);
2669 nthrs = min(IBLND_N_SCHED_HIGH, nthrs);
2670 }
2671 } else {
2672 LASSERT(sched->ibs_nthreads <= sched->ibs_nthreads_max);
2673 		/* add one more thread if a new interface was added */
2674 nthrs = sched->ibs_nthreads < sched->ibs_nthreads_max;
2675 }
2676
2677 for (i = 0; i < nthrs; i++) {
2678 long id;
2679 char name[20];
2680
2681 id = KIB_THREAD_ID(sched->ibs_cpt, sched->ibs_nthreads + i);
2682 snprintf(name, sizeof(name), "kiblnd_sd_%02ld_%02ld",
2683 KIB_THREAD_CPT(id), KIB_THREAD_TID(id));
2684 rc = kiblnd_thread_start(kiblnd_scheduler, (void *)id, name);
2685 if (rc == 0)
2686 continue;
2687
2688 CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
2689 sched->ibs_cpt, sched->ibs_nthreads + i, rc);
2690 break;
2691 }
2692
2693 sched->ibs_nthreads += i;
2694 return rc;
2695 }
2696
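/*
 * Ensure every CPT the NI is bound to has scheduler threads; CPTs that
 * already have threads are skipped unless this is a newly created device.
 */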
2697 static int kiblnd_dev_start_threads(kib_dev_t *dev, int newdev, __u32 *cpts,
2698 int ncpts)
2699 {
2700 int cpt;
2701 int rc;
2702 int i;
2703
2704 for (i = 0; i < ncpts; i++) {
2705 struct kib_sched_info *sched;
2706
2707 cpt = (cpts == NULL) ? i : cpts[i];
2708 sched = kiblnd_data.kib_scheds[cpt];
2709
2710 if (!newdev && sched->ibs_nthreads > 0)
2711 continue;
2712
2713 rc = kiblnd_start_schedulers(kiblnd_data.kib_scheds[cpt]);
2714 if (rc != 0) {
2715 CERROR("Failed to start scheduler threads for %s\n",
2716 dev->ibd_ifname);
2717 return rc;
2718 }
2719 }
2720 return 0;
2721 }
2722
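/*
 * Find an existing kib_dev by interface name. An alias match (names equal
 * once anything after ':' is stripped) is returned only if there is no
 * exact match.
 */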
2723 static kib_dev_t *kiblnd_dev_search(char *ifname)
2724 {
2725 kib_dev_t *alias = NULL;
2726 kib_dev_t *dev;
2727 char *colon;
2728 char *colon2;
2729
2730 colon = strchr(ifname, ':');
2731 list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
2732 if (strcmp(&dev->ibd_ifname[0], ifname) == 0)
2733 return dev;
2734
2735 if (alias != NULL)
2736 continue;
2737
2738 colon2 = strchr(dev->ibd_ifname, ':');
2739 if (colon != NULL)
2740 *colon = 0;
2741 if (colon2 != NULL)
2742 *colon2 = 0;
2743
2744 if (strcmp(&dev->ibd_ifname[0], ifname) == 0)
2745 alias = dev;
2746
2747 if (colon != NULL)
2748 *colon = ':';
2749 if (colon2 != NULL)
2750 *colon2 = ':';
2751 }
2752 return alias;
2753 }
2754
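/*
 * Bring up an o2iblnd NI: run base startup on first use, allocate the
 * kib_net_t, pick the IPoIB interface (from 'networks=' or the default
 * tunable), find or create the kib_dev, start scheduler threads,
 * initialise the TX/FMR pools, and link the net onto the device.
 */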
2755 int kiblnd_startup(lnet_ni_t *ni)
2756 {
2757 char *ifname;
2758 kib_dev_t *ibdev = NULL;
2759 kib_net_t *net;
2760 struct timespec64 tv;
2761 unsigned long flags;
2762 int rc;
2763 int newdev;
2764
2765 LASSERT(ni->ni_lnd == &the_o2iblnd);
2766
2767 if (kiblnd_data.kib_init == IBLND_INIT_NOTHING) {
2768 rc = kiblnd_base_startup();
2769 if (rc != 0)
2770 return rc;
2771 }
2772
2773 LIBCFS_ALLOC(net, sizeof(*net));
2774 ni->ni_data = net;
2775 if (net == NULL)
2776 goto net_failed;
2777
2778 ktime_get_real_ts64(&tv);
2779 net->ibn_incarnation = tv.tv_sec * USEC_PER_SEC +
2780 tv.tv_nsec / NSEC_PER_USEC;
2781
2782 ni->ni_peertimeout = *kiblnd_tunables.kib_peertimeout;
2783 ni->ni_maxtxcredits = *kiblnd_tunables.kib_credits;
2784 ni->ni_peertxcredits = *kiblnd_tunables.kib_peertxcredits;
2785 ni->ni_peerrtrcredits = *kiblnd_tunables.kib_peerrtrcredits;
2786
2787 if (ni->ni_interfaces[0] != NULL) {
2788 /* Use the IPoIB interface specified in 'networks=' */
2789
2790 CLASSERT(LNET_MAX_INTERFACES > 1);
2791 if (ni->ni_interfaces[1] != NULL) {
2792 CERROR("Multiple interfaces not supported\n");
2793 goto failed;
2794 }
2795
2796 ifname = ni->ni_interfaces[0];
2797 } else {
2798 ifname = *kiblnd_tunables.kib_default_ipif;
2799 }
2800
2801 if (strlen(ifname) >= sizeof(ibdev->ibd_ifname)) {
2802 CERROR("IPoIB interface name too long: %s\n", ifname);
2803 goto failed;
2804 }
2805
2806 ibdev = kiblnd_dev_search(ifname);
2807
2808 newdev = ibdev == NULL;
2809 	/* create a kib_dev even for an alias interface */
2810 if (ibdev == NULL || strcmp(&ibdev->ibd_ifname[0], ifname) != 0)
2811 ibdev = kiblnd_create_dev(ifname);
2812
2813 if (ibdev == NULL)
2814 goto failed;
2815
2816 net->ibn_dev = ibdev;
2817 ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), ibdev->ibd_ifip);
2818
2819 rc = kiblnd_dev_start_threads(ibdev, newdev,
2820 ni->ni_cpts, ni->ni_ncpts);
2821 if (rc != 0)
2822 goto failed;
2823
2824 rc = kiblnd_net_init_pools(net, ni->ni_cpts, ni->ni_ncpts);
2825 if (rc != 0) {
2826 CERROR("Failed to initialize NI pools: %d\n", rc);
2827 goto failed;
2828 }
2829
2830 write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2831 ibdev->ibd_nnets++;
2832 list_add_tail(&net->ibn_list, &ibdev->ibd_nets);
2833 write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2834
2835 net->ibn_init = IBLND_INIT_ALL;
2836
2837 return 0;
2838
2839 failed:
2840 if (net->ibn_dev == NULL && ibdev != NULL)
2841 kiblnd_destroy_dev(ibdev);
2842
2843 net_failed:
2844 kiblnd_shutdown(ni);
2845
2846 CDEBUG(D_NET, "kiblnd_startup failed\n");
2847 return -ENETDOWN;
2848 }
2849
2850 static void __exit kiblnd_module_fini(void)
2851 {
2852 lnet_unregister_lnd(&the_o2iblnd);
2853 }
2854
2855 static int __init kiblnd_module_init(void)
2856 {
2857 int rc;
2858
2859 CLASSERT(sizeof(kib_msg_t) <= IBLND_MSG_SIZE);
2860 CLASSERT(offsetof(kib_msg_t,
2861 ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
2862 <= IBLND_MSG_SIZE);
2863 CLASSERT(offsetof(kib_msg_t,
2864 ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
2865 <= IBLND_MSG_SIZE);
2866
2867 rc = kiblnd_tunables_init();
2868 if (rc != 0)
2869 return rc;
2870
2871 lnet_register_lnd(&the_o2iblnd);
2872
2873 return 0;
2874 }
2875
2876 MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
2877 MODULE_DESCRIPTION("Kernel OpenIB gen2 LND v2.00");
2878 MODULE_LICENSE("GPL");
2879
2880 module_init(kiblnd_module_init);
2881 module_exit(kiblnd_module_fini);