staging: lustre: fix all NULL comparisons in LNet layer
1 /*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19 *
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
22 * have any questions.
23 *
24 * GPL HEADER END
25 */
26 /*
27 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
29 *
30 * Copyright (c) 2011, 2015, Intel Corporation.
31 */
32 /*
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
35 *
36 * lnet/klnds/o2iblnd/o2iblnd.c
37 *
38 * Author: Eric Barton <eric@bartonsoftware.com>
39 */
40
41 #include <asm/div64.h>
42 #include <asm/page.h>
43 #include "o2iblnd.h"
44
45 static lnd_t the_o2iblnd = {
46 .lnd_type = O2IBLND,
47 .lnd_startup = kiblnd_startup,
48 .lnd_shutdown = kiblnd_shutdown,
49 .lnd_ctl = kiblnd_ctl,
50 .lnd_query = kiblnd_query,
51 .lnd_send = kiblnd_send,
52 .lnd_recv = kiblnd_recv,
53 };
54
55 kib_data_t kiblnd_data;
56
57 static __u32 kiblnd_cksum(void *ptr, int nob)
58 {
59 char *c = ptr;
60 __u32 sum = 0;
61
62 while (nob-- > 0)
63 sum = ((sum << 1) | (sum >> 31)) + *c++;
64
65 /* ensure I don't return 0 (== no checksum) */
66 return (sum == 0) ? 1 : sum;
67 }
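/*
 * Editorial sketch (not part of the driver): kiblnd_cksum() above is a
 * rotate-left-by-one-and-add over the message bytes, with 0 reserved to
 * mean "no checksum".  A minimal standalone model, assuming only
 * <stdint.h>:
 */
#if 0
#include <stdint.h>

static uint32_t cksum_model(const char *p, int nob)
{
	uint32_t sum = 0;

	while (nob-- > 0)
		sum = ((sum << 1) | (sum >> 31)) + *p++;

	return sum ? sum : 1;	/* 0 means "no checksum" on the wire */
}
#endif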
68
69 static char *kiblnd_msgtype2str(int type)
70 {
71 switch (type) {
72 case IBLND_MSG_CONNREQ:
73 return "CONNREQ";
74
75 case IBLND_MSG_CONNACK:
76 return "CONNACK";
77
78 case IBLND_MSG_NOOP:
79 return "NOOP";
80
81 case IBLND_MSG_IMMEDIATE:
82 return "IMMEDIATE";
83
84 case IBLND_MSG_PUT_REQ:
85 return "PUT_REQ";
86
87 case IBLND_MSG_PUT_NAK:
88 return "PUT_NAK";
89
90 case IBLND_MSG_PUT_ACK:
91 return "PUT_ACK";
92
93 case IBLND_MSG_PUT_DONE:
94 return "PUT_DONE";
95
96 case IBLND_MSG_GET_REQ:
97 return "GET_REQ";
98
99 case IBLND_MSG_GET_DONE:
100 return "GET_DONE";
101
102 default:
103 return "???";
104 }
105 }
106
107 static int kiblnd_msgtype2size(int type)
108 {
109 const int hdr_size = offsetof(kib_msg_t, ibm_u);
110
111 switch (type) {
112 case IBLND_MSG_CONNREQ:
113 case IBLND_MSG_CONNACK:
114 return hdr_size + sizeof(kib_connparams_t);
115
116 case IBLND_MSG_NOOP:
117 return hdr_size;
118
119 case IBLND_MSG_IMMEDIATE:
120 return offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[0]);
121
122 case IBLND_MSG_PUT_REQ:
123 return hdr_size + sizeof(kib_putreq_msg_t);
124
125 case IBLND_MSG_PUT_ACK:
126 return hdr_size + sizeof(kib_putack_msg_t);
127
128 case IBLND_MSG_GET_REQ:
129 return hdr_size + sizeof(kib_get_msg_t);
130
131 case IBLND_MSG_PUT_NAK:
132 case IBLND_MSG_PUT_DONE:
133 case IBLND_MSG_GET_DONE:
134 return hdr_size + sizeof(kib_completion_msg_t);
135 default:
136 return -1;
137 }
138 }
139
140 static int kiblnd_unpack_rd(kib_msg_t *msg, int flip)
141 {
142 kib_rdma_desc_t *rd;
143 int nob;
144 int n;
145 int i;
146
147 LASSERT(msg->ibm_type == IBLND_MSG_GET_REQ ||
148 msg->ibm_type == IBLND_MSG_PUT_ACK);
149
150 rd = msg->ibm_type == IBLND_MSG_GET_REQ ?
151 &msg->ibm_u.get.ibgm_rd :
152 &msg->ibm_u.putack.ibpam_rd;
153
154 if (flip) {
155 __swab32s(&rd->rd_key);
156 __swab32s(&rd->rd_nfrags);
157 }
158
159 n = rd->rd_nfrags;
160
161 if (n <= 0 || n > IBLND_MAX_RDMA_FRAGS) {
162 CERROR("Bad nfrags: %d, should be 0 < n <= %d\n",
163 n, IBLND_MAX_RDMA_FRAGS);
164 return 1;
165 }
166
167 nob = offsetof(kib_msg_t, ibm_u) +
168 kiblnd_rd_msg_size(rd, msg->ibm_type, n);
169
170 if (msg->ibm_nob < nob) {
171 CERROR("Short %s: %d(%d)\n",
172 kiblnd_msgtype2str(msg->ibm_type), msg->ibm_nob, nob);
173 return 1;
174 }
175
176 if (!flip)
177 return 0;
178
179 for (i = 0; i < n; i++) {
180 __swab32s(&rd->rd_frags[i].rf_nob);
181 __swab64s(&rd->rd_frags[i].rf_addr);
182 }
183
184 return 0;
185 }
186
187 void kiblnd_pack_msg(lnet_ni_t *ni, kib_msg_t *msg, int version,
188 int credits, lnet_nid_t dstnid, __u64 dststamp)
189 {
190 kib_net_t *net = ni->ni_data;
191
192 /*
193 * CAVEAT EMPTOR! all message fields not set here should have been
194 * initialised previously.
195 */
196 msg->ibm_magic = IBLND_MSG_MAGIC;
197 msg->ibm_version = version;
198 /* ibm_type */
199 msg->ibm_credits = credits;
200 /* ibm_nob */
201 msg->ibm_cksum = 0;
202 msg->ibm_srcnid = ni->ni_nid;
203 msg->ibm_srcstamp = net->ibn_incarnation;
204 msg->ibm_dstnid = dstnid;
205 msg->ibm_dststamp = dststamp;
206
207 if (*kiblnd_tunables.kib_cksum) {
208 /* NB ibm_cksum zero while computing cksum */
209 msg->ibm_cksum = kiblnd_cksum(msg, msg->ibm_nob);
210 }
211 }
212
213 int kiblnd_unpack_msg(kib_msg_t *msg, int nob)
214 {
215 const int hdr_size = offsetof(kib_msg_t, ibm_u);
216 __u32 msg_cksum;
217 __u16 version;
218 int msg_nob;
219 int flip;
220
221 /* 6 bytes are enough to have received magic + version */
222 if (nob < 6) {
223 CERROR("Short message: %d\n", nob);
224 return -EPROTO;
225 }
226
227 if (msg->ibm_magic == IBLND_MSG_MAGIC) {
228 flip = 0;
229 } else if (msg->ibm_magic == __swab32(IBLND_MSG_MAGIC)) {
230 flip = 1;
231 } else {
232 CERROR("Bad magic: %08x\n", msg->ibm_magic);
233 return -EPROTO;
234 }
235
236 version = flip ? __swab16(msg->ibm_version) : msg->ibm_version;
237 if (version != IBLND_MSG_VERSION &&
238 version != IBLND_MSG_VERSION_1) {
239 CERROR("Bad version: %x\n", version);
240 return -EPROTO;
241 }
242
243 if (nob < hdr_size) {
244 CERROR("Short message: %d\n", nob);
245 return -EPROTO;
246 }
247
248 msg_nob = flip ? __swab32(msg->ibm_nob) : msg->ibm_nob;
249 if (msg_nob > nob) {
250 CERROR("Short message: got %d, wanted %d\n", nob, msg_nob);
251 return -EPROTO;
252 }
253
254 /*
255 * checksum must be computed with ibm_cksum zero and BEFORE anything
256 * gets flipped
257 */
258 msg_cksum = flip ? __swab32(msg->ibm_cksum) : msg->ibm_cksum;
259 msg->ibm_cksum = 0;
260 if (msg_cksum != 0 &&
261 msg_cksum != kiblnd_cksum(msg, msg_nob)) {
262 CERROR("Bad checksum\n");
263 return -EPROTO;
264 }
265
266 msg->ibm_cksum = msg_cksum;
267
268 if (flip) {
269 /* leave magic unflipped as a clue to peer endianness */
270 msg->ibm_version = version;
271 CLASSERT(sizeof(msg->ibm_type) == 1);
272 CLASSERT(sizeof(msg->ibm_credits) == 1);
273 msg->ibm_nob = msg_nob;
274 __swab64s(&msg->ibm_srcnid);
275 __swab64s(&msg->ibm_srcstamp);
276 __swab64s(&msg->ibm_dstnid);
277 __swab64s(&msg->ibm_dststamp);
278 }
279
280 if (msg->ibm_srcnid == LNET_NID_ANY) {
281 CERROR("Bad src nid: %s\n", libcfs_nid2str(msg->ibm_srcnid));
282 return -EPROTO;
283 }
284
285 if (msg_nob < kiblnd_msgtype2size(msg->ibm_type)) {
286 CERROR("Short %s: %d(%d)\n", kiblnd_msgtype2str(msg->ibm_type),
287 msg_nob, kiblnd_msgtype2size(msg->ibm_type));
288 return -EPROTO;
289 }
290
291 switch (msg->ibm_type) {
292 default:
293 CERROR("Unknown message type %x\n", msg->ibm_type);
294 return -EPROTO;
295
296 case IBLND_MSG_NOOP:
297 case IBLND_MSG_IMMEDIATE:
298 case IBLND_MSG_PUT_REQ:
299 break;
300
301 case IBLND_MSG_PUT_ACK:
302 case IBLND_MSG_GET_REQ:
303 if (kiblnd_unpack_rd(msg, flip))
304 return -EPROTO;
305 break;
306
307 case IBLND_MSG_PUT_NAK:
308 case IBLND_MSG_PUT_DONE:
309 case IBLND_MSG_GET_DONE:
310 if (flip)
311 __swab32s(&msg->ibm_u.completion.ibcm_status);
312 break;
313
314 case IBLND_MSG_CONNREQ:
315 case IBLND_MSG_CONNACK:
316 if (flip) {
317 __swab16s(&msg->ibm_u.connparams.ibcp_queue_depth);
318 __swab16s(&msg->ibm_u.connparams.ibcp_max_frags);
319 __swab32s(&msg->ibm_u.connparams.ibcp_max_msg_size);
320 }
321 break;
322 }
323 return 0;
324 }
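/*
 * Editorial sketch: kiblnd_unpack_msg() validates in a fixed order --
 * magic, version, header size, claimed length, checksum -- and only then
 * byte-swaps the remaining fields, leaving ibm_magic unflipped as a clue
 * to the peer's endianness.  A hypothetical receive-side caller:
 */
#if 0
	rc = kiblnd_unpack_msg(rx->rx_msg, rx->rx_nob);
	if (rc != 0) {		/* -EPROTO: malformed; drop it */
		CERROR("Can't unpack msg: %d\n", rc);
		return;
	}
	/* rx->rx_msg is now in host byte order */
#endif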
325
326 int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid)
327 {
328 kib_peer_t *peer;
329 kib_net_t *net = ni->ni_data;
330 int cpt = lnet_cpt_of_nid(nid);
331 unsigned long flags;
332
333 LASSERT(net);
334 LASSERT(nid != LNET_NID_ANY);
335
336 LIBCFS_CPT_ALLOC(peer, lnet_cpt_table(), cpt, sizeof(*peer));
337 if (!peer) {
338 CERROR("Cannot allocate peer\n");
339 return -ENOMEM;
340 }
341
342 memset(peer, 0, sizeof(*peer)); /* zero flags etc */
343
344 peer->ibp_ni = ni;
345 peer->ibp_nid = nid;
346 peer->ibp_error = 0;
347 peer->ibp_last_alive = 0;
348 atomic_set(&peer->ibp_refcount, 1); /* 1 ref for caller */
349
350 INIT_LIST_HEAD(&peer->ibp_list); /* not in the peer table yet */
351 INIT_LIST_HEAD(&peer->ibp_conns);
352 INIT_LIST_HEAD(&peer->ibp_tx_queue);
353
354 write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
355
356 /* always called with a ref on ni, which prevents ni being shutdown */
357 LASSERT(net->ibn_shutdown == 0);
358
359 /* npeers only grows with the global lock held */
360 atomic_inc(&net->ibn_npeers);
361
362 write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
363
364 *peerp = peer;
365 return 0;
366 }
367
368 void kiblnd_destroy_peer(kib_peer_t *peer)
369 {
370 kib_net_t *net = peer->ibp_ni->ni_data;
371
372 LASSERT(net);
373 LASSERT(atomic_read(&peer->ibp_refcount) == 0);
374 LASSERT(!kiblnd_peer_active(peer));
375 LASSERT(peer->ibp_connecting == 0);
376 LASSERT(peer->ibp_accepting == 0);
377 LASSERT(list_empty(&peer->ibp_conns));
378 LASSERT(list_empty(&peer->ibp_tx_queue));
379
380 LIBCFS_FREE(peer, sizeof(*peer));
381
382 /*
383 * NB a peer's connections keep a reference on their peer until
384 * they are destroyed, so we can be assured that _all_ state to do
385 * with this peer has been cleaned up when its refcount drops to
386 * zero.
387 */
388 atomic_dec(&net->ibn_npeers);
389 }
390
391 kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid)
392 {
393 /*
394 * the caller is responsible for accounting the additional reference
395 * that this creates
396 */
397 struct list_head *peer_list = kiblnd_nid2peerlist(nid);
398 struct list_head *tmp;
399 kib_peer_t *peer;
400
401 list_for_each(tmp, peer_list) {
402 peer = list_entry(tmp, kib_peer_t, ibp_list);
403
404 LASSERT(peer->ibp_connecting > 0 || /* creating conns */
405 peer->ibp_accepting > 0 ||
406 !list_empty(&peer->ibp_conns)); /* active conn */
407
408 if (peer->ibp_nid != nid)
409 continue;
410
411 CDEBUG(D_NET, "got peer [%p] -> %s (%d) version: %x\n",
412 peer, libcfs_nid2str(nid),
413 atomic_read(&peer->ibp_refcount),
414 peer->ibp_version);
415 return peer;
416 }
417 return NULL;
418 }
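/*
 * Editorial sketch: the lookup above takes no reference itself, so a
 * typical caller holds kib_global_lock and accounts for the ref on its
 * own; kiblnd_peer_addref() is assumed to come from o2iblnd.h:
 */
#if 0
	read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
	peer = kiblnd_find_peer_locked(nid);
	if (peer)
		kiblnd_peer_addref(peer);	/* caller now owns this ref */
	read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
#endif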
419
420 void kiblnd_unlink_peer_locked(kib_peer_t *peer)
421 {
422 LASSERT(list_empty(&peer->ibp_conns));
423
424 LASSERT(kiblnd_peer_active(peer));
425 list_del_init(&peer->ibp_list);
426 /* lose peerlist's ref */
427 kiblnd_peer_decref(peer);
428 }
429
430 static int kiblnd_get_peer_info(lnet_ni_t *ni, int index,
431 lnet_nid_t *nidp, int *count)
432 {
433 kib_peer_t *peer;
434 struct list_head *ptmp;
435 int i;
436 unsigned long flags;
437
438 read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
439
440 for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
441 list_for_each(ptmp, &kiblnd_data.kib_peers[i]) {
442 peer = list_entry(ptmp, kib_peer_t, ibp_list);
443 LASSERT(peer->ibp_connecting > 0 ||
444 peer->ibp_accepting > 0 ||
445 !list_empty(&peer->ibp_conns));
446
447 if (peer->ibp_ni != ni)
448 continue;
449
450 if (index-- > 0)
451 continue;
452
453 *nidp = peer->ibp_nid;
454 *count = atomic_read(&peer->ibp_refcount);
455
456 read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
457 flags);
458 return 0;
459 }
460 }
461
462 read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
463 return -ENOENT;
464 }
465
466 static void kiblnd_del_peer_locked(kib_peer_t *peer)
467 {
468 struct list_head *ctmp;
469 struct list_head *cnxt;
470 kib_conn_t *conn;
471
472 if (list_empty(&peer->ibp_conns)) {
473 kiblnd_unlink_peer_locked(peer);
474 } else {
475 list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
476 conn = list_entry(ctmp, kib_conn_t, ibc_list);
477
478 kiblnd_close_conn_locked(conn, 0);
479 }
480 /* NB closing peer's last conn unlinked it. */
481 }
482 /*
483 * NB peer now unlinked; might even be freed if the peer table had the
484 * last ref on it.
485 */
486 }
487
488 static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid)
489 {
490 LIST_HEAD(zombies);
491 struct list_head *ptmp;
492 struct list_head *pnxt;
493 kib_peer_t *peer;
494 int lo;
495 int hi;
496 int i;
497 unsigned long flags;
498 int rc = -ENOENT;
499
500 write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
501
502 if (nid != LNET_NID_ANY) {
503 lo = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
504 hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
505 } else {
506 lo = 0;
507 hi = kiblnd_data.kib_peer_hash_size - 1;
508 }
509
510 for (i = lo; i <= hi; i++) {
511 list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
512 peer = list_entry(ptmp, kib_peer_t, ibp_list);
513 LASSERT(peer->ibp_connecting > 0 ||
514 peer->ibp_accepting > 0 ||
515 !list_empty(&peer->ibp_conns));
516
517 if (peer->ibp_ni != ni)
518 continue;
519
520 if (!(nid == LNET_NID_ANY || peer->ibp_nid == nid))
521 continue;
522
523 if (!list_empty(&peer->ibp_tx_queue)) {
524 LASSERT(list_empty(&peer->ibp_conns));
525
526 list_splice_init(&peer->ibp_tx_queue,
527 &zombies);
528 }
529
530 kiblnd_del_peer_locked(peer);
531 rc = 0; /* matched something */
532 }
533 }
534
535 write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
536
537 kiblnd_txlist_done(ni, &zombies, -EIO);
538
539 return rc;
540 }
541
542 static kib_conn_t *kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index)
543 {
544 kib_peer_t *peer;
545 struct list_head *ptmp;
546 kib_conn_t *conn;
547 struct list_head *ctmp;
548 int i;
549 unsigned long flags;
550
551 read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
552
553 for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
554 list_for_each(ptmp, &kiblnd_data.kib_peers[i]) {
555 peer = list_entry(ptmp, kib_peer_t, ibp_list);
556 LASSERT(peer->ibp_connecting > 0 ||
557 peer->ibp_accepting > 0 ||
558 !list_empty(&peer->ibp_conns));
559
560 if (peer->ibp_ni != ni)
561 continue;
562
563 list_for_each(ctmp, &peer->ibp_conns) {
564 if (index-- > 0)
565 continue;
566
567 conn = list_entry(ctmp, kib_conn_t,
568 ibc_list);
569 kiblnd_conn_addref(conn);
570 read_unlock_irqrestore(
571 &kiblnd_data.kib_global_lock,
572 flags);
573 return conn;
574 }
575 }
576 }
577
578 read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
579 return NULL;
580 }
581
582 int kiblnd_translate_mtu(int value)
583 {
584 switch (value) {
585 default:
586 return -1;
587 case 0:
588 return 0;
589 case 256:
590 return IB_MTU_256;
591 case 512:
592 return IB_MTU_512;
593 case 1024:
594 return IB_MTU_1024;
595 case 2048:
596 return IB_MTU_2048;
597 case 4096:
598 return IB_MTU_4096;
599 }
600 }
601
602 static void kiblnd_setup_mtu_locked(struct rdma_cm_id *cmid)
603 {
604 int mtu;
605
606 /* XXX There is no path record for iWARP, set by netdev->change_mtu? */
607 if (!cmid->route.path_rec)
608 return;
609
610 mtu = kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu);
611 LASSERT(mtu >= 0);
612 if (mtu != 0)
613 cmid->route.path_rec->mtu = mtu;
614 }
615
616 static int kiblnd_get_completion_vector(kib_conn_t *conn, int cpt)
617 {
618 cpumask_t *mask;
619 int vectors;
620 int off;
621 int i;
622 lnet_nid_t nid = conn->ibc_peer->ibp_nid;
623
624 vectors = conn->ibc_cmid->device->num_comp_vectors;
625 if (vectors <= 1)
626 return 0;
627
628 mask = cfs_cpt_cpumask(lnet_cpt_table(), cpt);
629 if (!mask)
630 return 0;
631
632 /* hash NID to CPU id in this partition... */
633 off = do_div(nid, cpumask_weight(mask));
634 for_each_cpu(i, mask) {
635 if (off-- == 0)
636 return i % vectors;
637 }
638
639 LBUG();
640 return 1;
641 }
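/*
 * Editorial note: do_div() divides a 64-bit value in place and returns
 * the remainder, so the call above clobbers the local copy of 'nid' and
 * leaves 'off' == nid % cpumask_weight(mask).  In miniature:
 */
#if 0
	__u64 v = 1234567;		/* hypothetical NID value */
	__u32 rem = do_div(v, 10);	/* now v == 123456, rem == 7 */
#endif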
642
643 kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
644 int state, int version)
645 {
646 /*
647 * CAVEAT EMPTOR:
648 * If the new conn is created successfully it takes over the caller's
649 * ref on 'peer'. It also "owns" 'cmid' and destroys it when it itself
650 * is destroyed. On failure, the caller's ref on 'peer' remains and
651 * she must dispose of 'cmid'. (Actually I'd block forever if I tried
652 * to destroy 'cmid' here since I'm called from the CM which still has
653 * its ref on 'cmid').
654 */
655 rwlock_t *glock = &kiblnd_data.kib_global_lock;
656 kib_net_t *net = peer->ibp_ni->ni_data;
657 kib_dev_t *dev;
658 struct ib_qp_init_attr *init_qp_attr;
659 struct kib_sched_info *sched;
660 struct ib_cq_init_attr cq_attr = {};
661 kib_conn_t *conn;
662 struct ib_cq *cq;
663 unsigned long flags;
664 int cpt;
665 int rc;
666 int i;
667
668 LASSERT(net);
669 LASSERT(!in_interrupt());
670
671 dev = net->ibn_dev;
672
673 cpt = lnet_cpt_of_nid(peer->ibp_nid);
674 sched = kiblnd_data.kib_scheds[cpt];
675
676 LASSERT(sched->ibs_nthreads > 0);
677
678 LIBCFS_CPT_ALLOC(init_qp_attr, lnet_cpt_table(), cpt,
679 sizeof(*init_qp_attr));
680 if (!init_qp_attr) {
681 CERROR("Can't allocate qp_attr for %s\n",
682 libcfs_nid2str(peer->ibp_nid));
683 goto failed_0;
684 }
685
686 LIBCFS_CPT_ALLOC(conn, lnet_cpt_table(), cpt, sizeof(*conn));
687 if (!conn) {
688 CERROR("Can't allocate connection for %s\n",
689 libcfs_nid2str(peer->ibp_nid));
690 goto failed_1;
691 }
692
693 conn->ibc_state = IBLND_CONN_INIT;
694 conn->ibc_version = version;
695 conn->ibc_peer = peer; /* I take the caller's ref */
696 cmid->context = conn; /* for future CM callbacks */
697 conn->ibc_cmid = cmid;
698
699 INIT_LIST_HEAD(&conn->ibc_early_rxs);
700 INIT_LIST_HEAD(&conn->ibc_tx_noops);
701 INIT_LIST_HEAD(&conn->ibc_tx_queue);
702 INIT_LIST_HEAD(&conn->ibc_tx_queue_rsrvd);
703 INIT_LIST_HEAD(&conn->ibc_tx_queue_nocred);
704 INIT_LIST_HEAD(&conn->ibc_active_txs);
705 spin_lock_init(&conn->ibc_lock);
706
707 LIBCFS_CPT_ALLOC(conn->ibc_connvars, lnet_cpt_table(), cpt,
708 sizeof(*conn->ibc_connvars));
709 if (!conn->ibc_connvars) {
710 CERROR("Can't allocate in-progress connection state\n");
711 goto failed_2;
712 }
713
714 write_lock_irqsave(glock, flags);
715 if (dev->ibd_failover) {
716 write_unlock_irqrestore(glock, flags);
717 CERROR("%s: failover in progress\n", dev->ibd_ifname);
718 goto failed_2;
719 }
720
721 if (dev->ibd_hdev->ibh_ibdev != cmid->device) {
722 /* wakeup failover thread and teardown connection */
723 if (kiblnd_dev_can_failover(dev)) {
724 list_add_tail(&dev->ibd_fail_list,
725 &kiblnd_data.kib_failed_devs);
726 wake_up(&kiblnd_data.kib_failover_waitq);
727 }
728
729 write_unlock_irqrestore(glock, flags);
730 CERROR("cmid HCA(%s), kib_dev(%s) need failover\n",
731 cmid->device->name, dev->ibd_ifname);
732 goto failed_2;
733 }
734
735 kiblnd_hdev_addref_locked(dev->ibd_hdev);
736 conn->ibc_hdev = dev->ibd_hdev;
737
738 kiblnd_setup_mtu_locked(cmid);
739
740 write_unlock_irqrestore(glock, flags);
741
742 LIBCFS_CPT_ALLOC(conn->ibc_rxs, lnet_cpt_table(), cpt,
743 IBLND_RX_MSGS(version) * sizeof(kib_rx_t));
744 if (!conn->ibc_rxs) {
745 CERROR("Cannot allocate RX buffers\n");
746 goto failed_2;
747 }
748
749 rc = kiblnd_alloc_pages(&conn->ibc_rx_pages, cpt,
750 IBLND_RX_MSG_PAGES(version));
751 if (rc != 0)
752 goto failed_2;
753
754 kiblnd_map_rx_descs(conn);
755
756 cq_attr.cqe = IBLND_CQ_ENTRIES(version);
757 cq_attr.comp_vector = kiblnd_get_completion_vector(conn, cpt);
758 cq = ib_create_cq(cmid->device,
759 kiblnd_cq_completion, kiblnd_cq_event, conn,
760 &cq_attr);
761 if (IS_ERR(cq)) {
762 CERROR("Can't create CQ: %ld, cqe: %d\n",
763 PTR_ERR(cq), IBLND_CQ_ENTRIES(version));
764 goto failed_2;
765 }
766
767 conn->ibc_cq = cq;
768
769 rc = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
770 if (rc != 0) {
771 CERROR("Can't request completion notificiation: %d\n", rc);
772 goto failed_2;
773 }
774
775 init_qp_attr->event_handler = kiblnd_qp_event;
776 init_qp_attr->qp_context = conn;
777 init_qp_attr->cap.max_send_wr = IBLND_SEND_WRS(version);
778 init_qp_attr->cap.max_recv_wr = IBLND_RECV_WRS(version);
779 init_qp_attr->cap.max_send_sge = 1;
780 init_qp_attr->cap.max_recv_sge = 1;
781 init_qp_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
782 init_qp_attr->qp_type = IB_QPT_RC;
783 init_qp_attr->send_cq = cq;
784 init_qp_attr->recv_cq = cq;
785
786 conn->ibc_sched = sched;
787
788 rc = rdma_create_qp(cmid, conn->ibc_hdev->ibh_pd, init_qp_attr);
789 if (rc != 0) {
790 CERROR("Can't create QP: %d, send_wr: %d, recv_wr: %d\n",
791 rc, init_qp_attr->cap.max_send_wr,
792 init_qp_attr->cap.max_recv_wr);
793 goto failed_2;
794 }
795
796 LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));
797
798 /* 1 ref for caller and each rxmsg */
799 atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS(version));
800 conn->ibc_nrx = IBLND_RX_MSGS(version);
801
802 /* post receives */
803 for (i = 0; i < IBLND_RX_MSGS(version); i++) {
804 rc = kiblnd_post_rx(&conn->ibc_rxs[i],
805 IBLND_POSTRX_NO_CREDIT);
806 if (rc != 0) {
807 CERROR("Can't post rxmsg: %d\n", rc);
808
809 /* Make posted receives complete */
810 kiblnd_abort_receives(conn);
811
812 /*
813 * correct # of posted buffers
814 * NB locking needed now I'm racing with completion
815 */
816 spin_lock_irqsave(&sched->ibs_lock, flags);
817 conn->ibc_nrx -= IBLND_RX_MSGS(version) - i;
818 spin_unlock_irqrestore(&sched->ibs_lock, flags);
819
820 /*
821 * cmid will be destroyed by CM(ofed) after cm_callback
822 * returns, so we can't refer to it anymore
823 * (by kiblnd_connd()->kiblnd_destroy_conn)
824 */
825 rdma_destroy_qp(conn->ibc_cmid);
826 conn->ibc_cmid = NULL;
827
828 /* Drop my own and unused rxbuffer refcounts */
829 while (i++ <= IBLND_RX_MSGS(version))
830 kiblnd_conn_decref(conn);
831
832 return NULL;
833 }
834 }
835
836 /* Init successful! */
837 LASSERT(state == IBLND_CONN_ACTIVE_CONNECT ||
838 state == IBLND_CONN_PASSIVE_WAIT);
839 conn->ibc_state = state;
840
841 /* 1 more conn */
842 atomic_inc(&net->ibn_nconns);
843 return conn;
844
845 failed_2:
846 kiblnd_destroy_conn(conn);
847 failed_1:
848 LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));
849 failed_0:
850 return NULL;
851 }
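/*
 * Editorial sketch of the CAVEAT EMPTOR contract above: on success the
 * new conn owns the caller's ref on 'peer' plus 'cmid'; on failure both
 * stay with the caller.  A hypothetical call site (the error value is
 * illustrative):
 */
#if 0
	conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_ACTIVE_CONNECT,
				  version);
	if (!conn) {
		kiblnd_peer_decref(peer); /* my ref survived the failure */
		/* NB can't rdma_destroy_id(cmid) from inside a CM
		 * callback; fail the callback and let the CM do it */
		return -ENOMEM;
	}
	/* success: conn took over my refs on 'peer' and 'cmid' */
#endif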
852
853 void kiblnd_destroy_conn(kib_conn_t *conn)
854 {
855 struct rdma_cm_id *cmid = conn->ibc_cmid;
856 kib_peer_t *peer = conn->ibc_peer;
857 int rc;
858
859 LASSERT(!in_interrupt());
860 LASSERT(atomic_read(&conn->ibc_refcount) == 0);
861 LASSERT(list_empty(&conn->ibc_early_rxs));
862 LASSERT(list_empty(&conn->ibc_tx_noops));
863 LASSERT(list_empty(&conn->ibc_tx_queue));
864 LASSERT(list_empty(&conn->ibc_tx_queue_rsrvd));
865 LASSERT(list_empty(&conn->ibc_tx_queue_nocred));
866 LASSERT(list_empty(&conn->ibc_active_txs));
867 LASSERT(conn->ibc_noops_posted == 0);
868 LASSERT(conn->ibc_nsends_posted == 0);
869
870 switch (conn->ibc_state) {
871 default:
872 /* conn must be completely disengaged from the network */
873 LBUG();
874
875 case IBLND_CONN_DISCONNECTED:
876 /* connvars should have been freed already */
877 LASSERT(!conn->ibc_connvars);
878 break;
879
880 case IBLND_CONN_INIT:
881 break;
882 }
883
884 /* conn->ibc_cmid might be destroyed by CM already */
885 if (cmid && cmid->qp)
886 rdma_destroy_qp(cmid);
887
888 if (conn->ibc_cq) {
889 rc = ib_destroy_cq(conn->ibc_cq);
890 if (rc != 0)
891 CWARN("Error destroying CQ: %d\n", rc);
892 }
893
894 if (conn->ibc_rx_pages)
895 kiblnd_unmap_rx_descs(conn);
896
897 if (conn->ibc_rxs) {
898 LIBCFS_FREE(conn->ibc_rxs,
899 IBLND_RX_MSGS(conn->ibc_version)
900 * sizeof(kib_rx_t));
901 }
902
903 if (conn->ibc_connvars)
904 LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
905
906 if (conn->ibc_hdev)
907 kiblnd_hdev_decref(conn->ibc_hdev);
908
909 /* See CAVEAT EMPTOR above in kiblnd_create_conn */
910 if (conn->ibc_state != IBLND_CONN_INIT) {
911 kib_net_t *net = peer->ibp_ni->ni_data;
912
913 kiblnd_peer_decref(peer);
914 rdma_destroy_id(cmid);
915 atomic_dec(&net->ibn_nconns);
916 }
917
918 LIBCFS_FREE(conn, sizeof(*conn));
919 }
920
921 int kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why)
922 {
923 kib_conn_t *conn;
924 struct list_head *ctmp;
925 struct list_head *cnxt;
926 int count = 0;
927
928 list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
929 conn = list_entry(ctmp, kib_conn_t, ibc_list);
930
931 CDEBUG(D_NET, "Closing conn -> %s, version: %x, reason: %d\n",
932 libcfs_nid2str(peer->ibp_nid),
933 conn->ibc_version, why);
934
935 kiblnd_close_conn_locked(conn, why);
936 count++;
937 }
938
939 return count;
940 }
941
942 int kiblnd_close_stale_conns_locked(kib_peer_t *peer,
943 int version, __u64 incarnation)
944 {
945 kib_conn_t *conn;
946 struct list_head *ctmp;
947 struct list_head *cnxt;
948 int count = 0;
949
950 list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
951 conn = list_entry(ctmp, kib_conn_t, ibc_list);
952
953 if (conn->ibc_version == version &&
954 conn->ibc_incarnation == incarnation)
955 continue;
956
957 CDEBUG(D_NET,
958 "Closing stale conn -> %s version: %x, incarnation:%#llx(%x, %#llx)\n",
959 libcfs_nid2str(peer->ibp_nid),
960 conn->ibc_version, conn->ibc_incarnation,
961 version, incarnation);
962
963 kiblnd_close_conn_locked(conn, -ESTALE);
964 count++;
965 }
966
967 return count;
968 }
969
970 static int kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid)
971 {
972 kib_peer_t *peer;
973 struct list_head *ptmp;
974 struct list_head *pnxt;
975 int lo;
976 int hi;
977 int i;
978 unsigned long flags;
979 int count = 0;
980
981 write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
982
983 if (nid != LNET_NID_ANY) {
984 lo = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
985 hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
986 } else {
987 lo = 0;
988 hi = kiblnd_data.kib_peer_hash_size - 1;
989 }
990
991 for (i = lo; i <= hi; i++) {
992 list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
993 peer = list_entry(ptmp, kib_peer_t, ibp_list);
994 LASSERT(peer->ibp_connecting > 0 ||
995 peer->ibp_accepting > 0 ||
996 !list_empty(&peer->ibp_conns));
997
998 if (peer->ibp_ni != ni)
999 continue;
1000
1001 if (!(nid == LNET_NID_ANY || nid == peer->ibp_nid))
1002 continue;
1003
1004 count += kiblnd_close_peer_conns_locked(peer, 0);
1005 }
1006 }
1007
1008 write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
1009
1010 /* wildcards always succeed */
1011 if (nid == LNET_NID_ANY)
1012 return 0;
1013
1014 return (count == 0) ? -ENOENT : 0;
1015 }
1016
1017 int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
1018 {
1019 struct libcfs_ioctl_data *data = arg;
1020 int rc = -EINVAL;
1021
1022 switch (cmd) {
1023 case IOC_LIBCFS_GET_PEER: {
1024 lnet_nid_t nid = 0;
1025 int count = 0;
1026
1027 rc = kiblnd_get_peer_info(ni, data->ioc_count,
1028 &nid, &count);
1029 data->ioc_nid = nid;
1030 data->ioc_count = count;
1031 break;
1032 }
1033
1034 case IOC_LIBCFS_DEL_PEER: {
1035 rc = kiblnd_del_peer(ni, data->ioc_nid);
1036 break;
1037 }
1038 case IOC_LIBCFS_GET_CONN: {
1039 kib_conn_t *conn;
1040
1041 rc = 0;
1042 conn = kiblnd_get_conn_by_idx(ni, data->ioc_count);
1043 if (!conn) {
1044 rc = -ENOENT;
1045 break;
1046 }
1047
1048 LASSERT(conn->ibc_cmid);
1049 data->ioc_nid = conn->ibc_peer->ibp_nid;
1050 if (!conn->ibc_cmid->route.path_rec)
1051 data->ioc_u32[0] = 0; /* iWarp has no path MTU */
1052 else
1053 data->ioc_u32[0] =
1054 ib_mtu_enum_to_int(conn->ibc_cmid->route.path_rec->mtu);
1055 kiblnd_conn_decref(conn);
1056 break;
1057 }
1058 case IOC_LIBCFS_CLOSE_CONNECTION: {
1059 rc = kiblnd_close_matching_conns(ni, data->ioc_nid);
1060 break;
1061 }
1062
1063 default:
1064 break;
1065 }
1066
1067 return rc;
1068 }
1069
1070 void kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
1071 {
1072 unsigned long last_alive = 0;
1073 unsigned long now = cfs_time_current();
1074 rwlock_t *glock = &kiblnd_data.kib_global_lock;
1075 kib_peer_t *peer;
1076 unsigned long flags;
1077
1078 read_lock_irqsave(glock, flags);
1079
1080 peer = kiblnd_find_peer_locked(nid);
1081 if (peer) {
1082 LASSERT(peer->ibp_connecting > 0 || /* creating conns */
1083 peer->ibp_accepting > 0 ||
1084 !list_empty(&peer->ibp_conns)); /* active conn */
1085 last_alive = peer->ibp_last_alive;
1086 }
1087
1088 read_unlock_irqrestore(glock, flags);
1089
1090 if (last_alive != 0)
1091 *when = last_alive;
1092
1093 /*
1094 * peer is not persistent in hash, trigger peer creation
1095 * and connection establishment with a NULL tx
1096 */
1097 if (!peer)
1098 kiblnd_launch_tx(ni, NULL, nid);
1099
1100 CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago\n",
1101 libcfs_nid2str(nid), peer,
1102 last_alive ? cfs_duration_sec(now - last_alive) : -1);
1103 }
1104
1105 void kiblnd_free_pages(kib_pages_t *p)
1106 {
1107 int npages = p->ibp_npages;
1108 int i;
1109
1110 for (i = 0; i < npages; i++) {
1111 if (p->ibp_pages[i])
1112 __free_page(p->ibp_pages[i]);
1113 }
1114
1115 LIBCFS_FREE(p, offsetof(kib_pages_t, ibp_pages[npages]));
1116 }
1117
1118 int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages)
1119 {
1120 kib_pages_t *p;
1121 int i;
1122
1123 LIBCFS_CPT_ALLOC(p, lnet_cpt_table(), cpt,
1124 offsetof(kib_pages_t, ibp_pages[npages]));
1125 if (!p) {
1126 CERROR("Can't allocate descriptor for %d pages\n", npages);
1127 return -ENOMEM;
1128 }
1129
1130 memset(p, 0, offsetof(kib_pages_t, ibp_pages[npages]));
1131 p->ibp_npages = npages;
1132
1133 for (i = 0; i < npages; i++) {
1134 p->ibp_pages[i] = alloc_pages_node(
1135 cfs_cpt_spread_node(lnet_cpt_table(), cpt),
1136 GFP_NOFS, 0);
1137 if (!p->ibp_pages[i]) {
1138 CERROR("Can't allocate page %d of %d\n", i, npages);
1139 kiblnd_free_pages(p);
1140 return -ENOMEM;
1141 }
1142 }
1143
1144 *pp = p;
1145 return 0;
1146 }
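/*
 * Editorial sketch: kiblnd_alloc_pages() cleans up after a partial
 * allocation internally, so callers pair it with kiblnd_free_pages()
 * only on success:
 */
#if 0
	kib_pages_t *p;

	if (kiblnd_alloc_pages(&p, cpt, npages) != 0)
		return -ENOMEM;		/* nothing to free here */
	/* ... use p->ibp_pages[0 .. npages - 1] ... */
	kiblnd_free_pages(p);
#endif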
1147
1148 void kiblnd_unmap_rx_descs(kib_conn_t *conn)
1149 {
1150 kib_rx_t *rx;
1151 int i;
1152
1153 LASSERT(conn->ibc_rxs);
1154 LASSERT(conn->ibc_hdev);
1155
1156 for (i = 0; i < IBLND_RX_MSGS(conn->ibc_version); i++) {
1157 rx = &conn->ibc_rxs[i];
1158
1159 LASSERT(rx->rx_nob >= 0); /* not posted */
1160
1161 kiblnd_dma_unmap_single(conn->ibc_hdev->ibh_ibdev,
1162 KIBLND_UNMAP_ADDR(rx, rx_msgunmap,
1163 rx->rx_msgaddr),
1164 IBLND_MSG_SIZE, DMA_FROM_DEVICE);
1165 }
1166
1167 kiblnd_free_pages(conn->ibc_rx_pages);
1168
1169 conn->ibc_rx_pages = NULL;
1170 }
1171
1172 void kiblnd_map_rx_descs(kib_conn_t *conn)
1173 {
1174 kib_rx_t *rx;
1175 struct page *pg;
1176 int pg_off;
1177 int ipg;
1178 int i;
1179
1180 for (pg_off = ipg = i = 0; i < IBLND_RX_MSGS(conn->ibc_version); i++) {
1181 pg = conn->ibc_rx_pages->ibp_pages[ipg];
1182 rx = &conn->ibc_rxs[i];
1183
1184 rx->rx_conn = conn;
1185 rx->rx_msg = (kib_msg_t *)(((char *)page_address(pg)) + pg_off);
1186
1187 rx->rx_msgaddr = kiblnd_dma_map_single(conn->ibc_hdev->ibh_ibdev,
1188 rx->rx_msg,
1189 IBLND_MSG_SIZE,
1190 DMA_FROM_DEVICE);
1191 LASSERT(!kiblnd_dma_mapping_error(conn->ibc_hdev->ibh_ibdev,
1192 rx->rx_msgaddr));
1193 KIBLND_UNMAP_ADDR_SET(rx, rx_msgunmap, rx->rx_msgaddr);
1194
1195 CDEBUG(D_NET, "rx %d: %p %#llx(%#llx)\n",
1196 i, rx->rx_msg, rx->rx_msgaddr,
1197 (__u64)(page_to_phys(pg) + pg_off));
1198
1199 pg_off += IBLND_MSG_SIZE;
1200 LASSERT(pg_off <= PAGE_SIZE);
1201
1202 if (pg_off == PAGE_SIZE) {
1203 pg_off = 0;
1204 ipg++;
1205 LASSERT(ipg <= IBLND_RX_MSG_PAGES(conn->ibc_version));
1206 }
1207 }
1208 }
1209
1210 static void kiblnd_unmap_tx_pool(kib_tx_pool_t *tpo)
1211 {
1212 kib_hca_dev_t *hdev = tpo->tpo_hdev;
1213 kib_tx_t *tx;
1214 int i;
1215
1216 LASSERT(tpo->tpo_pool.po_allocated == 0);
1217
1218 if (!hdev)
1219 return;
1220
1221 for (i = 0; i < tpo->tpo_pool.po_size; i++) {
1222 tx = &tpo->tpo_tx_descs[i];
1223 kiblnd_dma_unmap_single(hdev->ibh_ibdev,
1224 KIBLND_UNMAP_ADDR(tx, tx_msgunmap,
1225 tx->tx_msgaddr),
1226 IBLND_MSG_SIZE, DMA_TO_DEVICE);
1227 }
1228
1229 kiblnd_hdev_decref(hdev);
1230 tpo->tpo_hdev = NULL;
1231 }
1232
1233 static kib_hca_dev_t *kiblnd_current_hdev(kib_dev_t *dev)
1234 {
1235 kib_hca_dev_t *hdev;
1236 unsigned long flags;
1237 int i = 0;
1238
1239 read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
1240 while (dev->ibd_failover) {
1241 read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
1242 if (i++ % 50 == 0)
1243 CDEBUG(D_NET, "%s: Wait for failover\n",
1244 dev->ibd_ifname);
1245 schedule_timeout(cfs_time_seconds(1) / 100);
1246
1247 read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
1248 }
1249
1250 kiblnd_hdev_addref_locked(dev->ibd_hdev);
1251 hdev = dev->ibd_hdev;
1252
1253 read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
1254
1255 return hdev;
1256 }
1257
1258 static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo)
1259 {
1260 kib_pages_t *txpgs = tpo->tpo_tx_pages;
1261 kib_pool_t *pool = &tpo->tpo_pool;
1262 kib_net_t *net = pool->po_owner->ps_net;
1263 kib_dev_t *dev;
1264 struct page *page;
1265 kib_tx_t *tx;
1266 int page_offset;
1267 int ipage;
1268 int i;
1269
1270 LASSERT(net);
1271
1272 dev = net->ibn_dev;
1273
1274 /* pre-mapped messages are not bigger than 1 page */
1275 CLASSERT(IBLND_MSG_SIZE <= PAGE_SIZE);
1276
1277 /* No fancy arithmetic when we do the buffer calculations */
1278 CLASSERT(PAGE_SIZE % IBLND_MSG_SIZE == 0);
1279
1280 tpo->tpo_hdev = kiblnd_current_hdev(dev);
1281
1282 for (ipage = page_offset = i = 0; i < pool->po_size; i++) {
1283 page = txpgs->ibp_pages[ipage];
1284 tx = &tpo->tpo_tx_descs[i];
1285
1286 tx->tx_msg = (kib_msg_t *)(((char *)page_address(page)) +
1287 page_offset);
1288
1289 tx->tx_msgaddr = kiblnd_dma_map_single(
1290 tpo->tpo_hdev->ibh_ibdev, tx->tx_msg,
1291 IBLND_MSG_SIZE, DMA_TO_DEVICE);
1292 LASSERT(!kiblnd_dma_mapping_error(tpo->tpo_hdev->ibh_ibdev,
1293 tx->tx_msgaddr));
1294 KIBLND_UNMAP_ADDR_SET(tx, tx_msgunmap, tx->tx_msgaddr);
1295
1296 list_add(&tx->tx_list, &pool->po_free_list);
1297
1298 page_offset += IBLND_MSG_SIZE;
1299 LASSERT(page_offset <= PAGE_SIZE);
1300
1301 if (page_offset == PAGE_SIZE) {
1302 page_offset = 0;
1303 ipage++;
1304 LASSERT(ipage <= txpgs->ibp_npages);
1305 }
1306 }
1307 }
1308
1309 struct ib_mr *kiblnd_find_dma_mr(kib_hca_dev_t *hdev, __u64 addr, __u64 size)
1310 {
1311 __u64 index;
1312
1313 LASSERT(hdev->ibh_mrs[0]);
1314
1315 if (hdev->ibh_nmrs == 1)
1316 return hdev->ibh_mrs[0];
1317
1318 index = addr >> hdev->ibh_mr_shift;
1319
1320 if (index < hdev->ibh_nmrs &&
1321 index == ((addr + size - 1) >> hdev->ibh_mr_shift))
1322 return hdev->ibh_mrs[index];
1323
1324 return NULL;
1325 }
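/*
 * Editorial note: each MR covers a 2^ibh_mr_shift byte window, so
 * [addr, addr + size) maps to a single MR iff both endpoints fall in
 * the same window.  Worked example, assuming ibh_mr_shift == 32:
 *
 *   addr = 0x100000000, size = 0x1000:
 *     0x100000000 >> 32 == 1 and 0x100000fff >> 32 == 1 -> ibh_mrs[1]
 *   addr = 0x1fffff000, size = 0x2000 straddles windows 0 and 1,
 *     so the lookup returns NULL.
 */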
1326
1327 struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev, kib_rdma_desc_t *rd)
1328 {
1329 struct ib_mr *prev_mr;
1330 struct ib_mr *mr;
1331 int i;
1332
1333 LASSERT(hdev->ibh_mrs[0]);
1334
1335 if (*kiblnd_tunables.kib_map_on_demand > 0 &&
1336 *kiblnd_tunables.kib_map_on_demand <= rd->rd_nfrags)
1337 return NULL;
1338
1339 if (hdev->ibh_nmrs == 1)
1340 return hdev->ibh_mrs[0];
1341
1342 for (i = 0, mr = prev_mr = NULL;
1343 i < rd->rd_nfrags; i++) {
1344 mr = kiblnd_find_dma_mr(hdev,
1345 rd->rd_frags[i].rf_addr,
1346 rd->rd_frags[i].rf_nob);
1347 if (!prev_mr)
1348 prev_mr = mr;
1349
1350 if (!mr || prev_mr != mr) {
1351 /* Can't be covered by a single MR */
1352 mr = NULL;
1353 break;
1354 }
1355 }
1356
1357 return mr;
1358 }
1359
1360 static void kiblnd_destroy_fmr_pool(kib_fmr_pool_t *pool)
1361 {
1362 LASSERT(pool->fpo_map_count == 0);
1363
1364 if (pool->fpo_fmr_pool)
1365 ib_destroy_fmr_pool(pool->fpo_fmr_pool);
1366
1367 if (pool->fpo_hdev)
1368 kiblnd_hdev_decref(pool->fpo_hdev);
1369
1370 LIBCFS_FREE(pool, sizeof(*pool));
1371 }
1372
1373 static void kiblnd_destroy_fmr_pool_list(struct list_head *head)
1374 {
1375 kib_fmr_pool_t *pool;
1376
1377 while (!list_empty(head)) {
1378 pool = list_entry(head->next, kib_fmr_pool_t, fpo_list);
1379 list_del(&pool->fpo_list);
1380 kiblnd_destroy_fmr_pool(pool);
1381 }
1382 }
1383
1384 static int kiblnd_fmr_pool_size(int ncpts)
1385 {
1386 int size = *kiblnd_tunables.kib_fmr_pool_size / ncpts;
1387
1388 return max(IBLND_FMR_POOL, size);
1389 }
1390
1391 static int kiblnd_fmr_flush_trigger(int ncpts)
1392 {
1393 int size = *kiblnd_tunables.kib_fmr_flush_trigger / ncpts;
1394
1395 return max(IBLND_FMR_POOL_FLUSH, size);
1396 }
1397
1398 static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps,
1399 kib_fmr_pool_t **pp_fpo)
1400 {
1401 /* FMR pool for RDMA */
1402 kib_dev_t *dev = fps->fps_net->ibn_dev;
1403 kib_fmr_pool_t *fpo;
1404 struct ib_fmr_pool_param param = {
1405 .max_pages_per_fmr = LNET_MAX_PAYLOAD / PAGE_SIZE,
1406 .page_shift = PAGE_SHIFT,
1407 .access = (IB_ACCESS_LOCAL_WRITE |
1408 IB_ACCESS_REMOTE_WRITE),
1409 .pool_size = fps->fps_pool_size,
1410 .dirty_watermark = fps->fps_flush_trigger,
1411 .flush_function = NULL,
1412 .flush_arg = NULL,
1413 .cache = !!*kiblnd_tunables.kib_fmr_cache};
1414 int rc;
1415
1416 LIBCFS_CPT_ALLOC(fpo, lnet_cpt_table(), fps->fps_cpt, sizeof(*fpo));
1417 if (!fpo)
1418 return -ENOMEM;
1419
1420 fpo->fpo_hdev = kiblnd_current_hdev(dev);
1421
1422 fpo->fpo_fmr_pool = ib_create_fmr_pool(fpo->fpo_hdev->ibh_pd, &param);
1423 if (IS_ERR(fpo->fpo_fmr_pool)) {
1424 rc = PTR_ERR(fpo->fpo_fmr_pool);
1425 CERROR("Failed to create FMR pool: %d\n", rc);
1426
1427 kiblnd_hdev_decref(fpo->fpo_hdev);
1428 LIBCFS_FREE(fpo, sizeof(*fpo));
1429 return rc;
1430 }
1431
1432 fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
1433 fpo->fpo_owner = fps;
1434 *pp_fpo = fpo;
1435
1436 return 0;
1437 }
1438
1439 static void kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps,
1440 struct list_head *zombies)
1441 {
1442 if (!fps->fps_net) /* initialized? */
1443 return;
1444
1445 spin_lock(&fps->fps_lock);
1446
1447 while (!list_empty(&fps->fps_pool_list)) {
1448 kib_fmr_pool_t *fpo = list_entry(fps->fps_pool_list.next,
1449 kib_fmr_pool_t, fpo_list);
1450 fpo->fpo_failed = 1;
1451 list_del(&fpo->fpo_list);
1452 if (fpo->fpo_map_count == 0)
1453 list_add(&fpo->fpo_list, zombies);
1454 else
1455 list_add(&fpo->fpo_list, &fps->fps_failed_pool_list);
1456 }
1457
1458 spin_unlock(&fps->fps_lock);
1459 }
1460
1461 static void kiblnd_fini_fmr_poolset(kib_fmr_poolset_t *fps)
1462 {
1463 if (fps->fps_net) { /* initialized? */
1464 kiblnd_destroy_fmr_pool_list(&fps->fps_failed_pool_list);
1465 kiblnd_destroy_fmr_pool_list(&fps->fps_pool_list);
1466 }
1467 }
1468
1469 static int kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt,
1470 kib_net_t *net, int pool_size,
1471 int flush_trigger)
1472 {
1473 kib_fmr_pool_t *fpo;
1474 int rc;
1475
1476 memset(fps, 0, sizeof(*fps));
1477
1478 fps->fps_net = net;
1479 fps->fps_cpt = cpt;
1480 fps->fps_pool_size = pool_size;
1481 fps->fps_flush_trigger = flush_trigger;
1482 spin_lock_init(&fps->fps_lock);
1483 INIT_LIST_HEAD(&fps->fps_pool_list);
1484 INIT_LIST_HEAD(&fps->fps_failed_pool_list);
1485
1486 rc = kiblnd_create_fmr_pool(fps, &fpo);
1487 if (rc == 0)
1488 list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
1489
1490 return rc;
1491 }
1492
1493 static int kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, unsigned long now)
1494 {
1495 if (fpo->fpo_map_count != 0) /* still in use */
1496 return 0;
1497 if (fpo->fpo_failed)
1498 return 1;
1499 return cfs_time_aftereq(now, fpo->fpo_deadline);
1500 }
1501
1502 void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status)
1503 {
1504 LIST_HEAD(zombies);
1505 kib_fmr_pool_t *fpo = fmr->fmr_pool;
1506 kib_fmr_poolset_t *fps = fpo->fpo_owner;
1507 unsigned long now = cfs_time_current();
1508 kib_fmr_pool_t *tmp;
1509 int rc;
1510
1511 rc = ib_fmr_pool_unmap(fmr->fmr_pfmr);
1512 LASSERT(rc == 0);
1513
1514 if (status != 0) {
1515 rc = ib_flush_fmr_pool(fpo->fpo_fmr_pool);
1516 LASSERT(rc == 0);
1517 }
1518
1519 fmr->fmr_pool = NULL;
1520 fmr->fmr_pfmr = NULL;
1521
1522 spin_lock(&fps->fps_lock);
1523 fpo->fpo_map_count--; /* decref the pool */
1524
1525 list_for_each_entry_safe(fpo, tmp, &fps->fps_pool_list, fpo_list) {
1526 /* the first pool is persistent */
1527 if (fps->fps_pool_list.next == &fpo->fpo_list)
1528 continue;
1529
1530 if (kiblnd_fmr_pool_is_idle(fpo, now)) {
1531 list_move(&fpo->fpo_list, &zombies);
1532 fps->fps_version++;
1533 }
1534 }
1535 spin_unlock(&fps->fps_lock);
1536
1537 if (!list_empty(&zombies))
1538 kiblnd_destroy_fmr_pool_list(&zombies);
1539 }
1540
1541 int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages,
1542 __u64 iov, kib_fmr_t *fmr)
1543 {
1544 struct ib_pool_fmr *pfmr;
1545 kib_fmr_pool_t *fpo;
1546 __u64 version;
1547 int rc;
1548
1549 again:
1550 spin_lock(&fps->fps_lock);
1551 version = fps->fps_version;
1552 list_for_each_entry(fpo, &fps->fps_pool_list, fpo_list) {
1553 fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
1554 fpo->fpo_map_count++;
1555 spin_unlock(&fps->fps_lock);
1556
1557 pfmr = ib_fmr_pool_map_phys(fpo->fpo_fmr_pool,
1558 pages, npages, iov);
1559 if (likely(!IS_ERR(pfmr))) {
1560 fmr->fmr_pool = fpo;
1561 fmr->fmr_pfmr = pfmr;
1562 return 0;
1563 }
1564
1565 spin_lock(&fps->fps_lock);
1566 fpo->fpo_map_count--;
1567 if (PTR_ERR(pfmr) != -EAGAIN) {
1568 spin_unlock(&fps->fps_lock);
1569 return PTR_ERR(pfmr);
1570 }
1571
1572 /* EAGAIN and ... */
1573 if (version != fps->fps_version) {
1574 spin_unlock(&fps->fps_lock);
1575 goto again;
1576 }
1577 }
1578
1579 if (fps->fps_increasing) {
1580 spin_unlock(&fps->fps_lock);
1581 CDEBUG(D_NET, "Another thread is allocating new FMR pool, waiting for her to complete\n");
1582 schedule();
1583 goto again;
1584 }
1585
1586 if (time_before(cfs_time_current(), fps->fps_next_retry)) {
1587 /* someone failed recently */
1588 spin_unlock(&fps->fps_lock);
1589 return -EAGAIN;
1590 }
1591
1592 fps->fps_increasing = 1;
1593 spin_unlock(&fps->fps_lock);
1594
1595 CDEBUG(D_NET, "Allocate new FMR pool\n");
1596 rc = kiblnd_create_fmr_pool(fps, &fpo);
1597 spin_lock(&fps->fps_lock);
1598 fps->fps_increasing = 0;
1599 if (rc == 0) {
1600 fps->fps_version++;
1601 list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
1602 } else {
1603 fps->fps_next_retry = cfs_time_shift(IBLND_POOL_RETRY);
1604 }
1605 spin_unlock(&fps->fps_lock);
1606
1607 goto again;
1608 }
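/*
 * Editorial sketch: fps_version implements an optimistic retry -- it is
 * bumped whenever the pool list changes, so a mapper that got -EAGAIN
 * rescans only if another thread changed the list behind its back.  The
 * pattern in miniature (names here are illustrative):
 */
#if 0
again:
	spin_lock(&lock);
	saved = shared_version;
	/* try each pool, dropping the lock around the slow mapping call */
	if (got_EAGAIN && saved != shared_version)
		goto again;	/* list changed: a new pool may have room */
#endif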
1609
1610 static void kiblnd_fini_pool(kib_pool_t *pool)
1611 {
1612 LASSERT(list_empty(&pool->po_free_list));
1613 LASSERT(pool->po_allocated == 0);
1614
1615 CDEBUG(D_NET, "Finalize %s pool\n", pool->po_owner->ps_name);
1616 }
1617
1618 static void kiblnd_init_pool(kib_poolset_t *ps, kib_pool_t *pool, int size)
1619 {
1620 CDEBUG(D_NET, "Initialize %s pool\n", ps->ps_name);
1621
1622 memset(pool, 0, sizeof(*pool));
1623 INIT_LIST_HEAD(&pool->po_free_list);
1624 pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
1625 pool->po_owner = ps;
1626 pool->po_size = size;
1627 }
1628
1629 static void kiblnd_destroy_pool_list(struct list_head *head)
1630 {
1631 kib_pool_t *pool;
1632
1633 while (!list_empty(head)) {
1634 pool = list_entry(head->next, kib_pool_t, po_list);
1635 list_del(&pool->po_list);
1636
1637 LASSERT(pool->po_owner);
1638 pool->po_owner->ps_pool_destroy(pool);
1639 }
1640 }
1641
1642 static void kiblnd_fail_poolset(kib_poolset_t *ps, struct list_head *zombies)
1643 {
1644 if (!ps->ps_net) /* initialized? */
1645 return;
1646
1647 spin_lock(&ps->ps_lock);
1648 while (!list_empty(&ps->ps_pool_list)) {
1649 kib_pool_t *po = list_entry(ps->ps_pool_list.next,
1650 kib_pool_t, po_list);
1651 po->po_failed = 1;
1652 list_del(&po->po_list);
1653 if (po->po_allocated == 0)
1654 list_add(&po->po_list, zombies);
1655 else
1656 list_add(&po->po_list, &ps->ps_failed_pool_list);
1657 }
1658 spin_unlock(&ps->ps_lock);
1659 }
1660
1661 static void kiblnd_fini_poolset(kib_poolset_t *ps)
1662 {
1663 if (ps->ps_net) { /* initialized? */
1664 kiblnd_destroy_pool_list(&ps->ps_failed_pool_list);
1665 kiblnd_destroy_pool_list(&ps->ps_pool_list);
1666 }
1667 }
1668
1669 static int kiblnd_init_poolset(kib_poolset_t *ps, int cpt,
1670 kib_net_t *net, char *name, int size,
1671 kib_ps_pool_create_t po_create,
1672 kib_ps_pool_destroy_t po_destroy,
1673 kib_ps_node_init_t nd_init,
1674 kib_ps_node_fini_t nd_fini)
1675 {
1676 kib_pool_t *pool;
1677 int rc;
1678
1679 memset(ps, 0, sizeof(*ps));
1680
1681 ps->ps_cpt = cpt;
1682 ps->ps_net = net;
1683 ps->ps_pool_create = po_create;
1684 ps->ps_pool_destroy = po_destroy;
1685 ps->ps_node_init = nd_init;
1686 ps->ps_node_fini = nd_fini;
1687 ps->ps_pool_size = size;
1688 if (strlcpy(ps->ps_name, name, sizeof(ps->ps_name))
1689 >= sizeof(ps->ps_name))
1690 return -E2BIG;
1691 spin_lock_init(&ps->ps_lock);
1692 INIT_LIST_HEAD(&ps->ps_pool_list);
1693 INIT_LIST_HEAD(&ps->ps_failed_pool_list);
1694
1695 rc = ps->ps_pool_create(ps, size, &pool);
1696 if (rc == 0)
1697 list_add(&pool->po_list, &ps->ps_pool_list);
1698 else
1699 CERROR("Failed to create the first pool for %s\n", ps->ps_name);
1700
1701 return rc;
1702 }
1703
1704 static int kiblnd_pool_is_idle(kib_pool_t *pool, unsigned long now)
1705 {
1706 if (pool->po_allocated != 0) /* still in use */
1707 return 0;
1708 if (pool->po_failed)
1709 return 1;
1710 return cfs_time_aftereq(now, pool->po_deadline);
1711 }
1712
1713 void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node)
1714 {
1715 LIST_HEAD(zombies);
1716 kib_poolset_t *ps = pool->po_owner;
1717 kib_pool_t *tmp;
1718 unsigned long now = cfs_time_current();
1719
1720 spin_lock(&ps->ps_lock);
1721
1722 if (ps->ps_node_fini)
1723 ps->ps_node_fini(pool, node);
1724
1725 LASSERT(pool->po_allocated > 0);
1726 list_add(node, &pool->po_free_list);
1727 pool->po_allocated--;
1728
1729 list_for_each_entry_safe(pool, tmp, &ps->ps_pool_list, po_list) {
1730 /* the first pool is persistent */
1731 if (ps->ps_pool_list.next == &pool->po_list)
1732 continue;
1733
1734 if (kiblnd_pool_is_idle(pool, now))
1735 list_move(&pool->po_list, &zombies);
1736 }
1737 spin_unlock(&ps->ps_lock);
1738
1739 if (!list_empty(&zombies))
1740 kiblnd_destroy_pool_list(&zombies);
1741 }
1742
1743 struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps)
1744 {
1745 struct list_head *node;
1746 kib_pool_t *pool;
1747 int rc;
1748
1749 again:
1750 spin_lock(&ps->ps_lock);
1751 list_for_each_entry(pool, &ps->ps_pool_list, po_list) {
1752 if (list_empty(&pool->po_free_list))
1753 continue;
1754
1755 pool->po_allocated++;
1756 pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
1757 node = pool->po_free_list.next;
1758 list_del(node);
1759
1760 if (ps->ps_node_init) {
1761 /* still hold the lock */
1762 ps->ps_node_init(pool, node);
1763 }
1764 spin_unlock(&ps->ps_lock);
1765 return node;
1766 }
1767
1768 /* no available tx pool and ... */
1769 if (ps->ps_increasing) {
1770 /* another thread is allocating a new pool */
1771 spin_unlock(&ps->ps_lock);
1772 CDEBUG(D_NET, "Another thread is allocating new %s pool, waiting for it to complete\n",
1773 ps->ps_name);
1774 schedule();
1775 goto again;
1776 }
1777
1778 if (time_before(cfs_time_current(), ps->ps_next_retry)) {
1779 /* someone failed recently */
1780 spin_unlock(&ps->ps_lock);
1781 return NULL;
1782 }
1783
1784 ps->ps_increasing = 1;
1785 spin_unlock(&ps->ps_lock);
1786
1787 CDEBUG(D_NET, "%s pool exhausted, allocate new pool\n", ps->ps_name);
1788
1789 rc = ps->ps_pool_create(ps, ps->ps_pool_size, &pool);
1790
1791 spin_lock(&ps->ps_lock);
1792 ps->ps_increasing = 0;
1793 if (rc == 0) {
1794 list_add_tail(&pool->po_list, &ps->ps_pool_list);
1795 } else {
1796 ps->ps_next_retry = cfs_time_shift(IBLND_POOL_RETRY);
1797 CERROR("Can't allocate new %s pool because out of memory\n",
1798 ps->ps_name);
1799 }
1800 spin_unlock(&ps->ps_lock);
1801
1802 goto again;
1803 }
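/*
 * Editorial sketch: a typical TX descriptor round trip through the
 * poolset above, using only names defined in this file:
 */
#if 0
	struct list_head *node;
	kib_tx_t *tx;

	node = kiblnd_pool_alloc_node(&tps->tps_poolset);
	if (!node)			/* grow failed or retry throttled */
		return -ENOMEM;

	tx = list_entry(node, kib_tx_t, tx_list);
	/* ... post tx ... */
	kiblnd_pool_free_node(&tx->tx_pool->tpo_pool, &tx->tx_list);
#endif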
1804
1805 static void kiblnd_destroy_tx_pool(kib_pool_t *pool)
1806 {
1807 kib_tx_pool_t *tpo = container_of(pool, kib_tx_pool_t, tpo_pool);
1808 int i;
1809
1810 LASSERT(pool->po_allocated == 0);
1811
1812 if (tpo->tpo_tx_pages) {
1813 kiblnd_unmap_tx_pool(tpo);
1814 kiblnd_free_pages(tpo->tpo_tx_pages);
1815 }
1816
1817 if (!tpo->tpo_tx_descs)
1818 goto out;
1819
1820 for (i = 0; i < pool->po_size; i++) {
1821 kib_tx_t *tx = &tpo->tpo_tx_descs[i];
1822
1823 list_del(&tx->tx_list);
1824 if (tx->tx_pages)
1825 LIBCFS_FREE(tx->tx_pages,
1826 LNET_MAX_IOV *
1827 sizeof(*tx->tx_pages));
1828 if (tx->tx_frags)
1829 LIBCFS_FREE(tx->tx_frags,
1830 IBLND_MAX_RDMA_FRAGS *
1831 sizeof(*tx->tx_frags));
1832 if (tx->tx_wrq)
1833 LIBCFS_FREE(tx->tx_wrq,
1834 (1 + IBLND_MAX_RDMA_FRAGS) *
1835 sizeof(*tx->tx_wrq));
1836 if (tx->tx_sge)
1837 LIBCFS_FREE(tx->tx_sge,
1838 (1 + IBLND_MAX_RDMA_FRAGS) *
1839 sizeof(*tx->tx_sge));
1840 if (tx->tx_rd)
1841 LIBCFS_FREE(tx->tx_rd,
1842 offsetof(kib_rdma_desc_t,
1843 rd_frags[IBLND_MAX_RDMA_FRAGS]));
1844 }
1845
1846 LIBCFS_FREE(tpo->tpo_tx_descs,
1847 pool->po_size * sizeof(kib_tx_t));
1848 out:
1849 kiblnd_fini_pool(pool);
1850 LIBCFS_FREE(tpo, sizeof(*tpo));
1851 }
1852
1853 static int kiblnd_tx_pool_size(int ncpts)
1854 {
1855 int ntx = *kiblnd_tunables.kib_ntx / ncpts;
1856
1857 return max(IBLND_TX_POOL, ntx);
1858 }
1859
1860 static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size,
1861 kib_pool_t **pp_po)
1862 {
1863 int i;
1864 int npg;
1865 kib_pool_t *pool;
1866 kib_tx_pool_t *tpo;
1867
1868 LIBCFS_CPT_ALLOC(tpo, lnet_cpt_table(), ps->ps_cpt, sizeof(*tpo));
1869 if (!tpo) {
1870 CERROR("Failed to allocate TX pool\n");
1871 return -ENOMEM;
1872 }
1873
1874 pool = &tpo->tpo_pool;
1875 kiblnd_init_pool(ps, pool, size);
1876 tpo->tpo_tx_descs = NULL;
1877 tpo->tpo_tx_pages = NULL;
1878
1879 npg = (size * IBLND_MSG_SIZE + PAGE_SIZE - 1) / PAGE_SIZE;
1880 if (kiblnd_alloc_pages(&tpo->tpo_tx_pages, ps->ps_cpt, npg) != 0) {
1881 CERROR("Can't allocate tx pages: %d\n", npg);
1882 LIBCFS_FREE(tpo, sizeof(*tpo));
1883 return -ENOMEM;
1884 }
1885
1886 LIBCFS_CPT_ALLOC(tpo->tpo_tx_descs, lnet_cpt_table(), ps->ps_cpt,
1887 size * sizeof(kib_tx_t));
1888 if (!tpo->tpo_tx_descs) {
1889 CERROR("Can't allocate %d tx descriptors\n", size);
1890 ps->ps_pool_destroy(pool);
1891 return -ENOMEM;
1892 }
1893
1894 memset(tpo->tpo_tx_descs, 0, size * sizeof(kib_tx_t));
1895
1896 for (i = 0; i < size; i++) {
1897 kib_tx_t *tx = &tpo->tpo_tx_descs[i];
1898
1899 tx->tx_pool = tpo;
1900 if (ps->ps_net->ibn_fmr_ps) {
1901 LIBCFS_CPT_ALLOC(tx->tx_pages,
1902 lnet_cpt_table(), ps->ps_cpt,
1903 LNET_MAX_IOV * sizeof(*tx->tx_pages));
1904 if (!tx->tx_pages)
1905 break;
1906 }
1907
1908 LIBCFS_CPT_ALLOC(tx->tx_frags, lnet_cpt_table(), ps->ps_cpt,
1909 IBLND_MAX_RDMA_FRAGS * sizeof(*tx->tx_frags));
1910 if (!tx->tx_frags)
1911 break;
1912
1913 sg_init_table(tx->tx_frags, IBLND_MAX_RDMA_FRAGS);
1914
1915 LIBCFS_CPT_ALLOC(tx->tx_wrq, lnet_cpt_table(), ps->ps_cpt,
1916 (1 + IBLND_MAX_RDMA_FRAGS) *
1917 sizeof(*tx->tx_wrq));
1918 if (!tx->tx_wrq)
1919 break;
1920
1921 LIBCFS_CPT_ALLOC(tx->tx_sge, lnet_cpt_table(), ps->ps_cpt,
1922 (1 + IBLND_MAX_RDMA_FRAGS) *
1923 sizeof(*tx->tx_sge));
1924 if (!tx->tx_sge)
1925 break;
1926
1927 LIBCFS_CPT_ALLOC(tx->tx_rd, lnet_cpt_table(), ps->ps_cpt,
1928 offsetof(kib_rdma_desc_t,
1929 rd_frags[IBLND_MAX_RDMA_FRAGS]));
1930 if (!tx->tx_rd)
1931 break;
1932 }
1933
1934 if (i == size) {
1935 kiblnd_map_tx_pool(tpo);
1936 *pp_po = pool;
1937 return 0;
1938 }
1939
1940 ps->ps_pool_destroy(pool);
1941 return -ENOMEM;
1942 }
1943
1944 static void kiblnd_tx_init(kib_pool_t *pool, struct list_head *node)
1945 {
1946 kib_tx_poolset_t *tps = container_of(pool->po_owner, kib_tx_poolset_t,
1947 tps_poolset);
1948 kib_tx_t *tx = list_entry(node, kib_tx_t, tx_list);
1949
1950 tx->tx_cookie = tps->tps_next_tx_cookie++;
1951 }
1952
1953 static void kiblnd_net_fini_pools(kib_net_t *net)
1954 {
1955 int i;
1956
1957 cfs_cpt_for_each(i, lnet_cpt_table()) {
1958 kib_tx_poolset_t *tps;
1959 kib_fmr_poolset_t *fps;
1960
1961 if (net->ibn_tx_ps) {
1962 tps = net->ibn_tx_ps[i];
1963 kiblnd_fini_poolset(&tps->tps_poolset);
1964 }
1965
1966 if (net->ibn_fmr_ps) {
1967 fps = net->ibn_fmr_ps[i];
1968 kiblnd_fini_fmr_poolset(fps);
1969 }
1970 }
1971
1972 if (net->ibn_tx_ps) {
1973 cfs_percpt_free(net->ibn_tx_ps);
1974 net->ibn_tx_ps = NULL;
1975 }
1976
1977 if (net->ibn_fmr_ps) {
1978 cfs_percpt_free(net->ibn_fmr_ps);
1979 net->ibn_fmr_ps = NULL;
1980 }
1981 }
1982
1983 static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts)
1984 {
1985 unsigned long flags;
1986 int cpt;
1987 int rc = 0;
1988 int i;
1989
1990 read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
1991 if (*kiblnd_tunables.kib_map_on_demand == 0 &&
1992 net->ibn_dev->ibd_hdev->ibh_nmrs == 1) {
1993 read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
1994 goto create_tx_pool;
1995 }
1996
1997 read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
1998
1999 if (*kiblnd_tunables.kib_fmr_pool_size <
2000 *kiblnd_tunables.kib_ntx / 4) {
2001 CERROR("Can't set fmr pool size (%d) < ntx / 4(%d)\n",
2002 *kiblnd_tunables.kib_fmr_pool_size,
2003 *kiblnd_tunables.kib_ntx / 4);
2004 rc = -EINVAL;
2005 goto failed;
2006 }
2007
2008 /*
2009 * TX pool must be created later than FMR, see LU-2268
2010 * for details
2011 */
2012 LASSERT(!net->ibn_tx_ps);
2013
2014 /*
2015 * premapping can fail if ibd_nmr > 1, so we always create
2016 * FMR pool and map-on-demand if premapping failed
2017 */
2018
2019 net->ibn_fmr_ps = cfs_percpt_alloc(lnet_cpt_table(),
2020 sizeof(kib_fmr_poolset_t));
2021 if (!net->ibn_fmr_ps) {
2022 CERROR("Failed to allocate FMR pool array\n");
2023 rc = -ENOMEM;
2024 goto failed;
2025 }
2026
2027 for (i = 0; i < ncpts; i++) {
2028 cpt = !cpts ? i : cpts[i];
2029 rc = kiblnd_init_fmr_poolset(net->ibn_fmr_ps[cpt], cpt, net,
2030 kiblnd_fmr_pool_size(ncpts),
2031 kiblnd_fmr_flush_trigger(ncpts));
2032 if (rc == -ENOSYS && i == 0) /* no FMR */
2033 break;
2034
2035 if (rc != 0) { /* a real error */
2036 CERROR("Can't initialize FMR pool for CPT %d: %d\n",
2037 cpt, rc);
2038 goto failed;
2039 }
2040 }
2041
2042 if (i > 0) {
2043 LASSERT(i == ncpts);
2044 goto create_tx_pool;
2045 }
2046
2047 cfs_percpt_free(net->ibn_fmr_ps);
2048 net->ibn_fmr_ps = NULL;
2049
2050 CWARN("Device does not support FMR\n");
2051 goto failed;
2052
2053 create_tx_pool:
2054 net->ibn_tx_ps = cfs_percpt_alloc(lnet_cpt_table(),
2055 sizeof(kib_tx_poolset_t));
2056 if (!net->ibn_tx_ps) {
2057 CERROR("Failed to allocate tx pool array\n");
2058 rc = -ENOMEM;
2059 goto failed;
2060 }
2061
2062 for (i = 0; i < ncpts; i++) {
2063 cpt = !cpts ? i : cpts[i];
2064 rc = kiblnd_init_poolset(&net->ibn_tx_ps[cpt]->tps_poolset,
2065 cpt, net, "TX",
2066 kiblnd_tx_pool_size(ncpts),
2067 kiblnd_create_tx_pool,
2068 kiblnd_destroy_tx_pool,
2069 kiblnd_tx_init, NULL);
2070 if (rc != 0) {
2071 CERROR("Can't initialize TX pool for CPT %d: %d\n",
2072 cpt, rc);
2073 goto failed;
2074 }
2075 }
2076
2077 return 0;
2078 failed:
2079 kiblnd_net_fini_pools(net);
2080 LASSERT(rc != 0);
2081 return rc;
2082 }
2083
2084 static int kiblnd_hdev_get_attr(kib_hca_dev_t *hdev)
2085 {
2086 /*
2087 * It's safe to assume a HCA can handle a page size
2088 * matching that of the native system
2089 */
2090 hdev->ibh_page_shift = PAGE_SHIFT;
2091 hdev->ibh_page_size = 1 << PAGE_SHIFT;
2092 hdev->ibh_page_mask = ~((__u64)hdev->ibh_page_size - 1);
2093
2094 hdev->ibh_mr_size = hdev->ibh_ibdev->attrs.max_mr_size;
2095 if (hdev->ibh_mr_size == ~0ULL) {
2096 hdev->ibh_mr_shift = 64;
2097 return 0;
2098 }
2099
2100 for (hdev->ibh_mr_shift = 0;
2101 hdev->ibh_mr_shift < 64; hdev->ibh_mr_shift++) {
2102 if (hdev->ibh_mr_size == (1ULL << hdev->ibh_mr_shift) ||
2103 hdev->ibh_mr_size == (1ULL << hdev->ibh_mr_shift) - 1)
2104 return 0;
2105 }
2106
2107 CERROR("Invalid mr size: %#llx\n", hdev->ibh_mr_size);
2108 return -EINVAL;
2109 }
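/*
 * Editorial note: the loop above recovers log2 of the device's maximum
 * MR size, accepting both 2^n and 2^n - 1 encodings.  E.g. a
 * max_mr_size of 0xffffffff (2^32 - 1) or 0x100000000 (2^32) both
 * yield ibh_mr_shift == 32; any other value is rejected with -EINVAL.
 */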
2110
2111 static void kiblnd_hdev_cleanup_mrs(kib_hca_dev_t *hdev)
2112 {
2113 int i;
2114
2115 if (hdev->ibh_nmrs == 0 || !hdev->ibh_mrs)
2116 return;
2117
2118 for (i = 0; i < hdev->ibh_nmrs; i++) {
2119 if (!hdev->ibh_mrs[i])
2120 break;
2121
2122 ib_dereg_mr(hdev->ibh_mrs[i]);
2123 }
2124
2125 LIBCFS_FREE(hdev->ibh_mrs, sizeof(*hdev->ibh_mrs) * hdev->ibh_nmrs);
2126 hdev->ibh_mrs = NULL;
2127 hdev->ibh_nmrs = 0;
2128 }
2129
2130 void kiblnd_hdev_destroy(kib_hca_dev_t *hdev)
2131 {
2132 kiblnd_hdev_cleanup_mrs(hdev);
2133
2134 if (hdev->ibh_pd)
2135 ib_dealloc_pd(hdev->ibh_pd);
2136
2137 if (hdev->ibh_cmid)
2138 rdma_destroy_id(hdev->ibh_cmid);
2139
2140 LIBCFS_FREE(hdev, sizeof(*hdev));
2141 }
2142
static int kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev)
{
	struct ib_mr *mr;
	int rc;
	int acflags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE;

	rc = kiblnd_hdev_get_attr(hdev);
	if (rc != 0)
		return rc;

	LIBCFS_ALLOC(hdev->ibh_mrs, 1 * sizeof(*hdev->ibh_mrs));
	if (!hdev->ibh_mrs) {
		CERROR("Failed to allocate MRs table\n");
		return -ENOMEM;
	}

	hdev->ibh_mrs[0] = NULL;
	hdev->ibh_nmrs = 1;

	mr = ib_get_dma_mr(hdev->ibh_pd, acflags);
	if (IS_ERR(mr)) {
		CERROR("Failed ib_get_dma_mr: %ld\n", PTR_ERR(mr));
		kiblnd_hdev_cleanup_mrs(hdev);
		return PTR_ERR(mr);
	}

	hdev->ibh_mrs[0] = mr;

	return 0;
}

/* dummy CM callback: kiblnd_dev_need_failover() only probes addresses */
static int kiblnd_dummy_callback(struct rdma_cm_id *cmid,
				 struct rdma_cm_event *event)
{
	return 0;
}

static int kiblnd_dev_need_failover(kib_dev_t *dev)
{
	struct rdma_cm_id *cmid;
	struct sockaddr_in srcaddr;
	struct sockaddr_in dstaddr;
	int rc;

	if (!dev->ibd_hdev || /* initializing */
	    !dev->ibd_hdev->ibh_cmid || /* listener is dead */
	    *kiblnd_tunables.kib_dev_failover > 1) /* debugging */
		return 1;

	/*
	 * XXX: it's ugly, but there is no better way to detect
	 * ib-bonding HCA failover because:
	 *
	 * a. no reliable CM event for HCA failover...
	 * b. no OFED API to get the ib_device for the current net_device...
	 *
	 * We have only two choices at this point:
	 *
	 * a. rdma_bind_addr(), which would conflict with the listener cmid
	 * b. rdma_resolve_addr() to the zero address (used below)
	 */
	cmid = kiblnd_rdma_create_id(kiblnd_dummy_callback, dev, RDMA_PS_TCP,
				     IB_QPT_RC);
	if (IS_ERR(cmid)) {
		rc = PTR_ERR(cmid);
		CERROR("Failed to create cmid for failover: %d\n", rc);
		return rc;
	}

	memset(&srcaddr, 0, sizeof(srcaddr));
	srcaddr.sin_family = AF_INET;
	srcaddr.sin_addr.s_addr = (__force u32)htonl(dev->ibd_ifip);

	memset(&dstaddr, 0, sizeof(dstaddr));
	dstaddr.sin_family = AF_INET;
	rc = rdma_resolve_addr(cmid, (struct sockaddr *)&srcaddr,
			       (struct sockaddr *)&dstaddr, 1);
	if (rc != 0 || !cmid->device) {
		CERROR("Failed to bind %s:%pI4h to device(%p): %d\n",
		       dev->ibd_ifname, &dev->ibd_ifip,
		       cmid->device, rc);
		rdma_destroy_id(cmid);
		return rc;
	}

	rc = dev->ibd_hdev->ibh_ibdev != cmid->device; /* true for failover */
	rdma_destroy_id(cmid);

	return rc;
}

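/*
 * Tear down the old listener and rebuild the CM id, PD and MRs on
 * whatever HCA the interface address now resolves to, then fail all
 * TX/FMR pools so their descriptors are retired via the zombie lists.
 */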
int kiblnd_dev_failover(kib_dev_t *dev)
{
	LIST_HEAD(zombie_tpo);
	LIST_HEAD(zombie_ppo);
	LIST_HEAD(zombie_fpo);
	struct rdma_cm_id *cmid = NULL;
	kib_hca_dev_t *hdev = NULL;
	struct ib_pd *pd;
	kib_net_t *net;
	struct sockaddr_in addr;
	unsigned long flags;
	int rc = 0;
	int i;

	LASSERT(*kiblnd_tunables.kib_dev_failover > 1 ||
		dev->ibd_can_failover || !dev->ibd_hdev);

	rc = kiblnd_dev_need_failover(dev);
	if (rc <= 0)
		goto out;

	if (dev->ibd_hdev &&
	    dev->ibd_hdev->ibh_cmid) {
		/*
		 * XXX it's not good to close the old listener here, because
		 * creating the new listener may still fail; but we have to
		 * close it now, otherwise rdma_bind_addr() will return
		 * EADDRINUSE.
		 */
		write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

		cmid = dev->ibd_hdev->ibh_cmid;
		/*
		 * make the next run of kiblnd_dev_need_failover()
		 * return 1
		 */
		dev->ibd_hdev->ibh_cmid = NULL;
		write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

		rdma_destroy_id(cmid);
	}

	cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, dev, RDMA_PS_TCP,
				     IB_QPT_RC);
	if (IS_ERR(cmid)) {
		rc = PTR_ERR(cmid);
		CERROR("Failed to create cmid for failover: %d\n", rc);
		goto out;
	}

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = (__force u32)htonl(dev->ibd_ifip);
	addr.sin_port = htons(*kiblnd_tunables.kib_service);

	/* Bind to failover device or port */
	rc = rdma_bind_addr(cmid, (struct sockaddr *)&addr);
	if (rc != 0 || !cmid->device) {
		CERROR("Failed to bind %s:%pI4h to device(%p): %d\n",
		       dev->ibd_ifname, &dev->ibd_ifip,
		       cmid->device, rc);
		rdma_destroy_id(cmid);
		goto out;
	}

	LIBCFS_ALLOC(hdev, sizeof(*hdev));
	if (!hdev) {
		CERROR("Failed to allocate kib_hca_dev\n");
		rdma_destroy_id(cmid);
		rc = -ENOMEM;
		goto out;
	}

	atomic_set(&hdev->ibh_ref, 1);
	hdev->ibh_dev = dev;
	hdev->ibh_cmid = cmid;
	hdev->ibh_ibdev = cmid->device;

	pd = ib_alloc_pd(cmid->device);
	if (IS_ERR(pd)) {
		rc = PTR_ERR(pd);
		CERROR("Can't allocate PD: %d\n", rc);
		goto out;
	}

	hdev->ibh_pd = pd;

	rc = rdma_listen(cmid, 0);
	if (rc != 0) {
		CERROR("Can't start new listener: %d\n", rc);
		goto out;
	}

	rc = kiblnd_hdev_setup_mrs(hdev);
	if (rc != 0) {
		CERROR("Can't setup device: %d\n", rc);
		goto out;
	}

	write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

	swap(dev->ibd_hdev, hdev); /* take over the refcount */

	list_for_each_entry(net, &dev->ibd_nets, ibn_list) {
		cfs_cpt_for_each(i, lnet_cpt_table()) {
			kiblnd_fail_poolset(&net->ibn_tx_ps[i]->tps_poolset,
					    &zombie_tpo);

			if (net->ibn_fmr_ps)
				kiblnd_fail_fmr_poolset(net->ibn_fmr_ps[i],
							&zombie_fpo);
		}
	}

	write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 out:
	if (!list_empty(&zombie_tpo))
		kiblnd_destroy_pool_list(&zombie_tpo);
	if (!list_empty(&zombie_ppo))
		kiblnd_destroy_pool_list(&zombie_ppo);
	if (!list_empty(&zombie_fpo))
		kiblnd_destroy_fmr_pool_list(&zombie_fpo);
	if (hdev)
		kiblnd_hdev_decref(hdev);

	if (rc != 0)
		dev->ibd_failed_failover++;
	else
		dev->ibd_failed_failover = 0;

	return rc;
}

void kiblnd_destroy_dev(kib_dev_t *dev)
{
	LASSERT(dev->ibd_nnets == 0);
	LASSERT(list_empty(&dev->ibd_nets));

	list_del(&dev->ibd_fail_list);
	list_del(&dev->ibd_list);

	if (dev->ibd_hdev)
		kiblnd_hdev_decref(dev->ibd_hdev);

	LIBCFS_FREE(dev, sizeof(*dev));
}

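/*
 * Create a kib_dev_t for IPoIB interface @ifname, which must be up and
 * have an IP address.  Failover capability is inferred from IFF_MASTER,
 * i.e. the interface is an ib-bonding master.
 */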
static kib_dev_t *kiblnd_create_dev(char *ifname)
{
	struct net_device *netdev;
	kib_dev_t *dev;
	__u32 netmask;
	__u32 ip;
	int up;
	int rc;

	rc = lnet_ipif_query(ifname, &up, &ip, &netmask);
	if (rc != 0) {
		CERROR("Can't query IPoIB interface %s: %d\n",
		       ifname, rc);
		return NULL;
	}

	if (!up) {
		CERROR("Can't query IPoIB interface %s: it's down\n", ifname);
		return NULL;
	}

	LIBCFS_ALLOC(dev, sizeof(*dev));
	if (!dev)
		return NULL;

	netdev = dev_get_by_name(&init_net, ifname);
	if (!netdev) {
		dev->ibd_can_failover = 0;
	} else {
		dev->ibd_can_failover = !!(netdev->flags & IFF_MASTER);
		dev_put(netdev);
	}

	INIT_LIST_HEAD(&dev->ibd_nets);
	INIT_LIST_HEAD(&dev->ibd_list); /* not yet in kib_devs */
	INIT_LIST_HEAD(&dev->ibd_fail_list);
	dev->ibd_ifip = ip;
	strcpy(&dev->ibd_ifname[0], ifname);

	/* initialize the device */
	rc = kiblnd_dev_failover(dev);
	if (rc != 0) {
		CERROR("Can't initialize device: %d\n", rc);
		LIBCFS_FREE(dev, sizeof(*dev));
		return NULL;
	}

	list_add_tail(&dev->ibd_list, &kiblnd_data.kib_devs);
	return dev;
}

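/* Module-wide teardown: stop all threads and free the global state. */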
static void kiblnd_base_shutdown(void)
{
	struct kib_sched_info *sched;
	int i;

	LASSERT(list_empty(&kiblnd_data.kib_devs));

	switch (kiblnd_data.kib_init) {
	default:
		LBUG();

	case IBLND_INIT_ALL:
	case IBLND_INIT_DATA:
		LASSERT(kiblnd_data.kib_peers);
		for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++)
			LASSERT(list_empty(&kiblnd_data.kib_peers[i]));
		LASSERT(list_empty(&kiblnd_data.kib_connd_zombies));
		LASSERT(list_empty(&kiblnd_data.kib_connd_conns));

		/* flag threads to terminate; wake and wait for them to die */
		kiblnd_data.kib_shutdown = 1;

		/*
		 * NB: we really want to stop scheduler threads net by net
		 * rather than for the whole module; this should be improved
		 * with dynamic LNet configuration.
		 */
		cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds)
			wake_up_all(&sched->ibs_waitq);

		wake_up_all(&kiblnd_data.kib_connd_waitq);
		wake_up_all(&kiblnd_data.kib_failover_waitq);

		i = 2;
		while (atomic_read(&kiblnd_data.kib_nthreads) != 0) {
			i++;
			/* power of 2? */
			CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
			       "Waiting for %d threads to terminate\n",
			       atomic_read(&kiblnd_data.kib_nthreads));
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(cfs_time_seconds(1));
		}

		/* fall through */

	case IBLND_INIT_NOTHING:
		break;
	}

	if (kiblnd_data.kib_peers) {
		LIBCFS_FREE(kiblnd_data.kib_peers,
			    sizeof(struct list_head) *
			    kiblnd_data.kib_peer_hash_size);
	}

	if (kiblnd_data.kib_scheds)
		cfs_percpt_free(kiblnd_data.kib_scheds);

	kiblnd_data.kib_init = IBLND_INIT_NOTHING;
	module_put(THIS_MODULE);
}

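/*
 * Shut down one NI: delete its peers, wait for them to disconnect, free
 * its pools and detach it from its device.  When the last device goes,
 * kiblnd_base_shutdown() takes the whole module down.
 */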
void kiblnd_shutdown(lnet_ni_t *ni)
{
	kib_net_t *net = ni->ni_data;
	rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
	int i;
	unsigned long flags;

	LASSERT(kiblnd_data.kib_init == IBLND_INIT_ALL);

	if (!net)
		goto out;

	write_lock_irqsave(g_lock, flags);
	net->ibn_shutdown = 1;
	write_unlock_irqrestore(g_lock, flags);

	switch (net->ibn_init) {
	default:
		LBUG();

	case IBLND_INIT_ALL:
		/* nuke all existing peers within this net */
		kiblnd_del_peer(ni, LNET_NID_ANY);

		/* Wait for all peer state to clean up */
		i = 2;
		while (atomic_read(&net->ibn_npeers) != 0) {
			i++;
			CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n? */
			       "%s: waiting for %d peers to disconnect\n",
			       libcfs_nid2str(ni->ni_nid),
			       atomic_read(&net->ibn_npeers));
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(cfs_time_seconds(1));
		}

		kiblnd_net_fini_pools(net);

		write_lock_irqsave(g_lock, flags);
		LASSERT(net->ibn_dev->ibd_nnets > 0);
		net->ibn_dev->ibd_nnets--;
		list_del(&net->ibn_list);
		write_unlock_irqrestore(g_lock, flags);

		/* fall through */

	case IBLND_INIT_NOTHING:
		LASSERT(atomic_read(&net->ibn_nconns) == 0);

		if (net->ibn_dev &&
		    net->ibn_dev->ibd_nnets == 0)
			kiblnd_destroy_dev(net->ibn_dev);

		break;
	}

	net->ibn_init = IBLND_INIT_NOTHING;
	ni->ni_data = NULL;

	LIBCFS_FREE(net, sizeof(*net));

 out:
	if (list_empty(&kiblnd_data.kib_devs))
		kiblnd_base_shutdown();
}

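/*
 * One-time module initialisation: global locks and lists, the peer hash
 * table, per-CPT scheduler state and the connd/failover threads.
 */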
static int kiblnd_base_startup(void)
{
	struct kib_sched_info *sched;
	int rc;
	int i;

	LASSERT(kiblnd_data.kib_init == IBLND_INIT_NOTHING);

	try_module_get(THIS_MODULE);
	/* zero pointers, flags etc */
	memset(&kiblnd_data, 0, sizeof(kiblnd_data));

	rwlock_init(&kiblnd_data.kib_global_lock);

	INIT_LIST_HEAD(&kiblnd_data.kib_devs);
	INIT_LIST_HEAD(&kiblnd_data.kib_failed_devs);

	kiblnd_data.kib_peer_hash_size = IBLND_PEER_HASH_SIZE;
	LIBCFS_ALLOC(kiblnd_data.kib_peers,
		     sizeof(struct list_head) * kiblnd_data.kib_peer_hash_size);
	if (!kiblnd_data.kib_peers)
		goto failed;
	for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++)
		INIT_LIST_HEAD(&kiblnd_data.kib_peers[i]);

	spin_lock_init(&kiblnd_data.kib_connd_lock);
	INIT_LIST_HEAD(&kiblnd_data.kib_connd_conns);
	INIT_LIST_HEAD(&kiblnd_data.kib_connd_zombies);
	init_waitqueue_head(&kiblnd_data.kib_connd_waitq);
	init_waitqueue_head(&kiblnd_data.kib_failover_waitq);

	kiblnd_data.kib_scheds = cfs_percpt_alloc(lnet_cpt_table(),
						  sizeof(*sched));
	if (!kiblnd_data.kib_scheds)
		goto failed;

	cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds) {
		int nthrs;

		spin_lock_init(&sched->ibs_lock);
		INIT_LIST_HEAD(&sched->ibs_conns);
		init_waitqueue_head(&sched->ibs_waitq);

		nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
		if (*kiblnd_tunables.kib_nscheds > 0) {
			nthrs = min(nthrs, *kiblnd_tunables.kib_nscheds);
		} else {
			/*
			 * cap at half of the CPUs in this CPT; the other
			 * half is reserved for upper-layer modules
			 */
			nthrs = min(max(IBLND_N_SCHED, nthrs >> 1), nthrs);
		}

		sched->ibs_nthreads_max = nthrs;
		sched->ibs_cpt = i;
	}

	kiblnd_data.kib_error_qpa.qp_state = IB_QPS_ERR;

	/* lists/ptrs/locks initialised */
	kiblnd_data.kib_init = IBLND_INIT_DATA;
	/*****************************************************/

	rc = kiblnd_thread_start(kiblnd_connd, NULL, "kiblnd_connd");
	if (rc != 0) {
		CERROR("Can't spawn o2iblnd connd: %d\n", rc);
		goto failed;
	}

	if (*kiblnd_tunables.kib_dev_failover != 0)
		rc = kiblnd_thread_start(kiblnd_failover_thread, NULL,
					 "kiblnd_failover");

	if (rc != 0) {
		CERROR("Can't spawn o2iblnd failover thread: %d\n", rc);
		goto failed;
	}

	/* flag everything initialised */
	kiblnd_data.kib_init = IBLND_INIT_ALL;
	/*****************************************************/

	return 0;

 failed:
	kiblnd_base_shutdown();
	return -ENETDOWN;
}

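/*
 * Start scheduler threads for @sched's CPT.  Threads are named
 * "kiblnd_sd_<cpt>_<tid>"; e.g. kiblnd_sd_00_01 is the second scheduler
 * thread on CPT 0.
 */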
static int kiblnd_start_schedulers(struct kib_sched_info *sched)
{
	int rc = 0;
	int nthrs;
	int i;

	if (sched->ibs_nthreads == 0) {
		if (*kiblnd_tunables.kib_nscheds > 0) {
			nthrs = sched->ibs_nthreads_max;
		} else {
			nthrs = cfs_cpt_weight(lnet_cpt_table(),
					       sched->ibs_cpt);
			nthrs = min(max(IBLND_N_SCHED, nthrs >> 1), nthrs);
			nthrs = min(IBLND_N_SCHED_HIGH, nthrs);
		}
	} else {
		LASSERT(sched->ibs_nthreads <= sched->ibs_nthreads_max);
		/* start one more thread if there is a new interface */
		nthrs = sched->ibs_nthreads < sched->ibs_nthreads_max;
	}

	for (i = 0; i < nthrs; i++) {
		long id;
		char name[20];

		id = KIB_THREAD_ID(sched->ibs_cpt, sched->ibs_nthreads + i);
		snprintf(name, sizeof(name), "kiblnd_sd_%02ld_%02ld",
			 KIB_THREAD_CPT(id), KIB_THREAD_TID(id));
		rc = kiblnd_thread_start(kiblnd_scheduler, (void *)id, name);
		if (rc == 0)
			continue;

		CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
		       sched->ibs_nthreads + i, sched->ibs_cpt, rc);
		break;
	}

	sched->ibs_nthreads += i;
	return rc;
}

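/*
 * Ensure every CPT used by @dev has scheduler threads running; for an
 * existing device, CPTs that already have threads are skipped.
 */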
static int kiblnd_dev_start_threads(kib_dev_t *dev, int newdev, __u32 *cpts,
				    int ncpts)
{
	int cpt;
	int rc;
	int i;

	for (i = 0; i < ncpts; i++) {
		struct kib_sched_info *sched;

		cpt = !cpts ? i : cpts[i];
		sched = kiblnd_data.kib_scheds[cpt];

		if (!newdev && sched->ibs_nthreads > 0)
			continue;

		rc = kiblnd_start_schedulers(kiblnd_data.kib_scheds[cpt]);
		if (rc != 0) {
			CERROR("Failed to start scheduler threads for %s\n",
			       dev->ibd_ifname);
			return rc;
		}
	}
	return 0;
}

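/*
 * Find an existing kib_dev_t for @ifname.  An exact match wins; failing
 * that, an IP-alias match is returned, so e.g. "ib0:1" matches the
 * device created for "ib0".
 */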
static kib_dev_t *kiblnd_dev_search(char *ifname)
{
	kib_dev_t *alias = NULL;
	kib_dev_t *dev;
	char *colon;
	char *colon2;

	colon = strchr(ifname, ':');
	list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
		if (strcmp(&dev->ibd_ifname[0], ifname) == 0)
			return dev;

		if (alias)
			continue;

		colon2 = strchr(dev->ibd_ifname, ':');
		if (colon)
			*colon = 0;
		if (colon2)
			*colon2 = 0;

		if (strcmp(&dev->ibd_ifname[0], ifname) == 0)
			alias = dev;

		if (colon)
			*colon = ':';
		if (colon2)
			*colon2 = ':';
	}
	return alias;
}

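/*
 * Bring up an o2ib NI: pick the IPoIB interface (from a 'networks='
 * entry such as "o2ib(ib0)", or the default-interface tunable), create
 * or reuse the kib_dev_t, then start scheduler threads and pools.
 */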
int kiblnd_startup(lnet_ni_t *ni)
{
	char *ifname;
	kib_dev_t *ibdev = NULL;
	kib_net_t *net;
	struct timespec64 tv;
	unsigned long flags;
	int rc;
	int newdev;

	LASSERT(ni->ni_lnd == &the_o2iblnd);

	if (kiblnd_data.kib_init == IBLND_INIT_NOTHING) {
		rc = kiblnd_base_startup();
		if (rc != 0)
			return rc;
	}

	LIBCFS_ALLOC(net, sizeof(*net));
	ni->ni_data = net;
	if (!net)
		goto net_failed;

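	/* the incarnation is wall-clock time in microseconds */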
	ktime_get_real_ts64(&tv);
	net->ibn_incarnation = tv.tv_sec * USEC_PER_SEC +
			       tv.tv_nsec / NSEC_PER_USEC;

	ni->ni_peertimeout = *kiblnd_tunables.kib_peertimeout;
	ni->ni_maxtxcredits = *kiblnd_tunables.kib_credits;
	ni->ni_peertxcredits = *kiblnd_tunables.kib_peertxcredits;
	ni->ni_peerrtrcredits = *kiblnd_tunables.kib_peerrtrcredits;

	if (ni->ni_interfaces[0]) {
		/* Use the IPoIB interface specified in 'networks=' */

		CLASSERT(LNET_MAX_INTERFACES > 1);
		if (ni->ni_interfaces[1]) {
			CERROR("Multiple interfaces not supported\n");
			goto failed;
		}

		ifname = ni->ni_interfaces[0];
	} else {
		ifname = *kiblnd_tunables.kib_default_ipif;
	}

	if (strlen(ifname) >= sizeof(ibdev->ibd_ifname)) {
		CERROR("IPoIB interface name too long: %s\n", ifname);
		goto failed;
	}

	ibdev = kiblnd_dev_search(ifname);

	newdev = !ibdev;
	/* hmm...create a kib_dev even for an alias */
	if (!ibdev || strcmp(&ibdev->ibd_ifname[0], ifname) != 0)
		ibdev = kiblnd_create_dev(ifname);

	if (!ibdev)
		goto failed;

	net->ibn_dev = ibdev;
	ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), ibdev->ibd_ifip);

	rc = kiblnd_dev_start_threads(ibdev, newdev,
				      ni->ni_cpts, ni->ni_ncpts);
	if (rc != 0)
		goto failed;

	rc = kiblnd_net_init_pools(net, ni->ni_cpts, ni->ni_ncpts);
	if (rc != 0) {
		CERROR("Failed to initialize NI pools: %d\n", rc);
		goto failed;
	}

	write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
	ibdev->ibd_nnets++;
	list_add_tail(&net->ibn_list, &ibdev->ibd_nets);
	write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

	net->ibn_init = IBLND_INIT_ALL;

	return 0;

 failed:
	if (!net->ibn_dev && ibdev)
		kiblnd_destroy_dev(ibdev);

 net_failed:
	kiblnd_shutdown(ni);

	CDEBUG(D_NET, "kiblnd_startup failed\n");
	return -ENETDOWN;
}

static void __exit kiblnd_module_fini(void)
{
	lnet_unregister_lnd(&the_o2iblnd);
}

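/*
 * Compile-time sanity checks: the largest on-wire message, including a
 * full set of RDMA fragment descriptors, must fit in IBLND_MSG_SIZE.
 */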
static int __init kiblnd_module_init(void)
{
	int rc;

	CLASSERT(sizeof(kib_msg_t) <= IBLND_MSG_SIZE);
	CLASSERT(offsetof(kib_msg_t,
			  ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
		 <= IBLND_MSG_SIZE);
	CLASSERT(offsetof(kib_msg_t,
			  ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
		 <= IBLND_MSG_SIZE);

	rc = kiblnd_tunables_init();
	if (rc != 0)
		return rc;

	lnet_register_lnd(&the_o2iblnd);

	return 0;
}

MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Kernel OpenIB gen2 LND v2.00");
MODULE_LICENSE("GPL");

module_init(kiblnd_module_init);
module_exit(kiblnd_module_fini);