staging: lustre: fix all NULL comparisons in LNet layer
drivers/staging/lustre/lnet/lnet/lib-move.c

/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/lnet/lib-move.c
 *
 * Data movement routines
 */

#define DEBUG_SUBSYSTEM S_LNET

#include "../../include/linux/lnet/lib-lnet.h"

static int local_nid_dist_zero = 1;
module_param(local_nid_dist_zero, int, 0444);
MODULE_PARM_DESC(local_nid_dist_zero, "Reserved");

int
lnet_fail_nid(lnet_nid_t nid, unsigned int threshold)
{
	lnet_test_peer_t *tp;
	struct list_head *el;
	struct list_head *next;
	struct list_head cull;

	LASSERT(the_lnet.ln_init);

	/* NB: use lnet_net_lock(0) to serialize operations on test peers */
	if (threshold != 0) {
		/* Adding a new entry */
		LIBCFS_ALLOC(tp, sizeof(*tp));
		if (!tp)
			return -ENOMEM;

		tp->tp_nid = nid;
		tp->tp_threshold = threshold;

		lnet_net_lock(0);
		list_add_tail(&tp->tp_list, &the_lnet.ln_test_peers);
		lnet_net_unlock(0);
		return 0;
	}

	/* removing entries */
	INIT_LIST_HEAD(&cull);

	lnet_net_lock(0);

	list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
		tp = list_entry(el, lnet_test_peer_t, tp_list);

		if (tp->tp_threshold == 0 ||	/* needs culling anyway */
		    nid == LNET_NID_ANY ||	/* removing all entries */
		    tp->tp_nid == nid) {	/* matched this one */
			list_del(&tp->tp_list);
			list_add(&tp->tp_list, &cull);
		}
	}

	lnet_net_unlock(0);

	while (!list_empty(&cull)) {
		tp = list_entry(cull.next, lnet_test_peer_t, tp_list);

		list_del(&tp->tp_list);
		LIBCFS_FREE(tp, sizeof(*tp));
	}
	return 0;
}

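/*
 * Usage sketch (hypothetical test-harness calls, for illustration only;
 * not part of this file): a nonzero threshold installs a drop rule that
 * fail_peer() below decrements per message, and a zero threshold clears
 * matching rules:
 *
 *	lnet_fail_nid(nid, 3);		  // drop the next 3 messages for 'nid'
 *	lnet_fail_nid(LNET_NID_ANY, 0);	  // remove every test-peer entry
 */
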
static int
fail_peer(lnet_nid_t nid, int outgoing)
{
	lnet_test_peer_t *tp;
	struct list_head *el;
	struct list_head *next;
	struct list_head cull;
	int fail = 0;

	INIT_LIST_HEAD(&cull);

	/* NB: use lnet_net_lock(0) to serialize operations on test peers */
	lnet_net_lock(0);

	list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
		tp = list_entry(el, lnet_test_peer_t, tp_list);

		if (tp->tp_threshold == 0) {
			/* zombie entry */
			if (outgoing) {
				/*
				 * only cull zombies on outgoing tests,
				 * since we may be at interrupt priority on
				 * incoming messages.
				 */
				list_del(&tp->tp_list);
				list_add(&tp->tp_list, &cull);
			}
			continue;
		}

		if (tp->tp_nid == LNET_NID_ANY ||	/* fail every peer */
		    nid == tp->tp_nid) {		/* fail this peer */
			fail = 1;

			if (tp->tp_threshold != LNET_MD_THRESH_INF) {
				tp->tp_threshold--;
				if (outgoing &&
				    tp->tp_threshold == 0) {
					/* see above */
					list_del(&tp->tp_list);
					list_add(&tp->tp_list, &cull);
				}
			}
			break;
		}
	}

	lnet_net_unlock(0);

	while (!list_empty(&cull)) {
		tp = list_entry(cull.next, lnet_test_peer_t, tp_list);
		list_del(&tp->tp_list);

		LIBCFS_FREE(tp, sizeof(*tp));
	}

	return fail;
}

unsigned int
lnet_iov_nob(unsigned int niov, struct kvec *iov)
{
	unsigned int nob = 0;

	while (niov-- > 0)
		nob += (iov++)->iov_len;

	return nob;
}
EXPORT_SYMBOL(lnet_iov_nob);

void
lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset,
		  unsigned int nsiov, struct kvec *siov, unsigned int soffset,
		  unsigned int nob)
{
	/* NB diov, siov are READ-ONLY */
	unsigned int this_nob;

	if (nob == 0)
		return;

	/* skip complete frags before 'doffset' */
	LASSERT(ndiov > 0);
	while (doffset >= diov->iov_len) {
		doffset -= diov->iov_len;
		diov++;
		ndiov--;
		LASSERT(ndiov > 0);
	}

	/* skip complete frags before 'soffset' */
	LASSERT(nsiov > 0);
	while (soffset >= siov->iov_len) {
		soffset -= siov->iov_len;
		siov++;
		nsiov--;
		LASSERT(nsiov > 0);
	}

	do {
		LASSERT(ndiov > 0);
		LASSERT(nsiov > 0);
		this_nob = min(diov->iov_len - doffset,
			       siov->iov_len - soffset);
		this_nob = min(this_nob, nob);

		memcpy((char *)diov->iov_base + doffset,
		       (char *)siov->iov_base + soffset, this_nob);
		nob -= this_nob;

		if (diov->iov_len > doffset + this_nob) {
			doffset += this_nob;
		} else {
			diov++;
			ndiov--;
			doffset = 0;
		}

		if (siov->iov_len > soffset + this_nob) {
			soffset += this_nob;
		} else {
			siov++;
			nsiov--;
			soffset = 0;
		}
	} while (nob > 0);
}
EXPORT_SYMBOL(lnet_copy_iov2iov);
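
/*
 * Example (sketch, hypothetical variables): copy 'len' bytes starting
 * 's_off' bytes into one fragment list, landing 'd_off' bytes into
 * another; the loop above advances both cursors a fragment at a time:
 *
 *	lnet_copy_iov2iov(dn, dvec, d_off, sn, svec, s_off, len);
 */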

int
lnet_extract_iov(int dst_niov, struct kvec *dst,
		 int src_niov, struct kvec *src,
		 unsigned int offset, unsigned int len)
{
	/*
	 * Initialise 'dst' to the subset of 'src' starting at 'offset',
	 * for exactly 'len' bytes, and return the number of entries.
	 * NB not destructive to 'src'
	 */
	unsigned int frag_len;
	unsigned int niov;

	if (len == 0)			/* no data => */
		return 0;		/* no frags */

	LASSERT(src_niov > 0);
	while (offset >= src->iov_len) {	/* skip initial frags */
		offset -= src->iov_len;
		src_niov--;
		src++;
		LASSERT(src_niov > 0);
	}

	niov = 1;
	for (;;) {
		LASSERT(src_niov > 0);
		LASSERT((int)niov <= dst_niov);

		frag_len = src->iov_len - offset;
		dst->iov_base = ((char *)src->iov_base) + offset;

		if (len <= frag_len) {
			dst->iov_len = len;
			return niov;
		}

		dst->iov_len = frag_len;

		len -= frag_len;
		dst++;
		src++;
		niov++;
		src_niov--;
		offset = 0;
	}
}
EXPORT_SYMBOL(lnet_extract_iov);

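/*
 * Example (sketch, hypothetical buffers): describe a 1 KiB window that
 * starts 512 bytes into an existing fragment list, without copying data:
 *
 *	struct kvec frags[LNET_MAX_IOV];
 *	int n = lnet_extract_iov(LNET_MAX_IOV, frags, src_niov, src,
 *				 512, 1024);
 *	// frags[0..n-1] now alias the selected byte range of 'src'
 */
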
unsigned int
lnet_kiov_nob(unsigned int niov, lnet_kiov_t *kiov)
{
	unsigned int nob = 0;

	while (niov-- > 0)
		nob += (kiov++)->kiov_len;

	return nob;
}
EXPORT_SYMBOL(lnet_kiov_nob);

void
lnet_copy_kiov2kiov(unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset,
		    unsigned int nsiov, lnet_kiov_t *siov, unsigned int soffset,
		    unsigned int nob)
{
	/* NB diov, siov are READ-ONLY */
	unsigned int this_nob;
	char *daddr = NULL;
	char *saddr = NULL;

	if (nob == 0)
		return;

	LASSERT(!in_interrupt());

	LASSERT(ndiov > 0);
	while (doffset >= diov->kiov_len) {
		doffset -= diov->kiov_len;
		diov++;
		ndiov--;
		LASSERT(ndiov > 0);
	}

	LASSERT(nsiov > 0);
	while (soffset >= siov->kiov_len) {
		soffset -= siov->kiov_len;
		siov++;
		nsiov--;
		LASSERT(nsiov > 0);
	}

	do {
		LASSERT(ndiov > 0);
		LASSERT(nsiov > 0);
		this_nob = min(diov->kiov_len - doffset,
			       siov->kiov_len - soffset);
		this_nob = min(this_nob, nob);

		if (!daddr)
			daddr = ((char *)kmap(diov->kiov_page)) +
				diov->kiov_offset + doffset;
		if (!saddr)
			saddr = ((char *)kmap(siov->kiov_page)) +
				siov->kiov_offset + soffset;

		/*
		 * Vanishing risk of kmap deadlock when mapping 2 pages.
		 * However, in practice at least one of the kiovs will be
		 * already-mapped kernel pages, for which map/unmap are no-ops.
		 */
		memcpy(daddr, saddr, this_nob);
		nob -= this_nob;

		if (diov->kiov_len > doffset + this_nob) {
			daddr += this_nob;
			doffset += this_nob;
		} else {
			kunmap(diov->kiov_page);
			daddr = NULL;
			diov++;
			ndiov--;
			doffset = 0;
		}

		if (siov->kiov_len > soffset + this_nob) {
			saddr += this_nob;
			soffset += this_nob;
		} else {
			kunmap(siov->kiov_page);
			saddr = NULL;
			siov++;
			nsiov--;
			soffset = 0;
		}
	} while (nob > 0);

	if (daddr)
		kunmap(diov->kiov_page);
	if (saddr)
		kunmap(siov->kiov_page);
}
EXPORT_SYMBOL(lnet_copy_kiov2kiov);
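
/*
 * Note on the copy loops above: each kmap() is paired with a kunmap()
 * either when the fragment is exhausted or after the final iteration;
 * on kernels without highmem, kmap() of a lowmem page is effectively
 * free, which is what the in-loop comment relies on.
 */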

void
lnet_copy_kiov2iov(unsigned int niov, struct kvec *iov, unsigned int iovoffset,
		   unsigned int nkiov, lnet_kiov_t *kiov,
		   unsigned int kiovoffset, unsigned int nob)
{
	/* NB iov, kiov are READ-ONLY */
	unsigned int this_nob;
	char *addr = NULL;

	if (nob == 0)
		return;

	LASSERT(!in_interrupt());

	LASSERT(niov > 0);
	while (iovoffset >= iov->iov_len) {
		iovoffset -= iov->iov_len;
		iov++;
		niov--;
		LASSERT(niov > 0);
	}

	LASSERT(nkiov > 0);
	while (kiovoffset >= kiov->kiov_len) {
		kiovoffset -= kiov->kiov_len;
		kiov++;
		nkiov--;
		LASSERT(nkiov > 0);
	}

	do {
		LASSERT(niov > 0);
		LASSERT(nkiov > 0);
		this_nob = min(iov->iov_len - iovoffset,
			       (__kernel_size_t) kiov->kiov_len - kiovoffset);
		this_nob = min(this_nob, nob);

		if (!addr)
			addr = ((char *)kmap(kiov->kiov_page)) +
				kiov->kiov_offset + kiovoffset;

		memcpy((char *)iov->iov_base + iovoffset, addr, this_nob);
		nob -= this_nob;

		if (iov->iov_len > iovoffset + this_nob) {
			iovoffset += this_nob;
		} else {
			iov++;
			niov--;
			iovoffset = 0;
		}

		if (kiov->kiov_len > kiovoffset + this_nob) {
			addr += this_nob;
			kiovoffset += this_nob;
		} else {
			kunmap(kiov->kiov_page);
			addr = NULL;
			kiov++;
			nkiov--;
			kiovoffset = 0;
		}

	} while (nob > 0);

	if (addr)
		kunmap(kiov->kiov_page);
}
EXPORT_SYMBOL(lnet_copy_kiov2iov);

void
lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov,
		   unsigned int kiovoffset, unsigned int niov,
		   struct kvec *iov, unsigned int iovoffset,
		   unsigned int nob)
{
	/* NB kiov, iov are READ-ONLY */
	unsigned int this_nob;
	char *addr = NULL;

	if (nob == 0)
		return;

	LASSERT(!in_interrupt());

	LASSERT(nkiov > 0);
	while (kiovoffset >= kiov->kiov_len) {
		kiovoffset -= kiov->kiov_len;
		kiov++;
		nkiov--;
		LASSERT(nkiov > 0);
	}

	LASSERT(niov > 0);
	while (iovoffset >= iov->iov_len) {
		iovoffset -= iov->iov_len;
		iov++;
		niov--;
		LASSERT(niov > 0);
	}

	do {
		LASSERT(nkiov > 0);
		LASSERT(niov > 0);
		this_nob = min((__kernel_size_t) kiov->kiov_len - kiovoffset,
			       iov->iov_len - iovoffset);
		this_nob = min(this_nob, nob);

		if (!addr)
			addr = ((char *)kmap(kiov->kiov_page)) +
				kiov->kiov_offset + kiovoffset;

		memcpy(addr, (char *)iov->iov_base + iovoffset, this_nob);
		nob -= this_nob;

		if (kiov->kiov_len > kiovoffset + this_nob) {
			addr += this_nob;
			kiovoffset += this_nob;
		} else {
			kunmap(kiov->kiov_page);
			addr = NULL;
			kiov++;
			nkiov--;
			kiovoffset = 0;
		}

		if (iov->iov_len > iovoffset + this_nob) {
			iovoffset += this_nob;
		} else {
			iov++;
			niov--;
			iovoffset = 0;
		}
	} while (nob > 0);

	if (addr)
		kunmap(kiov->kiov_page);
}
EXPORT_SYMBOL(lnet_copy_iov2kiov);

int
lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
		  int src_niov, lnet_kiov_t *src,
		  unsigned int offset, unsigned int len)
{
	/*
	 * Initialise 'dst' to the subset of 'src' starting at 'offset',
	 * for exactly 'len' bytes, and return the number of entries.
	 * NB not destructive to 'src'
	 */
	unsigned int frag_len;
	unsigned int niov;

	if (len == 0)			/* no data => */
		return 0;		/* no frags */

	LASSERT(src_niov > 0);
	while (offset >= src->kiov_len) {	/* skip initial frags */
		offset -= src->kiov_len;
		src_niov--;
		src++;
		LASSERT(src_niov > 0);
	}

	niov = 1;
	for (;;) {
		LASSERT(src_niov > 0);
		LASSERT((int)niov <= dst_niov);

		frag_len = src->kiov_len - offset;
		dst->kiov_page = src->kiov_page;
		dst->kiov_offset = src->kiov_offset + offset;

		if (len <= frag_len) {
			dst->kiov_len = len;
			LASSERT(dst->kiov_offset + dst->kiov_len
				<= PAGE_CACHE_SIZE);
			return niov;
		}

		dst->kiov_len = frag_len;
		LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_CACHE_SIZE);

		len -= frag_len;
		dst++;
		src++;
		niov++;
		src_niov--;
		offset = 0;
	}
}
EXPORT_SYMBOL(lnet_extract_kiov);

static void
lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
	     unsigned int offset, unsigned int mlen, unsigned int rlen)
{
	unsigned int niov = 0;
	struct kvec *iov = NULL;
	lnet_kiov_t *kiov = NULL;
	int rc;

	LASSERT(!in_interrupt());
	LASSERT(mlen == 0 || msg);

	if (msg) {
		LASSERT(msg->msg_receiving);
		LASSERT(!msg->msg_sending);
		LASSERT(rlen == msg->msg_len);
		LASSERT(mlen <= msg->msg_len);
		LASSERT(msg->msg_offset == offset);
		LASSERT(msg->msg_wanted == mlen);

		msg->msg_receiving = 0;

		if (mlen != 0) {
			niov = msg->msg_niov;
			iov = msg->msg_iov;
			kiov = msg->msg_kiov;

			LASSERT(niov > 0);
			LASSERT(!iov != !kiov);
		}
	}

	rc = ni->ni_lnd->lnd_recv(ni, private, msg, delayed,
				  niov, iov, kiov, offset, mlen, rlen);
	if (rc < 0)
		lnet_finalize(ni, msg, rc);
}

static void
lnet_setpayloadbuffer(lnet_msg_t *msg)
{
	lnet_libmd_t *md = msg->msg_md;

	LASSERT(msg->msg_len > 0);
	LASSERT(!msg->msg_routing);
	LASSERT(md);
	LASSERT(msg->msg_niov == 0);
	LASSERT(!msg->msg_iov);
	LASSERT(!msg->msg_kiov);

	msg->msg_niov = md->md_niov;
	if ((md->md_options & LNET_MD_KIOV) != 0)
		msg->msg_kiov = md->md_iov.kiov;
	else
		msg->msg_iov = md->md_iov.iov;
}

void
lnet_prep_send(lnet_msg_t *msg, int type, lnet_process_id_t target,
	       unsigned int offset, unsigned int len)
{
	msg->msg_type = type;
	msg->msg_target = target;
	msg->msg_len = len;
	msg->msg_offset = offset;

	if (len != 0)
		lnet_setpayloadbuffer(msg);

	memset(&msg->msg_hdr, 0, sizeof(msg->msg_hdr));
	msg->msg_hdr.type = cpu_to_le32(type);
	msg->msg_hdr.dest_nid = cpu_to_le64(target.nid);
	msg->msg_hdr.dest_pid = cpu_to_le32(target.pid);
	/* src_nid will be set later */
	msg->msg_hdr.src_pid = cpu_to_le32(the_lnet.ln_pid);
	msg->msg_hdr.payload_length = cpu_to_le32(len);
}

static void
lnet_ni_send(lnet_ni_t *ni, lnet_msg_t *msg)
{
	void *priv = msg->msg_private;
	int rc;

	LASSERT(!in_interrupt());
	LASSERT(LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND ||
		(msg->msg_txcredit && msg->msg_peertxcredit));

	rc = ni->ni_lnd->lnd_send(ni, priv, msg);
	if (rc < 0)
		lnet_finalize(ni, msg, rc);
}

static int
lnet_ni_eager_recv(lnet_ni_t *ni, lnet_msg_t *msg)
{
	int rc;

	LASSERT(!msg->msg_sending);
	LASSERT(msg->msg_receiving);
	LASSERT(!msg->msg_rx_ready_delay);
	LASSERT(ni->ni_lnd->lnd_eager_recv);

	msg->msg_rx_ready_delay = 1;
	rc = ni->ni_lnd->lnd_eager_recv(ni, msg->msg_private, msg,
					&msg->msg_private);
	if (rc != 0) {
		CERROR("recv from %s / send to %s aborted: eager_recv failed %d\n",
		       libcfs_nid2str(msg->msg_rxpeer->lp_nid),
		       libcfs_id2str(msg->msg_target), rc);
		LASSERT(rc < 0); /* required by my callers */
	}

	return rc;
}

/* NB: caller shall hold a ref on 'lp' as I'd drop lnet_net_lock */
static void
lnet_ni_query_locked(lnet_ni_t *ni, lnet_peer_t *lp)
{
	unsigned long last_alive = 0;

	LASSERT(lnet_peer_aliveness_enabled(lp));
	LASSERT(ni->ni_lnd->lnd_query);

	lnet_net_unlock(lp->lp_cpt);
	ni->ni_lnd->lnd_query(ni, lp->lp_nid, &last_alive);
	lnet_net_lock(lp->lp_cpt);

	lp->lp_last_query = cfs_time_current();

	if (last_alive != 0) /* NI has updated timestamp */
		lp->lp_last_alive = last_alive;
}

/* NB: always called with lnet_net_lock held */
static inline int
lnet_peer_is_alive(lnet_peer_t *lp, unsigned long now)
{
	int alive;
	unsigned long deadline;

	LASSERT(lnet_peer_aliveness_enabled(lp));

	/* Trust lnet_notify() if it has more recent aliveness news, but
	 * ignore the initial assumed death (see lnet_peers_start_down()).
	 */
	if (!lp->lp_alive && lp->lp_alive_count > 0 &&
	    cfs_time_aftereq(lp->lp_timestamp, lp->lp_last_alive))
		return 0;

	deadline = cfs_time_add(lp->lp_last_alive,
				cfs_time_seconds(lp->lp_ni->ni_peertimeout));
	alive = cfs_time_after(deadline, now);

	/* Update obsolete lp_alive except for routers assumed to be dead
	 * initially, because router checker would update aliveness in this
	 * case, and moreover lp_last_alive at peer creation is assumed.
	 */
	if (alive && !lp->lp_alive &&
	    !(lnet_isrouter(lp) && lp->lp_alive_count == 0))
		lnet_notify_locked(lp, 0, 1, lp->lp_last_alive);

	return alive;
}

/*
 * NB: returns 1 when alive, 0 when dead, negative when error;
 * may drop the lnet_net_lock
 */
static int
lnet_peer_alive_locked(lnet_peer_t *lp)
{
	unsigned long now = cfs_time_current();

	if (!lnet_peer_aliveness_enabled(lp))
		return -ENODEV;

	if (lnet_peer_is_alive(lp, now))
		return 1;

	/*
	 * Peer appears dead, but we should avoid frequent NI queries (at
	 * most once per lnet_queryinterval seconds).
	 */
	if (lp->lp_last_query != 0) {
		static const int lnet_queryinterval = 1;

		unsigned long next_query =
			cfs_time_add(lp->lp_last_query,
				     cfs_time_seconds(lnet_queryinterval));

		if (time_before(now, next_query)) {
			if (lp->lp_alive)
				CWARN("Unexpected aliveness of peer %s: %d < %d (%d/%d)\n",
				      libcfs_nid2str(lp->lp_nid),
				      (int)now, (int)next_query,
				      lnet_queryinterval,
				      lp->lp_ni->ni_peertimeout);
			return 0;
		}
	}

	/* query NI for latest aliveness news */
	lnet_ni_query_locked(lp->lp_ni, lp);

	if (lnet_peer_is_alive(lp, now))
		return 1;

	lnet_notify_locked(lp, 0, 0, lp->lp_last_alive);
	return 0;
}

/**
 * \param msg The message to be sent.
 * \param do_send True if lnet_ni_send() should be called in this function.
 *	  lnet_send() is going to lnet_net_unlock immediately after this, so
 *	  it sets do_send FALSE and I don't do the unlock/send/lock bit.
 *
 * \retval 0 If \a msg sent or OK to send.
 * \retval EAGAIN If \a msg blocked for credit.
 * \retval EHOSTUNREACH If the next hop of the message appears dead.
 * \retval ECANCELED If the MD of the message has been unlinked.
 */
static int
lnet_post_send_locked(lnet_msg_t *msg, int do_send)
{
	lnet_peer_t *lp = msg->msg_txpeer;
	lnet_ni_t *ni = lp->lp_ni;
	int cpt = msg->msg_tx_cpt;
	struct lnet_tx_queue *tq = ni->ni_tx_queues[cpt];

	/* non-lnet_send() callers have checked before */
	LASSERT(!do_send || msg->msg_tx_delayed);
	LASSERT(!msg->msg_receiving);
	LASSERT(msg->msg_tx_committed);

	/* NB 'lp' is always the next hop */
	if ((msg->msg_target.pid & LNET_PID_USERFLAG) == 0 &&
	    lnet_peer_alive_locked(lp) == 0) {
		the_lnet.ln_counters[cpt]->drop_count++;
		the_lnet.ln_counters[cpt]->drop_length += msg->msg_len;
		lnet_net_unlock(cpt);

		CNETERR("Dropping message for %s: peer not alive\n",
			libcfs_id2str(msg->msg_target));
		if (do_send)
			lnet_finalize(ni, msg, -EHOSTUNREACH);

		lnet_net_lock(cpt);
		return EHOSTUNREACH;
	}

	if (msg->msg_md &&
	    (msg->msg_md->md_flags & LNET_MD_FLAG_ABORTED) != 0) {
		lnet_net_unlock(cpt);

		CNETERR("Aborting message for %s: LNetM[DE]Unlink() already called on the MD/ME.\n",
			libcfs_id2str(msg->msg_target));
		if (do_send)
			lnet_finalize(ni, msg, -ECANCELED);

		lnet_net_lock(cpt);
		return ECANCELED;
	}

	if (!msg->msg_peertxcredit) {
		LASSERT((lp->lp_txcredits < 0) ==
			!list_empty(&lp->lp_txq));

		msg->msg_peertxcredit = 1;
		lp->lp_txqnob += msg->msg_len + sizeof(lnet_hdr_t);
		lp->lp_txcredits--;

		if (lp->lp_txcredits < lp->lp_mintxcredits)
			lp->lp_mintxcredits = lp->lp_txcredits;

		if (lp->lp_txcredits < 0) {
			msg->msg_tx_delayed = 1;
			list_add_tail(&msg->msg_list, &lp->lp_txq);
			return EAGAIN;
		}
	}

	if (!msg->msg_txcredit) {
		LASSERT((tq->tq_credits < 0) ==
			!list_empty(&tq->tq_delayed));

		msg->msg_txcredit = 1;
		tq->tq_credits--;

		if (tq->tq_credits < tq->tq_credits_min)
			tq->tq_credits_min = tq->tq_credits;

		if (tq->tq_credits < 0) {
			msg->msg_tx_delayed = 1;
			list_add_tail(&msg->msg_list, &tq->tq_delayed);
			return EAGAIN;
		}
	}

	if (do_send) {
		lnet_net_unlock(cpt);
		lnet_ni_send(ni, msg);
		lnet_net_lock(cpt);
	}
	return 0;
}

static lnet_rtrbufpool_t *
lnet_msg2bufpool(lnet_msg_t *msg)
{
	lnet_rtrbufpool_t *rbp;
	int cpt;

	LASSERT(msg->msg_rx_committed);

	cpt = msg->msg_rx_cpt;
	rbp = &the_lnet.ln_rtrpools[cpt][0];

	LASSERT(msg->msg_len <= LNET_MTU);
	while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_CACHE_SIZE) {
		rbp++;
		LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]);
	}

	return rbp;
}

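/*
 * Pool selection above walks the LNET_NRBPOOLS size tiers in order and
 * picks the first whose buffers (rbp_npages pages each) can hold the
 * message; msg_len <= LNET_MTU guarantees the walk terminates inside
 * the array.
 */
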
static int
lnet_post_routed_recv_locked(lnet_msg_t *msg, int do_recv)
{
	/*
	 * lnet_parse is going to lnet_net_unlock immediately after this, so it
	 * sets do_recv FALSE and I don't do the unlock/send/lock bit. I
	 * return EAGAIN if msg blocked and 0 if received or OK to receive
	 */
	lnet_peer_t *lp = msg->msg_rxpeer;
	lnet_rtrbufpool_t *rbp;
	lnet_rtrbuf_t *rb;

	LASSERT(!msg->msg_iov);
	LASSERT(!msg->msg_kiov);
	LASSERT(msg->msg_niov == 0);
	LASSERT(msg->msg_routing);
	LASSERT(msg->msg_receiving);
	LASSERT(!msg->msg_sending);

	/* non-lnet_parse callers only receive delayed messages */
	LASSERT(!do_recv || msg->msg_rx_delayed);

	if (!msg->msg_peerrtrcredit) {
		LASSERT((lp->lp_rtrcredits < 0) ==
			!list_empty(&lp->lp_rtrq));

		msg->msg_peerrtrcredit = 1;
		lp->lp_rtrcredits--;
		if (lp->lp_rtrcredits < lp->lp_minrtrcredits)
			lp->lp_minrtrcredits = lp->lp_rtrcredits;

		if (lp->lp_rtrcredits < 0) {
			/* must have checked eager_recv before here */
			LASSERT(msg->msg_rx_ready_delay);
			msg->msg_rx_delayed = 1;
			list_add_tail(&msg->msg_list, &lp->lp_rtrq);
			return EAGAIN;
		}
	}

	rbp = lnet_msg2bufpool(msg);

	if (!msg->msg_rtrcredit) {
		LASSERT((rbp->rbp_credits < 0) ==
			!list_empty(&rbp->rbp_msgs));

		msg->msg_rtrcredit = 1;
		rbp->rbp_credits--;
		if (rbp->rbp_credits < rbp->rbp_mincredits)
			rbp->rbp_mincredits = rbp->rbp_credits;

		if (rbp->rbp_credits < 0) {
			/* must have checked eager_recv before here */
			LASSERT(msg->msg_rx_ready_delay);
			msg->msg_rx_delayed = 1;
			list_add_tail(&msg->msg_list, &rbp->rbp_msgs);
			return EAGAIN;
		}
	}

	LASSERT(!list_empty(&rbp->rbp_bufs));
	rb = list_entry(rbp->rbp_bufs.next, lnet_rtrbuf_t, rb_list);
	list_del(&rb->rb_list);

	msg->msg_niov = rbp->rbp_npages;
	msg->msg_kiov = &rb->rb_kiov[0];

	if (do_recv) {
		int cpt = msg->msg_rx_cpt;

		lnet_net_unlock(cpt);
		lnet_ni_recv(lp->lp_ni, msg->msg_private, msg, 1,
			     0, msg->msg_len, msg->msg_len);
		lnet_net_lock(cpt);
	}
	return 0;
}

void
lnet_return_tx_credits_locked(lnet_msg_t *msg)
{
	lnet_peer_t *txpeer = msg->msg_txpeer;
	lnet_msg_t *msg2;

	if (msg->msg_txcredit) {
		struct lnet_ni *ni = txpeer->lp_ni;
		struct lnet_tx_queue *tq = ni->ni_tx_queues[msg->msg_tx_cpt];

		/* give back NI txcredits */
		msg->msg_txcredit = 0;

		LASSERT((tq->tq_credits < 0) ==
			!list_empty(&tq->tq_delayed));

		tq->tq_credits++;
		if (tq->tq_credits <= 0) {
			msg2 = list_entry(tq->tq_delayed.next,
					  lnet_msg_t, msg_list);
			list_del(&msg2->msg_list);

			LASSERT(msg2->msg_txpeer->lp_ni == ni);
			LASSERT(msg2->msg_tx_delayed);

			(void) lnet_post_send_locked(msg2, 1);
		}
	}

	if (msg->msg_peertxcredit) {
		/* give back peer txcredits */
		msg->msg_peertxcredit = 0;

		LASSERT((txpeer->lp_txcredits < 0) ==
			!list_empty(&txpeer->lp_txq));

		txpeer->lp_txqnob -= msg->msg_len + sizeof(lnet_hdr_t);
		LASSERT(txpeer->lp_txqnob >= 0);

		txpeer->lp_txcredits++;
		if (txpeer->lp_txcredits <= 0) {
			msg2 = list_entry(txpeer->lp_txq.next,
					  lnet_msg_t, msg_list);
			list_del(&msg2->msg_list);

			LASSERT(msg2->msg_txpeer == txpeer);
			LASSERT(msg2->msg_tx_delayed);

			(void) lnet_post_send_locked(msg2, 1);
		}
	}

	if (txpeer) {
		msg->msg_txpeer = NULL;
		lnet_peer_decref_locked(txpeer);
	}
}

void
lnet_return_rx_credits_locked(lnet_msg_t *msg)
{
	lnet_peer_t *rxpeer = msg->msg_rxpeer;
	lnet_msg_t *msg2;

	if (msg->msg_rtrcredit) {
		/* give back global router credits */
		lnet_rtrbuf_t *rb;
		lnet_rtrbufpool_t *rbp;

		/*
		 * NB If a msg ever blocks for a buffer in rbp_msgs, it stays
		 * there until it gets one allocated, or aborts the wait
		 * itself
		 */
		LASSERT(msg->msg_kiov);

		rb = list_entry(msg->msg_kiov, lnet_rtrbuf_t, rb_kiov[0]);
		rbp = rb->rb_pool;
		LASSERT(rbp == lnet_msg2bufpool(msg));

		msg->msg_kiov = NULL;
		msg->msg_rtrcredit = 0;

		LASSERT((rbp->rbp_credits < 0) ==
			!list_empty(&rbp->rbp_msgs));
		LASSERT((rbp->rbp_credits > 0) ==
			!list_empty(&rbp->rbp_bufs));

		list_add(&rb->rb_list, &rbp->rbp_bufs);
		rbp->rbp_credits++;
		if (rbp->rbp_credits <= 0) {
			msg2 = list_entry(rbp->rbp_msgs.next,
					  lnet_msg_t, msg_list);
			list_del(&msg2->msg_list);

			(void) lnet_post_routed_recv_locked(msg2, 1);
		}
	}

	if (msg->msg_peerrtrcredit) {
		/* give back peer router credits */
		msg->msg_peerrtrcredit = 0;

		LASSERT((rxpeer->lp_rtrcredits < 0) ==
			!list_empty(&rxpeer->lp_rtrq));

		rxpeer->lp_rtrcredits++;
		if (rxpeer->lp_rtrcredits <= 0) {
			msg2 = list_entry(rxpeer->lp_rtrq.next,
					  lnet_msg_t, msg_list);
			list_del(&msg2->msg_list);

			(void) lnet_post_routed_recv_locked(msg2, 1);
		}
	}
	if (rxpeer) {
		msg->msg_rxpeer = NULL;
		lnet_peer_decref_locked(rxpeer);
	}
}

static int
lnet_compare_routes(lnet_route_t *r1, lnet_route_t *r2)
{
	lnet_peer_t *p1 = r1->lr_gateway;
	lnet_peer_t *p2 = r2->lr_gateway;

	if (r1->lr_priority < r2->lr_priority)
		return 1;

	if (r1->lr_priority > r2->lr_priority)
		return -1;

	if (r1->lr_hops < r2->lr_hops)
		return 1;

	if (r1->lr_hops > r2->lr_hops)
		return -1;

	if (p1->lp_txqnob < p2->lp_txqnob)
		return 1;

	if (p1->lp_txqnob > p2->lp_txqnob)
		return -1;

	if (p1->lp_txcredits > p2->lp_txcredits)
		return 1;

	if (p1->lp_txcredits < p2->lp_txcredits)
		return -1;

	if (r1->lr_seq - r2->lr_seq <= 0)
		return 1;

	return -1;
}

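/*
 * Route preference above, strongest to weakest tie-breaker: lower
 * lr_priority, fewer hops, smaller gateway tx queue (lp_txqnob), more
 * tx credits, and finally the older lr_seq for round-robin rotation.
 */
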
static lnet_peer_t *
lnet_find_route_locked(lnet_ni_t *ni, lnet_nid_t target, lnet_nid_t rtr_nid)
{
	lnet_remotenet_t *rnet;
	lnet_route_t *rtr;
	lnet_route_t *rtr_best;
	lnet_route_t *rtr_last;
	struct lnet_peer *lp_best;
	struct lnet_peer *lp;
	int rc;

	/*
	 * If @rtr_nid is not LNET_NID_ANY, return the gateway with
	 * rtr_nid nid, otherwise find the best gateway I can use
	 */
	rnet = lnet_find_net_locked(LNET_NIDNET(target));
	if (!rnet)
		return NULL;

	lp_best = NULL;
	rtr_best = NULL;
	rtr_last = NULL;
	list_for_each_entry(rtr, &rnet->lrn_routes, lr_list) {
		lp = rtr->lr_gateway;

		if (!lp->lp_alive ||	/* gateway is down */
		    ((lp->lp_ping_feats & LNET_PING_FEAT_NI_STATUS) != 0 &&
		     rtr->lr_downis != 0)) /* NI to target is down */
			continue;

		if (ni && lp->lp_ni != ni)
			continue;

		if (lp->lp_nid == rtr_nid) /* it's pre-determined router */
			return lp;

		if (!lp_best) {
			rtr_best = rtr;
			rtr_last = rtr;
			lp_best = lp;
			continue;
		}

		/* no protection on below fields, but it's harmless */
		if (rtr_last->lr_seq - rtr->lr_seq < 0)
			rtr_last = rtr;

		rc = lnet_compare_routes(rtr, rtr_best);
		if (rc < 0)
			continue;

		rtr_best = rtr;
		lp_best = lp;
	}

	/*
	 * set sequence number on the best router to the latest sequence + 1
	 * so we can round-robin all routers; it's racy and inaccurate, but
	 * harmless and functional
	 */
	if (rtr_best)
		rtr_best->lr_seq = rtr_last->lr_seq + 1;
	return lp_best;
}

int
lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid)
{
	lnet_nid_t dst_nid = msg->msg_target.nid;
	struct lnet_ni *src_ni;
	struct lnet_ni *local_ni;
	struct lnet_peer *lp;
	int cpt;
	int cpt2;
	int rc;

	/*
	 * NB: rtr_nid is set to LNET_NID_ANY for all current use-cases,
	 * but we might want to use pre-determined router for ACK/REPLY
	 * in the future
	 */
	/* NB: ni == interface pre-determined (ACK/REPLY) */
	LASSERT(!msg->msg_txpeer);
	LASSERT(!msg->msg_sending);
	LASSERT(!msg->msg_target_is_router);
	LASSERT(!msg->msg_receiving);

	msg->msg_sending = 1;

	LASSERT(!msg->msg_tx_committed);
	cpt = lnet_cpt_of_nid(rtr_nid == LNET_NID_ANY ? dst_nid : rtr_nid);
 again:
	lnet_net_lock(cpt);

	if (the_lnet.ln_shutdown) {
		lnet_net_unlock(cpt);
		return -ESHUTDOWN;
	}

	if (src_nid == LNET_NID_ANY) {
		src_ni = NULL;
	} else {
		src_ni = lnet_nid2ni_locked(src_nid, cpt);
		if (!src_ni) {
			lnet_net_unlock(cpt);
			LCONSOLE_WARN("Can't send to %s: src %s is not a local nid\n",
				      libcfs_nid2str(dst_nid),
				      libcfs_nid2str(src_nid));
			return -EINVAL;
		}
		LASSERT(!msg->msg_routing);
	}

	/* Is this for someone on a local network? */
	local_ni = lnet_net2ni_locked(LNET_NIDNET(dst_nid), cpt);

	if (local_ni) {
		if (!src_ni) {
			src_ni = local_ni;
			src_nid = src_ni->ni_nid;
		} else if (src_ni == local_ni) {
			lnet_ni_decref_locked(local_ni, cpt);
		} else {
			lnet_ni_decref_locked(local_ni, cpt);
			lnet_ni_decref_locked(src_ni, cpt);
			lnet_net_unlock(cpt);
			LCONSOLE_WARN("No route to %s from %s\n",
				      libcfs_nid2str(dst_nid),
				      libcfs_nid2str(src_nid));
			return -EINVAL;
		}

		LASSERT(src_nid != LNET_NID_ANY);
		lnet_msg_commit(msg, cpt);

		if (!msg->msg_routing)
			msg->msg_hdr.src_nid = cpu_to_le64(src_nid);

		if (src_ni == the_lnet.ln_loni) {
			/* No send credit hassles with LOLND */
			lnet_net_unlock(cpt);
			lnet_ni_send(src_ni, msg);

			lnet_net_lock(cpt);
			lnet_ni_decref_locked(src_ni, cpt);
			lnet_net_unlock(cpt);
			return 0;
		}

		rc = lnet_nid2peer_locked(&lp, dst_nid, cpt);
		/* lp has ref on src_ni; lose mine */
		lnet_ni_decref_locked(src_ni, cpt);
		if (rc != 0) {
			lnet_net_unlock(cpt);
			LCONSOLE_WARN("Error %d finding peer %s\n", rc,
				      libcfs_nid2str(dst_nid));
			/* ENOMEM or shutting down */
			return rc;
		}
		LASSERT(lp->lp_ni == src_ni);
	} else {
		/* sending to a remote network */
		lp = lnet_find_route_locked(src_ni, dst_nid, rtr_nid);
		if (!lp) {
			if (src_ni)
				lnet_ni_decref_locked(src_ni, cpt);
			lnet_net_unlock(cpt);

			LCONSOLE_WARN("No route to %s via %s (all routers down)\n",
				      libcfs_id2str(msg->msg_target),
				      libcfs_nid2str(src_nid));
			return -EHOSTUNREACH;
		}

		/*
		 * rtr_nid is LNET_NID_ANY or NID of pre-determined router,
		 * it's possible that rtr_nid isn't LNET_NID_ANY and lp isn't
		 * the pre-determined router; this can happen if the router
		 * table was changed while we had released the lock
		 */
		if (rtr_nid != lp->lp_nid) {
			cpt2 = lnet_cpt_of_nid_locked(lp->lp_nid);
			if (cpt2 != cpt) {
				if (src_ni)
					lnet_ni_decref_locked(src_ni, cpt);
				lnet_net_unlock(cpt);

				rtr_nid = lp->lp_nid;
				cpt = cpt2;
				goto again;
			}
		}

		CDEBUG(D_NET, "Best route to %s via %s for %s %d\n",
		       libcfs_nid2str(dst_nid), libcfs_nid2str(lp->lp_nid),
		       lnet_msgtyp2str(msg->msg_type), msg->msg_len);

		if (!src_ni) {
			src_ni = lp->lp_ni;
			src_nid = src_ni->ni_nid;
		} else {
			LASSERT(src_ni == lp->lp_ni);
			lnet_ni_decref_locked(src_ni, cpt);
		}

		lnet_peer_addref_locked(lp);

		LASSERT(src_nid != LNET_NID_ANY);
		lnet_msg_commit(msg, cpt);

		if (!msg->msg_routing) {
			/* I'm the source and now I know which NI to send on */
			msg->msg_hdr.src_nid = cpu_to_le64(src_nid);
		}

		msg->msg_target_is_router = 1;
		msg->msg_target.nid = lp->lp_nid;
		msg->msg_target.pid = LUSTRE_SRV_LNET_PID;
	}

	/* 'lp' is our best choice of peer */

	LASSERT(!msg->msg_peertxcredit);
	LASSERT(!msg->msg_txcredit);
	LASSERT(!msg->msg_txpeer);

	msg->msg_txpeer = lp; /* msg takes my ref on lp */

	rc = lnet_post_send_locked(msg, 0);
	lnet_net_unlock(cpt);

	if (rc == EHOSTUNREACH || rc == ECANCELED)
		return -rc;

	if (rc == 0)
		lnet_ni_send(src_ni, msg);

	return 0; /* rc == 0 or EAGAIN */
}
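
/*
 * Return-value convention for lnet_send(): 0 means the message was handed
 * to the LND or queued for credits; the positive EHOSTUNREACH/ECANCELED
 * codes from lnet_post_send_locked() are negated before being returned;
 * any other negative value is a setup failure (bad source NID, no route,
 * shutdown in progress).
 */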

static void
lnet_drop_message(lnet_ni_t *ni, int cpt, void *private, unsigned int nob)
{
	lnet_net_lock(cpt);
	the_lnet.ln_counters[cpt]->drop_count++;
	the_lnet.ln_counters[cpt]->drop_length += nob;
	lnet_net_unlock(cpt);

	lnet_ni_recv(ni, private, NULL, 0, 0, 0, nob);
}

static void
lnet_recv_put(lnet_ni_t *ni, lnet_msg_t *msg)
{
	lnet_hdr_t *hdr = &msg->msg_hdr;

	if (msg->msg_wanted != 0)
		lnet_setpayloadbuffer(msg);

	lnet_build_msg_event(msg, LNET_EVENT_PUT);

	/*
	 * Must I ACK? If so I'll grab the ack_wmd out of the header and put
	 * it back into the ACK during lnet_finalize()
	 */
	msg->msg_ack = (!lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) &&
			(msg->msg_md->md_options & LNET_MD_ACK_DISABLE) == 0);

	lnet_ni_recv(ni, msg->msg_private, msg, msg->msg_rx_delayed,
		     msg->msg_offset, msg->msg_wanted, hdr->payload_length);
}

static int
lnet_parse_put(lnet_ni_t *ni, lnet_msg_t *msg)
{
	lnet_hdr_t *hdr = &msg->msg_hdr;
	struct lnet_match_info info;
	int rc;

	/* Convert put fields to host byte order */
	hdr->msg.put.match_bits = le64_to_cpu(hdr->msg.put.match_bits);
	hdr->msg.put.ptl_index = le32_to_cpu(hdr->msg.put.ptl_index);
	hdr->msg.put.offset = le32_to_cpu(hdr->msg.put.offset);

	info.mi_id.nid = hdr->src_nid;
	info.mi_id.pid = hdr->src_pid;
	info.mi_opc = LNET_MD_OP_PUT;
	info.mi_portal = hdr->msg.put.ptl_index;
	info.mi_rlength = hdr->payload_length;
	info.mi_roffset = hdr->msg.put.offset;
	info.mi_mbits = hdr->msg.put.match_bits;

	msg->msg_rx_ready_delay = !ni->ni_lnd->lnd_eager_recv;

 again:
	rc = lnet_ptl_match_md(&info, msg);
	switch (rc) {
	default:
		LBUG();

	case LNET_MATCHMD_OK:
		lnet_recv_put(ni, msg);
		return 0;

	case LNET_MATCHMD_NONE:
		if (msg->msg_rx_delayed) /* attached on delayed list */
			return 0;

		rc = lnet_ni_eager_recv(ni, msg);
		if (rc == 0)
			goto again;
		/* fall through */

	case LNET_MATCHMD_DROP:
		CNETERR("Dropping PUT from %s portal %d match %llu offset %d length %d: %d\n",
			libcfs_id2str(info.mi_id), info.mi_portal,
			info.mi_mbits, info.mi_roffset, info.mi_rlength, rc);

		return ENOENT; /* +ve: OK but no match */
	}
}

static int
lnet_parse_get(lnet_ni_t *ni, lnet_msg_t *msg, int rdma_get)
{
	struct lnet_match_info info;
	lnet_hdr_t *hdr = &msg->msg_hdr;
	lnet_handle_wire_t reply_wmd;
	int rc;

	/* Convert get fields to host byte order */
	hdr->msg.get.match_bits = le64_to_cpu(hdr->msg.get.match_bits);
	hdr->msg.get.ptl_index = le32_to_cpu(hdr->msg.get.ptl_index);
	hdr->msg.get.sink_length = le32_to_cpu(hdr->msg.get.sink_length);
	hdr->msg.get.src_offset = le32_to_cpu(hdr->msg.get.src_offset);

	info.mi_id.nid = hdr->src_nid;
	info.mi_id.pid = hdr->src_pid;
	info.mi_opc = LNET_MD_OP_GET;
	info.mi_portal = hdr->msg.get.ptl_index;
	info.mi_rlength = hdr->msg.get.sink_length;
	info.mi_roffset = hdr->msg.get.src_offset;
	info.mi_mbits = hdr->msg.get.match_bits;

	rc = lnet_ptl_match_md(&info, msg);
	if (rc == LNET_MATCHMD_DROP) {
		CNETERR("Dropping GET from %s portal %d match %llu offset %d length %d\n",
			libcfs_id2str(info.mi_id), info.mi_portal,
			info.mi_mbits, info.mi_roffset, info.mi_rlength);
		return ENOENT; /* +ve: OK but no match */
	}

	LASSERT(rc == LNET_MATCHMD_OK);

	lnet_build_msg_event(msg, LNET_EVENT_GET);

	reply_wmd = hdr->msg.get.return_wmd;

	lnet_prep_send(msg, LNET_MSG_REPLY, info.mi_id,
		       msg->msg_offset, msg->msg_wanted);

	msg->msg_hdr.msg.reply.dst_wmd = reply_wmd;

	if (rdma_get) {
		/* The LND completes the REPLY from her recv procedure */
		lnet_ni_recv(ni, msg->msg_private, msg, 0,
			     msg->msg_offset, msg->msg_len, msg->msg_len);
		return 0;
	}

	lnet_ni_recv(ni, msg->msg_private, NULL, 0, 0, 0, 0);
	msg->msg_receiving = 0;

	rc = lnet_send(ni->ni_nid, msg, LNET_NID_ANY);
	if (rc < 0) {
		/* didn't get as far as lnet_ni_send() */
		CERROR("%s: Unable to send REPLY for GET from %s: %d\n",
		       libcfs_nid2str(ni->ni_nid),
		       libcfs_id2str(info.mi_id), rc);

		lnet_finalize(ni, msg, rc);
	}

	return 0;
}

static int
lnet_parse_reply(lnet_ni_t *ni, lnet_msg_t *msg)
{
	void *private = msg->msg_private;
	lnet_hdr_t *hdr = &msg->msg_hdr;
	lnet_process_id_t src = {0};
	lnet_libmd_t *md;
	int rlength;
	int mlength;
	int cpt;

	cpt = lnet_cpt_of_cookie(hdr->msg.reply.dst_wmd.wh_object_cookie);
	lnet_res_lock(cpt);

	src.nid = hdr->src_nid;
	src.pid = hdr->src_pid;

	/* NB handles only looked up by creator (no flips) */
	md = lnet_wire_handle2md(&hdr->msg.reply.dst_wmd);
	if (!md || md->md_threshold == 0 || md->md_me) {
		CNETERR("%s: Dropping REPLY from %s for %s MD %#llx.%#llx\n",
			libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
			!md ? "invalid" : "inactive",
			hdr->msg.reply.dst_wmd.wh_interface_cookie,
			hdr->msg.reply.dst_wmd.wh_object_cookie);
		if (md && md->md_me)
			CERROR("REPLY MD also attached to portal %d\n",
			       md->md_me->me_portal);

		lnet_res_unlock(cpt);
		return ENOENT; /* +ve: OK but no match */
	}

	LASSERT(md->md_offset == 0);

	rlength = hdr->payload_length;
	mlength = min_t(uint, rlength, md->md_length);

	if (mlength < rlength &&
	    (md->md_options & LNET_MD_TRUNCATE) == 0) {
		CNETERR("%s: Dropping REPLY from %s length %d for MD %#llx would overflow (%d)\n",
			libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
			rlength, hdr->msg.reply.dst_wmd.wh_object_cookie,
			mlength);
		lnet_res_unlock(cpt);
		return ENOENT; /* +ve: OK but no match */
	}

	CDEBUG(D_NET, "%s: Reply from %s of length %d/%d into md %#llx\n",
	       libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
	       mlength, rlength, hdr->msg.reply.dst_wmd.wh_object_cookie);

	lnet_msg_attach_md(msg, md, 0, mlength);

	if (mlength != 0)
		lnet_setpayloadbuffer(msg);

	lnet_res_unlock(cpt);

	lnet_build_msg_event(msg, LNET_EVENT_REPLY);

	lnet_ni_recv(ni, private, msg, 0, 0, mlength, rlength);
	return 0;
}

static int
lnet_parse_ack(lnet_ni_t *ni, lnet_msg_t *msg)
{
	lnet_hdr_t *hdr = &msg->msg_hdr;
	lnet_process_id_t src = {0};
	lnet_libmd_t *md;
	int cpt;

	src.nid = hdr->src_nid;
	src.pid = hdr->src_pid;

	/* Convert ack fields to host byte order */
	hdr->msg.ack.match_bits = le64_to_cpu(hdr->msg.ack.match_bits);
	hdr->msg.ack.mlength = le32_to_cpu(hdr->msg.ack.mlength);

	cpt = lnet_cpt_of_cookie(hdr->msg.ack.dst_wmd.wh_object_cookie);
	lnet_res_lock(cpt);

	/* NB handles only looked up by creator (no flips) */
	md = lnet_wire_handle2md(&hdr->msg.ack.dst_wmd);
	if (!md || md->md_threshold == 0 || md->md_me) {
		/* Don't moan; this is expected */
		CDEBUG(D_NET,
		       "%s: Dropping ACK from %s to %s MD %#llx.%#llx\n",
		       libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
		       !md ? "invalid" : "inactive",
		       hdr->msg.ack.dst_wmd.wh_interface_cookie,
		       hdr->msg.ack.dst_wmd.wh_object_cookie);
		if (md && md->md_me)
			CERROR("Source MD also attached to portal %d\n",
			       md->md_me->me_portal);

		lnet_res_unlock(cpt);
		return ENOENT; /* +ve! */
	}

	CDEBUG(D_NET, "%s: ACK from %s into md %#llx\n",
	       libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
	       hdr->msg.ack.dst_wmd.wh_object_cookie);

	lnet_msg_attach_md(msg, md, 0, 0);

	lnet_res_unlock(cpt);

	lnet_build_msg_event(msg, LNET_EVENT_ACK);

	lnet_ni_recv(ni, msg->msg_private, msg, 0, 0, 0, msg->msg_len);
	return 0;
}

static int
lnet_parse_forward_locked(lnet_ni_t *ni, lnet_msg_t *msg)
{
	int rc = 0;

	if (msg->msg_rxpeer->lp_rtrcredits <= 0 ||
	    lnet_msg2bufpool(msg)->rbp_credits <= 0) {
		if (!ni->ni_lnd->lnd_eager_recv) {
			msg->msg_rx_ready_delay = 1;
		} else {
			lnet_net_unlock(msg->msg_rx_cpt);
			rc = lnet_ni_eager_recv(ni, msg);
			lnet_net_lock(msg->msg_rx_cpt);
		}
	}

	if (rc == 0)
		rc = lnet_post_routed_recv_locked(msg, 0);
	return rc;
}

char *
lnet_msgtyp2str(int type)
{
	switch (type) {
	case LNET_MSG_ACK:
		return "ACK";
	case LNET_MSG_PUT:
		return "PUT";
	case LNET_MSG_GET:
		return "GET";
	case LNET_MSG_REPLY:
		return "REPLY";
	case LNET_MSG_HELLO:
		return "HELLO";
	default:
		return "<UNKNOWN>";
	}
}

void
lnet_print_hdr(lnet_hdr_t *hdr)
{
	lnet_process_id_t src = {0};
	lnet_process_id_t dst = {0};
	char *type_str = lnet_msgtyp2str(hdr->type);

	src.nid = hdr->src_nid;
	src.pid = hdr->src_pid;

	dst.nid = hdr->dest_nid;
	dst.pid = hdr->dest_pid;

	CWARN("P3 Header at %p of type %s\n", hdr, type_str);
	CWARN("    From %s\n", libcfs_id2str(src));
	CWARN("    To   %s\n", libcfs_id2str(dst));

	switch (hdr->type) {
	default:
		break;

	case LNET_MSG_PUT:
		CWARN("    Ptl index %d, ack md %#llx.%#llx, match bits %llu\n",
		      hdr->msg.put.ptl_index,
		      hdr->msg.put.ack_wmd.wh_interface_cookie,
		      hdr->msg.put.ack_wmd.wh_object_cookie,
		      hdr->msg.put.match_bits);
		CWARN("    Length %d, offset %d, hdr data %#llx\n",
		      hdr->payload_length, hdr->msg.put.offset,
		      hdr->msg.put.hdr_data);
		break;

	case LNET_MSG_GET:
		CWARN("    Ptl index %d, return md %#llx.%#llx, match bits %llu\n",
		      hdr->msg.get.ptl_index,
		      hdr->msg.get.return_wmd.wh_interface_cookie,
		      hdr->msg.get.return_wmd.wh_object_cookie,
		      hdr->msg.get.match_bits);
		CWARN("    Length %d, src offset %d\n",
		      hdr->msg.get.sink_length,
		      hdr->msg.get.src_offset);
		break;

	case LNET_MSG_ACK:
		CWARN("    dst md %#llx.%#llx, manipulated length %d\n",
		      hdr->msg.ack.dst_wmd.wh_interface_cookie,
		      hdr->msg.ack.dst_wmd.wh_object_cookie,
		      hdr->msg.ack.mlength);
		break;

	case LNET_MSG_REPLY:
		CWARN("    dst md %#llx.%#llx, length %d\n",
		      hdr->msg.reply.dst_wmd.wh_interface_cookie,
		      hdr->msg.reply.dst_wmd.wh_object_cookie,
		      hdr->payload_length);
	}
}

1733int
1734lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
1735 void *private, int rdma_req)
1736{
7e7ab095
MS
1737 int rc = 0;
1738 int cpt;
1739 int for_me;
1740 struct lnet_msg *msg;
1741 lnet_pid_t dest_pid;
1742 lnet_nid_t dest_nid;
1743 lnet_nid_t src_nid;
1744 __u32 payload_length;
1745 __u32 type;
d7e09d03 1746
af66a6e2 1747 LASSERT(!in_interrupt());
d7e09d03
PT
1748
1749 type = le32_to_cpu(hdr->type);
1750 src_nid = le64_to_cpu(hdr->src_nid);
1751 dest_nid = le64_to_cpu(hdr->dest_nid);
1752 dest_pid = le32_to_cpu(hdr->dest_pid);
1753 payload_length = le32_to_cpu(hdr->payload_length);
1754
1755 for_me = (ni->ni_nid == dest_nid);
1756 cpt = lnet_cpt_of_nid(from_nid);
1757
1758 switch (type) {
1759 case LNET_MSG_ACK:
1760 case LNET_MSG_GET:
1761 if (payload_length > 0) {
1762 CERROR("%s, src %s: bad %s payload %d (0 expected)\n",
1763 libcfs_nid2str(from_nid),
1764 libcfs_nid2str(src_nid),
1765 lnet_msgtyp2str(type), payload_length);
1766 return -EPROTO;
1767 }
1768 break;
1769
1770 case LNET_MSG_PUT:
1771 case LNET_MSG_REPLY:
ae4003f0
LN
1772 if (payload_length >
1773 (__u32)(for_me ? LNET_MAX_PAYLOAD : LNET_MTU)) {
2d00bd17 1774 CERROR("%s, src %s: bad %s payload %d (%d max expected)\n",
d7e09d03
PT
1775 libcfs_nid2str(from_nid),
1776 libcfs_nid2str(src_nid),
1777 lnet_msgtyp2str(type),
1778 payload_length,
1779 for_me ? LNET_MAX_PAYLOAD : LNET_MTU);
1780 return -EPROTO;
1781 }
1782 break;
1783
1784 default:
1785 CERROR("%s, src %s: Bad message type 0x%x\n",
1786 libcfs_nid2str(from_nid),
1787 libcfs_nid2str(src_nid), type);
1788 return -EPROTO;
1789 }
1790
1791 if (the_lnet.ln_routing &&
ec0067d1 1792 ni->ni_last_alive != ktime_get_real_seconds()) {
d7e09d03
PT
1793 lnet_ni_lock(ni);
1794
1795 /* NB: so far here is the only place to set NI status to "up */
ec0067d1 1796 ni->ni_last_alive = ktime_get_real_seconds();
06ace26e 1797 if (ni->ni_status &&
d7e09d03
PT
1798 ni->ni_status->ns_status == LNET_NI_STATUS_DOWN)
1799 ni->ni_status->ns_status = LNET_NI_STATUS_UP;
1800 lnet_ni_unlock(ni);
1801 }
1802
4420cfd3
JS
1803 /*
1804 * Regard a bad destination NID as a protocol error. Senders should
d7e09d03 1805 * know what they're doing; if they don't they're misconfigured, buggy
4420cfd3
JS
1806 * or malicious so we chop them off at the knees :)
1807 */
d7e09d03
PT
1808 if (!for_me) {
1809 if (LNET_NIDNET(dest_nid) == LNET_NIDNET(ni->ni_nid)) {
1810 /* should have gone direct */
2d00bd17
JP
1811 CERROR("%s, src %s: Bad dest nid %s (should have been sent direct)\n",
1812 libcfs_nid2str(from_nid),
1813 libcfs_nid2str(src_nid),
1814 libcfs_nid2str(dest_nid));
d7e09d03
PT
1815 return -EPROTO;
1816 }
1817
1818 if (lnet_islocalnid(dest_nid)) {
4420cfd3
JS
1819 /*
1820 * dest is another local NI; sender should have used
1821 * this node's NID on its own network
1822 */
2d00bd17
JP
			CERROR("%s, src %s: Bad dest nid %s (it's my nid but on a different network)\n",
			       libcfs_nid2str(from_nid),
			       libcfs_nid2str(src_nid),
			       libcfs_nid2str(dest_nid));
			return -EPROTO;
		}

		if (rdma_req && type == LNET_MSG_GET) {
			CERROR("%s, src %s: Bad optimized GET for %s (final destination must be me)\n",
			       libcfs_nid2str(from_nid),
			       libcfs_nid2str(src_nid),
			       libcfs_nid2str(dest_nid));
			return -EPROTO;
		}

		if (!the_lnet.ln_routing) {
			CERROR("%s, src %s: Dropping message for %s (routing not enabled)\n",
			       libcfs_nid2str(from_nid),
			       libcfs_nid2str(src_nid),
			       libcfs_nid2str(dest_nid));
			goto drop;
		}
	}

	/*
	 * Message looks OK; we're not going to return an error, so we MUST
	 * call back lnd_recv() come what may...
	 */
	if (!list_empty(&the_lnet.ln_test_peers) &&	/* normally we don't */
	    fail_peer(src_nid, 0)) {			/* shall we now? */
		CERROR("%s, src %s: Dropping %s to simulate failure\n",
		       libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
		       lnet_msgtyp2str(type));
		goto drop;
	}

	msg = lnet_msg_alloc();
	if (!msg) {
		CERROR("%s, src %s: Dropping %s (out of memory)\n",
		       libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
		       lnet_msgtyp2str(type));
		goto drop;
	}

	/* msg zeroed in lnet_msg_alloc;
	 * i.e. flags all clear, pointers NULL etc
	 */
	msg->msg_type = type;
	msg->msg_private = private;
	msg->msg_receiving = 1;
	msg->msg_wanted = payload_length;
	msg->msg_len = payload_length;
	msg->msg_offset = 0;
	msg->msg_hdr = *hdr;
	/* for building message event */
	msg->msg_from = from_nid;
	if (!for_me) {
		msg->msg_target.pid = dest_pid;
		msg->msg_target.nid = dest_nid;
		msg->msg_routing = 1;

	} else {
		/* convert common msg->hdr fields to host byteorder */
		msg->msg_hdr.type = type;
		msg->msg_hdr.src_nid = src_nid;
		msg->msg_hdr.src_pid = le32_to_cpu(msg->msg_hdr.src_pid);
		msg->msg_hdr.dest_nid = dest_nid;
		msg->msg_hdr.dest_pid = dest_pid;
		msg->msg_hdr.payload_length = payload_length;
	}

	lnet_net_lock(cpt);
	rc = lnet_nid2peer_locked(&msg->msg_rxpeer, from_nid, cpt);
	if (rc != 0) {
		lnet_net_unlock(cpt);
		CERROR("%s, src %s: Dropping %s (error %d looking up sender)\n",
		       libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
		       lnet_msgtyp2str(type), rc);
		lnet_msg_free(msg);
		goto drop;
	}

	if (lnet_isrouter(msg->msg_rxpeer)) {
		lnet_peer_set_alive(msg->msg_rxpeer);
		if (avoid_asym_router_failure &&
		    LNET_NIDNET(src_nid) != LNET_NIDNET(from_nid)) {
			/* received a remote message from a router; update
			 * the remote NI status on this router.
			 * NB: multi-hop routed messages will be ignored.
			 */
			lnet_router_ni_update_locked(msg->msg_rxpeer,
						     LNET_NIDNET(src_nid));
		}
	}

	lnet_msg_commit(msg, cpt);

	if (!for_me) {
		rc = lnet_parse_forward_locked(ni, msg);
		lnet_net_unlock(cpt);

		if (rc < 0)
			goto free_drop;
		if (rc == 0) {
			lnet_ni_recv(ni, msg->msg_private, msg, 0,
				     0, payload_length, payload_length);
		}
		return 0;
	}

	lnet_net_unlock(cpt);

	switch (type) {
	case LNET_MSG_ACK:
		rc = lnet_parse_ack(ni, msg);
		break;
	case LNET_MSG_PUT:
		rc = lnet_parse_put(ni, msg);
		break;
	case LNET_MSG_GET:
		rc = lnet_parse_get(ni, msg, rdma_req);
		break;
	case LNET_MSG_REPLY:
		rc = lnet_parse_reply(ni, msg);
		break;
	default:
		LASSERT(0);
		rc = -EPROTO;
		goto free_drop;  /* prevent an unused label if !kernel */
	}

	if (rc == 0)
		return 0;

	LASSERT(rc == ENOENT);

 free_drop:
	LASSERT(!msg->msg_md);
	lnet_finalize(ni, msg, rc);

 drop:
	lnet_drop_message(ni, cpt, private, payload_length);
	return 0;
}
EXPORT_SYMBOL(lnet_parse);
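
/*
 * Illustrative sketch (not part of this file): how an LND's receive path
 * might hand an incoming header to lnet_parse(). The 'ni', 'hdr',
 * 'from_nid' and 'private' values are assumed to come from a hypothetical
 * LND connection; 'rdma_req' says whether the sender is able to RDMA.
 */
static void example_lnd_recv_hdr(lnet_ni_t *ni, lnet_hdr_t *hdr,
				 lnet_nid_t from_nid, void *private,
				 int rdma_req)
{
	int rc = lnet_parse(ni, hdr, from_nid, private, rdma_req);

	/*
	 * On success lnet_parse() has taken over: it will call back the
	 * LND's lnd_recv() to consume the payload "come what may". Only
	 * on error does the LND still own the receive buffer.
	 */
	if (rc < 0)
		CNETERR("dropping message from %s: %d\n",
			libcfs_nid2str(from_nid), rc);
}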

void
lnet_drop_delayed_msg_list(struct list_head *head, char *reason)
{
	while (!list_empty(head)) {
		lnet_process_id_t id = {0};
		lnet_msg_t *msg;

		msg = list_entry(head->next, lnet_msg_t, msg_list);
		list_del(&msg->msg_list);

		id.nid = msg->msg_hdr.src_nid;
		id.pid = msg->msg_hdr.src_pid;

		LASSERT(!msg->msg_md);
		LASSERT(msg->msg_rx_delayed);
		LASSERT(msg->msg_rxpeer);
		LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);

		CWARN("Dropping delayed PUT from %s portal %d match %llu offset %d length %d: %s\n",
		      libcfs_id2str(id),
		      msg->msg_hdr.msg.put.ptl_index,
		      msg->msg_hdr.msg.put.match_bits,
		      msg->msg_hdr.msg.put.offset,
		      msg->msg_hdr.payload_length, reason);

		/*
		 * NB I can't drop msg's ref on msg_rxpeer until after I've
		 * called lnet_drop_message(), so I just hang onto msg as well
		 * until that's done
		 */
		lnet_drop_message(msg->msg_rxpeer->lp_ni,
				  msg->msg_rxpeer->lp_cpt,
				  msg->msg_private, msg->msg_len);
		/*
		 * NB: the message will not generate an event because it has
		 * no attached MD, but we still give an error code so that
		 * lnet_msg_decommit() can skip counter operations and other
		 * checks.
		 */
		lnet_finalize(msg->msg_rxpeer->lp_ni, msg, -ENOENT);
	}
}

void
lnet_recv_delayed_msg_list(struct list_head *head)
{
	while (!list_empty(head)) {
		lnet_msg_t *msg;
		lnet_process_id_t id;

		msg = list_entry(head->next, lnet_msg_t, msg_list);
		list_del(&msg->msg_list);

		/*
		 * md won't disappear under me, since each msg
		 * holds a ref on it
		 */
		id.nid = msg->msg_hdr.src_nid;
		id.pid = msg->msg_hdr.src_pid;

		LASSERT(msg->msg_rx_delayed);
		LASSERT(msg->msg_md);
		LASSERT(msg->msg_rxpeer);
		LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);

		CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d match %llu offset %d length %d.\n",
		       libcfs_id2str(id), msg->msg_hdr.msg.put.ptl_index,
		       msg->msg_hdr.msg.put.match_bits,
		       msg->msg_hdr.msg.put.offset,
		       msg->msg_hdr.payload_length);

		lnet_recv_put(msg->msg_rxpeer->lp_ni, msg);
	}
}

/**
 * Initiate an asynchronous PUT operation.
 *
 * There are several events associated with a PUT: completion of the send on
 * the initiator node (LNET_EVENT_SEND), and when the send completes
 * successfully, the receipt of an acknowledgment (LNET_EVENT_ACK) indicating
 * that the operation was accepted by the target. The event LNET_EVENT_PUT is
 * used at the target node to indicate the completion of incoming data
 * delivery.
 *
 * The local events will be logged in the EQ associated with the MD pointed
 * to by the \a mdh handle. Using an MD without an associated EQ results in
 * these events being discarded. In this case, the caller must have another
 * mechanism (e.g., a higher level protocol) for determining when it is safe
 * to modify the memory region associated with the MD.
 *
 * Note that LNet does not guarantee the order of LNET_EVENT_SEND and
 * LNET_EVENT_ACK, though intuitively ACK should happen after SEND.
 *
 * \param self Indicates the NID of a local interface through which to send
 * the PUT request. Use LNET_NID_ANY to let LNet choose one by itself.
 * \param mdh A handle for the MD that describes the memory to be sent. The MD
 * must be "free floating" (See LNetMDBind()).
 * \param ack Controls whether an acknowledgment is requested.
 * Acknowledgments are only sent when they are requested by the initiating
 * process and the target MD enables them.
 * \param target A process identifier for the target process.
 * \param portal The index in the \a target's portal table.
 * \param match_bits The match bits to use for MD selection at the target
 * process.
 * \param offset The offset into the target MD (only used when the target
 * MD has the LNET_MD_MANAGE_REMOTE option set).
 * \param hdr_data 64 bits of user data that can be included in the message
 * header. This data is written to an event queue entry at the target if an
 * EQ is present on the matching MD.
 *
 * \retval 0 Success, and only in this case events will be generated
 * and logged to the EQ (if it exists).
 * \retval -EIO Simulated failure.
 * \retval -ENOMEM Memory allocation failure.
 * \retval -ENOENT Invalid MD object.
 *
 * \see lnet_event_t::hdr_data and lnet_event_kind_t.
 */
int
LNetPut(lnet_nid_t self, lnet_handle_md_t mdh, lnet_ack_req_t ack,
	lnet_process_id_t target, unsigned int portal,
	__u64 match_bits, unsigned int offset,
	__u64 hdr_data)
{
	struct lnet_msg *msg;
	struct lnet_libmd *md;
	int cpt;
	int rc;

	LASSERT(the_lnet.ln_init);
	LASSERT(the_lnet.ln_refcount > 0);

	if (!list_empty(&the_lnet.ln_test_peers) &&	/* normally we don't */
	    fail_peer(target.nid, 1)) {			/* shall we now? */
		CERROR("Dropping PUT to %s: simulated failure\n",
		       libcfs_id2str(target));
		return -EIO;
	}

	msg = lnet_msg_alloc();
	if (!msg) {
		CERROR("Dropping PUT to %s: ENOMEM on lnet_msg_t\n",
		       libcfs_id2str(target));
		return -ENOMEM;
	}
	msg->msg_vmflush = !!memory_pressure_get();

	cpt = lnet_cpt_of_cookie(mdh.cookie);
	lnet_res_lock(cpt);

	md = lnet_handle2md(&mdh);
	if (!md || md->md_threshold == 0 || md->md_me) {
		CERROR("Dropping PUT (%llu:%d:%s): MD (%d) invalid\n",
		       match_bits, portal, libcfs_id2str(target),
		       !md ? -1 : md->md_threshold);
		if (md && md->md_me)
			CERROR("Source MD also attached to portal %d\n",
			       md->md_me->me_portal);
		lnet_res_unlock(cpt);

		lnet_msg_free(msg);
		return -ENOENT;
	}

	CDEBUG(D_NET, "LNetPut -> %s\n", libcfs_id2str(target));

	lnet_msg_attach_md(msg, md, 0, 0);

	lnet_prep_send(msg, LNET_MSG_PUT, target, 0, md->md_length);

	msg->msg_hdr.msg.put.match_bits = cpu_to_le64(match_bits);
	msg->msg_hdr.msg.put.ptl_index = cpu_to_le32(portal);
	msg->msg_hdr.msg.put.offset = cpu_to_le32(offset);
	msg->msg_hdr.msg.put.hdr_data = hdr_data;

	/* NB handles only looked up by creator (no flips) */
	if (ack == LNET_ACK_REQ) {
		msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
			the_lnet.ln_interface_cookie;
		msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
			md->md_lh.lh_cookie;
	} else {
		msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
			LNET_WIRE_HANDLE_COOKIE_NONE;
		msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
			LNET_WIRE_HANDLE_COOKIE_NONE;
	}

	lnet_res_unlock(cpt);

	lnet_build_msg_event(msg, LNET_EVENT_SEND);

	rc = lnet_send(self, msg, LNET_NID_ANY);
	if (rc != 0) {
		CNETERR("Error sending PUT to %s: %d\n",
			libcfs_id2str(target), rc);
		lnet_finalize(NULL, msg, rc);
	}

	/* completion will be signalled by an event */
	return 0;
}
EXPORT_SYMBOL(LNetPut);
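
/*
 * Usage sketch (illustration only, not part of this file): a minimal
 * caller that binds a free-floating MD with LNetMDBind() and then issues
 * an acknowledged PUT at a peer. The 'target', 'portal', 'buf', 'len'
 * and EQ handle are hypothetical, and error handling is reduced to the
 * bare minimum.
 */
static int example_put(lnet_process_id_t target, unsigned int portal,
		       void *buf, unsigned int len, lnet_handle_eq_t eqh)
{
	lnet_md_t md = {
		.start = buf,
		.length = len,
		.threshold = 2,		/* one SEND + one ACK event */
		.options = 0,
		.user_ptr = NULL,
		.eq_handle = eqh,	/* local events logged to this EQ */
	};
	lnet_handle_md_t mdh;
	int rc;

	rc = LNetMDBind(md, LNET_UNLINK, &mdh);	/* "free floating" MD */
	if (rc != 0)
		return rc;

	/* match bits 0x1234 are arbitrary; offset 0, no hdr_data */
	return LNetPut(LNET_NID_ANY, mdh, LNET_ACK_REQ, target,
		       portal, 0x1234, 0, 0);
}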

lnet_msg_t *
lnet_create_reply_msg(lnet_ni_t *ni, lnet_msg_t *getmsg)
{
	/*
	 * The LND can DMA direct to the GET md (i.e. no REPLY msg). This
	 * returns a msg for the LND to pass to lnet_finalize() when the sink
	 * data has been received.
	 *
	 * CAVEAT EMPTOR: 'getmsg' is the original GET, which is freed when
	 * lnet_finalize() is called on it, so the LND must call this first
	 */
	struct lnet_msg *msg = lnet_msg_alloc();
	struct lnet_libmd *getmd = getmsg->msg_md;
	lnet_process_id_t peer_id = getmsg->msg_target;
	int cpt;

	LASSERT(!getmsg->msg_target_is_router);
	LASSERT(!getmsg->msg_routing);

	cpt = lnet_cpt_of_cookie(getmd->md_lh.lh_cookie);
	lnet_res_lock(cpt);

	LASSERT(getmd->md_refcount > 0);

	if (!msg) {
		CERROR("%s: Dropping REPLY from %s: can't allocate msg\n",
		       libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id));
		goto drop;
	}

	if (getmd->md_threshold == 0) {
		CERROR("%s: Dropping REPLY from %s for inactive MD %p\n",
		       libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id),
		       getmd);
		lnet_res_unlock(cpt);
		goto drop;
	}

	LASSERT(getmd->md_offset == 0);

	CDEBUG(D_NET, "%s: Reply from %s md %p\n",
	       libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id), getmd);

	/* setup information for lnet_build_msg_event */
	msg->msg_from = peer_id.nid;
	msg->msg_type = LNET_MSG_GET; /* flag this msg as an "optimized" GET */
	msg->msg_hdr.src_nid = peer_id.nid;
	msg->msg_hdr.payload_length = getmd->md_length;
	msg->msg_receiving = 1; /* required by lnet_msg_attach_md */

	lnet_msg_attach_md(msg, getmd, getmd->md_offset, getmd->md_length);
	lnet_res_unlock(cpt);

	cpt = lnet_cpt_of_nid(peer_id.nid);

	lnet_net_lock(cpt);
	lnet_msg_commit(msg, cpt);
	lnet_net_unlock(cpt);

	lnet_build_msg_event(msg, LNET_EVENT_REPLY);

	return msg;

 drop:
	cpt = lnet_cpt_of_nid(peer_id.nid);

	lnet_net_lock(cpt);
	the_lnet.ln_counters[cpt]->drop_count++;
	the_lnet.ln_counters[cpt]->drop_length += getmd->md_length;
	lnet_net_unlock(cpt);

	if (msg)
		lnet_msg_free(msg);

	return NULL;
}
EXPORT_SYMBOL(lnet_create_reply_msg);

void
lnet_set_reply_msg_len(lnet_ni_t *ni, lnet_msg_t *reply, unsigned int len)
{
	/*
	 * Set the REPLY length, now the RDMA that elides the REPLY message
	 * has completed and I know it.
	 */
	LASSERT(reply);
	LASSERT(reply->msg_type == LNET_MSG_GET);
	LASSERT(reply->msg_ev.type == LNET_EVENT_REPLY);

	/*
	 * NB I trusted my peer to RDMA. If she tells me she's written beyond
	 * the end of my buffer, I might as well be dead.
	 */
	LASSERT(len <= reply->msg_ev.mlength);

	reply->msg_ev.mlength = len;
}
EXPORT_SYMBOL(lnet_set_reply_msg_len);
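
/*
 * Illustrative sketch of the "optimized GET" pattern described above, as
 * a hypothetical LND completion handler might use it. The names 'ni',
 * 'getmsg', 'nob' and 'status' are assumed inputs from the LND; this is
 * not code from any real LND, and real drivers split these steps across
 * their tx setup and completion paths.
 */
static void example_lnd_optimized_get_done(lnet_ni_t *ni, lnet_msg_t *getmsg,
					   unsigned int nob, int status)
{
	lnet_msg_t *reply;

	/* must be created before lnet_finalize() frees 'getmsg' */
	reply = lnet_create_reply_msg(ni, getmsg);
	lnet_finalize(ni, getmsg, status);	/* completes the GET send */

	if (!reply)
		return;		/* REPLY already accounted as dropped */

	/* the peer RDMAed 'nob' bytes straight into the GET MD */
	lnet_set_reply_msg_len(ni, reply, nob);
	lnet_finalize(ni, reply, status);	/* delivers LNET_EVENT_REPLY */
}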

/**
 * Initiate an asynchronous GET operation.
 *
 * On the initiator node, an LNET_EVENT_SEND is logged when the GET request
 * is sent, and an LNET_EVENT_REPLY is logged when the data returned from
 * the target node in the REPLY has been written to the local MD.
 *
 * On the target node, an LNET_EVENT_GET is logged when the GET request
 * arrives and is accepted into an MD.
 *
 * \param self,target,portal,match_bits,offset See the discussion in LNetPut().
 * \param mdh A handle for the MD that describes the memory into which the
 * requested data will be received. The MD must be "free floating"
 * (See LNetMDBind()).
 *
 * \retval 0 Success, and only in this case events will be generated
 * and logged to the EQ (if it exists) of the MD.
 * \retval -EIO Simulated failure.
 * \retval -ENOMEM Memory allocation failure.
 * \retval -ENOENT Invalid MD object.
 */
int
LNetGet(lnet_nid_t self, lnet_handle_md_t mdh,
	lnet_process_id_t target, unsigned int portal,
	__u64 match_bits, unsigned int offset)
{
	struct lnet_msg *msg;
	struct lnet_libmd *md;
	int cpt;
	int rc;

	LASSERT(the_lnet.ln_init);
	LASSERT(the_lnet.ln_refcount > 0);

	if (!list_empty(&the_lnet.ln_test_peers) &&	/* normally we don't */
	    fail_peer(target.nid, 1)) {			/* shall we now? */
		CERROR("Dropping GET to %s: simulated failure\n",
		       libcfs_id2str(target));
		return -EIO;
	}

	msg = lnet_msg_alloc();
	if (!msg) {
		CERROR("Dropping GET to %s: ENOMEM on lnet_msg_t\n",
		       libcfs_id2str(target));
		return -ENOMEM;
	}

	cpt = lnet_cpt_of_cookie(mdh.cookie);
	lnet_res_lock(cpt);

	md = lnet_handle2md(&mdh);
	if (!md || md->md_threshold == 0 || md->md_me) {
		CERROR("Dropping GET (%llu:%d:%s): MD (%d) invalid\n",
		       match_bits, portal, libcfs_id2str(target),
		       !md ? -1 : md->md_threshold);
		if (md && md->md_me)
			CERROR("REPLY MD also attached to portal %d\n",
			       md->md_me->me_portal);

		lnet_res_unlock(cpt);

		lnet_msg_free(msg);
		return -ENOENT;
	}

	CDEBUG(D_NET, "LNetGet -> %s\n", libcfs_id2str(target));

	lnet_msg_attach_md(msg, md, 0, 0);

	lnet_prep_send(msg, LNET_MSG_GET, target, 0, 0);

	msg->msg_hdr.msg.get.match_bits = cpu_to_le64(match_bits);
	msg->msg_hdr.msg.get.ptl_index = cpu_to_le32(portal);
	msg->msg_hdr.msg.get.src_offset = cpu_to_le32(offset);
	msg->msg_hdr.msg.get.sink_length = cpu_to_le32(md->md_length);

	/* NB handles only looked up by creator (no flips) */
	msg->msg_hdr.msg.get.return_wmd.wh_interface_cookie =
		the_lnet.ln_interface_cookie;
	msg->msg_hdr.msg.get.return_wmd.wh_object_cookie =
		md->md_lh.lh_cookie;

	lnet_res_unlock(cpt);

	lnet_build_msg_event(msg, LNET_EVENT_SEND);

	rc = lnet_send(self, msg, LNET_NID_ANY);
	if (rc < 0) {
		CNETERR("Error sending GET to %s: %d\n",
			libcfs_id2str(target), rc);
		lnet_finalize(NULL, msg, rc);
	}

	/* completion will be signalled by an event */
	return 0;
}
EXPORT_SYMBOL(LNetGet);
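
/*
 * Usage sketch (illustration only): issuing a GET into a bound MD. As
 * with the PUT example above, 'target', 'portal', 'buf', 'len' and the
 * EQ handle are hypothetical; a real caller would wait for the
 * LNET_EVENT_SEND / LNET_EVENT_REPLY pair on the EQ before reading 'buf'.
 */
static int example_get(lnet_process_id_t target, unsigned int portal,
		       void *buf, unsigned int len, lnet_handle_eq_t eqh)
{
	lnet_md_t md = {
		.start = buf,
		.length = len,
		.threshold = 2,		/* one SEND + one REPLY event */
		.options = 0,
		.user_ptr = NULL,
		.eq_handle = eqh,
	};
	lnet_handle_md_t mdh;
	int rc;

	rc = LNetMDBind(md, LNET_UNLINK, &mdh);	/* "free floating" MD */
	if (rc != 0)
		return rc;

	/* match bits 0x1234 and offset 0 are arbitrary */
	return LNetGet(LNET_NID_ANY, mdh, target, portal, 0x1234, 0);
}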

/**
 * Calculate the distance to the node at \a dstnid.
 *
 * \param dstnid Target NID.
 * \param srcnidp If not NULL, the NID of the local interface used to reach
 * \a dstnid is saved here.
 * \param orderp If not NULL, the order of the route used to reach \a dstnid
 * is saved here.
 *
 * \retval 0 If \a dstnid belongs to a local interface and the reserved
 * option local_nid_dist_zero is set (the default).
 * \retval positive Distance to the target NID, i.e. the number of hops
 * plus one.
 * \retval -EHOSTUNREACH If \a dstnid is not reachable.
 */
int
LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
{
	struct list_head *e;
	struct lnet_ni *ni;
	lnet_remotenet_t *rnet;
	__u32 dstnet = LNET_NIDNET(dstnid);
	int hops;
	int cpt;
	__u32 order = 2;
	struct list_head *rn_list;

	/*
	 * if !local_nid_dist_zero, I don't return a distance of 0 ever
	 * (when lustre sees a distance of 0, it substitutes 0@lo), so I
	 * keep order 0 free for 0@lo and order 1 free for a local NID
	 * match
	 */
	LASSERT(the_lnet.ln_init);
	LASSERT(the_lnet.ln_refcount > 0);

	cpt = lnet_net_lock_current();

	list_for_each(e, &the_lnet.ln_nis) {
		ni = list_entry(e, lnet_ni_t, ni_list);

		if (ni->ni_nid == dstnid) {
			if (srcnidp)
				*srcnidp = dstnid;
			if (orderp) {
				if (LNET_NETTYP(LNET_NIDNET(dstnid)) == LOLND)
					*orderp = 0;
				else
					*orderp = 1;
			}
			lnet_net_unlock(cpt);

			return local_nid_dist_zero ? 0 : 1;
		}

		if (LNET_NIDNET(ni->ni_nid) == dstnet) {
			if (srcnidp)
				*srcnidp = ni->ni_nid;
			if (orderp)
				*orderp = order;
			lnet_net_unlock(cpt);
			return 1;
		}

		order++;
	}

	rn_list = lnet_net2rnethash(dstnet);
	list_for_each(e, rn_list) {
		rnet = list_entry(e, lnet_remotenet_t, lrn_list);

		if (rnet->lrn_net == dstnet) {
			lnet_route_t *route;
			lnet_route_t *shortest = NULL;

			LASSERT(!list_empty(&rnet->lrn_routes));

			list_for_each_entry(route, &rnet->lrn_routes,
					    lr_list) {
				if (!shortest ||
				    route->lr_hops < shortest->lr_hops)
					shortest = route;
			}

			LASSERT(shortest);
			hops = shortest->lr_hops;
			if (srcnidp)
				*srcnidp = shortest->lr_gateway->lp_ni->ni_nid;
			if (orderp)
				*orderp = order;
			lnet_net_unlock(cpt);
			return hops + 1;
		}
		order++;
	}

	lnet_net_unlock(cpt);
	return -EHOSTUNREACH;
}
EXPORT_SYMBOL(LNetDist);
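
/*
 * Usage sketch (illustration only): probing reachability of a peer NID
 * with LNetDist(). 'nid' is a hypothetical destination; a negative return
 * means unreachable, 0 means a local interface (with local_nid_dist_zero
 * set), and a positive value is the number of hops plus one.
 */
static bool example_is_reachable(lnet_nid_t nid)
{
	lnet_nid_t src;
	__u32 order;
	int dist;

	dist = LNetDist(nid, &src, &order);
	if (dist < 0)
		return false;	/* -EHOSTUNREACH: no local NI or route */

	CDEBUG(D_NET, "distance to %s: %d (via %s, order %u)\n",
	       libcfs_nid2str(nid), dist, libcfs_nid2str(src), order);
	return true;
}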