/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include <rdma/ib_cache.h>

#include "core_priv.h"

struct ib_pkey_cache {
	int	table_len;
	u16	table[0];
};

struct ib_update_work {
	struct work_struct work;
	struct ib_device  *device;
	u8		   port_num;
};

union ib_gid zgid;
EXPORT_SYMBOL(zgid);

static const struct ib_gid_attr zattr;

enum gid_attr_find_mask {
	GID_ATTR_FIND_MASK_GID		= 1UL << 0,
	GID_ATTR_FIND_MASK_NETDEV	= 1UL << 1,
	GID_ATTR_FIND_MASK_DEFAULT	= 1UL << 2,
	GID_ATTR_FIND_MASK_GID_TYPE	= 1UL << 3,
};

enum gid_table_entry_props {
	GID_TABLE_ENTRY_INVALID		= 1UL << 0,
	GID_TABLE_ENTRY_DEFAULT		= 1UL << 1,
};

enum gid_table_write_action {
	GID_TABLE_WRITE_ACTION_ADD,
	GID_TABLE_WRITE_ACTION_DEL,
	/* MODIFY only updates the GID table. Currently only used by
	 * ib_cache_update.
	 */
	GID_TABLE_WRITE_ACTION_MODIFY
};

struct ib_gid_table_entry {
	unsigned long	    props;
	union ib_gid	    gid;
	struct ib_gid_attr  attr;
	void		   *context;
};
struct ib_gid_table {
	int sz;
	/* In RoCE, adding a GID to the table requires:
	 * (a) Find out if the GID already exists.
	 * (b) Find a free space.
	 * (c) Write the new GID.
	 *
	 * Deletion requires a different set of operations:
	 * (a) Find the GID.
	 * (b) Delete it.
	 *
	 * Add/delete should be carried out atomically.
	 * This is done by locking this mutex from multiple
	 * writers. We don't need this lock for IB, as the MAD
	 * layer replaces all entries. All data_vec entries
	 * are locked by this lock.
	 */
	struct mutex lock;
	/* This lock protects the table entries from being
	 * read and written simultaneously.
	 */
	rwlock_t rwlock;
	struct ib_gid_table_entry *data_vec;
};
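
/* Illustrative sketch of the lock nesting the runtime RoCE write paths
 * below (e.g. ib_cache_gid_add()) follow; not a callable helper:
 *
 *	mutex_lock(&table->lock);		(serialize writers)
 *	write_lock_irq(&table->rwlock);		(exclude readers while updating)
 *	ix = find_gid(table, gid, attr, ...);
 *	... add_gid()/del_gid() on the entry at ix ...
 *	write_unlock_irq(&table->rwlock);
 *	mutex_unlock(&table->lock);
 *
 * Note that write_gid() may temporarily drop the rwlock (after marking
 * the entry GID_TABLE_ENTRY_INVALID) so that a provider's add_gid/del_gid
 * callback is allowed to sleep.
 */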

static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
{
	if (rdma_cap_roce_gid_table(ib_dev, port)) {
		struct ib_event event;

		event.device		= ib_dev;
		event.element.port_num	= port;
		event.event		= IB_EVENT_GID_CHANGE;

		ib_dispatch_event(&event);
	}
}

static const char * const gid_type_str[] = {
	[IB_GID_TYPE_IB]		= "IB/RoCE v1",
	[IB_GID_TYPE_ROCE_UDP_ENCAP]	= "RoCE v2",
};

const char *ib_cache_gid_type_str(enum ib_gid_type gid_type)
{
	if (gid_type < ARRAY_SIZE(gid_type_str) && gid_type_str[gid_type])
		return gid_type_str[gid_type];

	return "Invalid GID type";
}
EXPORT_SYMBOL(ib_cache_gid_type_str);

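/**
 * ib_cache_gid_parse_type_str - Parse a GID type string into an ib_gid_type
 * @buf: The string to parse. A single trailing newline is tolerated.
 *
 * Matches @buf against the known GID type names ("IB/RoCE v1" and
 * "RoCE v2") and returns the corresponding enum ib_gid_type value,
 * or -EINVAL if nothing matches.
 */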
int ib_cache_gid_parse_type_str(const char *buf)
{
	unsigned int i;
	size_t len;
	int err = -EINVAL;

	len = strlen(buf);
	if (len == 0)
		return -EINVAL;

	if (buf[len - 1] == '\n')
		len--;

	for (i = 0; i < ARRAY_SIZE(gid_type_str); ++i)
		if (gid_type_str[i] && !strncmp(buf, gid_type_str[i], len) &&
		    len == strlen(gid_type_str[i])) {
			err = i;
			break;
		}

	return err;
}
EXPORT_SYMBOL(ib_cache_gid_parse_type_str);

/* This function expects the rwlock to be write-locked in all
 * scenarios and the mutex to be held in sleepable (RoCE)
 * scenarios.
 */
static int write_gid(struct ib_device *ib_dev, u8 port,
		     struct ib_gid_table *table, int ix,
		     const union ib_gid *gid,
		     const struct ib_gid_attr *attr,
		     enum gid_table_write_action action,
		     bool default_gid)
{
	int ret = 0;
	struct net_device *old_net_dev;

	/* When the port has a RoCE GID table (rdma_cap_roce_gid_table),
	 * this function must be protected by a sleepable lock.
	 */

	if (rdma_cap_roce_gid_table(ib_dev, port)) {
		table->data_vec[ix].props |= GID_TABLE_ENTRY_INVALID;
		write_unlock_irq(&table->rwlock);
		/* GID_TABLE_WRITE_ACTION_MODIFY currently isn't supported by
		 * RoCE providers and thus only updates the cache.
		 */
		if (action == GID_TABLE_WRITE_ACTION_ADD)
			ret = ib_dev->add_gid(ib_dev, port, ix, gid, attr,
					      &table->data_vec[ix].context);
		else if (action == GID_TABLE_WRITE_ACTION_DEL)
			ret = ib_dev->del_gid(ib_dev, port, ix,
					      &table->data_vec[ix].context);
		write_lock_irq(&table->rwlock);
	}

	old_net_dev = table->data_vec[ix].attr.ndev;
	if (old_net_dev && old_net_dev != attr->ndev)
		dev_put(old_net_dev);
	/* If the provider call failed, or this is a delete, zero the entry. */
	if (ret || action == GID_TABLE_WRITE_ACTION_DEL) {
		gid = &zgid;
		attr = &zattr;
		table->data_vec[ix].context = NULL;
	}
	if (default_gid)
		table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
	memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid));
	memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr));
	if (table->data_vec[ix].attr.ndev &&
	    table->data_vec[ix].attr.ndev != old_net_dev)
		dev_hold(table->data_vec[ix].attr.ndev);

	table->data_vec[ix].props &= ~GID_TABLE_ENTRY_INVALID;

	return ret;
}

static int add_gid(struct ib_device *ib_dev, u8 port,
		   struct ib_gid_table *table, int ix,
		   const union ib_gid *gid,
		   const struct ib_gid_attr *attr,
		   bool default_gid)
{
	return write_gid(ib_dev, port, table, ix, gid, attr,
			 GID_TABLE_WRITE_ACTION_ADD, default_gid);
}

static int modify_gid(struct ib_device *ib_dev, u8 port,
		      struct ib_gid_table *table, int ix,
		      const union ib_gid *gid,
		      const struct ib_gid_attr *attr,
		      bool default_gid)
{
	return write_gid(ib_dev, port, table, ix, gid, attr,
			 GID_TABLE_WRITE_ACTION_MODIFY, default_gid);
}

static int del_gid(struct ib_device *ib_dev, u8 port,
		   struct ib_gid_table *table, int ix,
		   bool default_gid)
{
	return write_gid(ib_dev, port, table, ix, &zgid, &zattr,
			 GID_TABLE_WRITE_ACTION_DEL, default_gid);
}

/* rwlock should be read locked */
static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
		    const struct ib_gid_attr *val, bool default_gid,
		    unsigned long mask, int *pempty)
{
	int i = 0;
	int found = -1;
	int empty = pempty ? -1 : 0;

	while (i < table->sz && (found < 0 || empty < 0)) {
		struct ib_gid_table_entry *data = &table->data_vec[i];
		struct ib_gid_attr *attr = &data->attr;
		int curr_index = i;

		i++;

		if (data->props & GID_TABLE_ENTRY_INVALID)
			continue;

		if (empty < 0)
			if (!memcmp(&data->gid, &zgid, sizeof(*gid)) &&
			    !memcmp(attr, &zattr, sizeof(*attr)) &&
			    !data->props)
				empty = curr_index;

		if (found >= 0)
			continue;

		if (mask & GID_ATTR_FIND_MASK_GID_TYPE &&
		    attr->gid_type != val->gid_type)
			continue;

		if (mask & GID_ATTR_FIND_MASK_GID &&
		    memcmp(gid, &data->gid, sizeof(*gid)))
			continue;

		if (mask & GID_ATTR_FIND_MASK_NETDEV &&
		    attr->ndev != val->ndev)
			continue;

		if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
		    !!(data->props & GID_TABLE_ENTRY_DEFAULT) !=
		    default_gid)
			continue;

		found = curr_index;
	}

	if (pempty)
		*pempty = empty;

	return found;
}

static void make_default_gid(struct net_device *dev, union ib_gid *gid)
{
	gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	addrconf_ifid_eui48(&gid->raw[8], dev);
}

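/**
 * ib_cache_gid_add - Add a GID to a RoCE port's GID table
 * @ib_dev: The device to add the GID to.
 * @port: The port number of the device.
 * @gid: The GID value to add; must not be the zero GID.
 * @attr: The attributes (GID type, netdev) to store with the entry.
 *
 * If an identical entry already exists, the call succeeds without
 * duplicating it. Returns 0 on success, -EINVAL for the zero GID,
 * -EPERM for an attempt to add a default GID from the wrong netdev,
 * or -ENOSPC if the table is full.
 */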
int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	int ix;
	int ret = 0;
	struct net_device *idev;
	int empty;

	table = ports_table[port - rdma_start_port(ib_dev)];

	if (!memcmp(gid, &zgid, sizeof(*gid)))
		return -EINVAL;

	if (ib_dev->get_netdev) {
		idev = ib_dev->get_netdev(ib_dev, port);
		if (idev && attr->ndev != idev) {
			union ib_gid default_gid;

			/* Adding default GIDs is not permitted */
			make_default_gid(idev, &default_gid);
			if (!memcmp(gid, &default_gid, sizeof(*gid))) {
				dev_put(idev);
				return -EPERM;
			}
		}
		if (idev)
			dev_put(idev);
	}

	mutex_lock(&table->lock);
	write_lock_irq(&table->rwlock);

	ix = find_gid(table, gid, attr, false, GID_ATTR_FIND_MASK_GID |
		      GID_ATTR_FIND_MASK_GID_TYPE |
		      GID_ATTR_FIND_MASK_NETDEV, &empty);
	if (ix >= 0)
		goto out_unlock;

	if (empty < 0) {
		ret = -ENOSPC;
		goto out_unlock;
	}

	ret = add_gid(ib_dev, port, table, empty, gid, attr, false);
	if (!ret)
		dispatch_gid_change_event(ib_dev, port);

out_unlock:
	write_unlock_irq(&table->rwlock);
	mutex_unlock(&table->lock);
	return ret;
}

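/**
 * ib_cache_gid_del - Delete a GID from a RoCE port's GID table
 * @ib_dev: The device to delete the GID from.
 * @port: The port number of the device.
 * @gid: The GID value to delete.
 * @attr: The attributes (GID type, netdev) the entry must match.
 *
 * Only a non-default entry matching @gid and @attr exactly is deleted.
 * Always returns 0, whether or not a matching entry was found.
 */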
int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	int ix;

	table = ports_table[port - rdma_start_port(ib_dev)];

	mutex_lock(&table->lock);
	write_lock_irq(&table->rwlock);

	ix = find_gid(table, gid, attr, false,
		      GID_ATTR_FIND_MASK_GID |
		      GID_ATTR_FIND_MASK_GID_TYPE |
		      GID_ATTR_FIND_MASK_NETDEV |
		      GID_ATTR_FIND_MASK_DEFAULT,
		      NULL);
	if (ix < 0)
		goto out_unlock;

	if (!del_gid(ib_dev, port, table, ix, false))
		dispatch_gid_change_event(ib_dev, port);

out_unlock:
	write_unlock_irq(&table->rwlock);
	mutex_unlock(&table->lock);
	return 0;
}

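/**
 * ib_cache_gid_del_all_netdev_gids - Delete all GIDs bound to a netdevice
 * @ib_dev: The device to delete the GIDs from.
 * @port: The port number of the device.
 * @ndev: The netdevice whose entries should be removed.
 *
 * Typically called when a netdevice goes away: every table entry whose
 * attributes reference @ndev is deleted, and one GID change event is
 * dispatched if anything was removed. Always returns 0.
 */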
int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
				     struct net_device *ndev)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	int ix;
	bool deleted = false;

	table = ports_table[port - rdma_start_port(ib_dev)];

	mutex_lock(&table->lock);
	write_lock_irq(&table->rwlock);

	for (ix = 0; ix < table->sz; ix++)
		if (table->data_vec[ix].attr.ndev == ndev)
			if (!del_gid(ib_dev, port, table, ix, false))
				deleted = true;

	write_unlock_irq(&table->rwlock);
	mutex_unlock(&table->lock);

	if (deleted)
		dispatch_gid_change_event(ib_dev, port);

	return 0;
}

static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
			      union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;

	table = ports_table[port - rdma_start_port(ib_dev)];

	if (index < 0 || index >= table->sz)
		return -EINVAL;

	if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID)
		return -EAGAIN;

	memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
	if (attr) {
		memcpy(attr, &table->data_vec[index].attr, sizeof(*attr));
		if (attr->ndev)
			dev_hold(attr->ndev);
	}

	return 0;
}

static int _ib_cache_gid_table_find(struct ib_device *ib_dev,
				    const union ib_gid *gid,
				    const struct ib_gid_attr *val,
				    unsigned long mask,
				    u8 *port, u16 *index)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	u8 p;
	int local_index;
	unsigned long flags;

	for (p = 0; p < ib_dev->phys_port_cnt; p++) {
		table = ports_table[p];
		read_lock_irqsave(&table->rwlock, flags);
		local_index = find_gid(table, gid, val, false, mask, NULL);
		if (local_index >= 0) {
			if (index)
				*index = local_index;
			if (port)
				*port = p + rdma_start_port(ib_dev);
			read_unlock_irqrestore(&table->rwlock, flags);
			return 0;
		}
		read_unlock_irqrestore(&table->rwlock, flags);
	}

	return -ENOENT;
}

static int ib_cache_gid_find(struct ib_device *ib_dev,
			     const union ib_gid *gid,
			     enum ib_gid_type gid_type,
			     struct net_device *ndev, u8 *port,
			     u16 *index)
{
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE;
	struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;

	return _ib_cache_gid_table_find(ib_dev, gid, &gid_attr_val,
					mask, port, index);
}

int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
			       const union ib_gid *gid,
			       enum ib_gid_type gid_type,
			       u8 port, struct net_device *ndev,
			       u16 *index)
{
	int local_index;
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE;
	struct ib_gid_attr val = {.ndev = ndev, .gid_type = gid_type};
	unsigned long flags;

	if (port < rdma_start_port(ib_dev) ||
	    port > rdma_end_port(ib_dev))
		return -ENOENT;

	table = ports_table[port - rdma_start_port(ib_dev)];

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;

	read_lock_irqsave(&table->rwlock, flags);
	local_index = find_gid(table, gid, &val, false, mask, NULL);
	if (local_index >= 0) {
		if (index)
			*index = local_index;
		read_unlock_irqrestore(&table->rwlock, flags);
		return 0;
	}

	read_unlock_irqrestore(&table->rwlock, flags);
	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_cached_gid_by_port);

/**
 * ib_find_gid_by_filter - Returns the GID table index where a specified
 * GID value occurs
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @port_num: The port number of the device where the GID value should be
 *   searched.
 * @filter: The filter function is executed on any matching GID in the table.
 *   If the filter function returns true, the corresponding index is returned,
 *   otherwise, we continue searching the GID table. It's guaranteed that
 *   while filter is executed, ndev field is valid and the structure won't
 *   change. filter is executed in an atomic context. filter must not be NULL.
 * @index: The index into the cached GID table where the GID was found. This
 *   parameter may be NULL.
 *
 * ib_cache_gid_find_by_filter() searches for the specified GID value
 * for which the filter function returns true in the port's GID table.
 * This function is only supported on RoCE ports.
 *
 */
static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
				       const union ib_gid *gid,
				       u8 port,
				       bool (*filter)(const union ib_gid *,
						      const struct ib_gid_attr *,
						      void *),
				       void *context,
				       u16 *index)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	unsigned int i;
	unsigned long flags;
	bool found = false;

	if (!ports_table)
		return -EOPNOTSUPP;

	if (port < rdma_start_port(ib_dev) ||
	    port > rdma_end_port(ib_dev) ||
	    !rdma_protocol_roce(ib_dev, port))
		return -EPROTONOSUPPORT;

	table = ports_table[port - rdma_start_port(ib_dev)];

	read_lock_irqsave(&table->rwlock, flags);
	for (i = 0; i < table->sz; i++) {
		struct ib_gid_attr attr;

		if (table->data_vec[i].props & GID_TABLE_ENTRY_INVALID)
			goto next;

		if (memcmp(gid, &table->data_vec[i].gid, sizeof(*gid)))
			goto next;

		memcpy(&attr, &table->data_vec[i].attr, sizeof(attr));

		if (filter(gid, &attr, context))
			found = true;

next:
		if (found)
			break;
	}
	read_unlock_irqrestore(&table->rwlock, flags);

	if (!found)
		return -ENOENT;

	if (index)
		*index = i;
	return 0;
}

static struct ib_gid_table *alloc_gid_table(int sz)
{
	struct ib_gid_table *table =
		kzalloc(sizeof(struct ib_gid_table), GFP_KERNEL);

	if (!table)
		return NULL;

	table->data_vec = kcalloc(sz, sizeof(*table->data_vec), GFP_KERNEL);
	if (!table->data_vec)
		goto err_free_table;

	mutex_init(&table->lock);

	table->sz = sz;
	rwlock_init(&table->rwlock);

	return table;

err_free_table:
	kfree(table);
	return NULL;
}

static void release_gid_table(struct ib_gid_table *table)
{
	if (table) {
		kfree(table->data_vec);
		kfree(table);
	}
}

static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
				   struct ib_gid_table *table)
{
	int i;
	bool deleted = false;

	if (!table)
		return;

	write_lock_irq(&table->rwlock);
	for (i = 0; i < table->sz; ++i) {
		if (memcmp(&table->data_vec[i].gid, &zgid,
			   sizeof(table->data_vec[i].gid)))
			if (!del_gid(ib_dev, port, table, i,
				     table->data_vec[i].props &
				     GID_TABLE_ENTRY_DEFAULT))
				deleted = true;
	}
	write_unlock_irq(&table->rwlock);

	if (deleted)
		dispatch_gid_change_event(ib_dev, port);
}

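/**
 * ib_cache_gid_set_default_gid - Set or clear a port's default GID entries
 * @ib_dev: The device to modify.
 * @port: The port number of the device.
 * @ndev: The netdevice the default GIDs are derived from.
 * @gid_type_mask: Bitmask of GID types to operate on.
 * @mode: IB_CACHE_GID_DEFAULT_MODE_SET writes the link-local GID derived
 *   from @ndev into the reserved default entry for each GID type in
 *   @gid_type_mask; any other mode (e.g. the delete mode) only clears it.
 */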
void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
				  struct net_device *ndev,
				  unsigned long gid_type_mask,
				  enum ib_cache_gid_default_mode mode)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	union ib_gid gid;
	struct ib_gid_attr gid_attr;
	struct ib_gid_attr zattr_type = zattr;
	struct ib_gid_table *table;
	unsigned int gid_type;

	table = ports_table[port - rdma_start_port(ib_dev)];

	make_default_gid(ndev, &gid);
	memset(&gid_attr, 0, sizeof(gid_attr));
	gid_attr.ndev = ndev;

	for (gid_type = 0; gid_type < IB_GID_TYPE_SIZE; ++gid_type) {
		int ix;
		union ib_gid current_gid;
		struct ib_gid_attr current_gid_attr = {};

		if (1UL << gid_type & ~gid_type_mask)
			continue;

		gid_attr.gid_type = gid_type;

		mutex_lock(&table->lock);
		write_lock_irq(&table->rwlock);
		ix = find_gid(table, NULL, &gid_attr, true,
			      GID_ATTR_FIND_MASK_GID_TYPE |
			      GID_ATTR_FIND_MASK_DEFAULT,
			      NULL);

		/* Couldn't find the default GID location */
		WARN_ON(ix < 0);

		zattr_type.gid_type = gid_type;

		if (!__ib_cache_gid_get(ib_dev, port, ix,
					&current_gid, &current_gid_attr) &&
		    mode == IB_CACHE_GID_DEFAULT_MODE_SET &&
		    !memcmp(&gid, &current_gid, sizeof(gid)) &&
		    !memcmp(&gid_attr, &current_gid_attr, sizeof(gid_attr)))
			goto release;

		if (memcmp(&current_gid, &zgid, sizeof(current_gid)) ||
		    memcmp(&current_gid_attr, &zattr_type,
			   sizeof(current_gid_attr))) {
			if (del_gid(ib_dev, port, table, ix, true)) {
				pr_warn("ib_cache_gid: can't delete index %d for default gid %pI6\n",
					ix, gid.raw);
				goto release;
			} else {
				dispatch_gid_change_event(ib_dev, port);
			}
		}

		if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) {
			if (add_gid(ib_dev, port, table, ix, &gid, &gid_attr, true))
				pr_warn("ib_cache_gid: unable to add default gid %pI6\n",
					gid.raw);
			else
				dispatch_gid_change_event(ib_dev, port);
		}

release:
		if (current_gid_attr.ndev)
			dev_put(current_gid_attr.ndev);
		write_unlock_irq(&table->rwlock);
		mutex_unlock(&table->lock);
	}
}

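/* Reserve the first entries of a port's GID table for default GIDs: one
 * entry per RoCE GID type the device supports, each pre-marked
 * GID_TABLE_ENTRY_DEFAULT with its gid_type filled in so that find_gid()
 * can locate it later. Always returns 0.
 */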
static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
				     struct ib_gid_table *table)
{
	unsigned int i;
	unsigned long roce_gid_type_mask;
	unsigned int num_default_gids;
	unsigned int current_gid = 0;

	roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
	num_default_gids = hweight_long(roce_gid_type_mask);
	for (i = 0; i < num_default_gids && i < table->sz; i++) {
		struct ib_gid_table_entry *entry =
			&table->data_vec[i];

		entry->props |= GID_TABLE_ENTRY_DEFAULT;
		current_gid = find_next_bit(&roce_gid_type_mask,
					    BITS_PER_LONG,
					    current_gid);
		entry->attr.gid_type = current_gid++;
	}

	return 0;
}

static int _gid_table_setup_one(struct ib_device *ib_dev)
{
	u8 port;
	struct ib_gid_table **table;
	int err = 0;

	table = kcalloc(ib_dev->phys_port_cnt, sizeof(*table), GFP_KERNEL);

	if (!table) {
		pr_warn("failed to allocate ib gid cache for %s\n",
			ib_dev->name);
		return -ENOMEM;
	}

	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		u8 rdma_port = port + rdma_start_port(ib_dev);

		table[port] =
			alloc_gid_table(
				ib_dev->port_immutable[rdma_port].gid_tbl_len);
		if (!table[port]) {
			err = -ENOMEM;
			goto rollback_table_setup;
		}

		err = gid_table_reserve_default(ib_dev,
						port + rdma_start_port(ib_dev),
						table[port]);
		if (err)
			goto rollback_table_setup;
	}

	ib_dev->cache.gid_cache = table;
	return 0;

rollback_table_setup:
	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
				       table[port]);
		release_gid_table(table[port]);
	}

	kfree(table);
	return err;
}

static void gid_table_release_one(struct ib_device *ib_dev)
{
	struct ib_gid_table **table = ib_dev->cache.gid_cache;
	u8 port;

	if (!table)
		return;

	for (port = 0; port < ib_dev->phys_port_cnt; port++)
		release_gid_table(table[port]);

	kfree(table);
	ib_dev->cache.gid_cache = NULL;
}

static void gid_table_cleanup_one(struct ib_device *ib_dev)
{
	struct ib_gid_table **table = ib_dev->cache.gid_cache;
	u8 port;

	if (!table)
		return;

	for (port = 0; port < ib_dev->phys_port_cnt; port++)
		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
				       table[port]);
}

static int gid_table_setup_one(struct ib_device *ib_dev)
{
	int err;

	err = _gid_table_setup_one(ib_dev);

	if (err)
		return err;

	err = roce_rescan_device(ib_dev);

	if (err) {
		gid_table_cleanup_one(ib_dev);
		gid_table_release_one(ib_dev);
	}

	return err;
}

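/**
 * ib_get_cached_gid - Read a GID from the local cache instead of
 *   querying the device
 * @device: The device to query.
 * @port_num: The port number of the device.
 * @index: The index into the port's GID table.
 * @gid: The GID value found at that index.
 * @gid_attr: The GID attributes found at that index; may be NULL. If the
 *   entry holds a netdev, a reference is taken that the caller must drop
 *   with dev_put().
 *
 * Returns 0 on success, -EINVAL for a bad port or index, or -EAGAIN if
 * the entry is currently being modified.
 */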
int ib_get_cached_gid(struct ib_device *device,
		      u8 port_num,
		      int index,
		      union ib_gid *gid,
		      struct ib_gid_attr *gid_attr)
{
	int res;
	unsigned long flags;
	struct ib_gid_table **ports_table = device->cache.gid_cache;
	struct ib_gid_table *table;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	table = ports_table[port_num - rdma_start_port(device)];

	read_lock_irqsave(&table->rwlock, flags);
	res = __ib_cache_gid_get(device, port_num, index, gid, gid_attr);
	read_unlock_irqrestore(&table->rwlock, flags);

	return res;
}
EXPORT_SYMBOL(ib_get_cached_gid);

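/**
 * ib_find_cached_gid - Find the port and index at which a GID value
 *   occurs, using the cache
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @gid_type: The GID type the entry must have.
 * @ndev: If non-NULL, the entry must also be bound to this netdevice.
 * @port_num: The port number at which the GID was found; may be NULL.
 * @index: The index into that port's GID table; may be NULL.
 *
 * Returns 0 if the GID was found, -ENOENT otherwise.
 */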
int ib_find_cached_gid(struct ib_device *device,
		       const union ib_gid *gid,
		       enum ib_gid_type gid_type,
		       struct net_device *ndev,
		       u8 *port_num,
		       u16 *index)
{
	return ib_cache_gid_find(device, gid, gid_type, ndev, port_num, index);
}
EXPORT_SYMBOL(ib_find_cached_gid);

int ib_find_gid_by_filter(struct ib_device *device,
			  const union ib_gid *gid,
			  u8 port_num,
			  bool (*filter)(const union ib_gid *gid,
					 const struct ib_gid_attr *,
					 void *),
			  void *context, u16 *index)
{
	/* Only the RoCE GID table supports a filter function */
	if (!rdma_cap_roce_gid_table(device, port_num) && filter)
		return -EPROTONOSUPPORT;

	return ib_cache_gid_find_by_filter(device, gid,
					   port_num, filter,
					   context, index);
}
EXPORT_SYMBOL(ib_find_gid_by_filter);

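/**
 * ib_get_cached_pkey - Read a P_Key from the local cache instead of
 *   querying the device
 * @device: The device to query.
 * @port_num: The port number of the device.
 * @index: The index into the port's P_Key table.
 * @pkey: The P_Key value found at that index.
 *
 * Returns 0 on success or -EINVAL for a bad port number or index.
 */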
int ib_get_cached_pkey(struct ib_device *device,
		       u8 port_num,
		       int index,
		       u16 *pkey)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int ret = 0;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

	if (index < 0 || index >= cache->table_len)
		ret = -EINVAL;
	else
		*pkey = cache->table[index];

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);

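/* The search below compares only the low 15 bits of a P_Key; bit 15 is
 * the membership bit (1 = full member, 0 = limited member). A full-member
 * entry is preferred, with a limited-member match returned only as a
 * fallback. For example, looking up pkey 0x0001 returns the index of a
 * cached 0x8001 (full member) in preference to a cached 0x0001.
 */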
int ib_find_cached_pkey(struct ib_device *device,
			u8 port_num,
			u16 pkey,
			u16 *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;
	int partial_ix = -1;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
			if (cache->table[i] & 0x8000) {
				*index = i;
				ret = 0;
				break;
			} else
				partial_ix = i;
		}

	if (ret && partial_ix >= 0) {
		*index = partial_ix;
		ret = 0;
	}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);

int ib_find_exact_cached_pkey(struct ib_device *device,
			      u8 port_num,
			      u16 pkey,
			      u16 *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if (cache->table[i] == pkey) {
			*index = i;
			ret = 0;
			break;
		}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);

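/**
 * ib_get_cached_lmc - Read the cached LID mask count (LMC) of a port
 * @device: The device to query.
 * @port_num: The port number of the device.
 * @lmc: The LMC value of that port.
 *
 * Returns 0 on success or -EINVAL for a bad port number.
 */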
int ib_get_cached_lmc(struct ib_device *device,
		      u8 port_num,
		      u8 *lmc)
{
	unsigned long flags;
	int ret = 0;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);
	*lmc = device->cache.lmc_cache[port_num - rdma_start_port(device)];
	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);

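/* Rebuild the cached state of one port: query the port attributes,
 * re-read the whole P_Key table and, on ports without a RoCE GID table,
 * re-read the GID table into the cache via modify_gid(). The old P_Key
 * cache is swapped out under cache.lock and freed afterwards.
 */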
static void ib_cache_update(struct ib_device *device,
			    u8 port)
{
	struct ib_port_attr *tprops = NULL;
	struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache;
	struct ib_gid_cache {
		int table_len;
		union ib_gid table[0];
	} *gid_cache = NULL;
	int i;
	int ret;
	struct ib_gid_table *table;
	struct ib_gid_table **ports_table = device->cache.gid_cache;
	bool use_roce_gid_table =
		rdma_cap_roce_gid_table(device, port);

	if (port < rdma_start_port(device) || port > rdma_end_port(device))
		return;

	table = ports_table[port - rdma_start_port(device)];

	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
	if (!tprops)
		return;

	ret = ib_query_port(device, port, tprops);
	if (ret) {
		printk(KERN_WARNING "ib_query_port failed (%d) for %s\n",
		       ret, device->name);
		goto err;
	}

	pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
			     sizeof *pkey_cache->table, GFP_KERNEL);
	if (!pkey_cache)
		goto err;

	pkey_cache->table_len = tprops->pkey_tbl_len;

	if (!use_roce_gid_table) {
		gid_cache = kmalloc(sizeof(*gid_cache) + tprops->gid_tbl_len *
				    sizeof(*gid_cache->table), GFP_KERNEL);
		if (!gid_cache)
			goto err;

		gid_cache->table_len = tprops->gid_tbl_len;
	}

	for (i = 0; i < pkey_cache->table_len; ++i) {
		ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
		if (ret) {
			printk(KERN_WARNING "ib_query_pkey failed (%d) for %s (index %d)\n",
			       ret, device->name, i);
			goto err;
		}
	}

	if (!use_roce_gid_table) {
		for (i = 0; i < gid_cache->table_len; ++i) {
			ret = ib_query_gid(device, port, i,
					   gid_cache->table + i, NULL);
			if (ret) {
				printk(KERN_WARNING "ib_query_gid failed (%d) for %s (index %d)\n",
				       ret, device->name, i);
				goto err;
			}
		}
	}

	write_lock_irq(&device->cache.lock);

	old_pkey_cache = device->cache.pkey_cache[port - rdma_start_port(device)];

	device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache;
	if (!use_roce_gid_table) {
		write_lock(&table->rwlock);
		for (i = 0; i < gid_cache->table_len; i++) {
			modify_gid(device, port, table, i, gid_cache->table + i,
				   &zattr, false);
		}
		write_unlock(&table->rwlock);
	}

	device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc;

	write_unlock_irq(&device->cache.lock);

	kfree(gid_cache);
	kfree(old_pkey_cache);
	kfree(tprops);
	return;

err:
	kfree(pkey_cache);
	kfree(gid_cache);
	kfree(tprops);
}

static void ib_cache_task(struct work_struct *_work)
{
	struct ib_update_work *work =
		container_of(_work, struct ib_update_work, work);

	ib_cache_update(work->device, work->port_num);
	kfree(work);
}

static void ib_cache_event(struct ib_event_handler *handler,
			   struct ib_event *event)
{
	struct ib_update_work *work;

	if (event->event == IB_EVENT_PORT_ERR    ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE  ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE   ||
	    event->event == IB_EVENT_CLIENT_REREGISTER ||
	    event->event == IB_EVENT_GID_CHANGE) {
		work = kmalloc(sizeof *work, GFP_ATOMIC);
		if (work) {
			INIT_WORK(&work->work, ib_cache_task);
			work->device   = event->device;
			work->port_num = event->element.port_num;
			queue_work(ib_wq, &work->work);
		}
	}
}

int ib_cache_setup_one(struct ib_device *device)
{
	int p;
	int err;

	rwlock_init(&device->cache.lock);

	device->cache.pkey_cache =
		kzalloc(sizeof *device->cache.pkey_cache *
			(rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
	device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
					  (rdma_end_port(device) -
					   rdma_start_port(device) + 1),
					  GFP_KERNEL);
	if (!device->cache.pkey_cache ||
	    !device->cache.lmc_cache) {
		printk(KERN_WARNING "Couldn't allocate cache for %s\n",
		       device->name);
		return -ENOMEM;
	}

	err = gid_table_setup_one(device);
	if (err)
		/* Allocated memory will be cleaned in the release function */
		return err;

	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
		ib_cache_update(device, p + rdma_start_port(device));

	INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
			      device, ib_cache_event);
	err = ib_register_event_handler(&device->cache.event_handler);
	if (err)
		goto err;

	return 0;

err:
	gid_table_cleanup_one(device);
	return err;
}

void ib_cache_release_one(struct ib_device *device)
{
	int p;

	/*
	 * The release function frees all the cache elements.
	 * This function should be called as part of freeing
	 * all the device's resources when the cache could no
	 * longer be accessed.
	 */
	if (device->cache.pkey_cache)
		for (p = 0;
		     p <= rdma_end_port(device) - rdma_start_port(device); ++p)
			kfree(device->cache.pkey_cache[p]);

	gid_table_release_one(device);
	kfree(device->cache.pkey_cache);
	kfree(device->cache.lmc_cache);
}

void ib_cache_cleanup_one(struct ib_device *device)
{
	/* The cleanup function unregisters the event handler,
	 * waits for all in-progress workqueue elements and cleans
	 * up the GID cache. This function should be called after
	 * the device was removed from the devices list and all
	 * clients were removed, so the cache exists but is
	 * non-functional and shouldn't be updated anymore.
	 */
	ib_unregister_event_handler(&device->cache.event_handler);
	flush_workqueue(ib_wq);
	gid_table_cleanup_one(device);
}

void __init ib_cache_setup(void)
{
	roce_gid_mgmt_init();
}

void __exit ib_cache_cleanup(void)
{
	roce_gid_mgmt_cleanup();
}