/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include <rdma/ib_cache.h>

#include "core_priv.h"

struct ib_pkey_cache {
	int             table_len;
	u16             table[0];
};

struct ib_update_work {
	struct work_struct work;
	struct ib_device  *device;
	u8                 port_num;
};

union ib_gid zgid;
EXPORT_SYMBOL(zgid);

static const struct ib_gid_attr zattr;

enum gid_attr_find_mask {
	GID_ATTR_FIND_MASK_GID		= 1UL << 0,
	GID_ATTR_FIND_MASK_NETDEV	= 1UL << 1,
	GID_ATTR_FIND_MASK_DEFAULT	= 1UL << 2,
	GID_ATTR_FIND_MASK_GID_TYPE	= 1UL << 3,
};

enum gid_table_entry_props {
	GID_TABLE_ENTRY_INVALID		= 1UL << 0,
	GID_TABLE_ENTRY_DEFAULT		= 1UL << 1,
};

enum gid_table_write_action {
	GID_TABLE_WRITE_ACTION_ADD,
	GID_TABLE_WRITE_ACTION_DEL,
	/* MODIFY only updates the GID table. Currently only used by
	 * ib_cache_update.
	 */
	GID_TABLE_WRITE_ACTION_MODIFY
};

struct ib_gid_table_entry {
	unsigned long	    props;
	union ib_gid	    gid;
	struct ib_gid_attr  attr;
	void		   *context;
};

struct ib_gid_table {
	int		     sz;
	/* In RoCE, adding a GID to the table requires:
	 * (a) Find whether this GID already exists.
	 * (b) Find a free slot.
	 * (c) Write the new GID.
	 *
	 * Deletion requires a different set of operations:
	 * (a) Find the GID.
	 * (b) Delete it.
	 *
	 * Add/delete should be carried out atomically.
	 * This is done by locking this mutex from multiple
	 * writers. We don't need this lock for IB, as the MAD
	 * layer replaces all entries. All data_vec entries
	 * are locked by this lock.
	 */
	struct mutex	     lock;
	/* This lock protects the table entries from being
	 * read and written simultaneously.
	 */
	rwlock_t	     rwlock;
	struct ib_gid_table_entry *data_vec;
};

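/* Notify listeners that the GID table of @port changed. Only RoCE ports
 * dispatch this event; on IB ports the GID table is managed by the SM.
 */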
static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
{
	if (rdma_cap_roce_gid_table(ib_dev, port)) {
		struct ib_event event;

		event.device		= ib_dev;
		event.element.port_num	= port;
		event.event		= IB_EVENT_GID_CHANGE;

		ib_dispatch_event(&event);
	}
}

static const char * const gid_type_str[] = {
	[IB_GID_TYPE_IB]		= "IB/RoCE v1",
	[IB_GID_TYPE_ROCE_UDP_ENCAP]	= "RoCE v2",
};

const char *ib_cache_gid_type_str(enum ib_gid_type gid_type)
{
	if (gid_type < ARRAY_SIZE(gid_type_str) && gid_type_str[gid_type])
		return gid_type_str[gid_type];

	return "Invalid GID type";
}
EXPORT_SYMBOL(ib_cache_gid_type_str);

int ib_cache_gid_parse_type_str(const char *buf)
{
	unsigned int i;
	size_t len;
	int err = -EINVAL;

	len = strlen(buf);
	if (len == 0)
		return -EINVAL;

	if (buf[len - 1] == '\n')
		len--;

	for (i = 0; i < ARRAY_SIZE(gid_type_str); ++i)
		if (gid_type_str[i] && !strncmp(buf, gid_type_str[i], len) &&
		    len == strlen(gid_type_str[i])) {
			err = i;
			break;
		}

	return err;
}
EXPORT_SYMBOL(ib_cache_gid_parse_type_str);

/* This function expects that rwlock will be write locked in all
 * scenarios and that lock will be locked in sleep-able (RoCE)
 * scenarios.
 */
static int write_gid(struct ib_device *ib_dev, u8 port,
		     struct ib_gid_table *table, int ix,
		     const union ib_gid *gid,
		     const struct ib_gid_attr *attr,
		     enum gid_table_write_action action,
		     bool  default_gid)
	__releases(&table->rwlock) __acquires(&table->rwlock)
{
	int ret = 0;
	struct net_device *old_net_dev;
	enum ib_gid_type old_gid_type;

	/* In the rdma_cap_roce_gid_table() case, this function must be
	 * protected by a sleepable lock.
	 */

	if (rdma_cap_roce_gid_table(ib_dev, port)) {
		table->data_vec[ix].props |= GID_TABLE_ENTRY_INVALID;
		write_unlock_irq(&table->rwlock);
		/* GID_TABLE_WRITE_ACTION_MODIFY currently isn't supported by
		 * RoCE providers and thus only updates the cache.
		 */
		if (action == GID_TABLE_WRITE_ACTION_ADD)
			ret = ib_dev->add_gid(ib_dev, port, ix, gid, attr,
					      &table->data_vec[ix].context);
		else if (action == GID_TABLE_WRITE_ACTION_DEL)
			ret = ib_dev->del_gid(ib_dev, port, ix,
					      &table->data_vec[ix].context);
		write_lock_irq(&table->rwlock);
	}

	old_net_dev = table->data_vec[ix].attr.ndev;
	old_gid_type = table->data_vec[ix].attr.gid_type;
	if (old_net_dev && old_net_dev != attr->ndev)
		dev_put(old_net_dev);
	/* if modify_gid failed, just delete the old gid */
	if (ret || action == GID_TABLE_WRITE_ACTION_DEL) {
		gid = &zgid;
		attr = &zattr;
		table->data_vec[ix].context = NULL;
	}

	memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid));
	memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr));
	if (default_gid) {
		table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
		if (action == GID_TABLE_WRITE_ACTION_DEL)
			table->data_vec[ix].attr.gid_type = old_gid_type;
	}
	if (table->data_vec[ix].attr.ndev &&
	    table->data_vec[ix].attr.ndev != old_net_dev)
		dev_hold(table->data_vec[ix].attr.ndev);

	table->data_vec[ix].props &= ~GID_TABLE_ENTRY_INVALID;

	return ret;
}

static int add_gid(struct ib_device *ib_dev, u8 port,
		   struct ib_gid_table *table, int ix,
		   const union ib_gid *gid,
		   const struct ib_gid_attr *attr,
		   bool  default_gid)
{
	return write_gid(ib_dev, port, table, ix, gid, attr,
			 GID_TABLE_WRITE_ACTION_ADD, default_gid);
}

static int modify_gid(struct ib_device *ib_dev, u8 port,
		      struct ib_gid_table *table, int ix,
		      const union ib_gid *gid,
		      const struct ib_gid_attr *attr,
		      bool  default_gid)
{
	return write_gid(ib_dev, port, table, ix, gid, attr,
			 GID_TABLE_WRITE_ACTION_MODIFY, default_gid);
}

static int del_gid(struct ib_device *ib_dev, u8 port,
		   struct ib_gid_table *table, int ix,
		   bool  default_gid)
{
	return write_gid(ib_dev, port, table, ix, &zgid, &zattr,
			 GID_TABLE_WRITE_ACTION_DEL, default_gid);
}

/* The caller must hold rwlock at least read-locked. Returns the index of
 * the first entry matching @mask and, via @pempty, the index of the first
 * never-used slot, if one was requested.
 */
static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
		    const struct ib_gid_attr *val, bool default_gid,
		    unsigned long mask, int *pempty)
{
	int i = 0;
	int found = -1;
	int empty = pempty ? -1 : 0;

	while (i < table->sz && (found < 0 || empty < 0)) {
		struct ib_gid_table_entry *data = &table->data_vec[i];
		struct ib_gid_attr *attr = &data->attr;
		int curr_index = i;

		i++;

		if (data->props & GID_TABLE_ENTRY_INVALID)
			continue;

		if (empty < 0)
			if (!memcmp(&data->gid, &zgid, sizeof(*gid)) &&
			    !memcmp(attr, &zattr, sizeof(*attr)) &&
			    !data->props)
				empty = curr_index;

		if (found >= 0)
			continue;

		if (mask & GID_ATTR_FIND_MASK_GID_TYPE &&
		    attr->gid_type != val->gid_type)
			continue;

		if (mask & GID_ATTR_FIND_MASK_GID &&
		    memcmp(gid, &data->gid, sizeof(*gid)))
			continue;

		if (mask & GID_ATTR_FIND_MASK_NETDEV &&
		    attr->ndev != val->ndev)
			continue;

		if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
		    !!(data->props & GID_TABLE_ENTRY_DEFAULT) !=
		    default_gid)
			continue;

		found = curr_index;
	}

	if (pempty)
		*pempty = empty;

	return found;
}

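/* Build the default RoCE GID for @dev: the IPv6 link-local prefix
 * (fe80::/64) followed by an EUI-64 interface ID derived from the
 * netdevice's MAC address.
 */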
static void make_default_gid(struct net_device *dev, union ib_gid *gid)
{
	gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	addrconf_ifid_eui48(&gid->raw[8], dev);
}

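/* Add an entry for @gid + @attr to @port's GID table. A zero GID and the
 * netdevice's default GID are rejected; if a matching entry already
 * exists, the call succeeds without writing a second one.
 */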
int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	int ix;
	int ret = 0;
	struct net_device *idev;
	int empty;

	table = ports_table[port - rdma_start_port(ib_dev)];

	if (!memcmp(gid, &zgid, sizeof(*gid)))
		return -EINVAL;

	if (ib_dev->get_netdev) {
		idev = ib_dev->get_netdev(ib_dev, port);
		if (idev && attr->ndev != idev) {
			union ib_gid default_gid;

			/* Adding default GIDs is not permitted */
			make_default_gid(idev, &default_gid);
			if (!memcmp(gid, &default_gid, sizeof(*gid))) {
				dev_put(idev);
				return -EPERM;
			}
		}
		if (idev)
			dev_put(idev);
	}

	mutex_lock(&table->lock);
	write_lock_irq(&table->rwlock);

	ix = find_gid(table, gid, attr, false, GID_ATTR_FIND_MASK_GID |
		      GID_ATTR_FIND_MASK_GID_TYPE |
		      GID_ATTR_FIND_MASK_NETDEV, &empty);
	if (ix >= 0)
		goto out_unlock;

	if (empty < 0) {
		ret = -ENOSPC;
		goto out_unlock;
	}

	ret = add_gid(ib_dev, port, table, empty, gid, attr, false);
	if (!ret)
		dispatch_gid_change_event(ib_dev, port);

out_unlock:
	write_unlock_irq(&table->rwlock);
	mutex_unlock(&table->lock);
	return ret;
}

int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	int ix;

	table = ports_table[port - rdma_start_port(ib_dev)];

	mutex_lock(&table->lock);
	write_lock_irq(&table->rwlock);

	ix = find_gid(table, gid, attr, false,
		      GID_ATTR_FIND_MASK_GID	  |
		      GID_ATTR_FIND_MASK_GID_TYPE |
		      GID_ATTR_FIND_MASK_NETDEV	  |
		      GID_ATTR_FIND_MASK_DEFAULT,
		      NULL);
	if (ix < 0)
		goto out_unlock;

	if (!del_gid(ib_dev, port, table, ix, false))
		dispatch_gid_change_event(ib_dev, port);

out_unlock:
	write_unlock_irq(&table->rwlock);
	mutex_unlock(&table->lock);
	return 0;
}

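/* Remove every GID whose attributes reference @ndev, e.g. when the
 * netdevice goes away. A single GID-change event covers all deletions.
 */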
int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
				     struct net_device *ndev)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	int ix;
	bool deleted = false;

	table = ports_table[port - rdma_start_port(ib_dev)];

	mutex_lock(&table->lock);
	write_lock_irq(&table->rwlock);

	for (ix = 0; ix < table->sz; ix++)
		if (table->data_vec[ix].attr.ndev == ndev)
			if (!del_gid(ib_dev, port, table, ix, false))
				deleted = true;

	write_unlock_irq(&table->rwlock);
	mutex_unlock(&table->lock);

	if (deleted)
		dispatch_gid_change_event(ib_dev, port);

	return 0;
}

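/* Copy entry @index into @gid/@attr. The caller must hold rwlock and,
 * when @attr is filled in, owns the netdev reference taken here and must
 * drop it with dev_put() when done.
 */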
static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
			      union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;

	table = ports_table[port - rdma_start_port(ib_dev)];

	if (index < 0 || index >= table->sz)
		return -EINVAL;

	if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID)
		return -EAGAIN;

	memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
	if (attr) {
		memcpy(attr, &table->data_vec[index].attr, sizeof(*attr));
		if (attr->ndev)
			dev_hold(attr->ndev);
	}

	return 0;
}

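/* Scan the GID tables of all ports for an entry matching @gid and @val
 * under @mask, reporting the first hit's port and index.
 */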
static int _ib_cache_gid_table_find(struct ib_device *ib_dev,
				    const union ib_gid *gid,
				    const struct ib_gid_attr *val,
				    unsigned long mask,
				    u8 *port, u16 *index)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	u8 p;
	int local_index;
	unsigned long flags;

	for (p = 0; p < ib_dev->phys_port_cnt; p++) {
		table = ports_table[p];
		read_lock_irqsave(&table->rwlock, flags);
		local_index = find_gid(table, gid, val, false, mask, NULL);
		if (local_index >= 0) {
			if (index)
				*index = local_index;
			if (port)
				*port = p + rdma_start_port(ib_dev);
			read_unlock_irqrestore(&table->rwlock, flags);
			return 0;
		}
		read_unlock_irqrestore(&table->rwlock, flags);
	}

	return -ENOENT;
}

static int ib_cache_gid_find(struct ib_device *ib_dev,
			     const union ib_gid *gid,
			     enum ib_gid_type gid_type,
			     struct net_device *ndev, u8 *port,
			     u16 *index)
{
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE;
	struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;

	return _ib_cache_gid_table_find(ib_dev, gid, &gid_attr_val,
					mask, port, index);
}

int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
			       const union ib_gid *gid,
			       enum ib_gid_type gid_type,
			       u8 port, struct net_device *ndev,
			       u16 *index)
{
	int local_index;
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE;
	struct ib_gid_attr val = {.ndev = ndev, .gid_type = gid_type};
	unsigned long flags;

	if (port < rdma_start_port(ib_dev) ||
	    port > rdma_end_port(ib_dev))
		return -ENOENT;

	table = ports_table[port - rdma_start_port(ib_dev)];

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;

	read_lock_irqsave(&table->rwlock, flags);
	local_index = find_gid(table, gid, &val, false, mask, NULL);
	if (local_index >= 0) {
		if (index)
			*index = local_index;
		read_unlock_irqrestore(&table->rwlock, flags);
		return 0;
	}

	read_unlock_irqrestore(&table->rwlock, flags);
	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_cached_gid_by_port);

/**
 * ib_cache_gid_find_by_filter - Returns the GID table index where a
 * specified GID value occurs
 * @ib_dev: The device to query.
 * @gid: The GID value to search for.
 * @port: The port number of the device where the GID value could be
 *   searched.
 * @filter: The filter function is executed on any matching GID in the table.
 *   If the filter function returns true, the corresponding index is returned,
 *   otherwise, we continue searching the GID table. It's guaranteed that
 *   while filter is executed, ndev field is valid and the structure won't
 *   change. filter is executed in an atomic context. filter must not be NULL.
 * @context: Private data passed through to @filter.
 * @index: The index into the cached GID table where the GID was found. This
 *   parameter may be NULL.
 *
 * ib_cache_gid_find_by_filter() searches for the specified GID value
 * for which the filter function returns true in the port's GID table.
 * This function is only supported on RoCE ports.
 */
static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
				       const union ib_gid *gid,
				       u8 port,
				       bool (*filter)(const union ib_gid *,
						      const struct ib_gid_attr *,
						      void *),
				       void *context,
				       u16 *index)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	unsigned int i;
	unsigned long flags;
	bool found = false;

	if (!ports_table)
		return -EOPNOTSUPP;

	if (port < rdma_start_port(ib_dev) ||
	    port > rdma_end_port(ib_dev) ||
	    !rdma_protocol_roce(ib_dev, port))
		return -EPROTONOSUPPORT;

	table = ports_table[port - rdma_start_port(ib_dev)];

	read_lock_irqsave(&table->rwlock, flags);
	for (i = 0; i < table->sz; i++) {
		struct ib_gid_attr attr;

		if (table->data_vec[i].props & GID_TABLE_ENTRY_INVALID)
			goto next;

		if (memcmp(gid, &table->data_vec[i].gid, sizeof(*gid)))
			goto next;

		memcpy(&attr, &table->data_vec[i].attr, sizeof(attr));

		if (filter(gid, &attr, context))
			found = true;

next:
		if (found)
			break;
	}
	read_unlock_irqrestore(&table->rwlock, flags);

	if (!found)
		return -ENOENT;

	if (index)
		*index = i;
	return 0;
}

static struct ib_gid_table *alloc_gid_table(int sz)
{
	struct ib_gid_table *table =
		kzalloc(sizeof(struct ib_gid_table), GFP_KERNEL);

	if (!table)
		return NULL;

	table->data_vec = kcalloc(sz, sizeof(*table->data_vec), GFP_KERNEL);
	if (!table->data_vec)
		goto err_free_table;

	mutex_init(&table->lock);

	table->sz = sz;
	rwlock_init(&table->rwlock);

	return table;

err_free_table:
	kfree(table);
	return NULL;
}

static void release_gid_table(struct ib_gid_table *table)
{
	if (table) {
		kfree(table->data_vec);
		kfree(table);
	}
}

static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
				   struct ib_gid_table *table)
{
	int i;
	bool deleted = false;

	if (!table)
		return;

	write_lock_irq(&table->rwlock);
	for (i = 0; i < table->sz; ++i) {
		if (memcmp(&table->data_vec[i].gid, &zgid,
			   sizeof(table->data_vec[i].gid)))
			if (!del_gid(ib_dev, port, table, i,
				     table->data_vec[i].props &
				     GID_TABLE_ENTRY_DEFAULT))
				deleted = true;
	}
	write_unlock_irq(&table->rwlock);

	if (deleted)
		dispatch_gid_change_event(ib_dev, port);
}

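/* Reconcile the default GID entry for each GID type enabled in
 * @gid_type_mask: a stale default entry is deleted, and in
 * IB_CACHE_GID_DEFAULT_MODE_SET mode the current default GID is
 * (re)installed in its reserved slot.
 */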
void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
				  struct net_device *ndev,
				  unsigned long gid_type_mask,
				  enum ib_cache_gid_default_mode mode)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	union ib_gid gid;
	struct ib_gid_attr gid_attr;
	struct ib_gid_attr zattr_type = zattr;
	struct ib_gid_table *table;
	unsigned int gid_type;

	table = ports_table[port - rdma_start_port(ib_dev)];

	make_default_gid(ndev, &gid);
	memset(&gid_attr, 0, sizeof(gid_attr));
	gid_attr.ndev = ndev;

	for (gid_type = 0; gid_type < IB_GID_TYPE_SIZE; ++gid_type) {
		int ix;
		union ib_gid current_gid;
		struct ib_gid_attr current_gid_attr = {};

		if (1UL << gid_type & ~gid_type_mask)
			continue;

		gid_attr.gid_type = gid_type;

		mutex_lock(&table->lock);
		write_lock_irq(&table->rwlock);
		ix = find_gid(table, NULL, &gid_attr, true,
			      GID_ATTR_FIND_MASK_GID_TYPE |
			      GID_ATTR_FIND_MASK_DEFAULT,
			      NULL);

		/* Couldn't find the default GID location */
		if (WARN_ON(ix < 0))
			goto release;

		zattr_type.gid_type = gid_type;

		if (!__ib_cache_gid_get(ib_dev, port, ix,
					&current_gid, &current_gid_attr) &&
		    mode == IB_CACHE_GID_DEFAULT_MODE_SET &&
		    !memcmp(&gid, &current_gid, sizeof(gid)) &&
		    !memcmp(&gid_attr, &current_gid_attr, sizeof(gid_attr)))
			goto release;

		if (memcmp(&current_gid, &zgid, sizeof(current_gid)) ||
		    memcmp(&current_gid_attr, &zattr_type,
			   sizeof(current_gid_attr))) {
			if (del_gid(ib_dev, port, table, ix, true)) {
				pr_warn("ib_cache_gid: can't delete index %d for default gid %pI6\n",
					ix, gid.raw);
				goto release;
			} else {
				dispatch_gid_change_event(ib_dev, port);
			}
		}

		if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) {
			if (add_gid(ib_dev, port, table, ix, &gid, &gid_attr, true))
				pr_warn("ib_cache_gid: unable to add default gid %pI6\n",
					gid.raw);
			else
				dispatch_gid_change_event(ib_dev, port);
		}

release:
		if (current_gid_attr.ndev)
			dev_put(current_gid_attr.ndev);
		write_unlock_irq(&table->rwlock);
		mutex_unlock(&table->lock);
	}
}

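/* Reserve the first entries of @table as default GID slots, one for each
 * RoCE GID type the port supports.
 */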
static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
				     struct ib_gid_table *table)
{
	unsigned int i;
	unsigned long roce_gid_type_mask;
	unsigned int num_default_gids;
	unsigned int current_gid = 0;

	roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
	num_default_gids = hweight_long(roce_gid_type_mask);
	for (i = 0; i < num_default_gids && i < table->sz; i++) {
		struct ib_gid_table_entry *entry =
			&table->data_vec[i];

		entry->props |= GID_TABLE_ENTRY_DEFAULT;
		current_gid = find_next_bit(&roce_gid_type_mask,
					    BITS_PER_LONG,
					    current_gid);
		entry->attr.gid_type = current_gid++;
	}

	return 0;
}

static int _gid_table_setup_one(struct ib_device *ib_dev)
{
	u8 port;
	struct ib_gid_table **table;
	int err = 0;

	table = kcalloc(ib_dev->phys_port_cnt, sizeof(*table), GFP_KERNEL);

	if (!table) {
		pr_warn("failed to allocate ib gid cache for %s\n",
			ib_dev->name);
		return -ENOMEM;
	}

	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		u8 rdma_port = port + rdma_start_port(ib_dev);

		table[port] =
			alloc_gid_table(
				ib_dev->port_immutable[rdma_port].gid_tbl_len);
		if (!table[port]) {
			err = -ENOMEM;
			goto rollback_table_setup;
		}

		err = gid_table_reserve_default(ib_dev,
						port + rdma_start_port(ib_dev),
						table[port]);
		if (err)
			goto rollback_table_setup;
	}

	ib_dev->cache.gid_cache = table;
	return 0;

rollback_table_setup:
	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
				       table[port]);
		release_gid_table(table[port]);
	}

	kfree(table);
	return err;
}

static void gid_table_release_one(struct ib_device *ib_dev)
{
	struct ib_gid_table **table = ib_dev->cache.gid_cache;
	u8 port;

	if (!table)
		return;

	for (port = 0; port < ib_dev->phys_port_cnt; port++)
		release_gid_table(table[port]);

	kfree(table);
	ib_dev->cache.gid_cache = NULL;
}

static void gid_table_cleanup_one(struct ib_device *ib_dev)
{
	struct ib_gid_table **table = ib_dev->cache.gid_cache;
	u8 port;

	if (!table)
		return;

	for (port = 0; port < ib_dev->phys_port_cnt; port++)
		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
				       table[port]);
}

static int gid_table_setup_one(struct ib_device *ib_dev)
{
	int err;

	err = _gid_table_setup_one(ib_dev);

	if (err)
		return err;

	err = roce_rescan_device(ib_dev);

	if (err) {
		gid_table_cleanup_one(ib_dev);
		gid_table_release_one(ib_dev);
	}

	return err;
}

int ib_get_cached_gid(struct ib_device *device,
		      u8                port_num,
		      int               index,
		      union ib_gid     *gid,
		      struct ib_gid_attr *gid_attr)
{
	int res;
	unsigned long flags;
	struct ib_gid_table **ports_table = device->cache.gid_cache;
	struct ib_gid_table *table;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	/* Only index the per-port table after port_num was validated. */
	table = ports_table[port_num - rdma_start_port(device)];

	read_lock_irqsave(&table->rwlock, flags);
	res = __ib_cache_gid_get(device, port_num, index, gid, gid_attr);
	read_unlock_irqrestore(&table->rwlock, flags);

	return res;
}
EXPORT_SYMBOL(ib_get_cached_gid);

int ib_find_cached_gid(struct ib_device *device,
		       const union ib_gid *gid,
		       enum ib_gid_type gid_type,
		       struct net_device *ndev,
		       u8               *port_num,
		       u16              *index)
{
	return ib_cache_gid_find(device, gid, gid_type, ndev, port_num, index);
}
EXPORT_SYMBOL(ib_find_cached_gid);

int ib_find_gid_by_filter(struct ib_device *device,
			  const union ib_gid *gid,
			  u8 port_num,
			  bool (*filter)(const union ib_gid *gid,
					 const struct ib_gid_attr *,
					 void *),
			  void *context, u16 *index)
{
	/* Only RoCE GID table supports filter function */
	if (!rdma_cap_roce_gid_table(device, port_num) && filter)
		return -EPROTONOSUPPORT;

	return ib_cache_gid_find_by_filter(device, gid,
					   port_num, filter,
					   context, index);
}
EXPORT_SYMBOL(ib_find_gid_by_filter);

int ib_get_cached_pkey(struct ib_device *device,
		       u8                port_num,
		       int               index,
		       u16              *pkey)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int ret = 0;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

	if (index < 0 || index >= cache->table_len)
		ret = -EINVAL;
	else
		*pkey = cache->table[index];

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);

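/* P_Keys compare on the low 15 bits; bit 15 distinguishes full from
 * limited membership. Prefer a full-membership match (0x8000 set) and
 * fall back to a limited-membership entry if that is all the table has.
 */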
int ib_find_cached_pkey(struct ib_device *device,
			u8                port_num,
			u16               pkey,
			u16              *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;
	int partial_ix = -1;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
			if (cache->table[i] & 0x8000) {
				*index = i;
				ret = 0;
				break;
			} else
				partial_ix = i;
		}

	if (ret && partial_ix >= 0) {
		*index = partial_ix;
		ret = 0;
	}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);

int ib_find_exact_cached_pkey(struct ib_device *device,
			      u8                port_num,
			      u16               pkey,
			      u16              *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if (cache->table[i] == pkey) {
			*index = i;
			ret = 0;
			break;
		}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);

int ib_get_cached_lmc(struct ib_device *device,
		      u8                port_num,
		      u8                *lmc)
{
	unsigned long flags;
	int ret = 0;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);
	*lmc = device->cache.lmc_cache[port_num - rdma_start_port(device)];
	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);

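/* Refresh the cached port attributes, P_Key table and, for ports without
 * a driver-managed RoCE GID table, the GID table. New tables are built
 * outside the lock and swapped in with cache.lock held for write.
 */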
static void ib_cache_update(struct ib_device *device,
			    u8                port)
{
	struct ib_port_attr       *tprops = NULL;
	struct ib_pkey_cache      *pkey_cache = NULL, *old_pkey_cache;
	struct ib_gid_cache {
		int             table_len;
		union ib_gid    table[0];
	}			  *gid_cache = NULL;
	int                        i;
	int                        ret;
	struct ib_gid_table	  *table;
	struct ib_gid_table	 **ports_table = device->cache.gid_cache;
	bool			   use_roce_gid_table =
					rdma_cap_roce_gid_table(device, port);

	if (port < rdma_start_port(device) || port > rdma_end_port(device))
		return;

	table = ports_table[port - rdma_start_port(device)];

	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
	if (!tprops)
		return;

	ret = ib_query_port(device, port, tprops);
	if (ret) {
		pr_warn("ib_query_port failed (%d) for %s\n",
			ret, device->name);
		goto err;
	}

	pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
			     sizeof *pkey_cache->table, GFP_KERNEL);
	if (!pkey_cache)
		goto err;

	pkey_cache->table_len = tprops->pkey_tbl_len;

	if (!use_roce_gid_table) {
		gid_cache = kmalloc(sizeof(*gid_cache) + tprops->gid_tbl_len *
				    sizeof(*gid_cache->table), GFP_KERNEL);
		if (!gid_cache)
			goto err;

		gid_cache->table_len = tprops->gid_tbl_len;
	}

	for (i = 0; i < pkey_cache->table_len; ++i) {
		ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
		if (ret) {
			pr_warn("ib_query_pkey failed (%d) for %s (index %d)\n",
				ret, device->name, i);
			goto err;
		}
	}

	if (!use_roce_gid_table) {
		for (i = 0; i < gid_cache->table_len; ++i) {
			ret = ib_query_gid(device, port, i,
					   gid_cache->table + i, NULL);
			if (ret) {
				pr_warn("ib_query_gid failed (%d) for %s (index %d)\n",
					ret, device->name, i);
				goto err;
			}
		}
	}

	write_lock_irq(&device->cache.lock);

	old_pkey_cache = device->cache.pkey_cache[port - rdma_start_port(device)];

	device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache;
	if (!use_roce_gid_table) {
		write_lock(&table->rwlock);
		for (i = 0; i < gid_cache->table_len; i++) {
			modify_gid(device, port, table, i, gid_cache->table + i,
				   &zattr, false);
		}
		write_unlock(&table->rwlock);
	}

	device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc;

	write_unlock_irq(&device->cache.lock);

	kfree(gid_cache);
	kfree(old_pkey_cache);
	kfree(tprops);
	return;

err:
	kfree(pkey_cache);
	kfree(gid_cache);
	kfree(tprops);
}

static void ib_cache_task(struct work_struct *_work)
{
	struct ib_update_work *work =
		container_of(_work, struct ib_update_work, work);

	ib_cache_update(work->device, work->port_num);
	kfree(work);
}

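/* Event handlers run in atomic context, so defer the cache refresh to
 * ib_wq for any event that can invalidate cached port state.
 */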
static void ib_cache_event(struct ib_event_handler *handler,
			   struct ib_event *event)
{
	struct ib_update_work *work;

	if (event->event == IB_EVENT_PORT_ERR    ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE  ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE   ||
	    event->event == IB_EVENT_CLIENT_REREGISTER ||
	    event->event == IB_EVENT_GID_CHANGE) {
		work = kmalloc(sizeof *work, GFP_ATOMIC);
		if (work) {
			INIT_WORK(&work->work, ib_cache_task);
			work->device   = event->device;
			work->port_num = event->element.port_num;
			queue_work(ib_wq, &work->work);
		}
	}
}

int ib_cache_setup_one(struct ib_device *device)
{
	int p;
	int err;

	rwlock_init(&device->cache.lock);

	device->cache.pkey_cache =
		kzalloc(sizeof *device->cache.pkey_cache *
			(rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
	device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
					  (rdma_end_port(device) -
					   rdma_start_port(device) + 1),
					  GFP_KERNEL);
	if (!device->cache.pkey_cache ||
	    !device->cache.lmc_cache) {
		pr_warn("Couldn't allocate cache for %s\n", device->name);
		return -ENOMEM;
	}

	err = gid_table_setup_one(device);
	if (err)
		/* Allocated memory will be cleaned in the release function */
		return err;

	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
		ib_cache_update(device, p + rdma_start_port(device));

	INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
			      device, ib_cache_event);
	err = ib_register_event_handler(&device->cache.event_handler);
	if (err)
		goto err;

	return 0;

err:
	gid_table_cleanup_one(device);
	return err;
}

void ib_cache_release_one(struct ib_device *device)
{
	int p;

	/*
	 * The release function frees all the cache elements.
	 * This function should be called as part of freeing
	 * all the device's resources when the cache could no
	 * longer be accessed.
	 */
	if (device->cache.pkey_cache)
		for (p = 0;
		     p <= rdma_end_port(device) - rdma_start_port(device); ++p)
			kfree(device->cache.pkey_cache[p]);

	gid_table_release_one(device);
	kfree(device->cache.pkey_cache);
	kfree(device->cache.lmc_cache);
}

void ib_cache_cleanup_one(struct ib_device *device)
{
	/* The cleanup function unregisters the event handler,
	 * waits for all in-progress workqueue elements and cleans
	 * up the GID cache. This function should be called after
	 * the device was removed from the devices list and all
	 * clients were removed, so the cache exists but is
	 * non-functional and shouldn't be updated anymore.
	 */
	ib_unregister_event_handler(&device->cache.event_handler);
	flush_workqueue(ib_wq);
	gid_table_cleanup_one(device);
}

void __init ib_cache_setup(void)
{
	roce_gid_mgmt_init();
}

void __exit ib_cache_cleanup(void)
{
	roce_gid_mgmt_cleanup();
}