/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include <rdma/ib_cache.h>

#include "core_priv.h"
struct ib_pkey_cache {
	int             table_len;
	u16             table[0];
};

struct ib_update_work {
	struct work_struct work;
	struct ib_device  *device;
	u8                 port_num;
};

/* All-zero GID attribute, used together with zgid as the "empty entry"
 * sentinel when writing or matching table slots.
 */
static const struct ib_gid_attr zattr;
enum gid_attr_find_mask {
	GID_ATTR_FIND_MASK_GID          = 1UL << 0,
	GID_ATTR_FIND_MASK_NETDEV       = 1UL << 1,
	GID_ATTR_FIND_MASK_DEFAULT      = 1UL << 2,
	GID_ATTR_FIND_MASK_GID_TYPE     = 1UL << 3,
};

enum gid_table_entry_props {
	GID_TABLE_ENTRY_INVALID         = 1UL << 0,
	GID_TABLE_ENTRY_DEFAULT         = 1UL << 1,
};

enum gid_table_write_action {
	GID_TABLE_WRITE_ACTION_ADD,
	GID_TABLE_WRITE_ACTION_DEL,
	/* MODIFY only updates the GID table. Currently only used by
	 * ib_cache_update.
	 */
	GID_TABLE_WRITE_ACTION_MODIFY
};
struct ib_gid_table_entry {
	unsigned long	    props;
	union ib_gid        gid;
	struct ib_gid_attr  attr;
	void		   *context;
};

struct ib_gid_table {
	int                  sz;
	/* In RoCE, adding a GID to the table requires:
	 * (a) Find if this GID already exists.
	 * (b) Find a free space.
	 * (c) Write the new GID
	 *
	 * Delete requires a different set of operations:
	 * (a) Find the GID
	 * (b) Delete it.
	 *
	 * Add/delete should be carried out atomically.
	 * This is done by locking this mutex from multiple
	 * writers. We don't need this lock for IB, as the MAD
	 * layer replaces all entries. All data_vec entries
	 * are locked by this lock.
	 */
	struct mutex         lock;
	/* This lock protects the table entries from being
	 * read and written simultaneously.
	 */
	rwlock_t	     rwlock;
	struct ib_gid_table_entry *data_vec;
};
static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
{
	if (rdma_cap_roce_gid_table(ib_dev, port)) {
		struct ib_event event;

		event.device		= ib_dev;
		event.element.port_num	= port;
		event.event		= IB_EVENT_GID_CHANGE;

		ib_dispatch_event(&event);
	}
}
static const char * const gid_type_str[] = {
	[IB_GID_TYPE_IB]		= "IB/RoCE v1",
	[IB_GID_TYPE_ROCE_UDP_ENCAP]	= "RoCE v2",
};

const char *ib_cache_gid_type_str(enum ib_gid_type gid_type)
{
	if (gid_type < ARRAY_SIZE(gid_type_str) && gid_type_str[gid_type])
		return gid_type_str[gid_type];

	return "Invalid GID type";
}
EXPORT_SYMBOL(ib_cache_gid_type_str);
int ib_cache_gid_parse_type_str(const char *buf)
{
	unsigned int i;
	size_t len;
	int err = -EINVAL;

	len = strlen(buf);
	if (len == 0)
		return -EINVAL;

	if (buf[len - 1] == '\n')
		len--;

	for (i = 0; i < ARRAY_SIZE(gid_type_str); ++i)
		if (gid_type_str[i] && !strncmp(buf, gid_type_str[i], len) &&
		    len == strlen(gid_type_str[i])) {
			err = i;
			break;
		}

	return err;
}
EXPORT_SYMBOL(ib_cache_gid_parse_type_str);
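
/* A minimal usage sketch (hypothetical caller, not part of this file):
 * ib_cache_gid_parse_type_str() is the inverse of ib_cache_gid_type_str(),
 * mapping a user-supplied string (e.g. from sysfs, possibly
 * newline-terminated) back to an enum value:
 *
 *	int gid_type = ib_cache_gid_parse_type_str("RoCE v2\n");
 *	if (gid_type < 0)
 *		return gid_type;	// -EINVAL: unknown type string
 *	// here gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP
 */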
/* This function expects that rwlock will be write locked in all
 * scenarios and that lock will be locked in sleep-able (RoCE)
 * scenarios.
 */
static int write_gid(struct ib_device *ib_dev, u8 port,
		     struct ib_gid_table *table, int ix,
		     const union ib_gid *gid,
		     const struct ib_gid_attr *attr,
		     enum gid_table_write_action action,
		     bool  default_gid)
{
	int ret = 0;
	struct net_device *old_net_dev;

	/* in rdma_cap_roce_gid_table, this function should be protected by a
	 * sleep-able lock.
	 */
	if (rdma_cap_roce_gid_table(ib_dev, port)) {
		table->data_vec[ix].props |= GID_TABLE_ENTRY_INVALID;
		write_unlock_irq(&table->rwlock);
		/* GID_TABLE_WRITE_ACTION_MODIFY currently isn't supported by
		 * RoCE providers and thus only updates the cache.
		 */
		if (action == GID_TABLE_WRITE_ACTION_ADD)
			ret = ib_dev->add_gid(ib_dev, port, ix, gid, attr,
					      &table->data_vec[ix].context);
		else if (action == GID_TABLE_WRITE_ACTION_DEL)
			ret = ib_dev->del_gid(ib_dev, port, ix,
					      &table->data_vec[ix].context);
		write_lock_irq(&table->rwlock);
	}

	old_net_dev = table->data_vec[ix].attr.ndev;
	if (old_net_dev && old_net_dev != attr->ndev)
		dev_put(old_net_dev);
	/* if modify_gid failed, just delete the old gid */
	if (ret || action == GID_TABLE_WRITE_ACTION_DEL) {
		gid = &zgid;
		attr = &zattr;
		table->data_vec[ix].context = NULL;
	}
	if (default_gid)
		table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
	memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid));
	memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr));
	if (table->data_vec[ix].attr.ndev &&
	    table->data_vec[ix].attr.ndev != old_net_dev)
		dev_hold(table->data_vec[ix].attr.ndev);

	table->data_vec[ix].props &= ~GID_TABLE_ENTRY_INVALID;

	return ret;
}
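
/* Expected call pattern around the write_gid() wrappers below, as used by
 * the writers in this file (a sketch mirroring ib_cache_gid_add/del):
 *
 *	mutex_lock(&table->lock);		// serialize RoCE writers
 *	write_lock_irq(&table->rwlock);		// exclude readers
 *	ret = add_gid(ib_dev, port, table, ix, gid, attr, false);
 *	write_unlock_irq(&table->rwlock);
 *	mutex_unlock(&table->lock);
 *
 * write_gid() itself may drop and retake rwlock around the provider's
 * add_gid/del_gid callbacks, marking the entry INVALID so readers skip it.
 */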
static int add_gid(struct ib_device *ib_dev, u8 port,
		   struct ib_gid_table *table, int ix,
		   const union ib_gid *gid,
		   const struct ib_gid_attr *attr,
		   bool  default_gid)
{
	return write_gid(ib_dev, port, table, ix, gid, attr,
			 GID_TABLE_WRITE_ACTION_ADD, default_gid);
}
static int modify_gid(struct ib_device *ib_dev, u8 port,
		      struct ib_gid_table *table, int ix,
		      const union ib_gid *gid,
		      const struct ib_gid_attr *attr,
		      bool  default_gid)
{
	return write_gid(ib_dev, port, table, ix, gid, attr,
			 GID_TABLE_WRITE_ACTION_MODIFY, default_gid);
}
static int del_gid(struct ib_device *ib_dev, u8 port,
		   struct ib_gid_table *table, int ix,
		   bool  default_gid)
{
	return write_gid(ib_dev, port, table, ix, &zgid, &zattr,
			 GID_TABLE_WRITE_ACTION_DEL, default_gid);
}
/* rwlock should be read locked */
static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
		    const struct ib_gid_attr *val, bool default_gid,
		    unsigned long mask, int *pempty)
{
	int i = 0;
	int found = -1;
	int empty = pempty ? -1 : 0;

	while (i < table->sz && (found < 0 || empty < 0)) {
		struct ib_gid_table_entry *data = &table->data_vec[i];
		struct ib_gid_attr *attr = &data->attr;
		int curr_index = i;

		i++;

		if (data->props & GID_TABLE_ENTRY_INVALID)
			continue;

		if (empty < 0)
			if (!memcmp(&data->gid, &zgid, sizeof(*gid)) &&
			    !memcmp(attr, &zattr, sizeof(*attr)) &&
			    !data->props)
				empty = curr_index;

		if (found >= 0)
			continue;

		if (mask & GID_ATTR_FIND_MASK_GID_TYPE &&
		    attr->gid_type != val->gid_type)
			continue;

		if (mask & GID_ATTR_FIND_MASK_GID &&
		    memcmp(gid, &data->gid, sizeof(*gid)))
			continue;

		if (mask & GID_ATTR_FIND_MASK_NETDEV &&
		    attr->ndev != val->ndev)
			continue;

		if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
		    !!(data->props & GID_TABLE_ENTRY_DEFAULT) !=
		    default_gid)
			continue;

		found = curr_index;
	}

	if (pempty)
		*pempty = empty;

	return found;
}
static void make_default_gid(struct net_device *dev, union ib_gid *gid)
{
	gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	addrconf_ifid_eui48(&gid->raw[8], dev);
}
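
/* Worked example: the default GID is the IPv6 link-local pattern, i.e. the
 * fe80::/64 prefix plus a modified EUI-64 interface ID derived from the
 * netdev's MAC. For a (hypothetical) MAC 00:11:22:33:44:55,
 * addrconf_ifid_eui48() flips the universal/local bit and inserts ff:fe in
 * the middle, yielding the GID fe80::211:22ff:fe33:4455.
 */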
int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	int ix;
	int ret = 0;
	struct net_device *idev;
	int empty;

	table = ports_table[port - rdma_start_port(ib_dev)];

	if (!memcmp(gid, &zgid, sizeof(*gid)))
		return -EINVAL;

	if (ib_dev->get_netdev) {
		idev = ib_dev->get_netdev(ib_dev, port);
		if (idev && attr->ndev != idev) {
			union ib_gid default_gid;

			/* Adding default GIDs is not permitted */
			make_default_gid(idev, &default_gid);
			if (!memcmp(gid, &default_gid, sizeof(*gid))) {
				dev_put(idev);
				return -EPERM;
			}
		}
		if (idev)
			dev_put(idev);
	}

	mutex_lock(&table->lock);
	write_lock_irq(&table->rwlock);

	ix = find_gid(table, gid, attr, false, GID_ATTR_FIND_MASK_GID |
		      GID_ATTR_FIND_MASK_GID_TYPE |
		      GID_ATTR_FIND_MASK_NETDEV, &empty);
	if (ix >= 0)
		goto out_unlock;

	if (empty < 0) {
		ret = -ENOSPC;
		goto out_unlock;
	}

	ret = add_gid(ib_dev, port, table, empty, gid, attr, false);
	if (!ret)
		dispatch_gid_change_event(ib_dev, port);

out_unlock:
	write_unlock_irq(&table->rwlock);
	mutex_unlock(&table->lock);
	return ret;
}
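
/* A minimal usage sketch (hypothetical RoCE-side caller, not part of this
 * file): add a unicast GID bound to a netdev, then remove it again with
 * ib_cache_gid_del() below using the same gid/attr pair.
 *
 *	union ib_gid gid;		// filled in by the caller
 *	struct ib_gid_attr attr = {
 *		.gid_type = IB_GID_TYPE_ROCE_UDP_ENCAP,
 *		.ndev	  = ndev,	// netdev backing this GID
 *	};
 *
 *	ret = ib_cache_gid_add(ib_dev, port, &gid, &attr);
 *	...
 *	ret = ib_cache_gid_del(ib_dev, port, &gid, &attr);
 */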
int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	int ix;

	table = ports_table[port - rdma_start_port(ib_dev)];

	mutex_lock(&table->lock);
	write_lock_irq(&table->rwlock);

	ix = find_gid(table, gid, attr, false,
		      GID_ATTR_FIND_MASK_GID	  |
		      GID_ATTR_FIND_MASK_GID_TYPE |
		      GID_ATTR_FIND_MASK_NETDEV	  |
		      GID_ATTR_FIND_MASK_DEFAULT,
		      NULL);
	if (ix < 0)
		goto out_unlock;

	if (!del_gid(ib_dev, port, table, ix, false))
		dispatch_gid_change_event(ib_dev, port);

out_unlock:
	write_unlock_irq(&table->rwlock);
	mutex_unlock(&table->lock);
	return 0;
}
int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
				     struct net_device *ndev)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	int ix;
	bool deleted = false;

	table = ports_table[port - rdma_start_port(ib_dev)];

	mutex_lock(&table->lock);
	write_lock_irq(&table->rwlock);

	for (ix = 0; ix < table->sz; ix++)
		if (table->data_vec[ix].attr.ndev == ndev)
			if (!del_gid(ib_dev, port, table, ix, false))
				deleted = true;

	write_unlock_irq(&table->rwlock);
	mutex_unlock(&table->lock);

	if (deleted)
		dispatch_gid_change_event(ib_dev, port);

	return 0;
}
static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
			      union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;

	table = ports_table[port - rdma_start_port(ib_dev)];

	if (index < 0 || index >= table->sz)
		return -EINVAL;

	if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID)
		return -EAGAIN;

	memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
	if (attr) {
		memcpy(attr, &table->data_vec[index].attr, sizeof(*attr));
		if (attr->ndev)
			dev_hold(attr->ndev);
	}

	return 0;
}
static int _ib_cache_gid_table_find(struct ib_device *ib_dev,
				    const union ib_gid *gid,
				    const struct ib_gid_attr *val,
				    unsigned long mask,
				    u8 *port, u16 *index)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	u8 p;
	int local_index;
	unsigned long flags;

	for (p = 0; p < ib_dev->phys_port_cnt; p++) {
		table = ports_table[p];
		read_lock_irqsave(&table->rwlock, flags);
		local_index = find_gid(table, gid, val, false, mask, NULL);
		if (local_index >= 0) {
			if (index)
				*index = local_index;
			if (port)
				*port = p + rdma_start_port(ib_dev);
			read_unlock_irqrestore(&table->rwlock, flags);
			return 0;
		}
		read_unlock_irqrestore(&table->rwlock, flags);
	}

	return -ENOENT;
}
static int ib_cache_gid_find(struct ib_device *ib_dev,
			     const union ib_gid *gid,
			     enum ib_gid_type gid_type,
			     struct net_device *ndev, u8 *port,
			     u16 *index)
{
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE;
	struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;

	return _ib_cache_gid_table_find(ib_dev, gid, &gid_attr_val,
					mask, port, index);
}
int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
			       const union ib_gid *gid,
			       enum ib_gid_type gid_type,
			       u8 port, struct net_device *ndev,
			       u16 *index)
{
	int local_index;
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE;
	struct ib_gid_attr val = {.ndev = ndev, .gid_type = gid_type};
	unsigned long flags;

	if (port < rdma_start_port(ib_dev) ||
	    port > rdma_end_port(ib_dev))
		return -ENOENT;

	table = ports_table[port - rdma_start_port(ib_dev)];

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;

	read_lock_irqsave(&table->rwlock, flags);
	local_index = find_gid(table, gid, &val, false, mask, NULL);
	if (local_index >= 0) {
		if (index)
			*index = local_index;
		read_unlock_irqrestore(&table->rwlock, flags);
		return 0;
	}

	read_unlock_irqrestore(&table->rwlock, flags);
	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_cached_gid_by_port);
/**
 * ib_find_gid_by_filter - Returns the GID table index where a specified
 * GID value occurs
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @port_num: The port number of the device where the GID value could be
 *   searched.
 * @filter: The filter function is executed on any matching GID in the table.
 *   If the filter function returns true, the corresponding index is returned,
 *   otherwise, we continue searching the GID table. It's guaranteed that
 *   while filter is executed, ndev field is valid and the structure won't
 *   change. filter is executed in an atomic context. filter must not be NULL.
 * @index: The index into the cached GID table where the GID was found. This
 *   parameter may be NULL.
 *
 * ib_cache_gid_find_by_filter() searches for the specified GID value
 * of which the filter function returns true in the port's GID table.
 * This function is only supported on RoCE ports.
 */
static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
				       const union ib_gid *gid,
				       u8 port,
				       bool (*filter)(const union ib_gid *,
						      const struct ib_gid_attr *,
						      void *),
				       void *context,
				       u16 *index)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	unsigned int i;
	unsigned long flags;
	bool found = false;

	if (port < rdma_start_port(ib_dev) ||
	    port > rdma_end_port(ib_dev) ||
	    !rdma_protocol_roce(ib_dev, port))
		return -EPROTONOSUPPORT;

	table = ports_table[port - rdma_start_port(ib_dev)];

	read_lock_irqsave(&table->rwlock, flags);
	for (i = 0; i < table->sz; i++) {
		struct ib_gid_attr attr;

		if (table->data_vec[i].props & GID_TABLE_ENTRY_INVALID)
			continue;

		if (memcmp(gid, &table->data_vec[i].gid, sizeof(*gid)))
			continue;

		memcpy(&attr, &table->data_vec[i].attr, sizeof(attr));

		if (filter(gid, &attr, context)) {
			found = true;
			if (index)
				*index = i;
			break;
		}
	}
	read_unlock_irqrestore(&table->rwlock, flags);

	if (!found)
		return -ENOENT;
	return 0;
}
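
/* Example filter callback (a hypothetical sketch, not part of this file):
 * matches only entries whose GID type equals the type passed via @context.
 * It runs under the table's read lock, so it must not sleep.
 *
 *	static bool match_gid_type(const union ib_gid *gid,
 *				   const struct ib_gid_attr *attr,
 *				   void *context)
 *	{
 *		enum ib_gid_type *type = context;
 *
 *		return attr->gid_type == *type;
 *	}
 */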
static struct ib_gid_table *alloc_gid_table(int sz)
{
	struct ib_gid_table *table =
		kzalloc(sizeof(struct ib_gid_table), GFP_KERNEL);

	if (!table)
		return NULL;

	table->data_vec = kcalloc(sz, sizeof(*table->data_vec), GFP_KERNEL);
	if (!table->data_vec)
		goto err_free_table;

	mutex_init(&table->lock);

	table->sz = sz;
	rwlock_init(&table->rwlock);

	return table;

err_free_table:
	kfree(table);
	return NULL;
}
static void release_gid_table(struct ib_gid_table *table)
{
	if (table) {
		kfree(table->data_vec);
		kfree(table);
	}
}
static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
				   struct ib_gid_table *table)
{
	int i;
	bool deleted = false;

	if (!table)
		return;

	write_lock_irq(&table->rwlock);
	for (i = 0; i < table->sz; ++i) {
		if (memcmp(&table->data_vec[i].gid, &zgid,
			   sizeof(table->data_vec[i].gid)))
			/* props holds GID_TABLE_ENTRY_* flags, so test the
			 * DEFAULT entry bit (not the find-mask constant).
			 */
			if (!del_gid(ib_dev, port, table, i,
				     table->data_vec[i].props &
				     GID_TABLE_ENTRY_DEFAULT))
				deleted = true;
	}
	write_unlock_irq(&table->rwlock);

	if (deleted)
		dispatch_gid_change_event(ib_dev, port);
}
void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
				  struct net_device *ndev,
				  unsigned long gid_type_mask,
				  enum ib_cache_gid_default_mode mode)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	union ib_gid gid;
	struct ib_gid_attr gid_attr;
	struct ib_gid_attr zattr_type = zattr;
	struct ib_gid_table *table;
	unsigned int gid_type;

	table = ports_table[port - rdma_start_port(ib_dev)];

	make_default_gid(ndev, &gid);
	memset(&gid_attr, 0, sizeof(gid_attr));
	gid_attr.ndev = ndev;

	for (gid_type = 0; gid_type < IB_GID_TYPE_SIZE; ++gid_type) {
		int ix;
		union ib_gid current_gid;
		struct ib_gid_attr current_gid_attr = {};

		if (1UL << gid_type & ~gid_type_mask)
			continue;

		gid_attr.gid_type = gid_type;

		mutex_lock(&table->lock);
		write_lock_irq(&table->rwlock);
		ix = find_gid(table, NULL, &gid_attr, true,
			      GID_ATTR_FIND_MASK_GID_TYPE |
			      GID_ATTR_FIND_MASK_DEFAULT,
			      NULL);

		/* Couldn't find default GID location */
		WARN_ON(ix < 0);

		zattr_type.gid_type = gid_type;

		if (!__ib_cache_gid_get(ib_dev, port, ix,
					&current_gid, &current_gid_attr) &&
		    mode == IB_CACHE_GID_DEFAULT_MODE_SET &&
		    !memcmp(&gid, &current_gid, sizeof(gid)) &&
		    !memcmp(&gid_attr, &current_gid_attr, sizeof(gid_attr)))
			goto release;

		if (memcmp(&current_gid, &zgid, sizeof(current_gid)) ||
		    memcmp(&current_gid_attr, &zattr_type,
			   sizeof(current_gid_attr))) {
			if (del_gid(ib_dev, port, table, ix, true)) {
				pr_warn("ib_cache_gid: can't delete index %d for default gid %pI6\n",
					ix, gid.raw);
				goto release;
			}
			dispatch_gid_change_event(ib_dev, port);
		}

		if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) {
			if (add_gid(ib_dev, port, table, ix, &gid, &gid_attr, true))
				pr_warn("ib_cache_gid: unable to add default gid %pI6\n",
					gid.raw);
			else
				dispatch_gid_change_event(ib_dev, port);
		}

release:
		if (current_gid_attr.ndev)
			dev_put(current_gid_attr.ndev);
		write_unlock_irq(&table->rwlock);
		mutex_unlock(&table->lock);
	}
}
static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
				     struct ib_gid_table *table)
{
	unsigned int i;
	unsigned long roce_gid_type_mask;
	unsigned int num_default_gids;
	unsigned int current_gid = 0;

	roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
	num_default_gids = hweight_long(roce_gid_type_mask);
	for (i = 0; i < num_default_gids && i < table->sz; i++) {
		struct ib_gid_table_entry *entry =
			&table->data_vec[i];

		entry->props |= GID_TABLE_ENTRY_DEFAULT;
		current_gid = find_next_bit(&roce_gid_type_mask,
					    BITS_PER_LONG,
					    current_gid);
		entry->attr.gid_type = current_gid++;
	}

	return 0;
}
static int _gid_table_setup_one(struct ib_device *ib_dev)
{
	u8 port;
	struct ib_gid_table **table;
	int err = 0;

	table = kcalloc(ib_dev->phys_port_cnt, sizeof(*table), GFP_KERNEL);

	if (!table) {
		pr_warn("failed to allocate ib gid cache for %s\n",
			ib_dev->name);
		return -ENOMEM;
	}

	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		u8 rdma_port = port + rdma_start_port(ib_dev);

		table[port] =
			alloc_gid_table(
				ib_dev->port_immutable[rdma_port].gid_tbl_len);
		if (!table[port]) {
			err = -ENOMEM;
			goto rollback_table_setup;
		}

		err = gid_table_reserve_default(ib_dev,
						port + rdma_start_port(ib_dev),
						table[port]);
		if (err)
			goto rollback_table_setup;
	}

	ib_dev->cache.gid_cache = table;
	return 0;

rollback_table_setup:
	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
				       table[port]);
		release_gid_table(table[port]);
	}

	kfree(table);
	return err;
}
static void gid_table_release_one(struct ib_device *ib_dev)
{
	struct ib_gid_table **table = ib_dev->cache.gid_cache;
	u8 port;

	if (!table)
		return;

	for (port = 0; port < ib_dev->phys_port_cnt; port++)
		release_gid_table(table[port]);

	kfree(table);
	ib_dev->cache.gid_cache = NULL;
}
static void gid_table_cleanup_one(struct ib_device *ib_dev)
{
	struct ib_gid_table **table = ib_dev->cache.gid_cache;
	u8 port;

	if (!table)
		return;

	for (port = 0; port < ib_dev->phys_port_cnt; port++)
		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
				       table[port]);
}
*ib_dev
)
834 err
= _gid_table_setup_one(ib_dev
);
839 err
= roce_rescan_device(ib_dev
);
842 gid_table_cleanup_one(ib_dev
);
843 gid_table_release_one(ib_dev
);
int ib_get_cached_gid(struct ib_device *device,
		      u8                port_num,
		      int               index,
		      union ib_gid     *gid,
		      struct ib_gid_attr *gid_attr)
{
	int res;
	unsigned long flags;
	struct ib_gid_table **ports_table = device->cache.gid_cache;
	struct ib_gid_table *table;

	/* Validate port_num before using it to index ports_table */
	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	table = ports_table[port_num - rdma_start_port(device)];

	read_lock_irqsave(&table->rwlock, flags);
	res = __ib_cache_gid_get(device, port_num, index, gid, gid_attr);
	read_unlock_irqrestore(&table->rwlock, flags);

	return res;
}
EXPORT_SYMBOL(ib_get_cached_gid);
int ib_find_cached_gid(struct ib_device *device,
		       const union ib_gid *gid,
		       enum ib_gid_type gid_type,
		       struct net_device *ndev,
		       u8               *port_num,
		       u16              *index)
{
	return ib_cache_gid_find(device, gid, gid_type, ndev, port_num, index);
}
EXPORT_SYMBOL(ib_find_cached_gid);
int ib_find_gid_by_filter(struct ib_device *device,
			  const union ib_gid *gid,
			  u8 port_num,
			  bool (*filter)(const union ib_gid *gid,
					 const struct ib_gid_attr *,
					 void *),
			  void *context, u16 *index)
{
	/* Only RoCE GID table supports filter function */
	if (!rdma_cap_roce_gid_table(device, port_num) && filter)
		return -EPROTONOSUPPORT;

	return ib_cache_gid_find_by_filter(device, gid,
					   port_num, filter,
					   context, index);
}
EXPORT_SYMBOL(ib_find_gid_by_filter);
int ib_get_cached_pkey(struct ib_device *device,
		       u8                port_num,
		       int               index,
		       u16              *pkey)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int ret = 0;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

	if (index < 0 || index >= cache->table_len)
		ret = -EINVAL;
	else
		*pkey = cache->table[index];

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);
int ib_find_cached_pkey(struct ib_device *device,
			u8                port_num,
			u16               pkey,
			u16              *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;
	int partial_ix = -1;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
			if (cache->table[i] & 0x8000) {
				*index = i;
				ret = 0;
				break;
			} else
				partial_ix = i;
		}

	if (ret && partial_ix >= 0) {
		*index = partial_ix;
		ret = 0;
	}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);
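
/* Worked example of the masking above: P_Keys 0x8001 and 0x0001 share the
 * same 15-bit key (0x8001 & 0x7fff == 0x0001 & 0x7fff == 0x0001); bit 15
 * (0x8000) distinguishes full membership (set) from partial membership
 * (clear). ib_find_cached_pkey() therefore prefers a full-member entry and
 * falls back to a partial-member one via partial_ix, while
 * ib_find_exact_cached_pkey() below matches all 16 bits.
 */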
int ib_find_exact_cached_pkey(struct ib_device *device,
			      u8                port_num,
			      u16               pkey,
			      u16              *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if (cache->table[i] == pkey) {
			*index = i;
			ret = 0;
			break;
		}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);
int ib_get_cached_lmc(struct ib_device *device,
		      u8                port_num,
		      u8                *lmc)
{
	unsigned long flags;
	int ret = 0;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);
	*lmc = device->cache.lmc_cache[port_num - rdma_start_port(device)];
	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);
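
/* Note: the cached LMC (LID Mask Control) value read above tells the caller
 * how many low-order bits of the port's base LID are "don't care": a port
 * with LMC n responds to 2^n consecutive LIDs, which path records can use
 * for multipathing. E.g. base LID 0x10 with LMC 2 covers LIDs 0x10-0x13.
 */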
static void ib_cache_update(struct ib_device *device,
			    u8                port)
{
	struct ib_port_attr       *tprops = NULL;
	struct ib_pkey_cache      *pkey_cache = NULL, *old_pkey_cache;
	struct ib_gid_cache {
		int             table_len;
		union ib_gid    table[0];
	} *gid_cache = NULL;
	int                        i;
	int                        ret;
	struct ib_gid_table	  *table;
	struct ib_gid_table	 **ports_table = device->cache.gid_cache;
	bool			   use_roce_gid_table =
					rdma_cap_roce_gid_table(device, port);

	if (port < rdma_start_port(device) || port > rdma_end_port(device))
		return;

	table = ports_table[port - rdma_start_port(device)];

	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
	if (!tprops)
		return;

	ret = ib_query_port(device, port, tprops);
	if (ret) {
		printk(KERN_WARNING "ib_query_port failed (%d) for %s\n",
		       ret, device->name);
		goto err;
	}

	pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
			     sizeof *pkey_cache->table, GFP_KERNEL);
	if (!pkey_cache)
		goto err;

	pkey_cache->table_len = tprops->pkey_tbl_len;

	if (!use_roce_gid_table) {
		gid_cache = kmalloc(sizeof(*gid_cache) + tprops->gid_tbl_len *
				    sizeof(*gid_cache->table), GFP_KERNEL);
		if (!gid_cache)
			goto err;

		gid_cache->table_len = tprops->gid_tbl_len;
	}

	for (i = 0; i < pkey_cache->table_len; ++i) {
		ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
		if (ret) {
			printk(KERN_WARNING "ib_query_pkey failed (%d) for %s (index %d)\n",
			       ret, device->name, i);
			goto err;
		}
	}

	if (!use_roce_gid_table) {
		for (i = 0; i < gid_cache->table_len; ++i) {
			ret = ib_query_gid(device, port, i,
					   gid_cache->table + i, NULL);
			if (ret) {
				printk(KERN_WARNING "ib_query_gid failed (%d) for %s (index %d)\n",
				       ret, device->name, i);
				goto err;
			}
		}
	}

	write_lock_irq(&device->cache.lock);

	old_pkey_cache = device->cache.pkey_cache[port - rdma_start_port(device)];

	device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache;
	if (!use_roce_gid_table) {
		write_lock(&table->rwlock);
		for (i = 0; i < gid_cache->table_len; i++) {
			modify_gid(device, port, table, i, gid_cache->table + i,
				   &zattr, false);
		}
		write_unlock(&table->rwlock);
	}

	device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc;

	write_unlock_irq(&device->cache.lock);

	kfree(gid_cache);
	kfree(old_pkey_cache);
	kfree(tprops);
	return;

err:
	kfree(pkey_cache);
	kfree(gid_cache);
	kfree(tprops);
}
static void ib_cache_task(struct work_struct *_work)
{
	struct ib_update_work *work =
		container_of(_work, struct ib_update_work, work);

	ib_cache_update(work->device, work->port_num);
	kfree(work);
}
static void ib_cache_event(struct ib_event_handler *handler,
			   struct ib_event *event)
{
	struct ib_update_work *work;

	if (event->event == IB_EVENT_PORT_ERR    ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE  ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE   ||
	    event->event == IB_EVENT_CLIENT_REREGISTER ||
	    event->event == IB_EVENT_GID_CHANGE) {
		work = kmalloc(sizeof *work, GFP_ATOMIC);
		if (work) {
			INIT_WORK(&work->work, ib_cache_task);
			work->device   = event->device;
			work->port_num = event->element.port_num;
			queue_work(ib_wq, &work->work);
		}
	}
}
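
/* Design note: ib_cache_event() runs in the event-dispatch (atomic) context,
 * so it only allocates with GFP_ATOMIC and defers the actual port rescan to
 * ib_cache_task() on ib_wq, where ib_cache_update() is free to sleep in
 * ib_query_port() and its GFP_KERNEL allocations.
 */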
int ib_cache_setup_one(struct ib_device *device)
{
	int p;
	int err;

	rwlock_init(&device->cache.lock);

	device->cache.pkey_cache =
		kzalloc(sizeof *device->cache.pkey_cache *
			(rdma_end_port(device) - rdma_start_port(device) + 1),
			GFP_KERNEL);
	device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
					  (rdma_end_port(device) -
					   rdma_start_port(device) + 1),
					  GFP_KERNEL);
	if (!device->cache.pkey_cache ||
	    !device->cache.lmc_cache) {
		printk(KERN_WARNING "Couldn't allocate cache for %s\n",
		       device->name);
		return -ENOMEM;
	}

	err = gid_table_setup_one(device);
	if (err)
		/* Allocated memory will be cleaned in the release function */
		return err;

	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
		ib_cache_update(device, p + rdma_start_port(device));

	INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
			      device, ib_cache_event);
	err = ib_register_event_handler(&device->cache.event_handler);
	if (err)
		goto err;

	return 0;

err:
	gid_table_cleanup_one(device);
	return err;
}
void ib_cache_release_one(struct ib_device *device)
{
	int p;

	/*
	 * The release function frees all the cache elements.
	 * This function should be called as part of freeing
	 * all the device's resources when the cache could no
	 * longer be accessed.
	 */
	if (device->cache.pkey_cache)
		for (p = 0;
		     p <= rdma_end_port(device) - rdma_start_port(device); ++p)
			kfree(device->cache.pkey_cache[p]);

	gid_table_release_one(device);
	kfree(device->cache.pkey_cache);
	kfree(device->cache.lmc_cache);
}
void ib_cache_cleanup_one(struct ib_device *device)
{
	/* The cleanup function unregisters the event handler,
	 * waits for all in-progress workqueue elements and cleans
	 * up the GID cache. This function should be called after
	 * the device was removed from the devices list and all
	 * clients were removed, so the cache exists but is
	 * non-functional and shouldn't be updated anymore.
	 */
	ib_unregister_event_handler(&device->cache.event_handler);
	flush_workqueue(ib_wq);
	gid_table_cleanup_one(device);
}
void __init ib_cache_setup(void)
{
	roce_gid_mgmt_init();
}

void __exit ib_cache_cleanup(void)
{
	roce_gid_mgmt_cleanup();
}