/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 * Copyright (C) 2003-2014 Chelsio Communications. All rights reserved.
 *
 * Written by Deepak (deepak.s@chelsio.com)
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
 * release for licensing terms and conditions.
 */
13 #include <linux/module.h>
14 #include <linux/netdevice.h>
15 #include <linux/jhash.h>
16 #include <linux/if_vlan.h>
17 #include <net/addrconf.h>
21 static inline unsigned int ipv4_clip_hash(struct clip_tbl
*c
, const u32
*key
)
23 unsigned int clipt_size_half
= c
->clipt_size
/ 2;
25 return jhash_1word(*key
, 0) % clipt_size_half
;
28 static inline unsigned int ipv6_clip_hash(struct clip_tbl
*d
, const u32
*key
)
30 unsigned int clipt_size_half
= d
->clipt_size
/ 2;
31 u32
xor = key
[0] ^ key
[1] ^ key
[2] ^ key
[3];
33 return clipt_size_half
+
34 (jhash_1word(xor, 0) % clipt_size_half
);
37 static unsigned int clip_addr_hash(struct clip_tbl
*ctbl
, const u32
*addr
,
40 return v6
? ipv6_clip_hash(ctbl
, addr
) :
41 ipv4_clip_hash(ctbl
, addr
);
44 static int clip6_get_mbox(const struct net_device
*dev
,
45 const struct in6_addr
*lip
)
47 struct adapter
*adap
= netdev2adap(dev
);
50 memset(&c
, 0, sizeof(c
));
51 c
.op_to_write
= htonl(FW_CMD_OP_V(FW_CLIP_CMD
) |
52 FW_CMD_REQUEST_F
| FW_CMD_WRITE_F
);
53 c
.alloc_to_len16
= htonl(FW_CLIP_CMD_ALLOC_F
| FW_LEN16(c
));
54 *(__be64
*)&c
.ip_hi
= *(__be64
*)(lip
->s6_addr
);
55 *(__be64
*)&c
.ip_lo
= *(__be64
*)(lip
->s6_addr
+ 8);
56 return t4_wr_mbox_meat(adap
, adap
->mbox
, &c
, sizeof(c
), &c
, false);
59 static int clip6_release_mbox(const struct net_device
*dev
,
60 const struct in6_addr
*lip
)
62 struct adapter
*adap
= netdev2adap(dev
);
65 memset(&c
, 0, sizeof(c
));
66 c
.op_to_write
= htonl(FW_CMD_OP_V(FW_CLIP_CMD
) |
67 FW_CMD_REQUEST_F
| FW_CMD_READ_F
);
68 c
.alloc_to_len16
= htonl(FW_CLIP_CMD_FREE_F
| FW_LEN16(c
));
69 *(__be64
*)&c
.ip_hi
= *(__be64
*)(lip
->s6_addr
);
70 *(__be64
*)&c
.ip_lo
= *(__be64
*)(lip
->s6_addr
+ 8);
71 return t4_wr_mbox_meat(adap
, adap
->mbox
, &c
, sizeof(c
), &c
, false);
74 int cxgb4_clip_get(const struct net_device
*dev
, const u32
*lip
, u8 v6
)
76 struct adapter
*adap
= netdev2adap(dev
);
77 struct clip_tbl
*ctbl
= adap
->clipt
;
78 struct clip_entry
*ce
, *cte
;
79 u32
*addr
= (u32
*)lip
;
86 hash
= clip_addr_hash(ctbl
, addr
, v6
);
88 read_lock_bh(&ctbl
->lock
);
89 list_for_each_entry(cte
, &ctbl
->hash_list
[hash
], list
) {
90 if (cte
->addr6
.sin6_family
== AF_INET6
&& v6
)
91 ret
= memcmp(lip
, cte
->addr6
.sin6_addr
.s6_addr
,
92 sizeof(struct in6_addr
));
93 else if (cte
->addr
.sin_family
== AF_INET
&& !v6
)
94 ret
= memcmp(lip
, (char *)(&cte
->addr
.sin_addr
),
95 sizeof(struct in_addr
));
98 read_unlock_bh(&ctbl
->lock
);
102 read_unlock_bh(&ctbl
->lock
);
104 write_lock_bh(&ctbl
->lock
);
105 if (!list_empty(&ctbl
->ce_free_head
)) {
106 ce
= list_first_entry(&ctbl
->ce_free_head
,
107 struct clip_entry
, list
);
109 INIT_LIST_HEAD(&ce
->list
);
110 spin_lock_init(&ce
->lock
);
111 atomic_set(&ce
->refcnt
, 0);
112 atomic_dec(&ctbl
->nfree
);
113 list_add_tail(&ce
->list
, &ctbl
->hash_list
[hash
]);
115 ce
->addr6
.sin6_family
= AF_INET6
;
116 memcpy(ce
->addr6
.sin6_addr
.s6_addr
,
117 lip
, sizeof(struct in6_addr
));
118 ret
= clip6_get_mbox(dev
, (const struct in6_addr
*)lip
);
120 write_unlock_bh(&ctbl
->lock
);
124 ce
->addr
.sin_family
= AF_INET
;
125 memcpy((char *)(&ce
->addr
.sin_addr
), lip
,
126 sizeof(struct in_addr
));
129 write_unlock_bh(&ctbl
->lock
);
132 write_unlock_bh(&ctbl
->lock
);
134 atomic_inc(&ce
->refcnt
);
138 EXPORT_SYMBOL(cxgb4_clip_get
);
140 void cxgb4_clip_release(const struct net_device
*dev
, const u32
*lip
, u8 v6
)
142 struct adapter
*adap
= netdev2adap(dev
);
143 struct clip_tbl
*ctbl
= adap
->clipt
;
144 struct clip_entry
*ce
, *cte
;
145 u32
*addr
= (u32
*)lip
;
149 hash
= clip_addr_hash(ctbl
, addr
, v6
);
151 read_lock_bh(&ctbl
->lock
);
152 list_for_each_entry(cte
, &ctbl
->hash_list
[hash
], list
) {
153 if (cte
->addr6
.sin6_family
== AF_INET6
&& v6
)
154 ret
= memcmp(lip
, cte
->addr6
.sin6_addr
.s6_addr
,
155 sizeof(struct in6_addr
));
156 else if (cte
->addr
.sin_family
== AF_INET
&& !v6
)
157 ret
= memcmp(lip
, (char *)(&cte
->addr
.sin_addr
),
158 sizeof(struct in_addr
));
161 read_unlock_bh(&ctbl
->lock
);
165 read_unlock_bh(&ctbl
->lock
);
169 write_lock_bh(&ctbl
->lock
);
170 spin_lock_bh(&ce
->lock
);
171 if (atomic_dec_and_test(&ce
->refcnt
)) {
173 INIT_LIST_HEAD(&ce
->list
);
174 list_add_tail(&ce
->list
, &ctbl
->ce_free_head
);
175 atomic_inc(&ctbl
->nfree
);
177 clip6_release_mbox(dev
, (const struct in6_addr
*)lip
);
179 spin_unlock_bh(&ce
->lock
);
180 write_unlock_bh(&ctbl
->lock
);
182 EXPORT_SYMBOL(cxgb4_clip_release
);
184 /* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
186 * The physical device reference is needed to send the actul CLIP command.
188 static int cxgb4_update_dev_clip(struct net_device
*root_dev
,
189 struct net_device
*dev
)
191 struct inet6_dev
*idev
= NULL
;
192 struct inet6_ifaddr
*ifa
;
195 idev
= __in6_dev_get(root_dev
);
199 read_lock_bh(&idev
->lock
);
200 list_for_each_entry(ifa
, &idev
->addr_list
, if_list
) {
201 ret
= cxgb4_clip_get(dev
, (const u32
*)ifa
->addr
.s6_addr
, 1);
205 read_unlock_bh(&idev
->lock
);
210 int cxgb4_update_root_dev_clip(struct net_device
*dev
)
212 struct net_device
*root_dev
= NULL
;
215 /* First populate the real net device's IPv6 addresses */
216 ret
= cxgb4_update_dev_clip(dev
, dev
);
220 /* Parse all bond and vlan devices layered on top of the physical dev */
221 root_dev
= netdev_master_upper_dev_get_rcu(dev
);
223 ret
= cxgb4_update_dev_clip(root_dev
, dev
);
228 for (i
= 0; i
< VLAN_N_VID
; i
++) {
229 root_dev
= __vlan_find_dev_deep_rcu(dev
, htons(ETH_P_8021Q
), i
);
233 ret
= cxgb4_update_dev_clip(root_dev
, dev
);
240 EXPORT_SYMBOL(cxgb4_update_root_dev_clip
);
242 int clip_tbl_show(struct seq_file
*seq
, void *v
)
244 struct adapter
*adapter
= seq
->private;
245 struct clip_tbl
*ctbl
= adapter
->clipt
;
246 struct clip_entry
*ce
;
250 read_lock_bh(&ctbl
->lock
);
252 seq_puts(seq
, "IP Address Users\n");
253 for (i
= 0 ; i
< ctbl
->clipt_size
; ++i
) {
254 list_for_each_entry(ce
, &ctbl
->hash_list
[i
], list
) {
256 sprintf(ip
, "%pISc", &ce
->addr
);
257 seq_printf(seq
, "%-25s %u\n", ip
,
258 atomic_read(&ce
->refcnt
));
261 seq_printf(seq
, "Free clip entries : %d\n", atomic_read(&ctbl
->nfree
));
263 read_unlock_bh(&ctbl
->lock
);
268 struct clip_tbl
*t4_init_clip_tbl(unsigned int clipt_start
,
269 unsigned int clipt_end
)
271 struct clip_entry
*cl_list
;
272 struct clip_tbl
*ctbl
;
273 unsigned int clipt_size
;
276 if (clipt_start
>= clipt_end
)
278 clipt_size
= clipt_end
- clipt_start
+ 1;
279 if (clipt_size
< CLIPT_MIN_HASH_BUCKETS
)
282 ctbl
= t4_alloc_mem(sizeof(*ctbl
) +
283 clipt_size
*sizeof(struct list_head
));
287 ctbl
->clipt_start
= clipt_start
;
288 ctbl
->clipt_size
= clipt_size
;
289 INIT_LIST_HEAD(&ctbl
->ce_free_head
);
291 atomic_set(&ctbl
->nfree
, clipt_size
);
292 rwlock_init(&ctbl
->lock
);
294 for (i
= 0; i
< ctbl
->clipt_size
; ++i
)
295 INIT_LIST_HEAD(&ctbl
->hash_list
[i
]);
297 cl_list
= t4_alloc_mem(clipt_size
*sizeof(struct clip_entry
));
298 ctbl
->cl_list
= (void *)cl_list
;
300 for (i
= 0; i
< clipt_size
; i
++) {
301 INIT_LIST_HEAD(&cl_list
[i
].list
);
302 list_add_tail(&cl_list
[i
].list
, &ctbl
->ce_free_head
);
308 void t4_cleanup_clip_tbl(struct adapter
*adap
)
310 struct clip_tbl
*ctbl
= adap
->clipt
;
314 t4_free_mem(ctbl
->cl_list
);
318 EXPORT_SYMBOL(t4_cleanup_clip_tbl
);