/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 * Copyright (C) 2003-2014 Chelsio Communications. All rights reserved.
 *
 * Written by Deepak (deepak.s@chelsio.com)
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
 * release for licensing terms and conditions.
 */

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/jhash.h>
#include <linux/if_vlan.h>
#include <net/addrconf.h>
#include "cxgb4.h"
#include "clip_tbl.h"

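/* The CLIP (Compressed Local IP) table is split in two halves: IPv4
 * addresses hash into the lower half, IPv6 addresses into the upper half.
 */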
static inline unsigned int ipv4_clip_hash(struct clip_tbl *c, const u32 *key)
{
        unsigned int clipt_size_half = c->clipt_size / 2;

        return jhash_1word(*key, 0) % clipt_size_half;
}

static inline unsigned int ipv6_clip_hash(struct clip_tbl *d, const u32 *key)
{
        unsigned int clipt_size_half = d->clipt_size / 2;
        u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3];

        return clipt_size_half +
               (jhash_1word(xor, 0) % clipt_size_half);
}

static unsigned int clip_addr_hash(struct clip_tbl *ctbl, const u32 *addr,
                                   u8 v6)
{
        return v6 ? ipv6_clip_hash(ctbl, addr) :
                    ipv4_clip_hash(ctbl, addr);
}

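/* Ask the firmware to allocate a CLIP table entry for an IPv6 address via a
 * FW_CLIP_CMD mailbox command.
 */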
static int clip6_get_mbox(const struct net_device *dev,
                          const struct in6_addr *lip)
{
        struct adapter *adap = netdev2adap(dev);
        struct fw_clip_cmd c;

        memset(&c, 0, sizeof(c));
        c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) |
                              FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
        c.alloc_to_len16 = htonl(FW_CLIP_CMD_ALLOC_F | FW_LEN16(c));
        *(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
        *(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
        return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
}

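/* Ask the firmware to free the CLIP table entry for an IPv6 address. */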
static int clip6_release_mbox(const struct net_device *dev,
                              const struct in6_addr *lip)
{
        struct adapter *adap = netdev2adap(dev);
        struct fw_clip_cmd c;

        memset(&c, 0, sizeof(c));
        c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) |
                              FW_CMD_REQUEST_F | FW_CMD_READ_F);
        c.alloc_to_len16 = htonl(FW_CLIP_CMD_FREE_F | FW_LEN16(c));
        *(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
        *(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
        return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
}

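/* Look up @lip in the CLIP table and take a reference on its entry,
 * allocating a new entry (and programming the firmware for IPv6 addresses)
 * when none exists yet.  Returns 0 on success, -ENOMEM when no free entries
 * remain, or the firmware mailbox error.
 */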
int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
{
        struct adapter *adap = netdev2adap(dev);
        struct clip_tbl *ctbl = adap->clipt;
        struct clip_entry *ce, *cte;
        u32 *addr = (u32 *)lip;
        int hash;
        int ret = -1;

        if (!ctbl)
                return 0;

        hash = clip_addr_hash(ctbl, addr, v6);

        read_lock_bh(&ctbl->lock);
        list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
                if (cte->addr6.sin6_family == AF_INET6 && v6)
                        ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr,
                                     sizeof(struct in6_addr));
                else if (cte->addr.sin_family == AF_INET && !v6)
                        ret = memcmp(lip, (char *)(&cte->addr.sin_addr),
                                     sizeof(struct in_addr));
                if (!ret) {
                        ce = cte;
                        read_unlock_bh(&ctbl->lock);
                        goto found;
                }
        }
        read_unlock_bh(&ctbl->lock);

        write_lock_bh(&ctbl->lock);
        if (!list_empty(&ctbl->ce_free_head)) {
                ce = list_first_entry(&ctbl->ce_free_head,
                                      struct clip_entry, list);
                list_del(&ce->list);
                INIT_LIST_HEAD(&ce->list);
                spin_lock_init(&ce->lock);
                atomic_set(&ce->refcnt, 0);
                atomic_dec(&ctbl->nfree);
                list_add_tail(&ce->list, &ctbl->hash_list[hash]);
                if (v6) {
                        ce->addr6.sin6_family = AF_INET6;
                        memcpy(ce->addr6.sin6_addr.s6_addr,
                               lip, sizeof(struct in6_addr));
                        ret = clip6_get_mbox(dev, (const struct in6_addr *)lip);
                        if (ret) {
                                /* The firmware refused the entry; put it back
                                 * on the free list so it is not leaked.
                                 */
                                list_del(&ce->list);
                                list_add_tail(&ce->list, &ctbl->ce_free_head);
                                atomic_inc(&ctbl->nfree);
                                write_unlock_bh(&ctbl->lock);
                                return ret;
                        }
                } else {
                        ce->addr.sin_family = AF_INET;
                        memcpy((char *)(&ce->addr.sin_addr), lip,
                               sizeof(struct in_addr));
                }
        } else {
                write_unlock_bh(&ctbl->lock);
                return -ENOMEM;
        }
        write_unlock_bh(&ctbl->lock);
found:
        atomic_inc(&ce->refcnt);

        return 0;
}
EXPORT_SYMBOL(cxgb4_clip_get);

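/* Drop a reference on the CLIP entry for @lip.  When the last reference goes
 * away the entry is returned to the free list and, for IPv6, the firmware is
 * told to release it.
 */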
void cxgb4_clip_release(const struct net_device *dev, const u32 *lip, u8 v6)
{
        struct adapter *adap = netdev2adap(dev);
        struct clip_tbl *ctbl = adap->clipt;
        struct clip_entry *ce, *cte;
        u32 *addr = (u32 *)lip;
        int hash;
        int ret = -1;

        /* Mirror the check in cxgb4_clip_get(): the table may not exist. */
        if (!ctbl)
                return;

        hash = clip_addr_hash(ctbl, addr, v6);

        read_lock_bh(&ctbl->lock);
        list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
                if (cte->addr6.sin6_family == AF_INET6 && v6)
                        ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr,
                                     sizeof(struct in6_addr));
                else if (cte->addr.sin_family == AF_INET && !v6)
                        ret = memcmp(lip, (char *)(&cte->addr.sin_addr),
                                     sizeof(struct in_addr));
                if (!ret) {
                        ce = cte;
                        read_unlock_bh(&ctbl->lock);
                        goto found;
                }
        }
        read_unlock_bh(&ctbl->lock);

        return;
found:
        write_lock_bh(&ctbl->lock);
        spin_lock_bh(&ce->lock);
        if (atomic_dec_and_test(&ce->refcnt)) {
                list_del(&ce->list);
                INIT_LIST_HEAD(&ce->list);
                list_add_tail(&ce->list, &ctbl->ce_free_head);
                atomic_inc(&ctbl->nfree);
                if (v6)
                        clip6_release_mbox(dev, (const struct in6_addr *)lip);
        }
        spin_unlock_bh(&ce->lock);
        write_unlock_bh(&ctbl->lock);
}
EXPORT_SYMBOL(cxgb4_clip_release);

/* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
 * a physical device.
 * The physical device reference is needed to send the actual CLIP command.
 */
static int cxgb4_update_dev_clip(struct net_device *root_dev,
                                 struct net_device *dev)
{
        struct inet6_dev *idev = NULL;
        struct inet6_ifaddr *ifa;
        int ret = 0;

        idev = __in6_dev_get(root_dev);
        if (!idev)
                return ret;

        read_lock_bh(&idev->lock);
        list_for_each_entry(ifa, &idev->addr_list, if_list) {
                ret = cxgb4_clip_get(dev, (const u32 *)ifa->addr.s6_addr, 1);
                if (ret < 0)
                        break;
        }
        read_unlock_bh(&idev->lock);

        return ret;
}

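/* Install CLIP entries for the IPv6 addresses of @dev and of any bond or
 * VLAN device stacked on top of it.
 */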
int cxgb4_update_root_dev_clip(struct net_device *dev)
{
        struct net_device *root_dev = NULL;
        int i, ret = 0;

        /* First populate the real net device's IPv6 addresses */
        ret = cxgb4_update_dev_clip(dev, dev);
        if (ret)
                return ret;

        /* Parse all bond and vlan devices layered on top of the physical dev */
        root_dev = netdev_master_upper_dev_get_rcu(dev);
        if (root_dev) {
                ret = cxgb4_update_dev_clip(root_dev, dev);
                if (ret)
                        return ret;
        }

        for (i = 0; i < VLAN_N_VID; i++) {
                root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
                if (!root_dev)
                        continue;

                ret = cxgb4_update_dev_clip(root_dev, dev);
                if (ret)
                        break;
        }

        return ret;
}
EXPORT_SYMBOL(cxgb4_update_root_dev_clip);

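/* Dump every CLIP entry and its reference count (used by the driver's
 * debugfs support).
 */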
int clip_tbl_show(struct seq_file *seq, void *v)
{
        struct adapter *adapter = seq->private;
        struct clip_tbl *ctbl = adapter->clipt;
        struct clip_entry *ce;
        char ip[60];
        int i;

        read_lock_bh(&ctbl->lock);

        seq_puts(seq, "IP Address                Users\n");
        for (i = 0; i < ctbl->clipt_size; ++i) {
                list_for_each_entry(ce, &ctbl->hash_list[i], list) {
                        ip[0] = '\0';
                        sprintf(ip, "%pISc", &ce->addr);
                        seq_printf(seq, "%-25s %u\n", ip,
                                   atomic_read(&ce->refcnt));
                }
        }
        seq_printf(seq, "Free clip entries : %d\n", atomic_read(&ctbl->nfree));

        read_unlock_bh(&ctbl->lock);

        return 0;
}

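/* Allocate and initialize the CLIP table covering the hardware index range
 * [clipt_start, clipt_end]; every entry starts out on the free list.
 */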
struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start,
                                  unsigned int clipt_end)
{
        struct clip_entry *cl_list;
        struct clip_tbl *ctbl;
        unsigned int clipt_size;
        int i;

        if (clipt_start >= clipt_end)
                return NULL;
        clipt_size = clipt_end - clipt_start + 1;
        if (clipt_size < CLIPT_MIN_HASH_BUCKETS)
                return NULL;

        ctbl = t4_alloc_mem(sizeof(*ctbl) +
                            clipt_size * sizeof(struct list_head));
        if (!ctbl)
                return NULL;

        ctbl->clipt_start = clipt_start;
        ctbl->clipt_size = clipt_size;
        INIT_LIST_HEAD(&ctbl->ce_free_head);

        atomic_set(&ctbl->nfree, clipt_size);
        rwlock_init(&ctbl->lock);

        for (i = 0; i < ctbl->clipt_size; ++i)
                INIT_LIST_HEAD(&ctbl->hash_list[i]);

        cl_list = t4_alloc_mem(clipt_size * sizeof(struct clip_entry));
        if (!cl_list) {
                /* Without the entry array the table is unusable. */
                t4_free_mem(ctbl);
                return NULL;
        }
        ctbl->cl_list = (void *)cl_list;

        for (i = 0; i < clipt_size; i++) {
                INIT_LIST_HEAD(&cl_list[i].list);
                list_add_tail(&cl_list[i].list, &ctbl->ce_free_head);
        }

        return ctbl;
}

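/* Free the CLIP table and its entry array. */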
void t4_cleanup_clip_tbl(struct adapter *adap)
{
        struct clip_tbl *ctbl = adap->clipt;

        if (ctbl) {
                if (ctbl->cl_list)
                        t4_free_mem(ctbl->cl_list);
                t4_free_mem(ctbl);
        }
}
EXPORT_SYMBOL(t4_cleanup_clip_tbl);