/*
 * Copyright (c) 2006-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <net/neighbour.h>
#include <linux/notifier.h>
#include <linux/atomic.h>
#include <linux/proc_fs.h>
#include <linux/if_vlan.h>
#include <net/netevent.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/export.h>

#include "common.h"
#include "regs.h"
#include "cxgb3_ioctl.h"
#include "cxgb3_ctl_defs.h"
#include "cxgb3_defs.h"
#include "l2t.h"
#include "firmware_exports.h"
#include "cxgb3_offload.h"

static LIST_HEAD(client_list);
static LIST_HEAD(ofld_dev_list);
static DEFINE_MUTEX(cxgb3_db_lock);

static DEFINE_RWLOCK(adapter_list_lock);
static LIST_HEAD(adapter_list);

static const unsigned int MAX_ATIDS = 64 * 1024;
static const unsigned int ATID_BASE = 0x10000;

static void cxgb_neigh_update(struct neighbour *neigh);
static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new);

static inline int offload_activated(struct t3cdev *tdev)
{
        const struct adapter *adapter = tdev2adap(tdev);

        return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
}

/**
 *	cxgb3_register_client - register an offload client
 *	@client: the client
 *
 *	Adds the client to the client list and calls back the client for
 *	each activated offload device.
 */
void cxgb3_register_client(struct cxgb3_client *client)
{
        struct t3cdev *tdev;

        mutex_lock(&cxgb3_db_lock);
        list_add_tail(&client->client_list, &client_list);

        if (client->add) {
                list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
                        if (offload_activated(tdev))
                                client->add(tdev);
                }
        }
        mutex_unlock(&cxgb3_db_lock);
}

EXPORT_SYMBOL(cxgb3_register_client);

/**
 *	cxgb3_unregister_client - unregister an offload client
 *	@client: the client
 *
 *	Removes the client from the client list and calls back the client
 *	for each activated offload device.
 */
void cxgb3_unregister_client(struct cxgb3_client *client)
{
        struct t3cdev *tdev;

        mutex_lock(&cxgb3_db_lock);
        list_del(&client->client_list);

        if (client->remove) {
                list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
                        if (offload_activated(tdev))
                                client->remove(tdev);
                }
        }
        mutex_unlock(&cxgb3_db_lock);
}

EXPORT_SYMBOL(cxgb3_unregister_client);

/**
 *	cxgb3_add_clients - activate registered clients for an offload device
 *	@tdev: the offload device
 *
 *	Calls back all registered clients when an offload device is activated.
 */
void cxgb3_add_clients(struct t3cdev *tdev)
{
        struct cxgb3_client *client;

        mutex_lock(&cxgb3_db_lock);
        list_for_each_entry(client, &client_list, client_list) {
                if (client->add)
                        client->add(tdev);
        }
        mutex_unlock(&cxgb3_db_lock);
}

/**
 *	cxgb3_remove_clients - deactivate registered clients for an offload device
 *	@tdev: the offload device
 *
 *	Calls back all registered clients when an offload device is deactivated.
 */
void cxgb3_remove_clients(struct t3cdev *tdev)
{
        struct cxgb3_client *client;

        mutex_lock(&cxgb3_db_lock);
        list_for_each_entry(client, &client_list, client_list) {
                if (client->remove)
                        client->remove(tdev);
        }
        mutex_unlock(&cxgb3_db_lock);
}

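/*
 * Notify all registered clients of an event on the given offload device
 * and port.
 */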
void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port)
{
        struct cxgb3_client *client;

        mutex_lock(&cxgb3_db_lock);
        list_for_each_entry(client, &client_list, client_list) {
                if (client->event_handler)
                        client->event_handler(tdev, event, port);
        }
        mutex_unlock(&cxgb3_db_lock);
}

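/*
 * Find the port of the adapter that owns the given MAC address,
 * descending to the matching VLAN device or ascending to the bonding
 * master as appropriate.  Returns NULL if no port matches.
 */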
static struct net_device *get_iff_from_mac(struct adapter *adapter,
                                           const unsigned char *mac,
                                           unsigned int vlan)
{
        int i;

        for_each_port(adapter, i) {
                struct net_device *dev = adapter->port[i];

                if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) {
                        if (vlan && vlan != VLAN_VID_MASK) {
                                rcu_read_lock();
                                dev = __vlan_find_dev_deep(dev, vlan);
                                rcu_read_unlock();
                        } else if (netif_is_bond_slave(dev)) {
                                while (dev->master)
                                        dev = dev->master;
                        }
                        return dev;
                }
        }
        return NULL;
}

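/*
 * Handle the ULP iSCSI control requests: report or program the iSCSI
 * DDP parameters (region limits, tag mask, page-size factors) and the
 * maximum Tx/Rx PDU sizes derived from the PM FIFO configuration.
 */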
static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
                              void *data)
{
        int i;
        int ret = 0;
        unsigned int val = 0;
        struct ulp_iscsi_info *uiip = data;

        switch (req) {
        case ULP_ISCSI_GET_PARAMS:
                uiip->pdev = adapter->pdev;
                uiip->llimit = t3_read_reg(adapter, A_ULPRX_ISCSI_LLIMIT);
                uiip->ulimit = t3_read_reg(adapter, A_ULPRX_ISCSI_ULIMIT);
                uiip->tagmask = t3_read_reg(adapter, A_ULPRX_ISCSI_TAGMASK);

                val = t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ);
                for (i = 0; i < 4; i++, val >>= 8)
                        uiip->pgsz_factor[i] = val & 0xFF;

                val = t3_read_reg(adapter, A_TP_PARA_REG7);
                uiip->max_txsz =
                uiip->max_rxsz = min((val >> S_PMMAXXFERLEN0)&M_PMMAXXFERLEN0,
                                     (val >> S_PMMAXXFERLEN1)&M_PMMAXXFERLEN1);
                /*
                 * On tx, the iscsi pdu has to be <= tx page size and has to
                 * fit into the Tx PM FIFO.
                 */
                val = min(adapter->params.tp.tx_pg_size,
                          t3_read_reg(adapter, A_PM1_TX_CFG) >> 17);
                uiip->max_txsz = min(val, uiip->max_txsz);

                /* set MaxRxData to 16224 */
                val = t3_read_reg(adapter, A_TP_PARA_REG2);
                if ((val >> S_MAXRXDATA) != 0x3f60) {
                        val &= (M_RXCOALESCESIZE << S_RXCOALESCESIZE);
                        val |= V_MAXRXDATA(0x3f60);
                        printk(KERN_INFO
                               "%s, iscsi set MaxRxData to 16224 (0x%x).\n",
                               adapter->name, val);
                        t3_write_reg(adapter, A_TP_PARA_REG2, val);
                }

                /*
                 * On rx, the iscsi pdu has to be < the rx page size and the
                 * max rx data length programmed in TP.
                 */
                val = min(adapter->params.tp.rx_pg_size,
                          ((t3_read_reg(adapter, A_TP_PARA_REG2)) >>
                           S_MAXRXDATA) & M_MAXRXDATA);
                uiip->max_rxsz = min(val, uiip->max_rxsz);
                break;
        case ULP_ISCSI_SET_PARAMS:
                t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask);
                /* program the ddp page sizes */
                for (i = 0; i < 4; i++)
                        val |= (uiip->pgsz_factor[i] & 0xF) << (8 * i);
                if (val && (val != t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ))) {
                        printk(KERN_INFO
                               "%s, setting iscsi pgsz 0x%x, %u,%u,%u,%u.\n",
                               adapter->name, val, uiip->pgsz_factor[0],
                               uiip->pgsz_factor[1], uiip->pgsz_factor[2],
                               uiip->pgsz_factor[3]);
                        t3_write_reg(adapter, A_ULPRX_ISCSI_PSZ, val);
                }
                break;
        default:
                ret = -EOPNOTSUPP;
        }
        return ret;
}

/* Response queue used for RDMA events. */
#define ASYNC_NOTIF_RSPQ 0

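/*
 * Dispatch RDMA control requests: report the adapter's RDMA resource
 * layout (TPT, PBL and RQT regions, user doorbell BAR) and perform CQ
 * and control-QP context operations under the SGE register lock.
 */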
static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data)
{
        int ret = 0;

        switch (req) {
        case RDMA_GET_PARAMS: {
                struct rdma_info *rdma = data;
                struct pci_dev *pdev = adapter->pdev;

                rdma->udbell_physbase = pci_resource_start(pdev, 2);
                rdma->udbell_len = pci_resource_len(pdev, 2);
                rdma->tpt_base = t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT);
                rdma->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);
                rdma->pbl_base = t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT);
                rdma->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);
                rdma->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);
                rdma->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);
                rdma->kdb_addr = adapter->regs + A_SG_KDOORBELL;
                rdma->pdev = pdev;
                break;
        }
        case RDMA_CQ_OP: {
                unsigned long flags;
                struct rdma_cq_op *rdma = data;

                /* may be called in any context */
                spin_lock_irqsave(&adapter->sge.reg_lock, flags);
                ret = t3_sge_cqcntxt_op(adapter, rdma->id, rdma->op,
                                        rdma->credits);
                spin_unlock_irqrestore(&adapter->sge.reg_lock, flags);
                break;
        }
        case RDMA_GET_MEM: {
                struct ch_mem_range *t = data;
                struct mc7 *mem;

                if ((t->addr & 7) || (t->len & 7))
                        return -EINVAL;
                if (t->mem_id == MEM_CM)
                        mem = &adapter->cm;
                else if (t->mem_id == MEM_PMRX)
                        mem = &adapter->pmrx;
                else if (t->mem_id == MEM_PMTX)
                        mem = &adapter->pmtx;
                else
                        return -EINVAL;

                ret = t3_mc7_bd_read(mem, t->addr / 8, t->len / 8,
                                     (u64 *) t->buf);
                if (ret)
                        return ret;
                break;
        }
        case RDMA_CQ_SETUP: {
                struct rdma_cq_setup *rdma = data;

                spin_lock_irq(&adapter->sge.reg_lock);
                ret = t3_sge_init_cqcntxt(adapter, rdma->id,
                                          rdma->base_addr, rdma->size,
                                          ASYNC_NOTIF_RSPQ,
                                          rdma->ovfl_mode, rdma->credits,
                                          rdma->credit_thres);
                spin_unlock_irq(&adapter->sge.reg_lock);
                break;
        }
        case RDMA_CQ_DISABLE:
                spin_lock_irq(&adapter->sge.reg_lock);
                ret = t3_sge_disable_cqcntxt(adapter, *(unsigned int *)data);
                spin_unlock_irq(&adapter->sge.reg_lock);
                break;
        case RDMA_CTRL_QP_SETUP: {
                struct rdma_ctrlqp_setup *rdma = data;

                spin_lock_irq(&adapter->sge.reg_lock);
                ret = t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0,
                                         SGE_CNTXT_RDMA,
                                         ASYNC_NOTIF_RSPQ,
                                         rdma->base_addr, rdma->size,
                                         FW_RI_TID_START, 1, 0);
                spin_unlock_irq(&adapter->sge.reg_lock);
                break;
        }
        case RDMA_GET_MIB: {
                spin_lock(&adapter->stats_lock);
                t3_tp_get_mib_stats(adapter, (struct tp_mib_stats *)data);
                spin_unlock(&adapter->stats_lock);
                break;
        }
        default:
                ret = -EOPNOTSUPP;
        }
        return ret;
}

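/*
 * The t3cdev control operation.  Answers queries about TID ranges, MTU
 * tables, port mappings and offload parameters, delegating the iSCSI
 * and RDMA requests to the helpers above.
 */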
static int cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data)
{
        struct adapter *adapter = tdev2adap(tdev);
        struct tid_range *tid;
        struct mtutab *mtup;
        struct iff_mac *iffmacp;
        struct ddp_params *ddpp;
        struct adap_ports *ports;
        struct ofld_page_info *rx_page_info;
        struct tp_params *tp = &adapter->params.tp;
        int i;

        switch (req) {
        case GET_MAX_OUTSTANDING_WR:
                *(unsigned int *)data = FW_WR_NUM;
                break;
        case GET_WR_LEN:
                *(unsigned int *)data = WR_FLITS;
                break;
        case GET_TX_MAX_CHUNK:
                *(unsigned int *)data = 1 << 20;        /* 1MB */
                break;
        case GET_TID_RANGE:
                tid = data;
                tid->num = t3_mc5_size(&adapter->mc5) -
                    adapter->params.mc5.nroutes -
                    adapter->params.mc5.nfilters - adapter->params.mc5.nservers;
                tid->base = 0;
                break;
        case GET_STID_RANGE:
                tid = data;
                tid->num = adapter->params.mc5.nservers;
                tid->base = t3_mc5_size(&adapter->mc5) - tid->num -
                    adapter->params.mc5.nfilters - adapter->params.mc5.nroutes;
                break;
        case GET_L2T_CAPACITY:
                *(unsigned int *)data = 2048;
                break;
        case GET_MTUS:
                mtup = data;
                mtup->size = NMTUS;
                mtup->mtus = adapter->params.mtus;
                break;
        case GET_IFF_FROM_MAC:
                iffmacp = data;
                iffmacp->dev = get_iff_from_mac(adapter, iffmacp->mac_addr,
                                                iffmacp->vlan_tag &
                                                VLAN_VID_MASK);
                break;
        case GET_DDP_PARAMS:
                ddpp = data;
                ddpp->llimit = t3_read_reg(adapter, A_ULPRX_TDDP_LLIMIT);
                ddpp->ulimit = t3_read_reg(adapter, A_ULPRX_TDDP_ULIMIT);
                ddpp->tag_mask = t3_read_reg(adapter, A_ULPRX_TDDP_TAGMASK);
                break;
        case GET_PORTS:
                ports = data;
                ports->nports = adapter->params.nports;
                for_each_port(adapter, i)
                        ports->lldevs[i] = adapter->port[i];
                break;
        case ULP_ISCSI_GET_PARAMS:
        case ULP_ISCSI_SET_PARAMS:
                if (!offload_running(adapter))
                        return -EAGAIN;
                return cxgb_ulp_iscsi_ctl(adapter, req, data);
        case RDMA_GET_PARAMS:
        case RDMA_CQ_OP:
        case RDMA_CQ_SETUP:
        case RDMA_CQ_DISABLE:
        case RDMA_CTRL_QP_SETUP:
        case RDMA_GET_MEM:
        case RDMA_GET_MIB:
                if (!offload_running(adapter))
                        return -EAGAIN;
                return cxgb_rdma_ctl(adapter, req, data);
        case GET_RX_PAGE_INFO:
                rx_page_info = data;
                rx_page_info->page_size = tp->rx_pg_size;
                rx_page_info->num = tp->rx_num_pgs;
                break;
        case GET_ISCSI_IPV4ADDR: {
                struct iscsi_ipv4addr *p = data;
                struct port_info *pi = netdev_priv(p->dev);
                p->ipv4addr = pi->iscsi_ipv4addr;
                break;
        }
        case GET_EMBEDDED_INFO: {
                struct ch_embedded_info *e = data;

                spin_lock(&adapter->stats_lock);
                t3_get_fw_version(adapter, &e->fw_vers);
                t3_get_tp_version(adapter, &e->tp_vers);
                spin_unlock(&adapter->stats_lock);
                break;
        }
        default:
                return -EOPNOTSUPP;
        }
        return 0;
}

/*
 * Dummy handler for Rx offload packets in case we get an offload packet before
 * proper processing is set up.  This complains and drops the packet as it
 * isn't normal to get offload packets at this stage.
 */
static int rx_offload_blackhole(struct t3cdev *dev, struct sk_buff **skbs,
                                int n)
{
        while (n--)
                dev_kfree_skb_any(skbs[n]);
        return 0;
}

static void dummy_neigh_update(struct t3cdev *dev, struct neighbour *neigh)
{
}

void cxgb3_set_dummy_ops(struct t3cdev *dev)
{
        dev->recv = rx_offload_blackhole;
        dev->neigh_update = dummy_neigh_update;
}

/*
 * Free an active-open TID and return the client context that was
 * associated with it.
 */
void *cxgb3_free_atid(struct t3cdev *tdev, int atid)
{
        struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
        union active_open_entry *p = atid2entry(t, atid);
        void *ctx = p->t3c_tid.ctx;

        spin_lock_bh(&t->atid_lock);
        p->next = t->afree;
        t->afree = p;
        t->atids_in_use--;
        spin_unlock_bh(&t->atid_lock);

        return ctx;
}

EXPORT_SYMBOL(cxgb3_free_atid);

/*
 * Free a server TID and return it to the free pool.
 */
void cxgb3_free_stid(struct t3cdev *tdev, int stid)
{
        struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
        union listen_entry *p = stid2entry(t, stid);

        spin_lock_bh(&t->stid_lock);
        p->next = t->sfree;
        t->sfree = p;
        t->stids_in_use--;
        spin_unlock_bh(&t->stid_lock);
}

EXPORT_SYMBOL(cxgb3_free_stid);

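/*
 * Bind a hardware TID to a client and its context so that incoming CPL
 * messages for the TID can be dispatched to the right handler.
 */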
void cxgb3_insert_tid(struct t3cdev *tdev, struct cxgb3_client *client,
                      void *ctx, unsigned int tid)
{
        struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

        t->tid_tab[tid].client = client;
        t->tid_tab[tid].ctx = ctx;
        atomic_inc(&t->tids_in_use);
}

EXPORT_SYMBOL(cxgb3_insert_tid);

/*
 * Populate a TID_RELEASE WR.  The skb must be already properly sized.
 */
static inline void mk_tid_release(struct sk_buff *skb, unsigned int tid)
{
        struct cpl_tid_release *req;

        skb->priority = CPL_PRIORITY_SETUP;
        req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}

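/*
 * Work-queue handler that drains the deferred TID release list, sending
 * a TID_RELEASE message for each entry.  If no skb can be allocated it
 * falls back to the preallocated nofail_skb; failing that, it requeues
 * the entry and marks the list incomplete so that a later queueing
 * reschedules the work.
 */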
static void t3_process_tid_release_list(struct work_struct *work)
{
        struct t3c_data *td = container_of(work, struct t3c_data,
                                           tid_release_task);
        struct sk_buff *skb;
        struct t3cdev *tdev = td->dev;

        spin_lock_bh(&td->tid_release_lock);
        while (td->tid_release_list) {
                struct t3c_tid_entry *p = td->tid_release_list;

                td->tid_release_list = p->ctx;
                spin_unlock_bh(&td->tid_release_lock);

                skb = alloc_skb(sizeof(struct cpl_tid_release),
                                GFP_KERNEL);
                if (!skb)
                        skb = td->nofail_skb;
                if (!skb) {
                        spin_lock_bh(&td->tid_release_lock);
                        p->ctx = (void *)td->tid_release_list;
                        td->tid_release_list = p;
                        break;
                }
                mk_tid_release(skb, p - td->tid_maps.tid_tab);
                cxgb3_ofld_send(tdev, skb);
                p->ctx = NULL;
                if (skb == td->nofail_skb)
                        td->nofail_skb =
                                alloc_skb(sizeof(struct cpl_tid_release),
                                          GFP_KERNEL);
                spin_lock_bh(&td->tid_release_lock);
        }
        td->release_list_incomplete = (td->tid_release_list == NULL) ? 0 : 1;
        spin_unlock_bh(&td->tid_release_lock);

        if (!td->nofail_skb)
                td->nofail_skb =
                        alloc_skb(sizeof(struct cpl_tid_release),
                                  GFP_KERNEL);
}

/* use ctx as a next pointer in the tid release list */
void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid)
{
        struct t3c_data *td = T3C_DATA(tdev);
        struct t3c_tid_entry *p = &td->tid_maps.tid_tab[tid];

        spin_lock_bh(&td->tid_release_lock);
        p->ctx = (void *)td->tid_release_list;
        p->client = NULL;
        td->tid_release_list = p;
        if (!p->ctx || td->release_list_incomplete)
                schedule_work(&td->tid_release_task);
        spin_unlock_bh(&td->tid_release_lock);
}

EXPORT_SYMBOL(cxgb3_queue_tid_release);

/*
 * Remove a tid from the TID table.  A client may defer processing its last
 * CPL message if it is locked at the time it arrives, and while the message
 * sits in the client's backlog the TID may be reused for another connection.
 * To handle this we atomically switch the TID association if it still points
 * to the original client context.
 */
void cxgb3_remove_tid(struct t3cdev *tdev, void *ctx, unsigned int tid)
{
        struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

        BUG_ON(tid >= t->ntids);
        if (tdev->type == T3A)
                (void)cmpxchg(&t->tid_tab[tid].ctx, ctx, NULL);
        else {
                struct sk_buff *skb;

                skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
                if (likely(skb)) {
                        mk_tid_release(skb, tid);
                        cxgb3_ofld_send(tdev, skb);
                        t->tid_tab[tid].ctx = NULL;
                } else
                        cxgb3_queue_tid_release(tdev, tid);
        }
        atomic_dec(&t->tids_in_use);
}

EXPORT_SYMBOL(cxgb3_remove_tid);

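/*
 * Allocate an active-open TID for the given client and context, keeping
 * MC5_MIN_TIDS hardware TIDs in reserve.  Returns -1 if the pool is
 * exhausted.
 */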
int cxgb3_alloc_atid(struct t3cdev *tdev, struct cxgb3_client *client,
                     void *ctx)
{
        int atid = -1;
        struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

        spin_lock_bh(&t->atid_lock);
        if (t->afree &&
            t->atids_in_use + atomic_read(&t->tids_in_use) + MC5_MIN_TIDS <=
            t->ntids) {
                union active_open_entry *p = t->afree;

                atid = (p - t->atid_tab) + t->atid_base;
                t->afree = p->next;
                p->t3c_tid.ctx = ctx;
                p->t3c_tid.client = client;
                t->atids_in_use++;
        }
        spin_unlock_bh(&t->atid_lock);
        return atid;
}

EXPORT_SYMBOL(cxgb3_alloc_atid);

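/*
 * Allocate a server TID for the given client and context, or -1 if the
 * free list is empty.
 */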
int cxgb3_alloc_stid(struct t3cdev *tdev, struct cxgb3_client *client,
                     void *ctx)
{
        int stid = -1;
        struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

        spin_lock_bh(&t->stid_lock);
        if (t->sfree) {
                union listen_entry *p = t->sfree;

                stid = (p - t->stid_tab) + t->stid_base;
                t->sfree = p->next;
                p->t3c_tid.ctx = ctx;
                p->t3c_tid.client = client;
                t->stids_in_use++;
        }
        spin_unlock_bh(&t->stid_lock);
        return stid;
}

EXPORT_SYMBOL(cxgb3_alloc_stid);

/* Get the t3cdev associated with a net_device */
struct t3cdev *dev2t3cdev(struct net_device *dev)
{
        const struct port_info *pi = netdev_priv(dev);

        return (struct t3cdev *)pi->adapter;
}

EXPORT_SYMBOL(dev2t3cdev);

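/*
 * CPL reply handlers for SMT, L2T and routing-table writes.  These only
 * need to check the completion status; anything other than CPL_ERR_NONE
 * is logged.
 */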
static int do_smt_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
        struct cpl_smt_write_rpl *rpl = cplhdr(skb);

        if (rpl->status != CPL_ERR_NONE)
                printk(KERN_ERR
                       "Unexpected SMT_WRITE_RPL status %u for entry %u\n",
                       rpl->status, GET_TID(rpl));

        return CPL_RET_BUF_DONE;
}

static int do_l2t_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
        struct cpl_l2t_write_rpl *rpl = cplhdr(skb);

        if (rpl->status != CPL_ERR_NONE)
                printk(KERN_ERR
                       "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
                       rpl->status, GET_TID(rpl));

        return CPL_RET_BUF_DONE;
}

static int do_rte_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
        struct cpl_rte_write_rpl *rpl = cplhdr(skb);

        if (rpl->status != CPL_ERR_NONE)
                printk(KERN_ERR
                       "Unexpected RTE_WRITE_RPL status %u for entry %u\n",
                       rpl->status, GET_TID(rpl));

        return CPL_RET_BUF_DONE;
}

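/*
 * Dispatchers that look up the owning client by ATID, STID or hardware
 * TID and hand the CPL message to the client's registered handler.
 */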
static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
        struct cpl_act_open_rpl *rpl = cplhdr(skb);
        unsigned int atid = G_TID(ntohl(rpl->atid));
        struct t3c_tid_entry *t3c_tid;

        t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
        if (t3c_tid && t3c_tid->ctx && t3c_tid->client &&
            t3c_tid->client->handlers &&
            t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) {
                return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, skb,
                                                                    t3c_tid->ctx);
        } else {
                printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
                       dev->name, CPL_ACT_OPEN_RPL);
                return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
        }
}

static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
        union opcode_tid *p = cplhdr(skb);
        unsigned int stid = G_TID(ntohl(p->opcode_tid));
        struct t3c_tid_entry *t3c_tid;

        t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
        if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
            t3c_tid->client->handlers[p->opcode]) {
                return t3c_tid->client->handlers[p->opcode] (dev, skb,
                                                             t3c_tid->ctx);
        } else {
                printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
                       dev->name, p->opcode);
                return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
        }
}

static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
        union opcode_tid *p = cplhdr(skb);
        unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
        struct t3c_tid_entry *t3c_tid;

        t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
        if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
            t3c_tid->client->handlers[p->opcode]) {
                return t3c_tid->client->handlers[p->opcode]
                    (dev, skb, t3c_tid->ctx);
        } else {
                printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
                       dev->name, p->opcode);
                return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
        }
}

static int do_cr(struct t3cdev *dev, struct sk_buff *skb)
{
        struct cpl_pass_accept_req *req = cplhdr(skb);
        unsigned int stid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
        struct tid_info *t = &(T3C_DATA(dev))->tid_maps;
        struct t3c_tid_entry *t3c_tid;
        unsigned int tid = GET_TID(req);

        if (unlikely(tid >= t->ntids)) {
                printk("%s: passive open TID %u too large\n",
                       dev->name, tid);
                t3_fatal_err(tdev2adap(dev));
                return CPL_RET_BUF_DONE;
        }

        t3c_tid = lookup_stid(t, stid);
        if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
            t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) {
                return t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]
                    (dev, skb, t3c_tid->ctx);
        } else {
                printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
                       dev->name, CPL_PASS_ACCEPT_REQ);
                return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
        }
}

/*
 * Returns an sk_buff for a reply CPL message of size len.  If the input
 * sk_buff has no other users it is trimmed and reused, otherwise a new buffer
 * is allocated.  The input skb must be of size at least len.  Note that this
 * operation does not destroy the original skb data even if it decides to reuse
 * the buffer.
 */
static struct sk_buff *cxgb3_get_cpl_reply_skb(struct sk_buff *skb, size_t len,
                                               gfp_t gfp)
{
        if (likely(!skb_cloned(skb))) {
                BUG_ON(skb->len < len);
                __skb_trim(skb, len);
                skb_get(skb);
        } else {
                skb = alloc_skb(len, gfp);
                if (skb)
                        __skb_put(skb, len);
        }
        return skb;
}

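/*
 * Handle an ABORT_REQ_RSS message.  If no client owns the TID, reply
 * with an ABORT_RPL ourselves, unless the abort is merely negative
 * advice, which is ignored.
 */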
static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb)
{
        union opcode_tid *p = cplhdr(skb);
        unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
        struct t3c_tid_entry *t3c_tid;

        t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
        if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
            t3c_tid->client->handlers[p->opcode]) {
                return t3c_tid->client->handlers[p->opcode]
                    (dev, skb, t3c_tid->ctx);
        } else {
                struct cpl_abort_req_rss *req = cplhdr(skb);
                struct cpl_abort_rpl *rpl;
                struct sk_buff *reply_skb;
                unsigned int tid = GET_TID(req);
                u8 cmd = req->status;

                if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
                    req->status == CPL_ERR_PERSIST_NEG_ADVICE)
                        goto out;

                reply_skb = cxgb3_get_cpl_reply_skb(skb,
                                                    sizeof(struct
                                                           cpl_abort_rpl),
                                                    GFP_ATOMIC);

                if (!reply_skb) {
                        printk("do_abort_req_rss: couldn't get skb!\n");
                        goto out;
                }
                reply_skb->priority = CPL_PRIORITY_DATA;
                __skb_trim(reply_skb, sizeof(struct cpl_abort_rpl));
                rpl = cplhdr(reply_skb);
                rpl->wr.wr_hi =
                    htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
                rpl->wr.wr_lo = htonl(V_WR_TID(tid));
                OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
                rpl->cmd = cmd;
                cxgb3_ofld_send(dev, reply_skb);
out:
                return CPL_RET_BUF_DONE;
        }
}

static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb)
{
        struct cpl_act_establish *req = cplhdr(skb);
        unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
        struct tid_info *t = &(T3C_DATA(dev))->tid_maps;
        struct t3c_tid_entry *t3c_tid;
        unsigned int tid = GET_TID(req);

        if (unlikely(tid >= t->ntids)) {
                printk("%s: active establish TID %u too large\n",
                       dev->name, tid);
                t3_fatal_err(tdev2adap(dev));
                return CPL_RET_BUF_DONE;
        }

        t3c_tid = lookup_atid(t, atid);
        if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
            t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) {
                return t3c_tid->client->handlers[CPL_ACT_ESTABLISH]
                    (dev, skb, t3c_tid->ctx);
        } else {
                printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
                       dev->name, CPL_ACT_ESTABLISH);
                return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
        }
}

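/*
 * Hand a firmware trace packet up the regular network stack against the
 * port's net_device so it can be picked up by packet taps.
 */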
static int do_trace(struct t3cdev *dev, struct sk_buff *skb)
{
        struct cpl_trace_pkt *p = cplhdr(skb);

        skb->protocol = htons(0xffff);
        skb->dev = dev->lldev;
        skb_pull(skb, sizeof(*p));
        skb_reset_mac_header(skb);
        netif_receive_skb(skb);
        return 0;
}

/*
 * That skb would better have come from process_responses() where we abuse
 * ->priority and ->csum to carry our data.  NB: if we get to per-arch
 * ->csum, the things might get really interesting here.
 */

static inline u32 get_hwtid(struct sk_buff *skb)
{
        return ntohl((__force __be32)skb->priority) >> 8 & 0xfffff;
}

static inline u32 get_opcode(struct sk_buff *skb)
{
        return G_OPCODE(ntohl((__force __be32)skb->csum));
}

static int do_term(struct t3cdev *dev, struct sk_buff *skb)
{
        unsigned int hwtid = get_hwtid(skb);
        unsigned int opcode = get_opcode(skb);
        struct t3c_tid_entry *t3c_tid;

        t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
        if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
            t3c_tid->client->handlers[opcode]) {
                return t3c_tid->client->handlers[opcode] (dev, skb,
                                                          t3c_tid->ctx);
        } else {
                printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
                       dev->name, opcode);
                return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
        }
}

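/*
 * Netevent notifier callback: propagate neighbour updates and route
 * redirects into the L2T state of the affected offload devices.
 */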
static int nb_callback(struct notifier_block *self, unsigned long event,
                       void *ctx)
{
        switch (event) {
        case (NETEVENT_NEIGH_UPDATE): {
                cxgb_neigh_update((struct neighbour *)ctx);
                break;
        }
        case (NETEVENT_REDIRECT): {
                struct netevent_redirect *nr = ctx;
                cxgb_redirect(nr->old, nr->new);
                cxgb_neigh_update(dst_get_neighbour_noref(nr->new));
                break;
        }
        default:
                break;
        }
        return 0;
}

static struct notifier_block nb = {
        .notifier_call = nb_callback
};

/*
 * Process a received packet with an unknown/unexpected CPL opcode.
 */
static int do_bad_cpl(struct t3cdev *dev, struct sk_buff *skb)
{
        printk(KERN_ERR "%s: received bad CPL command 0x%x\n", dev->name,
               *skb->data);
        return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}

/*
 * Handlers for each CPL opcode
 */
static cpl_handler_func cpl_handlers[NUM_CPL_CMDS];

/*
 * Add a new handler to the CPL dispatch table.  A NULL handler may be supplied
 * to unregister an existing handler.
 */
void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h)
{
        if (opcode < NUM_CPL_CMDS)
                cpl_handlers[opcode] = h ? h : do_bad_cpl;
        else
                printk(KERN_ERR "T3C: handler registration for "
                       "opcode %x failed\n", opcode);
}

EXPORT_SYMBOL(t3_register_cpl_handler);

/*
 * T3CDEV's receive method.
 */
static int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n)
{
        while (n--) {
                struct sk_buff *skb = *skbs++;
                unsigned int opcode = get_opcode(skb);
                int ret = cpl_handlers[opcode] (dev, skb);

#if VALIDATE_TID
                if (ret & CPL_RET_UNKNOWN_TID) {
                        union opcode_tid *p = cplhdr(skb);

                        printk(KERN_ERR "%s: CPL message (opcode %u) had "
                               "unknown TID %u\n", dev->name, opcode,
                               G_TID(ntohl(p->opcode_tid)));
                }
#endif
                if (ret & CPL_RET_BUF_DONE)
                        kfree_skb(skb);
        }
        return 0;
}

1039 | ||
1040 | /* | |
1041 | * Sends an sk_buff to a T3C driver after dealing with any active network taps. | |
1042 | */ | |
1043 | int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb) | |
1044 | { | |
1045 | int r; | |
1046 | ||
1047 | local_bh_disable(); | |
1048 | r = dev->send(dev, skb); | |
1049 | local_bh_enable(); | |
1050 | return r; | |
1051 | } | |
1052 | ||
1053 | EXPORT_SYMBOL(cxgb3_ofld_send); | |
1054 | ||
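/*
 * Returns whether a net_device is a port of one of the registered
 * offload-capable adapters.
 */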
static int is_offloading(struct net_device *dev)
{
        struct adapter *adapter;
        int i;

        read_lock_bh(&adapter_list_lock);
        list_for_each_entry(adapter, &adapter_list, adapter_list) {
                for_each_port(adapter, i) {
                        if (dev == adapter->port[i]) {
                                read_unlock_bh(&adapter_list_lock);
                                return 1;
                        }
                }
        }
        read_unlock_bh(&adapter_list_lock);
        return 0;
}

static void cxgb_neigh_update(struct neighbour *neigh)
{
        struct net_device *dev;

        if (!neigh)
                return;
        dev = neigh->dev;
        if (dev && (is_offloading(dev))) {
                struct t3cdev *tdev = dev2t3cdev(dev);

                BUG_ON(!tdev);
                t3_l2t_update(tdev, neigh);
        }
}

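/*
 * Send a SET_TCB_FIELD work request that points the connection's TCB at
 * a new L2T entry.
 */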
static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
{
        struct sk_buff *skb;
        struct cpl_set_tcb_field *req;

        skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
        if (!skb) {
                printk(KERN_ERR "%s: cannot allocate skb!\n", __func__);
                return;
        }
        skb->priority = CPL_PRIORITY_CONTROL;
        req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
        req->reply = 0;
        req->cpu_idx = 0;
        req->word = htons(W_TCB_L2T_IX);
        req->mask = cpu_to_be64(V_TCB_L2T_IX(M_TCB_L2T_IX));
        req->val = cpu_to_be64(V_TCB_L2T_IX(e->idx));
        tdev->send(tdev, skb);
}

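/*
 * Handle a route redirect between two offloaded destinations: allocate
 * an L2T entry for the new route and let each client with a connection
 * on the old route decide whether its TCB should be switched over.
 */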
static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
{
        struct net_device *olddev, *newdev;
        struct neighbour *n;
        struct tid_info *ti;
        struct t3cdev *tdev;
        u32 tid;
        int update_tcb;
        struct l2t_entry *e;
        struct t3c_tid_entry *te;

        n = dst_get_neighbour_noref(old);
        if (!n)
                return;
        olddev = n->dev;

        n = dst_get_neighbour_noref(new);
        if (!n)
                return;
        newdev = n->dev;

        if (!is_offloading(olddev))
                return;
        if (!is_offloading(newdev)) {
                printk(KERN_WARNING "%s: Redirect to non-offload "
                       "device ignored.\n", __func__);
                return;
        }
        tdev = dev2t3cdev(olddev);
        BUG_ON(!tdev);
        if (tdev != dev2t3cdev(newdev)) {
                printk(KERN_WARNING "%s: Redirect to different "
                       "offload device ignored.\n", __func__);
                return;
        }

        /* Add new L2T entry */
        e = t3_l2t_get(tdev, new, newdev);
        if (!e) {
                printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
                       __func__);
                return;
        }

        /* Walk tid table and notify clients of dst change. */
        ti = &(T3C_DATA(tdev))->tid_maps;
        for (tid = 0; tid < ti->ntids; tid++) {
                te = lookup_tid(ti, tid);
                BUG_ON(!te);
                if (te && te->ctx && te->client && te->client->redirect) {
                        update_tcb = te->client->redirect(te->ctx, old, new, e);
                        if (update_tcb) {
                                rcu_read_lock();
                                l2t_hold(L2DATA(tdev), e);
                                rcu_read_unlock();
                                set_l2t_ix(tdev, tid, e);
                        }
                }
        }
        l2t_release(tdev, e);
}

/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *cxgb_alloc_mem(unsigned long size)
{
        void *p = kzalloc(size, GFP_KERNEL);

        if (!p)
                p = vzalloc(size);
        return p;
}

/*
 * Free memory allocated through cxgb_alloc_mem().
 */
void cxgb_free_mem(void *addr)
{
        if (is_vmalloc_addr(addr))
                vfree(addr);
        else
                kfree(addr);
}

/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
static int init_tid_tabs(struct tid_info *t, unsigned int ntids,
                         unsigned int natids, unsigned int nstids,
                         unsigned int atid_base, unsigned int stid_base)
{
        unsigned long size = ntids * sizeof(*t->tid_tab) +
            natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab);

        t->tid_tab = cxgb_alloc_mem(size);
        if (!t->tid_tab)
                return -ENOMEM;

        t->stid_tab = (union listen_entry *)&t->tid_tab[ntids];
        t->atid_tab = (union active_open_entry *)&t->stid_tab[nstids];
        t->ntids = ntids;
        t->nstids = nstids;
        t->stid_base = stid_base;
        t->sfree = NULL;
        t->natids = natids;
        t->atid_base = atid_base;
        t->afree = NULL;
        t->stids_in_use = t->atids_in_use = 0;
        atomic_set(&t->tids_in_use, 0);
        spin_lock_init(&t->stid_lock);
        spin_lock_init(&t->atid_lock);

        /*
         * Setup the free lists for stid_tab and atid_tab.
         */
        if (nstids) {
                while (--nstids)
                        t->stid_tab[nstids - 1].next = &t->stid_tab[nstids];
                t->sfree = t->stid_tab;
        }
        if (natids) {
                while (--natids)
                        t->atid_tab[natids - 1].next = &t->atid_tab[natids];
                t->afree = t->atid_tab;
        }
        return 0;
}

static void free_tid_maps(struct tid_info *t)
{
        cxgb_free_mem(t->tid_tab);
}

static inline void add_adapter(struct adapter *adap)
{
        write_lock_bh(&adapter_list_lock);
        list_add_tail(&adap->adapter_list, &adapter_list);
        write_unlock_bh(&adapter_list_lock);
}

static inline void remove_adapter(struct adapter *adap)
{
        write_lock_bh(&adapter_list_lock);
        list_del(&adap->adapter_list);
        write_unlock_bh(&adapter_list_lock);
}

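/*
 * Bring up offload support on an adapter: query its resource limits,
 * build the L2T and TID tables, install the real receive and neighbour
 * update methods, and register the netevent notifier on first use.
 */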
int cxgb3_offload_activate(struct adapter *adapter)
{
        struct t3cdev *dev = &adapter->tdev;
        int natids, err;
        struct t3c_data *t;
        struct tid_range stid_range, tid_range;
        struct mtutab mtutab;
        unsigned int l2t_capacity;

        t = kzalloc(sizeof(*t), GFP_KERNEL);
        if (!t)
                return -ENOMEM;

        err = -EOPNOTSUPP;
        if (dev->ctl(dev, GET_TX_MAX_CHUNK, &t->tx_max_chunk) < 0 ||
            dev->ctl(dev, GET_MAX_OUTSTANDING_WR, &t->max_wrs) < 0 ||
            dev->ctl(dev, GET_L2T_CAPACITY, &l2t_capacity) < 0 ||
            dev->ctl(dev, GET_MTUS, &mtutab) < 0 ||
            dev->ctl(dev, GET_TID_RANGE, &tid_range) < 0 ||
            dev->ctl(dev, GET_STID_RANGE, &stid_range) < 0)
                goto out_free;

        err = -ENOMEM;
        RCU_INIT_POINTER(dev->l2opt, t3_init_l2t(l2t_capacity));
        if (!L2DATA(dev))
                goto out_free;

        natids = min(tid_range.num / 2, MAX_ATIDS);
        err = init_tid_tabs(&t->tid_maps, tid_range.num, natids,
                            stid_range.num, ATID_BASE, stid_range.base);
        if (err)
                goto out_free_l2t;

        t->mtus = mtutab.mtus;
        t->nmtus = mtutab.size;

        INIT_WORK(&t->tid_release_task, t3_process_tid_release_list);
        spin_lock_init(&t->tid_release_lock);
        INIT_LIST_HEAD(&t->list_node);
        t->dev = dev;

        T3C_DATA(dev) = t;
        dev->recv = process_rx;
        dev->neigh_update = t3_l2t_update;

        /* Register netevent handler once */
        if (list_empty(&adapter_list))
                register_netevent_notifier(&nb);

        t->nofail_skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_KERNEL);
        t->release_list_incomplete = 0;

        add_adapter(adapter);
        return 0;

out_free_l2t:
        t3_free_l2t(L2DATA(dev));
        RCU_INIT_POINTER(dev->l2opt, NULL);
out_free:
        kfree(t);
        return err;
}

static void clean_l2_data(struct rcu_head *head)
{
        struct l2t_data *d = container_of(head, struct l2t_data, rcu_head);
        t3_free_l2t(d);
}

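/*
 * Tear down offload state for an adapter.  The L2T table is unhooked
 * under RCU and freed from an RCU callback once readers are done.
 */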
void cxgb3_offload_deactivate(struct adapter *adapter)
{
        struct t3cdev *tdev = &adapter->tdev;
        struct t3c_data *t = T3C_DATA(tdev);
        struct l2t_data *d;

        remove_adapter(adapter);
        if (list_empty(&adapter_list))
                unregister_netevent_notifier(&nb);

        free_tid_maps(&t->tid_maps);
        T3C_DATA(tdev) = NULL;
        rcu_read_lock();
        d = L2DATA(tdev);
        rcu_read_unlock();
        RCU_INIT_POINTER(tdev->l2opt, NULL);
        call_rcu(&d->rcu_head, clean_l2_data);
        if (t->nofail_skb)
                kfree_skb(t->nofail_skb);
        kfree(t);
}

static inline void register_tdev(struct t3cdev *tdev)
{
        static int unit;

        mutex_lock(&cxgb3_db_lock);
        snprintf(tdev->name, sizeof(tdev->name), "ofld_dev%d", unit++);
        list_add_tail(&tdev->ofld_dev_list, &ofld_dev_list);
        mutex_unlock(&cxgb3_db_lock);
}

static inline void unregister_tdev(struct t3cdev *tdev)
{
        mutex_lock(&cxgb3_db_lock);
        list_del(&tdev->ofld_dev_list);
        mutex_unlock(&cxgb3_db_lock);
}

static inline int adap2type(struct adapter *adapter)
{
        int type = 0;

        switch (adapter->params.rev) {
        case T3_REV_A:
                type = T3A;
                break;
        case T3_REV_B:
        case T3_REV_B2:
                type = T3B;
                break;
        case T3_REV_C:
                type = T3C;
                break;
        }
        return type;
}

void __devinit cxgb3_adapter_ofld(struct adapter *adapter)
{
        struct t3cdev *tdev = &adapter->tdev;

        INIT_LIST_HEAD(&tdev->ofld_dev_list);

        cxgb3_set_dummy_ops(tdev);
        tdev->send = t3_offload_tx;
        tdev->ctl = cxgb_offload_ctl;
        tdev->type = adap2type(adapter);

        register_tdev(tdev);
}

void __devexit cxgb3_adapter_unofld(struct adapter *adapter)
{
        struct t3cdev *tdev = &adapter->tdev;

        tdev->recv = NULL;
        tdev->neigh_update = NULL;

        unregister_tdev(tdev);
}

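/*
 * One-time module initialization of the CPL dispatch table: default
 * every opcode to do_bad_cpl, then install the specific handlers.
 */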
void __init cxgb3_offload_init(void)
{
        int i;

        for (i = 0; i < NUM_CPL_CMDS; ++i)
                cpl_handlers[i] = do_bad_cpl;

        t3_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl);
        t3_register_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
        t3_register_cpl_handler(CPL_RTE_WRITE_RPL, do_rte_write_rpl);
        t3_register_cpl_handler(CPL_PASS_OPEN_RPL, do_stid_rpl);
        t3_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_stid_rpl);
        t3_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_cr);
        t3_register_cpl_handler(CPL_PASS_ESTABLISH, do_hwtid_rpl);
        t3_register_cpl_handler(CPL_ABORT_RPL_RSS, do_hwtid_rpl);
        t3_register_cpl_handler(CPL_ABORT_RPL, do_hwtid_rpl);
        t3_register_cpl_handler(CPL_RX_URG_NOTIFY, do_hwtid_rpl);
        t3_register_cpl_handler(CPL_RX_DATA, do_hwtid_rpl);
        t3_register_cpl_handler(CPL_TX_DATA_ACK, do_hwtid_rpl);
        t3_register_cpl_handler(CPL_TX_DMA_ACK, do_hwtid_rpl);
        t3_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl);
        t3_register_cpl_handler(CPL_PEER_CLOSE, do_hwtid_rpl);
        t3_register_cpl_handler(CPL_CLOSE_CON_RPL, do_hwtid_rpl);
        t3_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req_rss);
        t3_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
        t3_register_cpl_handler(CPL_SET_TCB_RPL, do_hwtid_rpl);
        t3_register_cpl_handler(CPL_GET_TCB_RPL, do_hwtid_rpl);
        t3_register_cpl_handler(CPL_RDMA_TERMINATE, do_term);
        t3_register_cpl_handler(CPL_RDMA_EC_STATUS, do_hwtid_rpl);
        t3_register_cpl_handler(CPL_TRACE_PKT, do_trace);
        t3_register_cpl_handler(CPL_RX_DATA_DDP, do_hwtid_rpl);
        t3_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_hwtid_rpl);
        t3_register_cpl_handler(CPL_ISCSI_HDR, do_hwtid_rpl);
}