/*
 * Merge branch 'pci/resource' into next
 * [deliverable/linux.git] / drivers/infiniband/hw/ocrdma/ocrdma_main.c
 */
1 /*******************************************************************
2 * This file is part of the Emulex RoCE Device Driver for *
3 * RoCE (RDMA over Converged Ethernet) adapters. *
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * *
8 * This program is free software; you can redistribute it and/or *
9 * modify it under the terms of version 2 of the GNU General *
10 * Public License as published by the Free Software Foundation. *
11 * This program is distributed in the hope that it will be useful. *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for *
17 * more details, a copy of which can be found in the file COPYING *
18 * included with this package. *
19 *
20 * Contact Information:
21 * linux-drivers@emulex.com
22 *
23 * Emulex
24 * 3333 Susan Street
25 * Costa Mesa, CA 92626
26 *******************************************************************/
27
28 #include <linux/module.h>
29 #include <linux/idr.h>
30 #include <rdma/ib_verbs.h>
31 #include <rdma/ib_user_verbs.h>
32 #include <rdma/ib_addr.h>
33
34 #include <linux/netdevice.h>
35 #include <net/addrconf.h>
36
37 #include "ocrdma.h"
38 #include "ocrdma_verbs.h"
39 #include "ocrdma_ah.h"
40 #include "be_roce.h"
41 #include "ocrdma_hw.h"
42 #include "ocrdma_abi.h"
43
44 MODULE_VERSION(OCRDMA_ROCE_DEV_VERSION);
45 MODULE_DESCRIPTION("Emulex RoCE HCA Driver");
46 MODULE_AUTHOR("Emulex Corporation");
47 MODULE_LICENSE("GPL");
48
49 static LIST_HEAD(ocrdma_dev_list);
50 static DEFINE_SPINLOCK(ocrdma_devlist_lock);
51 static DEFINE_IDR(ocrdma_dev_id);
52
53 static union ib_gid ocrdma_zero_sgid;
54
55 void ocrdma_get_guid(struct ocrdma_dev *dev, u8 *guid)
56 {
57 u8 mac_addr[6];
58
59 memcpy(&mac_addr[0], &dev->nic_info.mac_addr[0], ETH_ALEN);
60 guid[0] = mac_addr[0] ^ 2;
61 guid[1] = mac_addr[1];
62 guid[2] = mac_addr[2];
63 guid[3] = 0xff;
64 guid[4] = 0xfe;
65 guid[5] = mac_addr[3];
66 guid[6] = mac_addr[4];
67 guid[7] = mac_addr[5];
68 }
69
70 static bool ocrdma_add_sgid(struct ocrdma_dev *dev, union ib_gid *new_sgid)
71 {
72 int i;
73 unsigned long flags;
74
75 memset(&ocrdma_zero_sgid, 0, sizeof(union ib_gid));
76
77
78 spin_lock_irqsave(&dev->sgid_lock, flags);
79 for (i = 0; i < OCRDMA_MAX_SGID; i++) {
80 if (!memcmp(&dev->sgid_tbl[i], &ocrdma_zero_sgid,
81 sizeof(union ib_gid))) {
82 /* found free entry */
83 memcpy(&dev->sgid_tbl[i], new_sgid,
84 sizeof(union ib_gid));
85 spin_unlock_irqrestore(&dev->sgid_lock, flags);
86 return true;
87 } else if (!memcmp(&dev->sgid_tbl[i], new_sgid,
88 sizeof(union ib_gid))) {
89 /* entry already present, no addition is required. */
90 spin_unlock_irqrestore(&dev->sgid_lock, flags);
91 return false;
92 }
93 }
94 spin_unlock_irqrestore(&dev->sgid_lock, flags);
95 return false;
96 }
97
98 static bool ocrdma_del_sgid(struct ocrdma_dev *dev, union ib_gid *sgid)
99 {
100 int found = false;
101 int i;
102 unsigned long flags;
103
104
105 spin_lock_irqsave(&dev->sgid_lock, flags);
106 /* first is default sgid, which cannot be deleted. */
107 for (i = 1; i < OCRDMA_MAX_SGID; i++) {
108 if (!memcmp(&dev->sgid_tbl[i], sgid, sizeof(union ib_gid))) {
109 /* found matching entry */
110 memset(&dev->sgid_tbl[i], 0, sizeof(union ib_gid));
111 found = true;
112 break;
113 }
114 }
115 spin_unlock_irqrestore(&dev->sgid_lock, flags);
116 return found;
117 }
118
/*
 * Update a device's GID table in response to a netdev address event.
 *
 * @event:  NETDEV_UP adds @gid, NETDEV_DOWN removes it; any other
 *          event leaves the table untouched.
 * @netdev: interface the address belongs to; a VLAN device is mapped
 *          to its real lower device before matching.
 * @gid:    GID derived from the IPv4/IPv6 address by the callers.
 *
 * Returns NOTIFY_DONE when no registered ocrdma device backs @netdev,
 * NOTIFY_OK otherwise.  When the table actually changed, an
 * IB_EVENT_GID_CHANGE event is dispatched to IB consumers.
 */
static int ocrdma_addr_event(unsigned long event, struct net_device *netdev,
			     union ib_gid *gid)
{
	struct ib_event gid_event;
	struct ocrdma_dev *dev;
	bool found = false;
	bool updated = false;
	bool is_vlan = false;

	is_vlan = netdev->priv_flags & IFF_802_1Q_VLAN;
	if (is_vlan)
		netdev = vlan_dev_real_dev(netdev);

	/* Look up the ocrdma device backing this netdev. */
	rcu_read_lock();
	list_for_each_entry_rcu(dev, &ocrdma_dev_list, entry) {
		if (dev->nic_info.netdev == netdev) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	if (!found)
		return NOTIFY_DONE;

	/* NOTE(review): dev is dereferenced after rcu_read_unlock().
	 * ocrdma_remove() defers the final free via call_rcu(), but that
	 * grace period may elapse before dev_lock is taken here — confirm
	 * the teardown ordering makes this safe.
	 */
	mutex_lock(&dev->dev_lock);
	switch (event) {
	case NETDEV_UP:
		updated = ocrdma_add_sgid(dev, gid);
		break;
	case NETDEV_DOWN:
		updated = ocrdma_del_sgid(dev, gid);
		break;
	default:
		break;
	}
	if (updated) {
		/* GID table updated, notify the consumers about it */
		gid_event.device = &dev->ibdev;
		gid_event.element.port_num = 1;
		gid_event.event = IB_EVENT_GID_CHANGE;
		ib_dispatch_event(&gid_event);
	}
	mutex_unlock(&dev->dev_lock);
	return NOTIFY_OK;
}
165
166 static int ocrdma_inetaddr_event(struct notifier_block *notifier,
167 unsigned long event, void *ptr)
168 {
169 struct in_ifaddr *ifa = ptr;
170 union ib_gid gid;
171 struct net_device *netdev = ifa->ifa_dev->dev;
172
173 ipv6_addr_set_v4mapped(ifa->ifa_address, (struct in6_addr *)&gid);
174 return ocrdma_addr_event(event, netdev, &gid);
175 }
176
/* Receives IPv4 address add/remove notifications from the inet core. */
static struct notifier_block ocrdma_inetaddr_notifier = {
	.notifier_call = ocrdma_inetaddr_event
};
180
#if IS_ENABLED(CONFIG_IPV6)

/*
 * IPv6 address notifier callback: the IPv6 address is used directly as
 * the GID and handed to the common event handler.
 */
static int ocrdma_inet6addr_event(struct notifier_block *notifier,
				  unsigned long event, void *ptr)
{
	struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
	union ib_gid *gid = (union ib_gid *)&ifa->addr;
	struct net_device *netdev = ifa->idev->dev;
	return ocrdma_addr_event(event, netdev, gid);
}

/* Receives IPv6 address add/remove notifications from the inet6 core. */
static struct notifier_block ocrdma_inet6addr_notifier = {
	.notifier_call = ocrdma_inet6addr_event
};

#endif /* CONFIG_IPV6 */
197
/* All ocrdma ports are RoCE, so the link layer is always Ethernet. */
static enum rdma_link_layer ocrdma_link_layer(struct ib_device *device,
					      u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}
203
/*
 * Populate the ib_device structure (name, node GUID/desc, supported
 * uverbs command mask, and all verbs entry points) and register it
 * with the IB core.  SRQ verbs are advertised only on GEN2 hardware.
 *
 * Returns 0 on success or the error from ib_register_device().
 */
static int ocrdma_register_device(struct ocrdma_dev *dev)
{
	strlcpy(dev->ibdev.name, "ocrdma%d", IB_DEVICE_NAME_MAX);
	ocrdma_get_guid(dev, (u8 *)&dev->ibdev.node_guid);
	memcpy(dev->ibdev.node_desc, OCRDMA_NODE_DESC,
	       sizeof(OCRDMA_NODE_DESC));
	dev->ibdev.owner = THIS_MODULE;
	dev->ibdev.uverbs_abi_ver = OCRDMA_ABI_VERSION;
	/* Core verbs exposed to user space. */
	dev->ibdev.uverbs_cmd_mask =
	    OCRDMA_UVERBS(GET_CONTEXT) |
	    OCRDMA_UVERBS(QUERY_DEVICE) |
	    OCRDMA_UVERBS(QUERY_PORT) |
	    OCRDMA_UVERBS(ALLOC_PD) |
	    OCRDMA_UVERBS(DEALLOC_PD) |
	    OCRDMA_UVERBS(REG_MR) |
	    OCRDMA_UVERBS(DEREG_MR) |
	    OCRDMA_UVERBS(CREATE_COMP_CHANNEL) |
	    OCRDMA_UVERBS(CREATE_CQ) |
	    OCRDMA_UVERBS(RESIZE_CQ) |
	    OCRDMA_UVERBS(DESTROY_CQ) |
	    OCRDMA_UVERBS(REQ_NOTIFY_CQ) |
	    OCRDMA_UVERBS(CREATE_QP) |
	    OCRDMA_UVERBS(MODIFY_QP) |
	    OCRDMA_UVERBS(QUERY_QP) |
	    OCRDMA_UVERBS(DESTROY_QP) |
	    OCRDMA_UVERBS(POLL_CQ) |
	    OCRDMA_UVERBS(POST_SEND) |
	    OCRDMA_UVERBS(POST_RECV);

	/* Address-handle verbs. */
	dev->ibdev.uverbs_cmd_mask |=
	    OCRDMA_UVERBS(CREATE_AH) |
	    OCRDMA_UVERBS(MODIFY_AH) |
	    OCRDMA_UVERBS(QUERY_AH) |
	    OCRDMA_UVERBS(DESTROY_AH);

	dev->ibdev.node_type = RDMA_NODE_IB_CA;
	dev->ibdev.phys_port_cnt = 1;
	dev->ibdev.num_comp_vectors = 1;

	/* mandatory verbs. */
	dev->ibdev.query_device = ocrdma_query_device;
	dev->ibdev.query_port = ocrdma_query_port;
	dev->ibdev.modify_port = ocrdma_modify_port;
	dev->ibdev.query_gid = ocrdma_query_gid;
	dev->ibdev.get_link_layer = ocrdma_link_layer;
	dev->ibdev.alloc_pd = ocrdma_alloc_pd;
	dev->ibdev.dealloc_pd = ocrdma_dealloc_pd;

	dev->ibdev.create_cq = ocrdma_create_cq;
	dev->ibdev.destroy_cq = ocrdma_destroy_cq;
	dev->ibdev.resize_cq = ocrdma_resize_cq;

	dev->ibdev.create_qp = ocrdma_create_qp;
	dev->ibdev.modify_qp = ocrdma_modify_qp;
	dev->ibdev.query_qp = ocrdma_query_qp;
	dev->ibdev.destroy_qp = ocrdma_destroy_qp;

	dev->ibdev.query_pkey = ocrdma_query_pkey;
	dev->ibdev.create_ah = ocrdma_create_ah;
	dev->ibdev.destroy_ah = ocrdma_destroy_ah;
	dev->ibdev.query_ah = ocrdma_query_ah;
	dev->ibdev.modify_ah = ocrdma_modify_ah;

	dev->ibdev.poll_cq = ocrdma_poll_cq;
	dev->ibdev.post_send = ocrdma_post_send;
	dev->ibdev.post_recv = ocrdma_post_recv;
	dev->ibdev.req_notify_cq = ocrdma_arm_cq;

	/* Memory-registration verbs. */
	dev->ibdev.get_dma_mr = ocrdma_get_dma_mr;
	dev->ibdev.reg_phys_mr = ocrdma_reg_kernel_mr;
	dev->ibdev.dereg_mr = ocrdma_dereg_mr;
	dev->ibdev.reg_user_mr = ocrdma_reg_user_mr;

	dev->ibdev.alloc_fast_reg_mr = ocrdma_alloc_frmr;
	dev->ibdev.alloc_fast_reg_page_list = ocrdma_alloc_frmr_page_list;
	dev->ibdev.free_fast_reg_page_list = ocrdma_free_frmr_page_list;

	/* mandatory to support user space verbs consumer. */
	dev->ibdev.alloc_ucontext = ocrdma_alloc_ucontext;
	dev->ibdev.dealloc_ucontext = ocrdma_dealloc_ucontext;
	dev->ibdev.mmap = ocrdma_mmap;
	dev->ibdev.dma_device = &dev->nic_info.pdev->dev;

	dev->ibdev.process_mad = ocrdma_process_mad;

	/* SRQ support is only advertised on GEN2 family hardware. */
	if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
		dev->ibdev.uverbs_cmd_mask |=
		     OCRDMA_UVERBS(CREATE_SRQ) |
		     OCRDMA_UVERBS(MODIFY_SRQ) |
		     OCRDMA_UVERBS(QUERY_SRQ) |
		     OCRDMA_UVERBS(DESTROY_SRQ) |
		     OCRDMA_UVERBS(POST_SRQ_RECV);

		dev->ibdev.create_srq = ocrdma_create_srq;
		dev->ibdev.modify_srq = ocrdma_modify_srq;
		dev->ibdev.query_srq = ocrdma_query_srq;
		dev->ibdev.destroy_srq = ocrdma_destroy_srq;
		dev->ibdev.post_srq_recv = ocrdma_post_srq_recv;
	}
	return ib_register_device(&dev->ibdev, NULL);
}
305
306 static int ocrdma_alloc_resources(struct ocrdma_dev *dev)
307 {
308 mutex_init(&dev->dev_lock);
309 dev->sgid_tbl = kzalloc(sizeof(union ib_gid) *
310 OCRDMA_MAX_SGID, GFP_KERNEL);
311 if (!dev->sgid_tbl)
312 goto alloc_err;
313 spin_lock_init(&dev->sgid_lock);
314
315 dev->cq_tbl = kzalloc(sizeof(struct ocrdma_cq *) *
316 OCRDMA_MAX_CQ, GFP_KERNEL);
317 if (!dev->cq_tbl)
318 goto alloc_err;
319
320 if (dev->attr.max_qp) {
321 dev->qp_tbl = kzalloc(sizeof(struct ocrdma_qp *) *
322 OCRDMA_MAX_QP, GFP_KERNEL);
323 if (!dev->qp_tbl)
324 goto alloc_err;
325 }
326 spin_lock_init(&dev->av_tbl.lock);
327 spin_lock_init(&dev->flush_q_lock);
328 return 0;
329 alloc_err:
330 pr_err("%s(%d) error.\n", __func__, dev->id);
331 return -ENOMEM;
332 }
333
/*
 * Release the lookup tables allocated by ocrdma_alloc_resources().
 * Safe on partial allocation: kfree(NULL) is a no-op, and unallocated
 * table pointers are NULL (dev comes from ib_alloc_device, zeroed).
 */
static void ocrdma_free_resources(struct ocrdma_dev *dev)
{
	kfree(dev->qp_tbl);
	kfree(dev->cq_tbl);
	kfree(dev->sgid_tbl);
}
340
341 static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
342 {
343 int status = 0;
344 struct ocrdma_dev *dev;
345
346 dev = (struct ocrdma_dev *)ib_alloc_device(sizeof(struct ocrdma_dev));
347 if (!dev) {
348 pr_err("Unable to allocate ib device\n");
349 return NULL;
350 }
351 dev->mbx_cmd = kzalloc(sizeof(struct ocrdma_mqe_emb_cmd), GFP_KERNEL);
352 if (!dev->mbx_cmd)
353 goto idr_err;
354
355 memcpy(&dev->nic_info, dev_info, sizeof(*dev_info));
356 dev->id = idr_alloc(&ocrdma_dev_id, NULL, 0, 0, GFP_KERNEL);
357 if (dev->id < 0)
358 goto idr_err;
359
360 status = ocrdma_init_hw(dev);
361 if (status)
362 goto init_err;
363
364 status = ocrdma_alloc_resources(dev);
365 if (status)
366 goto alloc_err;
367
368 status = ocrdma_register_device(dev);
369 if (status)
370 goto alloc_err;
371
372 spin_lock(&ocrdma_devlist_lock);
373 list_add_tail_rcu(&dev->entry, &ocrdma_dev_list);
374 spin_unlock(&ocrdma_devlist_lock);
375 return dev;
376
377 alloc_err:
378 ocrdma_free_resources(dev);
379 ocrdma_cleanup_hw(dev);
380 init_err:
381 idr_remove(&ocrdma_dev_id, dev->id);
382 idr_err:
383 kfree(dev->mbx_cmd);
384 ib_dealloc_device(&dev->ibdev);
385 pr_err("%s() leaving. ret=%d\n", __func__, status);
386 return NULL;
387 }
388
/*
 * RCU callback: final teardown of a removed device, run after a grace
 * period so list readers (the address notifiers) can no longer see it.
 */
static void ocrdma_remove_free(struct rcu_head *rcu)
{
	struct ocrdma_dev *dev = container_of(rcu, struct ocrdma_dev, rcu);

	idr_remove(&ocrdma_dev_id, dev->id);
	kfree(dev->mbx_cmd);
	ib_dealloc_device(&dev->ibdev);
}
397
/*
 * Tear down one ocrdma device: unregister from the IB core first (to
 * quiesce consumers), unpublish from the device list, release tables
 * and hardware, then defer the final free to an RCU callback.
 */
static void ocrdma_remove(struct ocrdma_dev *dev)
{
	/* first unregister with stack to stop all the active traffic
	 * of the registered clients.
	 */
	ib_unregister_device(&dev->ibdev);

	spin_lock(&ocrdma_devlist_lock);
	list_del_rcu(&dev->entry);
	spin_unlock(&ocrdma_devlist_lock);

	ocrdma_free_resources(dev);
	ocrdma_cleanup_hw(dev);

	/* Free after a grace period so RCU list walkers are done. */
	call_rcu(&dev->rcu, ocrdma_remove_free);
}
414
415 static int ocrdma_open(struct ocrdma_dev *dev)
416 {
417 struct ib_event port_event;
418
419 port_event.event = IB_EVENT_PORT_ACTIVE;
420 port_event.element.port_num = 1;
421 port_event.device = &dev->ibdev;
422 ib_dispatch_event(&port_event);
423 return 0;
424 }
425
/*
 * NIC driver reported link down: move every active QP to the ERROR
 * state, raise IB_EVENT_QP_FATAL for each, and finally raise
 * IB_EVENT_PORT_ERR for the port.  Always returns 0.
 */
static int ocrdma_close(struct ocrdma_dev *dev)
{
	int i;
	struct ocrdma_qp *qp, **cur_qp;
	struct ib_event err_event;
	struct ib_qp_attr attrs;
	int attr_mask = IB_QP_STATE;

	attrs.qp_state = IB_QPS_ERR;
	/* dev_lock serializes against GID-table updates and QP teardown. */
	mutex_lock(&dev->dev_lock);
	if (dev->qp_tbl) {
		cur_qp = dev->qp_tbl;
		for (i = 0; i < OCRDMA_MAX_QP; i++) {
			qp = cur_qp[i];
			if (qp) {
				/* change the QP state to ERROR */
				_ocrdma_modify_qp(&qp->ibqp, &attrs, attr_mask);

				err_event.event = IB_EVENT_QP_FATAL;
				err_event.element.qp = &qp->ibqp;
				err_event.device = &dev->ibdev;
				ib_dispatch_event(&err_event);
			}
		}
	}
	mutex_unlock(&dev->dev_lock);

	err_event.event = IB_EVENT_PORT_ERR;
	err_event.element.port_num = 1;
	err_event.device = &dev->ibdev;
	ib_dispatch_event(&err_event);
	return 0;
}
459
460 /* event handling via NIC driver ensures that all the NIC specific
461 * initialization done before RoCE driver notifies
462 * event to stack.
463 */
464 static void ocrdma_event_handler(struct ocrdma_dev *dev, u32 event)
465 {
466 switch (event) {
467 case BE_DEV_UP:
468 ocrdma_open(dev);
469 break;
470 case BE_DEV_DOWN:
471 ocrdma_close(dev);
472 break;
473 }
474 }
475
/* Callbacks registered with the be2net NIC driver; it invokes add/remove
 * per adapter and state_change_handler on link transitions.
 */
static struct ocrdma_driver ocrdma_drv = {
	.name = "ocrdma_driver",
	.add = ocrdma_add,
	.remove = ocrdma_remove,
	.state_change_handler = ocrdma_event_handler,
};
482
/* Unregister the IPv6 notifier; compiles to a no-op without CONFIG_IPV6. */
static void ocrdma_unregister_inet6addr_notifier(void)
{
#if IS_ENABLED(CONFIG_IPV6)
	unregister_inet6addr_notifier(&ocrdma_inet6addr_notifier);
#endif
}
489
490 static int __init ocrdma_init_module(void)
491 {
492 int status;
493
494 status = register_inetaddr_notifier(&ocrdma_inetaddr_notifier);
495 if (status)
496 return status;
497
498 #if IS_ENABLED(CONFIG_IPV6)
499 status = register_inet6addr_notifier(&ocrdma_inet6addr_notifier);
500 if (status)
501 return status;
502 #endif
503
504 status = be_roce_register_driver(&ocrdma_drv);
505 if (status)
506 ocrdma_unregister_inet6addr_notifier();
507
508 return status;
509 }
510
511 static void __exit ocrdma_exit_module(void)
512 {
513 be_roce_unregister_driver(&ocrdma_drv);
514 ocrdma_unregister_inet6addr_notifier();
515 }
516
517 module_init(ocrdma_init_module);
518 module_exit(ocrdma_exit_module);
/* This page took 0.043003 seconds and 5 git commands to generate. */