drivers/net/ethernet/hisilicon/hns/hnae.c
/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

#include "hnae.h"

#define cls_to_ae_dev(dev) container_of(dev, struct hnae_ae_dev, cls_dev)

static struct class *hnae_class;

static void
hnae_list_add(spinlock_t *lock, struct list_head *node, struct list_head *head)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add_tail_rcu(node, head);
	spin_unlock_irqrestore(lock, flags);
}

static void hnae_list_del(spinlock_t *lock, struct list_head *node)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_del_rcu(node);
	spin_unlock_irqrestore(lock, flags);
}

static int hnae_alloc_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
	unsigned int order = hnae_page_order(ring);
	struct page *p = dev_alloc_pages(order);

	if (!p)
		return -ENOMEM;

	cb->priv = p;
	cb->page_offset = 0;
	cb->reuse_flag = 0;
	cb->buf = page_address(p);
	cb->length = hnae_page_size(ring);
	cb->type = DESC_TYPE_PAGE;

	return 0;
}

static void hnae_free_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
	if (cb->type == DESC_TYPE_SKB)
		dev_kfree_skb_any((struct sk_buff *)cb->priv);
	else if (unlikely(is_rx_ring(ring)))
		put_page((struct page *)cb->priv);
	memset(cb, 0, sizeof(*cb));
}

static int hnae_map_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
	cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
			       cb->length, ring_to_dma_dir(ring));

	if (dma_mapping_error(ring_to_dev(ring), cb->dma))
		return -EIO;

	return 0;
}

static void hnae_unmap_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
	if (cb->type == DESC_TYPE_SKB)
		dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
				 ring_to_dma_dir(ring));
	else
		dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
			       ring_to_dma_dir(ring));
}

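/* default buffer ops, used when no bops is passed to hnae_get_handle() */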
static struct hnae_buf_ops hnae_bops = {
	.alloc_buffer = hnae_alloc_buffer,
	.free_buffer = hnae_free_buffer,
	.map_buffer = hnae_map_buffer,
	.unmap_buffer = hnae_unmap_buffer,
};

static int __ae_match(struct device *dev, const void *data)
{
	struct hnae_ae_dev *hdev = cls_to_ae_dev(dev);

	return hdev->dev->of_node == data;
}

static struct hnae_ae_dev *find_ae(const struct device_node *ae_node)
{
	struct device *dev;

	WARN_ON(!ae_node);

	dev = class_find_device(hnae_class, NULL, ae_node, __ae_match);

	return dev ? cls_to_ae_dev(dev) : NULL;
}

static void hnae_free_buffers(struct hnae_ring *ring)
{
	int i;

	for (i = 0; i < ring->desc_num; i++)
		hnae_free_buffer_detach(ring, i);
}

/* Allocate memory for the raw packet buffers, and map them for DMA */
static int hnae_alloc_buffers(struct hnae_ring *ring)
{
	int i, j, ret;

	for (i = 0; i < ring->desc_num; i++) {
		ret = hnae_alloc_buffer_attach(ring, i);
		if (ret)
			goto out_buffer_fail;
	}

	return 0;

out_buffer_fail:
	for (j = i - 1; j >= 0; j--)
		hnae_free_buffer_detach(ring, j);
	return ret;
}

/* free desc along with its attached buffer */
static void hnae_free_desc(struct hnae_ring *ring)
{
	hnae_free_buffers(ring);
	dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
			 ring->desc_num * sizeof(ring->desc[0]),
			 ring_to_dma_dir(ring));
	ring->desc_dma_addr = 0;
	kfree(ring->desc);
	ring->desc = NULL;
}

/* alloc desc, without buffer attached */
static int hnae_alloc_desc(struct hnae_ring *ring)
{
	int size = ring->desc_num * sizeof(ring->desc[0]);

	ring->desc = kzalloc(size, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	ring->desc_dma_addr = dma_map_single(ring_to_dev(ring),
		ring->desc, size, ring_to_dma_dir(ring));
	if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
		ring->desc_dma_addr = 0;
		kfree(ring->desc);
		ring->desc = NULL;
		return -ENOMEM;
	}

	return 0;
}

/* fini the ring, also free the buffers attached to the ring */
static void hnae_fini_ring(struct hnae_ring *ring)
{
	hnae_free_desc(ring);
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
}

/* init the ring; for an rx ring, also allocate and attach buffers */
static int
hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags)
{
	int ret;

	if (ring->desc_num <= 0 || ring->buf_size <= 0)
		return -EINVAL;

	ring->q = q;
	ring->flags = flags;
	assert(!ring->desc && !ring->desc_cb && !ring->desc_dma_addr);

	/* for both tx and rx rings, ntc and ntu start from 0 */
	assert(ring->next_to_use == 0);
	assert(ring->next_to_clean == 0);

	ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
			GFP_KERNEL);
	if (!ring->desc_cb) {
		ret = -ENOMEM;
		goto out;
	}

	ret = hnae_alloc_desc(ring);
	if (ret)
		goto out_with_desc_cb;

	if (is_rx_ring(ring)) {
		ret = hnae_alloc_buffers(ring);
		if (ret)
			goto out_with_desc;
	}

	return 0;

out_with_desc:
	hnae_free_desc(ring);
out_with_desc_cb:
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
out:
	return ret;
}

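/*
 * Init the tx and rx rings of a queue.  RINGF_DIR marks the ring direction:
 * it is set for the tx ring and cleared for the rx ring.
 */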
static int hnae_init_queue(struct hnae_handle *h, struct hnae_queue *q,
			   struct hnae_ae_dev *dev)
{
	int ret;

	q->dev = dev;
	q->handle = h;

	ret = hnae_init_ring(q, &q->tx_ring, q->tx_ring.flags | RINGF_DIR);
	if (ret)
		goto out;

	ret = hnae_init_ring(q, &q->rx_ring, q->rx_ring.flags & ~RINGF_DIR);
	if (ret)
		goto out_with_tx_ring;

	if (dev->ops->init_queue)
		dev->ops->init_queue(q);

	return 0;

out_with_tx_ring:
	hnae_fini_ring(&q->tx_ring);
out:
	return ret;
}

static void hnae_fini_queue(struct hnae_queue *q)
{
	if (q->dev->ops->fini_queue)
		q->dev->ops->fini_queue(q);

	hnae_fini_ring(&q->tx_ring);
	hnae_fini_ring(&q->rx_ring);
}

/**
 * ae_chain - define ae chain head
 */
static RAW_NOTIFIER_HEAD(ae_chain);

int hnae_register_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&ae_chain, nb);
}
EXPORT_SYMBOL(hnae_register_notifier);

void hnae_unregister_notifier(struct notifier_block *nb)
{
	if (raw_notifier_chain_unregister(&ae_chain, nb))
		dev_err(NULL, "notifier chain unregister fail\n");
}
EXPORT_SYMBOL(hnae_unregister_notifier);
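
/*
 * Illustrative use of the notifier chain from a consumer driver; the
 * callback and helper names below are hypothetical, not part of this file:
 *
 *	static int my_ae_event(struct notifier_block *nb,
 *			       unsigned long action, void *data)
 *	{
 *		if (action == HNAE_AE_REGISTER)
 *			my_probe_ae();	// hypothetical: look up the new AE
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_ae_nb = {
 *		.notifier_call = my_ae_event,
 *	};
 *
 *	hnae_register_notifier(&my_ae_nb);
 *	...
 *	hnae_unregister_notifier(&my_ae_nb);
 */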

int hnae_reinit_handle(struct hnae_handle *handle)
{
	int i, j;
	int ret;

	for (i = 0; i < handle->q_num; i++) /* free ring */
		hnae_fini_queue(handle->qs[i]);

	if (handle->dev->ops->reset)
		handle->dev->ops->reset(handle);

	for (i = 0; i < handle->q_num; i++) { /* reinit ring */
		ret = hnae_init_queue(handle, handle->qs[i], handle->dev);
		if (ret)
			goto out_when_init_queue;
	}
	return 0;
out_when_init_queue:
	for (j = i - 1; j >= 0; j--)
		hnae_fini_queue(handle->qs[j]);
	return ret;
}
EXPORT_SYMBOL(hnae_reinit_handle);

/* hnae_get_handle - get a handle from the AE
 * @owner_dev: the dev that uses this handle
 * @ae_node: the device-tree node of the AE to be used
 * @port_id: the port id of the handle on the AE
 * @bops: the callbacks for buffer management, or NULL for the defaults
 *
 * return handle ptr or ERR_PTR
 */
struct hnae_handle *hnae_get_handle(struct device *owner_dev,
				    const struct device_node *ae_node,
				    u32 port_id,
				    struct hnae_buf_ops *bops)
{
	struct hnae_ae_dev *dev;
	struct hnae_handle *handle;
	int i, j;
	int ret;

	dev = find_ae(ae_node);
	if (!dev)
		return ERR_PTR(-ENODEV);

	handle = dev->ops->get_handle(dev, port_id);
	if (IS_ERR(handle))
		return handle;

	handle->dev = dev;
	handle->owner_dev = owner_dev;
	handle->bops = bops ? bops : &hnae_bops;
	handle->eport_id = port_id;

	for (i = 0; i < handle->q_num; i++) {
		ret = hnae_init_queue(handle, handle->qs[i], dev);
		if (ret)
			goto out_when_init_queue;
	}

	__module_get(dev->owner);

	hnae_list_add(&dev->lock, &handle->node, &dev->handle_list);

	return handle;

out_when_init_queue:
	for (j = i - 1; j >= 0; j--)
		hnae_fini_queue(handle->qs[j]);

	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(hnae_get_handle);

void hnae_put_handle(struct hnae_handle *h)
{
	struct hnae_ae_dev *dev = h->dev;
	int i;

	for (i = 0; i < h->q_num; i++)
		hnae_fini_queue(h->qs[i]);

	if (h->dev->ops->reset)
		h->dev->ops->reset(h);

	hnae_list_del(&dev->lock, &h->node);

	if (dev->ops->put_handle)
		dev->ops->put_handle(h);

	module_put(dev->owner);
}
EXPORT_SYMBOL(hnae_put_handle);
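
/*
 * Illustrative usage from an ENET driver; the variable names and the
 * "ae-handle" device-tree property are assumptions, not defined here:
 *
 *	struct device_node *ae_node;
 *	struct hnae_handle *h;
 *
 *	ae_node = of_parse_phandle(dev->of_node, "ae-handle", 0);
 *	h = hnae_get_handle(dev, ae_node, port_id, NULL);
 *	if (IS_ERR(h))
 *		return PTR_ERR(h);
 *	...
 *	hnae_put_handle(h);
 */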

static void hnae_release(struct device *dev)
{
}

/**
 * hnae_ae_register - register an AE engine to the hnae framework
 * @hdev: the hnae ae engine device
 * @owner: the module that provides this device
 * NOTE: duplicated names are not checked
 */
int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner)
{
	static atomic_t id = ATOMIC_INIT(-1);
	int ret;

	if (!hdev->dev)
		return -ENODEV;

	if (!hdev->ops || !hdev->ops->get_handle ||
	    !hdev->ops->toggle_ring_irq ||
	    !hdev->ops->toggle_queue_status ||
	    !hdev->ops->get_status || !hdev->ops->adjust_link)
		return -EINVAL;

	hdev->owner = owner;
	hdev->id = (int)atomic_inc_return(&id);
	hdev->cls_dev.parent = hdev->dev;
	hdev->cls_dev.class = hnae_class;
	hdev->cls_dev.release = hnae_release;
	(void)dev_set_name(&hdev->cls_dev, "hnae%d", hdev->id);
	ret = device_register(&hdev->cls_dev);
	if (ret)
		return ret;

	__module_get(THIS_MODULE);

	INIT_LIST_HEAD(&hdev->handle_list);
	spin_lock_init(&hdev->lock);

	ret = raw_notifier_call_chain(&ae_chain, HNAE_AE_REGISTER, NULL);
	if (ret)
		dev_dbg(hdev->dev,
			"has not notifier for AE: %s\n", hdev->name);

	return 0;
}
EXPORT_SYMBOL(hnae_ae_register);
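
/*
 * Illustrative registration from an AE driver.  Only the mandatory ops
 * checked above are shown; the my_* names (and the exact ops struct type,
 * declared in hnae.h) are assumptions:
 *
 *	static struct hnae_ae_ops my_ops = {
 *		.get_handle		= my_get_handle,
 *		.toggle_ring_irq	= my_toggle_ring_irq,
 *		.toggle_queue_status	= my_toggle_queue_status,
 *		.get_status		= my_get_status,
 *		.adjust_link		= my_adjust_link,
 *	};
 *
 *	hdev->dev = &pdev->dev;
 *	hdev->ops = &my_ops;
 *	ret = hnae_ae_register(hdev, THIS_MODULE);
 */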

/**
 * hnae_ae_unregister - unregisters a HNAE AE engine
 * @hdev: the device to unregister
 */
void hnae_ae_unregister(struct hnae_ae_dev *hdev)
{
	device_unregister(&hdev->cls_dev);
	module_put(THIS_MODULE);
}
EXPORT_SYMBOL(hnae_ae_unregister);

static int __init hnae_init(void)
{
	hnae_class = class_create(THIS_MODULE, "hnae");
	return PTR_ERR_OR_ZERO(hnae_class);
}

static void __exit hnae_exit(void)
{
	class_destroy(hnae_class);
}

subsys_initcall(hnae_init);
module_exit(hnae_exit);

MODULE_AUTHOR("Hisilicon, Inc.");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Hisilicon Network Acceleration Engine Framework");

/* vi: set tw=78 noet: */