/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/delay.h>
#include "adf_accel_devices.h"
#include "adf_transport_internal.h"
#include "adf_transport_access_macros.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
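
/* Return data modulo 2^shift (data % (1 << shift)) using shifts only. */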
static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
{
        uint32_t div = data >> shift;
        uint32_t mult = div << shift;

        return data - mult;
}
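
/*
 * Ring DMA addresses must be naturally aligned to the (power-of-two) ring
 * size; return -EFAULT for any address that is not.
 */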
static inline int adf_check_ring_alignment(uint64_t addr, uint64_t size)
{
        if (((size - 1) & addr) != 0)
                return -EFAULT;
        return 0;
}
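
/*
 * Find the ring-size encoding whose capacity in bytes exactly matches
 * msg_size * msg_num; fall back to ADF_DEFAULT_RING_SIZE when there is
 * no exact match.
 */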
static int adf_verify_ring_size(uint32_t msg_size, uint32_t msg_num)
{
        int i = ADF_MIN_RING_SIZE;

        for (; i <= ADF_MAX_RING_SIZE; i++)
                if ((msg_size * msg_num) == ADF_SIZE_TO_RING_SIZE_IN_BYTES(i))
                        return i;

        return ADF_DEFAULT_RING_SIZE;
}

static int adf_reserve_ring(struct adf_etr_bank_data *bank, uint32_t ring)
{
        spin_lock(&bank->lock);
        if (bank->ring_mask & (1 << ring)) {
                spin_unlock(&bank->lock);
                return -EFAULT;
        }
        bank->ring_mask |= (1 << ring);
        spin_unlock(&bank->lock);
        return 0;
}

static void adf_unreserve_ring(struct adf_etr_bank_data *bank, uint32_t ring)
{
        spin_lock(&bank->lock);
        bank->ring_mask &= ~(1 << ring);
        spin_unlock(&bank->lock);
}

static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring)
{
        spin_lock_bh(&bank->lock);
        bank->irq_mask |= (1 << ring);
        spin_unlock_bh(&bank->lock);
        WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask);
        WRITE_CSR_INT_COL_CTL(bank->csr_addr, bank->bank_number,
                              bank->irq_coalesc_timer);
}

static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring)
{
        spin_lock_bh(&bank->lock);
        bank->irq_mask &= ~(1 << ring);
        spin_unlock_bh(&bank->lock);
        WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask);
}
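
/*
 * Put one request message on a tx ring and notify the device by writing the
 * new tail pointer.  Fails with -EAGAIN when the message would exceed the
 * ring's in-flight capacity.
 */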
int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg)
{
        if (atomic_add_return(1, ring->inflights) >
            ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size)) {
                atomic_dec(ring->inflights);
                return -EAGAIN;
        }
        spin_lock_bh(&ring->lock);
        memcpy(ring->base_addr + ring->tail, msg,
               ADF_MSG_SIZE_TO_BYTES(ring->msg_size));

        ring->tail = adf_modulo(ring->tail +
                                ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
                                ADF_RING_SIZE_MODULO(ring->ring_size));
        WRITE_CSR_RING_TAIL(ring->bank->csr_addr, ring->bank->bank_number,
                            ring->ring_number, ring->tail);
        spin_unlock_bh(&ring->lock);
        return 0;
}
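
/*
 * Drain a response ring: invoke the registered callback for each message
 * until the empty-ring signature is seen, restore the signature behind every
 * consumed message, then publish the new head pointer and drop the in-flight
 * count accordingly.
 */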
static int adf_handle_response(struct adf_etr_ring_data *ring)
{
        uint32_t msg_counter = 0;
        uint32_t *msg = (uint32_t *)(ring->base_addr + ring->head);

        while (*msg != ADF_RING_EMPTY_SIG) {
                ring->callback((uint32_t *)msg);
                *msg = ADF_RING_EMPTY_SIG;
                ring->head = adf_modulo(ring->head +
                                        ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
                                        ADF_RING_SIZE_MODULO(ring->ring_size));
                msg_counter++;
                msg = (uint32_t *)(ring->base_addr + ring->head);
        }
        if (msg_counter > 0) {
                WRITE_CSR_RING_HEAD(ring->bank->csr_addr,
                                    ring->bank->bank_number,
                                    ring->ring_number, ring->head);
                atomic_sub(msg_counter, ring->inflights);
        }
        return 0;
}
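
/* Program the ring-config CSR for a request (tx) ring. */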
static void adf_configure_tx_ring(struct adf_etr_ring_data *ring)
{
        uint32_t ring_config = BUILD_RING_CONFIG(ring->ring_size);

        WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number,
                              ring->ring_number, ring_config);
}
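
/*
 * Program the ring-config CSR for a response (rx) ring, including its
 * near-full and near-empty watermark settings.
 */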
static void adf_configure_rx_ring(struct adf_etr_ring_data *ring)
{
        uint32_t ring_config =
                BUILD_RESP_RING_CONFIG(ring->ring_size,
                                       ADF_RING_NEAR_WATERMARK_512,
                                       ADF_RING_NEAR_WATERMARK_0);

        WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number,
                              ring->ring_number, ring_config);
}
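
/*
 * Allocate DMA-coherent memory for a ring, pre-fill it with the empty-ring
 * signature, check the base-address alignment the hardware requires, and
 * program the ring's config and base CSRs.
 */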
static int adf_init_ring(struct adf_etr_ring_data *ring)
{
        struct adf_etr_bank_data *bank = ring->bank;
        struct adf_accel_dev *accel_dev = bank->accel_dev;
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        uint64_t ring_base;
        uint32_t ring_size_bytes =
                ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);

        ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);
        ring->base_addr = dma_alloc_coherent(&GET_DEV(accel_dev),
                                             ring_size_bytes, &ring->dma_addr,
                                             GFP_KERNEL);
        if (!ring->base_addr)
                return -ENOMEM;

        memset(ring->base_addr, 0x7F, ring_size_bytes);
        /* The base_addr has to be aligned to the size of the buffer */
        if (adf_check_ring_alignment(ring->dma_addr, ring_size_bytes)) {
                pr_err("QAT: Ring address not aligned\n");
                dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes,
                                  ring->base_addr, ring->dma_addr);
                return -EFAULT;
        }

        if (hw_data->tx_rings_mask & (1 << ring->ring_number))
                adf_configure_tx_ring(ring);
        else
                adf_configure_rx_ring(ring);

        ring_base = BUILD_RING_BASE_ADDR(ring->dma_addr, ring->ring_size);
        WRITE_CSR_RING_BASE(ring->bank->csr_addr, ring->bank->bank_number,
                            ring->ring_number, ring_base);
        spin_lock_init(&ring->lock);
        return 0;
}

static void adf_cleanup_ring(struct adf_etr_ring_data *ring)
{
        uint32_t ring_size_bytes =
                ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
        ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);

        if (ring->base_addr) {
                memset(ring->base_addr, 0x7F, ring_size_bytes);
                dma_free_coherent(&GET_DEV(ring->bank->accel_dev),
                                  ring_size_bytes, ring->base_addr,
                                  ring->dma_addr);
        }
}

int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
                    uint32_t bank_num, uint32_t num_msgs,
                    uint32_t msg_size, const char *ring_name,
                    adf_callback_fn callback, int poll_mode,
                    struct adf_etr_ring_data **ring_ptr)
{
        struct adf_etr_data *transport_data = accel_dev->transport;
        struct adf_etr_bank_data *bank;
        struct adf_etr_ring_data *ring;
        char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
        uint32_t ring_num;
        int ret;

        if (bank_num >= GET_MAX_BANKS(accel_dev)) {
                pr_err("QAT: Invalid bank number\n");
                return -EFAULT;
        }
        if (msg_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
                pr_err("QAT: Invalid msg size\n");
                return -EFAULT;
        }
        if (ADF_MAX_INFLIGHTS(adf_verify_ring_size(msg_size, num_msgs),
                              ADF_BYTES_TO_MSG_SIZE(msg_size)) < 2) {
                pr_err("QAT: Invalid ring size for given msg size\n");
                return -EFAULT;
        }
        if (adf_cfg_get_param_value(accel_dev, section, ring_name, val)) {
                pr_err("QAT: Section %s, no such entry: %s\n",
                       section, ring_name);
                return -EFAULT;
        }
        if (kstrtouint(val, 10, &ring_num)) {
                pr_err("QAT: Can't get ring number\n");
                return -EFAULT;
        }

        bank = &transport_data->banks[bank_num];
        if (adf_reserve_ring(bank, ring_num)) {
                pr_err("QAT: Ring %d, %s already exists.\n",
                       ring_num, ring_name);
                return -EFAULT;
        }
        ring = &bank->rings[ring_num];
        ring->ring_number = ring_num;
        ring->bank = bank;
        ring->callback = callback;
        ring->msg_size = ADF_BYTES_TO_MSG_SIZE(msg_size);
        ring->ring_size = adf_verify_ring_size(msg_size, num_msgs);
        ring->head = 0;
        ring->tail = 0;
        atomic_set(ring->inflights, 0);
        ret = adf_init_ring(ring);
        if (ret)
                goto err;

        /* Enable HW arbitration for the given ring */
        accel_dev->hw_device->hw_arb_ring_enable(ring);

        if (adf_ring_debugfs_add(ring, ring_name)) {
                pr_err("QAT: Couldn't add ring debugfs entry\n");
                ret = -EFAULT;
                goto err;
        }

        /* Enable interrupts if needed */
        if (callback && (!poll_mode))
                adf_enable_ring_irq(bank, ring->ring_number);
        *ring_ptr = ring;
        return 0;
err:
        adf_cleanup_ring(ring);
        adf_unreserve_ring(bank, ring_num);
        accel_dev->hw_device->hw_arb_ring_disable(ring);
        return ret;
}
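
/*
 * Illustrative sketch only (not part of this file): a service driver would
 * typically create a tx/rx ring pair, attaching the response callback to the
 * rx ring.  The section, key and callback names here are hypothetical
 * placeholders resolved through the device configuration:
 *
 *	struct adf_etr_ring_data *tx_ring, *rx_ring;
 *	int ret;
 *
 *	ret = adf_create_ring(accel_dev, section, bank_num, num_msgs,
 *			      msg_size, tx_key, NULL, 0, &tx_ring);
 *	if (!ret)
 *		ret = adf_create_ring(accel_dev, section, bank_num, num_msgs,
 *				      msg_size, rx_key, resp_callback,
 *				      0, &rx_ring);
 */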

void adf_remove_ring(struct adf_etr_ring_data *ring)
{
        struct adf_etr_bank_data *bank = ring->bank;
        struct adf_accel_dev *accel_dev = bank->accel_dev;

        /* Disable interrupts for the given ring */
        adf_disable_ring_irq(bank, ring->ring_number);

        /* Clear the ring's config and base CSRs */
        WRITE_CSR_RING_CONFIG(bank->csr_addr, bank->bank_number,
                              ring->ring_number, 0);
        WRITE_CSR_RING_BASE(bank->csr_addr, bank->bank_number,
                            ring->ring_number, 0);
        adf_ring_debugfs_rm(ring);
        adf_unreserve_ring(bank, ring->ring_number);
        /* Disable HW arbitration for the given ring */
        accel_dev->hw_device->hw_arb_ring_disable(ring);
        adf_cleanup_ring(ring);
}
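
/*
 * Service every ring in the bank that is not empty and has its interrupt
 * enabled in the bank's irq_mask.
 */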
static void adf_ring_response_handler(struct adf_etr_bank_data *bank)
{
        uint32_t empty_rings, i;

        empty_rings = READ_CSR_E_STAT(bank->csr_addr, bank->bank_number);
        empty_rings = ~empty_rings & bank->irq_mask;

        for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; ++i) {
                if (empty_rings & (1 << i))
                        adf_handle_response(&bank->rings[i]);
        }
}

/**
 * adf_response_handler() - Bottom half handler for ring bank responses
 * @bank_addr: Address of the ring bank for which the BH was scheduled.
 *
 * Function is the bottom half handler for responses from the acceleration
 * device. There is one handler for every ring bank. The function checks all
 * communication rings in the bank.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void adf_response_handler(unsigned long bank_addr)
{
        struct adf_etr_bank_data *bank = (void *)bank_addr;

        /* Handle all the responses and re-enable IRQs */
        adf_ring_response_handler(bank);
        WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number,
                                   bank->irq_mask);
}
EXPORT_SYMBOL_GPL(adf_response_handler);
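
/*
 * Illustrative sketch only (not part of this file): a device specific driver
 * would typically run adf_response_handler() as a tasklet scheduled from its
 * bank interrupt handler; the resp_handler tasklet shown here is assumed to
 * live in the bank data:
 *
 *	tasklet_init(&bank->resp_handler, adf_response_handler,
 *		     (unsigned long)bank);
 *	...
 *	tasklet_hi_schedule(&bank->resp_handler);
 */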

static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev,
                                  const char *section, const char *format,
                                  uint32_t key, uint32_t *value)
{
        char key_buf[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
        char val_buf[ADF_CFG_MAX_VAL_LEN_IN_BYTES];

        snprintf(key_buf, ADF_CFG_MAX_KEY_LEN_IN_BYTES, format, key);

        if (adf_cfg_get_param_value(accel_dev, section, key_buf, val_buf))
                return -EFAULT;

        if (kstrtouint(val_buf, 10, value))
                return -EFAULT;
        return 0;
}
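
/*
 * Read the per-bank interrupt coalescing timer from the device configuration
 * and fall back to the default when the entry is missing or out of range.
 */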
static void adf_enable_coalesc(struct adf_etr_bank_data *bank,
                               const char *section, uint32_t bank_num_in_accel)
{
        if (adf_get_cfg_int(bank->accel_dev, section,
                            ADF_ETRMGR_COALESCE_TIMER_FORMAT,
                            bank_num_in_accel, &bank->irq_coalesc_timer))
                bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;

        if (ADF_COALESCING_MAX_TIME < bank->irq_coalesc_timer ||
            ADF_COALESCING_MIN_TIME > bank->irq_coalesc_timer)
                bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;
}
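
/*
 * Initialize one ring bank: reset all ring CSRs, allocate an in-flight
 * counter for each tx ring and share it with the rx ring sitting tx_rx_gap
 * rings above it, then register the bank's debugfs entry.
 */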
static int adf_init_bank(struct adf_accel_dev *accel_dev,
                         struct adf_etr_bank_data *bank,
                         uint32_t bank_num, void __iomem *csr_addr)
{
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        struct adf_etr_ring_data *ring;
        struct adf_etr_ring_data *tx_ring;
        uint32_t i, coalesc_enabled = 0;

        memset(bank, 0, sizeof(*bank));
        bank->bank_number = bank_num;
        bank->csr_addr = csr_addr;
        bank->accel_dev = accel_dev;
        spin_lock_init(&bank->lock);

        /* Enable IRQ coalescing always. This allows us to use the optimised
         * flag and coalesce register.
         * If coalescing is disabled in the config file, just use the
         * minimum time value. */
        if ((adf_get_cfg_int(accel_dev, "Accelerator0",
                             ADF_ETRMGR_COALESCING_ENABLED_FORMAT,
                             bank_num, &coalesc_enabled) == 0) &&
            coalesc_enabled)
                adf_enable_coalesc(bank, "Accelerator0", bank_num);
        else
                bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME;

        for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
                WRITE_CSR_RING_CONFIG(csr_addr, bank_num, i, 0);
                WRITE_CSR_RING_BASE(csr_addr, bank_num, i, 0);
                ring = &bank->rings[i];
                if (hw_data->tx_rings_mask & (1 << i)) {
                        ring->inflights =
                                kzalloc_node(sizeof(atomic_t),
                                             GFP_KERNEL,
                                             dev_to_node(&GET_DEV(accel_dev)));
                        if (!ring->inflights)
                                goto err;
                } else {
                        if (i < hw_data->tx_rx_gap) {
                                pr_err("QAT: Invalid tx rings mask config\n");
                                goto err;
                        }
                        tx_ring = &bank->rings[i - hw_data->tx_rx_gap];
                        ring->inflights = tx_ring->inflights;
                }
        }
        if (adf_bank_debugfs_add(bank)) {
                pr_err("QAT: Failed to add bank debugfs entry\n");
                goto err;
        }

        WRITE_CSR_INT_SRCSEL(csr_addr, bank_num);
        return 0;
err:
        for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
                ring = &bank->rings[i];
                if (hw_data->tx_rings_mask & (1 << i) && ring->inflights)
                        kfree(ring->inflights);
        }
        return -ENOMEM;
}

/**
 * adf_init_etr_data() - Initialize transport rings for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function initializes the communications channels (rings) to the
 * acceleration device accel_dev.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_init_etr_data(struct adf_accel_dev *accel_dev)
{
        struct adf_etr_data *etr_data;
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        void __iomem *csr_addr;
        uint32_t size;
        uint32_t num_banks = 0;
        int i, ret;

        etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL,
                                dev_to_node(&GET_DEV(accel_dev)));
        if (!etr_data)
                return -ENOMEM;

        num_banks = GET_MAX_BANKS(accel_dev);
        size = num_banks * sizeof(struct adf_etr_bank_data);
        etr_data->banks = kzalloc_node(size, GFP_KERNEL,
                                       dev_to_node(&GET_DEV(accel_dev)));
        if (!etr_data->banks) {
                ret = -ENOMEM;
                goto err_bank;
        }

        accel_dev->transport = etr_data;
        i = hw_data->get_etr_bar_id(hw_data);
        csr_addr = accel_dev->accel_pci_dev.pci_bars[i].virt_addr;

        /* accel_dev->debugfs_dir should always be non-NULL here */
        etr_data->debug = debugfs_create_dir("transport",
                                             accel_dev->debugfs_dir);
        if (!etr_data->debug) {
                pr_err("QAT: Unable to create transport debugfs entry\n");
                ret = -ENOENT;
                goto err_bank_debug;
        }

        for (i = 0; i < num_banks; i++) {
                ret = adf_init_bank(accel_dev, &etr_data->banks[i], i,
                                    csr_addr);
                if (ret)
                        goto err_bank_all;
        }

        return 0;

err_bank_all:
        debugfs_remove(etr_data->debug);
err_bank_debug:
        kfree(etr_data->banks);
err_bank:
        kfree(etr_data);
        accel_dev->transport = NULL;
        return ret;
}
EXPORT_SYMBOL_GPL(adf_init_etr_data);
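
/*
 * Typical call sequence (illustrative): a device specific driver calls
 * adf_init_etr_data() once at device init, creates individual rings with
 * adf_create_ring(), and on shutdown removes them with adf_remove_ring()
 * before calling adf_cleanup_etr_data().
 */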

static void cleanup_bank(struct adf_etr_bank_data *bank)
{
        uint32_t i;

        for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
                struct adf_accel_dev *accel_dev = bank->accel_dev;
                struct adf_hw_device_data *hw_data = accel_dev->hw_device;
                struct adf_etr_ring_data *ring = &bank->rings[i];

                if (bank->ring_mask & (1 << i))
                        adf_cleanup_ring(ring);

                if (hw_data->tx_rings_mask & (1 << i))
                        kfree(ring->inflights);
        }
        adf_bank_debugfs_rm(bank);
        memset(bank, 0, sizeof(*bank));
}

static void adf_cleanup_etr_handles(struct adf_accel_dev *accel_dev)
{
        struct adf_etr_data *etr_data = accel_dev->transport;
        uint32_t i, num_banks = GET_MAX_BANKS(accel_dev);

        for (i = 0; i < num_banks; i++)
                cleanup_bank(&etr_data->banks[i]);
}

/**
 * adf_cleanup_etr_data() - Clear transport rings for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function clears the communications channels (rings) of the
 * acceleration device accel_dev.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev)
{
        struct adf_etr_data *etr_data = accel_dev->transport;

        if (etr_data) {
                adf_cleanup_etr_handles(accel_dev);
                debugfs_remove(etr_data->debug);
                kfree(etr_data->banks);
                kfree(etr_data);
                accel_dev->transport = NULL;
        }
}
EXPORT_SYMBOL_GPL(adf_cleanup_etr_data);