/*
 * drivers/crypto/qat/qat_common/qat_crypto.c
 * (commit: "crypto: qat - Enforce valid numa configuration")
 */
1 /*
2 This file is provided under a dual BSD/GPLv2 license. When using or
3 redistributing this file, you may do so under either license.
4
5 GPL LICENSE SUMMARY
6 Copyright(c) 2014 Intel Corporation.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of version 2 of the GNU General Public License as
9 published by the Free Software Foundation.
10
11 This program is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
15
16 Contact Information:
17 qat-linux@intel.com
18
19 BSD LICENSE
20 Copyright(c) 2014 Intel Corporation.
21 Redistribution and use in source and binary forms, with or without
22 modification, are permitted provided that the following conditions
23 are met:
24
25 * Redistributions of source code must retain the above copyright
26 notice, this list of conditions and the following disclaimer.
27 * Redistributions in binary form must reproduce the above copyright
28 notice, this list of conditions and the following disclaimer in
29 the documentation and/or other materials provided with the
30 distribution.
31 * Neither the name of Intel Corporation nor the names of its
32 contributors may be used to endorse or promote products derived
33 from this software without specific prior written permission.
34
35 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
46 */
47 #include <linux/module.h>
48 #include <linux/slab.h>
49 #include "adf_accel_devices.h"
50 #include "adf_common_drv.h"
51 #include "adf_transport.h"
52 #include "adf_cfg.h"
53 #include "adf_cfg_strings.h"
54 #include "qat_crypto.h"
55 #include "icp_qat_fw.h"
56
57 #define SEC ADF_KERNEL_SEC
58
59 static struct service_hndl qat_crypto;
60
61 void qat_crypto_put_instance(struct qat_crypto_instance *inst)
62 {
63 if (atomic_sub_return(1, &inst->refctr) == 0)
64 adf_dev_put(inst->accel_dev);
65 }
66
67 static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
68 {
69 struct qat_crypto_instance *inst;
70 struct list_head *list_ptr, *tmp;
71 int i;
72
73 list_for_each_safe(list_ptr, tmp, &accel_dev->crypto_list) {
74 inst = list_entry(list_ptr, struct qat_crypto_instance, list);
75
76 for (i = 0; i < atomic_read(&inst->refctr); i++)
77 qat_crypto_put_instance(inst);
78
79 if (inst->sym_tx)
80 adf_remove_ring(inst->sym_tx);
81
82 if (inst->sym_rx)
83 adf_remove_ring(inst->sym_rx);
84
85 if (inst->pke_tx)
86 adf_remove_ring(inst->pke_tx);
87
88 if (inst->pke_rx)
89 adf_remove_ring(inst->pke_rx);
90
91 if (inst->rnd_tx)
92 adf_remove_ring(inst->rnd_tx);
93
94 if (inst->rnd_rx)
95 adf_remove_ring(inst->rnd_rx);
96
97 list_del(list_ptr);
98 kfree(inst);
99 }
100 return 0;
101 }
102
103 struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
104 {
105 struct adf_accel_dev *accel_dev = NULL;
106 struct qat_crypto_instance *inst_best = NULL;
107 struct list_head *itr;
108 unsigned long best = ~0;
109
110 list_for_each(itr, adf_devmgr_get_head()) {
111 accel_dev = list_entry(itr, struct adf_accel_dev, list);
112 if ((node == dev_to_node(&GET_DEV(accel_dev)) ||
113 dev_to_node(&GET_DEV(accel_dev)) < 0)
114 && adf_dev_started(accel_dev))
115 break;
116 accel_dev = NULL;
117 }
118 if (!accel_dev) {
119 pr_err("QAT: Could not find device on node %d\n", node);
120 accel_dev = adf_devmgr_get_first();
121 }
122 if (!accel_dev || !adf_dev_started(accel_dev))
123 return NULL;
124
125 list_for_each(itr, &accel_dev->crypto_list) {
126 struct qat_crypto_instance *inst;
127 unsigned long cur;
128
129 inst = list_entry(itr, struct qat_crypto_instance, list);
130 cur = atomic_read(&inst->refctr);
131 if (best > cur) {
132 inst_best = inst;
133 best = cur;
134 }
135 }
136 if (inst_best) {
137 if (atomic_add_return(1, &inst_best->refctr) == 1) {
138 if (adf_dev_get(accel_dev)) {
139 atomic_dec(&inst_best->refctr);
140 pr_err("QAT: Could increment dev refctr\n");
141 return NULL;
142 }
143 }
144 }
145 return inst_best;
146 }
147
148 static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
149 {
150 int i;
151 unsigned long bank;
152 unsigned long num_inst, num_msg_sym, num_msg_asym;
153 int msg_size;
154 struct qat_crypto_instance *inst;
155 char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
156 char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
157
158 INIT_LIST_HEAD(&accel_dev->crypto_list);
159 strlcpy(key, ADF_NUM_CY, sizeof(key));
160
161 if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
162 return -EFAULT;
163
164 if (kstrtoul(val, 0, &num_inst))
165 return -EFAULT;
166
167 for (i = 0; i < num_inst; i++) {
168 inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
169 dev_to_node(&GET_DEV(accel_dev)));
170 if (!inst)
171 goto err;
172
173 list_add_tail(&inst->list, &accel_dev->crypto_list);
174 inst->id = i;
175 atomic_set(&inst->refctr, 0);
176 inst->accel_dev = accel_dev;
177 snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i);
178 if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
179 goto err;
180
181 if (kstrtoul(val, 10, &bank))
182 goto err;
183 snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
184 if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
185 goto err;
186
187 if (kstrtoul(val, 10, &num_msg_sym))
188 goto err;
189 num_msg_sym = num_msg_sym >> 1;
190 snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
191 if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
192 goto err;
193
194 if (kstrtoul(val, 10, &num_msg_asym))
195 goto err;
196 num_msg_asym = num_msg_asym >> 1;
197
198 msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ;
199 snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
200 if (adf_create_ring(accel_dev, SEC, bank, num_msg_sym,
201 msg_size, key, NULL, 0, &inst->sym_tx))
202 goto err;
203
204 snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_TX, i);
205 if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
206 msg_size, key, NULL, 0, &inst->rnd_tx))
207 goto err;
208
209 msg_size = msg_size >> 1;
210 snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
211 if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
212 msg_size, key, NULL, 0, &inst->pke_tx))
213 goto err;
214
215 msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ;
216 snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
217 if (adf_create_ring(accel_dev, SEC, bank, num_msg_sym,
218 msg_size, key, qat_alg_callback, 0,
219 &inst->sym_rx))
220 goto err;
221
222 snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_RX, i);
223 if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
224 msg_size, key, qat_alg_callback, 0,
225 &inst->rnd_rx))
226 goto err;
227
228 snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
229 if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
230 msg_size, key, qat_alg_callback, 0,
231 &inst->pke_rx))
232 goto err;
233 }
234 return 0;
235 err:
236 qat_crypto_free_instances(accel_dev);
237 return -ENOMEM;
238 }
239
/**
 * qat_crypto_init() - ADF_EVENT_INIT handler: build crypto instances
 * @accel_dev: accelerator device being initialised
 *
 * Return: 0 on success, otherwise the error from
 * qat_crypto_create_instances().  The old code collapsed every failure
 * to -EFAULT; callers only test for non-zero, so propagating the real
 * code is backward compatible and more informative.
 */
static int qat_crypto_init(struct adf_accel_dev *accel_dev)
{
	return qat_crypto_create_instances(accel_dev);
}
247
/* ADF_EVENT_SHUTDOWN handler: tear down every crypto instance owned by
 * @accel_dev.  Propagates qat_crypto_free_instances()'s return value. */
static int qat_crypto_shutdown(struct adf_accel_dev *accel_dev)
{
	int ret;

	ret = qat_crypto_free_instances(accel_dev);
	return ret;
}
252
253 static int qat_crypto_event_handler(struct adf_accel_dev *accel_dev,
254 enum adf_event event)
255 {
256 int ret;
257
258 switch (event) {
259 case ADF_EVENT_INIT:
260 ret = qat_crypto_init(accel_dev);
261 break;
262 case ADF_EVENT_SHUTDOWN:
263 ret = qat_crypto_shutdown(accel_dev);
264 break;
265 case ADF_EVENT_RESTARTING:
266 case ADF_EVENT_RESTARTED:
267 case ADF_EVENT_START:
268 case ADF_EVENT_STOP:
269 default:
270 ret = 0;
271 }
272 return ret;
273 }
274
275 int qat_crypto_register(void)
276 {
277 memset(&qat_crypto, 0, sizeof(qat_crypto));
278 qat_crypto.event_hld = qat_crypto_event_handler;
279 qat_crypto.name = "qat_crypto";
280 return adf_service_register(&qat_crypto);
281 }
282
/**
 * qat_crypto_unregister() - Unregister the qat_crypto service handler
 *
 * Counterpart to qat_crypto_register(); detaches the service from the
 * ADF service framework.
 *
 * Return: propagates adf_service_unregister()'s return value.
 */
int qat_crypto_unregister(void)
{
	return adf_service_unregister(&qat_crypto);
}