drivers/crypto/qat/qat_common/qat_hal.c
1/*
2 This file is provided under a dual BSD/GPLv2 license. When using or
3 redistributing this file, you may do so under either license.
4
5 GPL LICENSE SUMMARY
6 Copyright(c) 2014 Intel Corporation.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of version 2 of the GNU General Public License as
9 published by the Free Software Foundation.
10
11 This program is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
15
16 Contact Information:
17 qat-linux@intel.com
18
19 BSD LICENSE
20 Copyright(c) 2014 Intel Corporation.
21 Redistribution and use in source and binary forms, with or without
22 modification, are permitted provided that the following conditions
23 are met:
24
25 * Redistributions of source code must retain the above copyright
26 notice, this list of conditions and the following disclaimer.
27 * Redistributions in binary form must reproduce the above copyright
28 notice, this list of conditions and the following disclaimer in
29 the documentation and/or other materials provided with the
30 distribution.
31 * Neither the name of Intel Corporation nor the names of its
32 contributors may be used to endorse or promote products derived
33 from this software without specific prior written permission.
34
35 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
46*/
47#include <linux/slab.h>
48
49#include "adf_accel_devices.h"
50#include "adf_common_drv.h"
51#include "icp_qat_hal.h"
52#include "icp_qat_uclo.h"
53
54#define BAD_REGADDR 0xffff
55#define MAX_RETRY_TIMES 10000
56#define INIT_CTX_ARB_VALUE 0x0
57#define INIT_CTX_ENABLE_VALUE 0x0
58#define INIT_PC_VALUE 0x0
59#define INIT_WAKEUP_EVENTS_VALUE 0x1
60#define INIT_SIG_EVENTS_VALUE 0x1
61#define INIT_CCENABLE_VALUE 0x2000
62#define RST_CSR_QAT_LSB 20
63#define RST_CSR_AE_LSB 0
64#define MC_TIMESTAMP_ENABLE (0x1 << 7)
65
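/*
 * Note on the masks below: CTX_ENABLES contains write-1-to-clear status bits
 * (breakpoint, control-store parity error, register parity error), and
 * IGNORE_W1C_MASK strips them before a read-modify-write so that writing the
 * CSR back does not unintentionally clear them.  The INSERT_IMMED_GPR*_CONST
 * macros patch a 16-bit constant into the two 8-bit immediate fields of a
 * pre-built "immed" microword; judging by the masks, the field positions
 * differ between the A-bank and B-bank encodings.
 */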
66#define IGNORE_W1C_MASK ((~(1 << CE_BREAKPOINT_BITPOS)) & \
67 (~(1 << CE_CNTL_STORE_PARITY_ERROR_BITPOS)) & \
68 (~(1 << CE_REG_PAR_ERR_BITPOS)))
69#define INSERT_IMMED_GPRA_CONST(inst, const_val) \
70 (inst = ((inst & 0xFFFF00C03FFull) | \
71 ((((const_val) << 12) & 0x0FF00000ull) | \
72 (((const_val) << 10) & 0x0003FC00ull))))
73#define INSERT_IMMED_GPRB_CONST(inst, const_val) \
74 (inst = ((inst & 0xFFFF00FFF00ull) | \
75 ((((const_val) << 12) & 0x0FF00000ull) | \
76 (((const_val) << 0) & 0x000000FFull))))
77
78#define AE(handle, ae) handle->hal_handle->aes[ae]
79
80static const uint64_t inst_4b[] = {
81 0x0F0400C0000ull, 0x0F4400C0000ull, 0x0F040000300ull, 0x0F440000300ull,
82 0x0FC066C0000ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
83 0x0A021000000ull
84};
85
86static const uint64_t inst[] = {
87 0x0F0000C0000ull, 0x0F000000380ull, 0x0D805000011ull, 0x0FC082C0300ull,
88 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
89 0x0A0643C0000ull, 0x0BAC0000301ull, 0x0D802000101ull, 0x0F0000C0001ull,
90 0x0FC066C0001ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
91 0x0F000400300ull, 0x0A0610C0000ull, 0x0BAC0000301ull, 0x0D804400101ull,
92 0x0A0580C0000ull, 0x0A0581C0000ull, 0x0A0582C0000ull, 0x0A0583C0000ull,
93 0x0A0584C0000ull, 0x0A0585C0000ull, 0x0A0586C0000ull, 0x0A0587C0000ull,
94 0x0A0588C0000ull, 0x0A0589C0000ull, 0x0A058AC0000ull, 0x0A058BC0000ull,
95 0x0A058CC0000ull, 0x0A058DC0000ull, 0x0A058EC0000ull, 0x0A058FC0000ull,
96 0x0A05C0C0000ull, 0x0A05C1C0000ull, 0x0A05C2C0000ull, 0x0A05C3C0000ull,
97 0x0A05C4C0000ull, 0x0A05C5C0000ull, 0x0A05C6C0000ull, 0x0A05C7C0000ull,
98 0x0A05C8C0000ull, 0x0A05C9C0000ull, 0x0A05CAC0000ull, 0x0A05CBC0000ull,
99 0x0A05CCC0000ull, 0x0A05CDC0000ull, 0x0A05CEC0000ull, 0x0A05CFC0000ull,
100 0x0A0400C0000ull, 0x0B0400C0000ull, 0x0A0401C0000ull, 0x0B0401C0000ull,
101 0x0A0402C0000ull, 0x0B0402C0000ull, 0x0A0403C0000ull, 0x0B0403C0000ull,
102 0x0A0404C0000ull, 0x0B0404C0000ull, 0x0A0405C0000ull, 0x0B0405C0000ull,
103 0x0A0406C0000ull, 0x0B0406C0000ull, 0x0A0407C0000ull, 0x0B0407C0000ull,
104 0x0A0408C0000ull, 0x0B0408C0000ull, 0x0A0409C0000ull, 0x0B0409C0000ull,
105 0x0A040AC0000ull, 0x0B040AC0000ull, 0x0A040BC0000ull, 0x0B040BC0000ull,
106 0x0A040CC0000ull, 0x0B040CC0000ull, 0x0A040DC0000ull, 0x0B040DC0000ull,
107 0x0A040EC0000ull, 0x0B040EC0000ull, 0x0A040FC0000ull, 0x0B040FC0000ull,
108 0x0D81581C010ull, 0x0E000010000ull, 0x0E000010000ull,
109};
110
111void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle,
112 unsigned char ae, unsigned int ctx_mask)
113{
114 AE(handle, ae).live_ctx_mask = ctx_mask;
115}
116
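/*
 * Local CSR accessors: the read or write through GET_AE_CSR/SET_AE_CSR is
 * re-issued up to CSR_RETRY_TIMES while LOCAL_CSR_STATUS still reports the
 * access as pending (LCS_STATUS set); -EFAULT is returned on timeout.
 */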
117#define CSR_RETRY_TIMES 500
118static int qat_hal_rd_ae_csr(struct icp_qat_fw_loader_handle *handle,
119 unsigned char ae, unsigned int csr,
120 unsigned int *value)
121{
122 unsigned int iterations = CSR_RETRY_TIMES;
123
124 do {
125 *value = GET_AE_CSR(handle, ae, csr);
126 if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
127 return 0;
128 } while (iterations--);
129
130 pr_err("QAT: Read CSR timeout\n");
131 return -EFAULT;
132}
133
134static int qat_hal_wr_ae_csr(struct icp_qat_fw_loader_handle *handle,
135 unsigned char ae, unsigned int csr,
136 unsigned int value)
137{
138 unsigned int iterations = CSR_RETRY_TIMES;
139
140 do {
141 SET_AE_CSR(handle, ae, csr, value);
142 if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
143 return 0;
144 } while (iterations--);
145
146 pr_err("QAT: Write CSR Timeout\n");
147 return -EFAULT;
148}
149
150static void qat_hal_get_wakeup_event(struct icp_qat_fw_loader_handle *handle,
151 unsigned char ae, unsigned char ctx,
152 unsigned int *events)
153{
154 unsigned int cur_ctx;
155
156 qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
157 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
158 qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT, events);
159 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
160}
161
162static int qat_hal_wait_cycles(struct icp_qat_fw_loader_handle *handle,
163 unsigned char ae, unsigned int cycles,
164 int chk_inactive)
165{
166 unsigned int base_cnt = 0, cur_cnt = 0;
167 unsigned int csr = (1 << ACS_ABO_BITPOS);
168 int times = MAX_RETRY_TIMES;
169 int elapsed_cycles = 0;
170
171 qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &base_cnt);
172 base_cnt &= 0xffff;
173 while ((int)cycles > elapsed_cycles && times--) {
174 if (chk_inactive)
175 qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &csr);
176
177 qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &cur_cnt);
178 cur_cnt &= 0xffff;
179 elapsed_cycles = cur_cnt - base_cnt;
180
181 if (elapsed_cycles < 0)
182 elapsed_cycles += 0x10000;
183
 184 /* ensure at least 8 cycles have elapsed in wait_cycles */
185 if (elapsed_cycles >= 8 && !(csr & (1 << ACS_ABO_BITPOS)))
186 return 0;
187 }
188 if (!times) {
189 pr_err("QAT: wait_num_cycles time out\n");
190 return -EFAULT;
191 }
192 return 0;
193}
194
195#define CLR_BIT(wrd, bit) (wrd & ~(1 << bit))
196#define SET_BIT(wrd, bit) (wrd | 1 << bit)
197
198int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle,
199 unsigned char ae, unsigned char mode)
200{
201 unsigned int csr, new_csr;
202
203 if ((mode != 4) && (mode != 8)) {
204 pr_err("QAT: bad ctx mode=%d\n", mode);
205 return -EINVAL;
206 }
207
 208 /* Sets the acceleration engine context mode to either four or eight */
209 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
210 csr = IGNORE_W1C_MASK & csr;
211 new_csr = (mode == 4) ?
212 SET_BIT(csr, CE_INUSE_CONTEXTS_BITPOS) :
213 CLR_BIT(csr, CE_INUSE_CONTEXTS_BITPOS);
214 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
215 return 0;
216}
217
218int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle,
219 unsigned char ae, unsigned char mode)
220{
221 unsigned int csr, new_csr;
222
223 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
224 csr &= IGNORE_W1C_MASK;
225
226 new_csr = (mode) ?
227 SET_BIT(csr, CE_NN_MODE_BITPOS) :
228 CLR_BIT(csr, CE_NN_MODE_BITPOS);
229
230 if (new_csr != csr)
231 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
232
233 return 0;
234}
235
236int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle,
237 unsigned char ae, enum icp_qat_uof_regtype lm_type,
238 unsigned char mode)
239{
240 unsigned int csr, new_csr;
241
242 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
243 csr &= IGNORE_W1C_MASK;
244 switch (lm_type) {
245 case ICP_LMEM0:
246 new_csr = (mode) ?
247 SET_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS) :
248 CLR_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS);
249 break;
250 case ICP_LMEM1:
251 new_csr = (mode) ?
252 SET_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS) :
253 CLR_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS);
254 break;
255 default:
256 pr_err("QAT: lmType = 0x%x\n", lm_type);
257 return -EINVAL;
258 }
259
260 if (new_csr != csr)
261 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
262 return 0;
263}
264
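/*
 * Map an (icp_qat_uof_regtype, register number) pair to the register address
 * encoding used in microwords and CSR transfers.  Unknown types yield
 * BAD_REGADDR (0xffff), which callers treat as an error.
 */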
265static unsigned short qat_hal_get_reg_addr(unsigned int type,
266 unsigned short reg_num)
267{
268 unsigned short reg_addr;
 269
270 switch (type) {
271 case ICP_GPA_ABS:
272 case ICP_GPB_ABS:
273 reg_addr = 0x80 | (reg_num & 0x7f);
274 break;
275 case ICP_GPA_REL:
276 case ICP_GPB_REL:
277 reg_addr = reg_num & 0x1f;
278 break;
279 case ICP_SR_RD_REL:
280 case ICP_SR_WR_REL:
281 case ICP_SR_REL:
282 reg_addr = 0x180 | (reg_num & 0x1f);
283 break;
284 case ICP_SR_ABS:
285 reg_addr = 0x140 | ((reg_num & 0x3) << 1);
286 break;
287 case ICP_DR_RD_REL:
288 case ICP_DR_WR_REL:
289 case ICP_DR_REL:
290 reg_addr = 0x1c0 | (reg_num & 0x1f);
291 break;
292 case ICP_DR_ABS:
293 reg_addr = 0x100 | ((reg_num & 0x3) << 1);
294 break;
295 case ICP_NEIGH_REL:
296 reg_addr = 0x280 | (reg_num & 0x1f);
297 break;
298 case ICP_LMEM0:
299 reg_addr = 0x200;
300 break;
301 case ICP_LMEM1:
302 reg_addr = 0x220;
303 break;
304 case ICP_NO_DEST:
305 reg_addr = 0x300 | (reg_num & 0xff);
306 break;
307 default:
308 reg_addr = BAD_REGADDR;
309 break;
310 }
311 return reg_addr;
312}
313
314void qat_hal_reset(struct icp_qat_fw_loader_handle *handle)
315{
316 unsigned int ae_reset_csr;
317
318 ae_reset_csr = GET_GLB_CSR(handle, ICP_RESET);
319 ae_reset_csr |= handle->hal_handle->ae_mask << RST_CSR_AE_LSB;
320 ae_reset_csr |= handle->hal_handle->slice_mask << RST_CSR_QAT_LSB;
321 SET_GLB_CSR(handle, ICP_RESET, ae_reset_csr);
322}
323
324static void qat_hal_wr_indr_csr(struct icp_qat_fw_loader_handle *handle,
325 unsigned char ae, unsigned int ctx_mask,
326 unsigned int ae_csr, unsigned int csr_val)
327{
328 unsigned int ctx, cur_ctx;
329
330 qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
331
332 for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
333 if (!(ctx_mask & (1 << ctx)))
334 continue;
335 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
336 qat_hal_wr_ae_csr(handle, ae, ae_csr, csr_val);
337 }
338
339 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
340}
341
342static void qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle,
343 unsigned char ae, unsigned char ctx,
344 unsigned int ae_csr, unsigned int *csr_val)
345{
346 unsigned int cur_ctx;
347
348 qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
349 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
350 qat_hal_rd_ae_csr(handle, ae, ae_csr, csr_val);
351 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
352}
353
354static void qat_hal_put_sig_event(struct icp_qat_fw_loader_handle *handle,
355 unsigned char ae, unsigned int ctx_mask,
356 unsigned int events)
357{
358 unsigned int ctx, cur_ctx;
359
360 qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
361 for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
362 if (!(ctx_mask & (1 << ctx)))
363 continue;
364 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
365 qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_INDIRECT, events);
366 }
367 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
368}
369
370static void qat_hal_put_wakeup_event(struct icp_qat_fw_loader_handle *handle,
371 unsigned char ae, unsigned int ctx_mask,
372 unsigned int events)
373{
374 unsigned int ctx, cur_ctx;
375
376 qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
377 for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
378 if (!(ctx_mask & (1 << ctx)))
379 continue;
380 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
381 qat_hal_wr_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT,
382 events);
383 }
384 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
385}
386
387static int qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle *handle)
388{
389 unsigned int base_cnt, cur_cnt;
390 unsigned char ae;
391 unsigned int times = MAX_RETRY_TIMES;
392
393 for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
394 if (!(handle->hal_handle->ae_mask & (1 << ae)))
395 continue;
396
397 qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT,
398 (unsigned int *)&base_cnt);
399 base_cnt &= 0xffff;
400
401 do {
402 qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT,
403 (unsigned int *)&cur_cnt);
404 cur_cnt &= 0xffff;
405 } while (times-- && (cur_cnt == base_cnt));
406
407 if (!times) {
408 pr_err("QAT: AE%d is inactive!!\n", ae);
409 return -EFAULT;
410 }
411 }
412
413 return 0;
414}
415
416static void qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle)
417{
418 unsigned int misc_ctl;
419 unsigned char ae;
420
421 /* stop the timestamp timers */
422 misc_ctl = GET_GLB_CSR(handle, MISC_CONTROL);
423 if (misc_ctl & MC_TIMESTAMP_ENABLE)
424 SET_GLB_CSR(handle, MISC_CONTROL, misc_ctl &
425 (~MC_TIMESTAMP_ENABLE));
426
 427 for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
428 if (!(handle->hal_handle->ae_mask & (1 << ae)))
429 continue;
430 qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_LOW, 0);
431 qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_HIGH, 0);
432 }
433 /* start timestamp timers */
434 SET_GLB_CSR(handle, MISC_CONTROL, misc_ctl | MC_TIMESTAMP_ENABLE);
435}
436
437#define ESRAM_AUTO_TINIT BIT(2)
438#define ESRAM_AUTO_TINIT_DONE BIT(3)
439#define ESRAM_AUTO_INIT_USED_CYCLES (1640)
440#define ESRAM_AUTO_INIT_CSR_OFFSET 0xC1C
441static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle)
442{
443 void __iomem *csr_addr = handle->hal_ep_csr_addr_v +
444 ESRAM_AUTO_INIT_CSR_OFFSET;
445 unsigned int csr_val, times = 30;
446
447 csr_val = ADF_CSR_RD(csr_addr, 0);
448 if ((csr_val & ESRAM_AUTO_TINIT) && (csr_val & ESRAM_AUTO_TINIT_DONE))
449 return 0;
450
451 csr_val = ADF_CSR_RD(csr_addr, 0);
452 csr_val |= ESRAM_AUTO_TINIT;
453 ADF_CSR_WR(csr_addr, 0, csr_val);
454
455 do {
456 qat_hal_wait_cycles(handle, 0, ESRAM_AUTO_INIT_USED_CYCLES, 0);
457 csr_val = ADF_CSR_RD(csr_addr, 0);
458 } while (!(csr_val & ESRAM_AUTO_TINIT_DONE) && times--);
459 if ((!times)) {
460 pr_err("QAT: Fail to init eSram!\n");
461 return -EFAULT;
462 }
463 return 0;
464}
465
466#define SHRAM_INIT_CYCLES 2060
467int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle)
468{
469 unsigned int ae_reset_csr;
470 unsigned char ae;
471 unsigned int clk_csr;
472 unsigned int times = 100;
473 unsigned int csr;
474
475 /* write to the reset csr */
476 ae_reset_csr = GET_GLB_CSR(handle, ICP_RESET);
477 ae_reset_csr &= ~(handle->hal_handle->ae_mask << RST_CSR_AE_LSB);
478 ae_reset_csr &= ~(handle->hal_handle->slice_mask << RST_CSR_QAT_LSB);
479 do {
480 SET_GLB_CSR(handle, ICP_RESET, ae_reset_csr);
481 if (!(times--))
482 goto out_err;
483 csr = GET_GLB_CSR(handle, ICP_RESET);
484 } while ((handle->hal_handle->ae_mask |
485 (handle->hal_handle->slice_mask << RST_CSR_QAT_LSB)) & csr);
486 /* enable clock */
487 clk_csr = GET_GLB_CSR(handle, ICP_GLOBAL_CLK_ENABLE);
488 clk_csr |= handle->hal_handle->ae_mask << 0;
489 clk_csr |= handle->hal_handle->slice_mask << 20;
490 SET_GLB_CSR(handle, ICP_GLOBAL_CLK_ENABLE, clk_csr);
491 if (qat_hal_check_ae_alive(handle))
492 goto out_err;
493
494 /* Set undefined power-up/reset states to reasonable default values */
 495 for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
496 if (!(handle->hal_handle->ae_mask & (1 << ae)))
497 continue;
498 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES,
499 INIT_CTX_ENABLE_VALUE);
500 qat_hal_wr_indr_csr(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX,
501 CTX_STS_INDIRECT,
502 handle->hal_handle->upc_mask &
503 INIT_PC_VALUE);
504 qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE);
505 qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE);
506 qat_hal_put_wakeup_event(handle, ae,
507 ICP_QAT_UCLO_AE_ALL_CTX,
508 INIT_WAKEUP_EVENTS_VALUE);
509 qat_hal_put_sig_event(handle, ae,
510 ICP_QAT_UCLO_AE_ALL_CTX,
511 INIT_SIG_EVENTS_VALUE);
512 }
513 if (qat_hal_init_esram(handle))
514 goto out_err;
515 if (qat_hal_wait_cycles(handle, 0, SHRAM_INIT_CYCLES, 0))
516 goto out_err;
517 qat_hal_reset_timestamp(handle);
518
519 return 0;
520out_err:
521 pr_err("QAT: failed to get device out of reset\n");
522 return -EFAULT;
523}
524
525static void qat_hal_disable_ctx(struct icp_qat_fw_loader_handle *handle,
526 unsigned char ae, unsigned int ctx_mask)
527{
528 unsigned int ctx;
529
530 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx);
531 ctx &= IGNORE_W1C_MASK &
532 (~((ctx_mask & ICP_QAT_UCLO_AE_ALL_CTX) << CE_ENABLE_BITPOS));
533 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
534}
535
536static uint64_t qat_hal_parity_64bit(uint64_t word)
537{
538 word ^= word >> 1;
539 word ^= word >> 2;
540 word ^= word >> 4;
541 word ^= word >> 8;
542 word ^= word >> 16;
543 word ^= word >> 32;
544 return word & 1;
545}
546
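/*
 * A microword carries 44 instruction bits plus 7 ECC check bits at bit
 * positions 0x2C-0x32.  Each check bit is the XOR parity of a fixed subset
 * of the instruction bits, selected by the bitN_mask constants below.
 */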
547static uint64_t qat_hal_set_uword_ecc(uint64_t uword)
548{
549 uint64_t bit0_mask = 0xff800007fffULL, bit1_mask = 0x1f801ff801fULL,
550 bit2_mask = 0xe387e0781e1ULL, bit3_mask = 0x7cb8e388e22ULL,
551 bit4_mask = 0xaf5b2c93244ULL, bit5_mask = 0xf56d5525488ULL,
552 bit6_mask = 0xdaf69a46910ULL;
553
554 /* clear the ecc bits */
555 uword &= ~(0x7fULL << 0x2C);
556 uword |= qat_hal_parity_64bit(bit0_mask & uword) << 0x2C;
557 uword |= qat_hal_parity_64bit(bit1_mask & uword) << 0x2D;
558 uword |= qat_hal_parity_64bit(bit2_mask & uword) << 0x2E;
559 uword |= qat_hal_parity_64bit(bit3_mask & uword) << 0x2F;
560 uword |= qat_hal_parity_64bit(bit4_mask & uword) << 0x30;
561 uword |= qat_hal_parity_64bit(bit5_mask & uword) << 0x31;
562 uword |= qat_hal_parity_64bit(bit6_mask & uword) << 0x32;
563 return uword;
564}
565
566void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
567 unsigned char ae, unsigned int uaddr,
568 unsigned int words_num, uint64_t *uword)
569{
570 unsigned int ustore_addr;
571 unsigned int i;
572
573 qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
574 uaddr |= UA_ECS;
575 qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
576 for (i = 0; i < words_num; i++) {
577 unsigned int uwrd_lo, uwrd_hi;
578 uint64_t tmp;
579
580 tmp = qat_hal_set_uword_ecc(uword[i]);
581 uwrd_lo = (unsigned int)(tmp & 0xffffffff);
582 uwrd_hi = (unsigned int)(tmp >> 0x20);
583 qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
584 qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
585 }
586 qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
587}
588
589static void qat_hal_enable_ctx(struct icp_qat_fw_loader_handle *handle,
590 unsigned char ae, unsigned int ctx_mask)
591{
592 unsigned int ctx;
593
594 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx);
595 ctx &= IGNORE_W1C_MASK;
596 ctx_mask &= (ctx & CE_INUSE_CONTEXTS) ? 0x55 : 0xFF;
597 ctx |= (ctx_mask << CE_ENABLE_BITPOS);
598 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
599}
600
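/*
 * Two-pass register initialisation used right after reset: for every enabled
 * AE the read transfer registers are zeroed, shared control store is
 * disabled, NN mode is forced on, and the canned inst[] program is loaded at
 * ustore address 0 and started on all contexts; the second loop then waits
 * for each AE to go idle and restores the power-up defaults (PC, arbiter
 * control, wakeup and signal events).
 */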
601static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
602{
603 unsigned char ae;
604 unsigned int ctx_mask = ICP_QAT_UCLO_AE_ALL_CTX;
605 int times = MAX_RETRY_TIMES;
606 unsigned int csr_val = 0;
607 unsigned short reg;
608 unsigned int savctx = 0;
609 int ret = 0;
610
 611 for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
612 if (!(handle->hal_handle->ae_mask & (1 << ae)))
613 continue;
614 for (reg = 0; reg < ICP_QAT_UCLO_MAX_GPR_REG; reg++) {
615 qat_hal_init_rd_xfer(handle, ae, 0, ICP_SR_RD_ABS,
616 reg, 0);
617 qat_hal_init_rd_xfer(handle, ae, 0, ICP_DR_RD_ABS,
618 reg, 0);
619 }
620 qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
621 csr_val &= ~(1 << MMC_SHARE_CS_BITPOS);
622 qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val);
623 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr_val);
624 csr_val &= IGNORE_W1C_MASK;
625 csr_val |= CE_NN_MODE;
626 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, csr_val);
627 qat_hal_wr_uwords(handle, ae, 0, ARRAY_SIZE(inst),
628 (uint64_t *)inst);
629 qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
630 handle->hal_handle->upc_mask &
631 INIT_PC_VALUE);
632 qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
633 qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, 0);
634 qat_hal_put_wakeup_event(handle, ae, ctx_mask, XCWE_VOLUNTARY);
635 qat_hal_wr_indr_csr(handle, ae, ctx_mask,
636 CTX_SIG_EVENTS_INDIRECT, 0);
637 qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
638 qat_hal_enable_ctx(handle, ae, ctx_mask);
639 }
 640 for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
641 if (!(handle->hal_handle->ae_mask & (1 << ae)))
642 continue;
643 /* wait for AE to finish */
644 do {
645 ret = qat_hal_wait_cycles(handle, ae, 20, 1);
646 } while (ret && times--);
647
648 if (!times) {
649 pr_err("QAT: clear GPR of AE %d failed", ae);
650 return -EINVAL;
651 }
652 qat_hal_disable_ctx(handle, ae, ctx_mask);
653 qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
654 savctx & ACS_ACNO);
655 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES,
656 INIT_CTX_ENABLE_VALUE);
657 qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
658 handle->hal_handle->upc_mask &
659 INIT_PC_VALUE);
660 qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE);
661 qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE);
662 qat_hal_put_wakeup_event(handle, ae, ctx_mask,
663 INIT_WAKEUP_EVENTS_VALUE);
664 qat_hal_put_sig_event(handle, ae, ctx_mask,
665 INIT_SIG_EVENTS_VALUE);
666 }
667 return 0;
668}
669
670#define ICP_DH895XCC_AE_OFFSET 0x20000
671#define ICP_DH895XCC_CAP_OFFSET (ICP_DH895XCC_AE_OFFSET + 0x10000)
672#define LOCAL_TO_XFER_REG_OFFSET 0x800
673#define ICP_DH895XCC_EP_OFFSET 0x3a000
674#define ICP_DH895XCC_PMISC_BAR 1
675int qat_hal_init(struct adf_accel_dev *accel_dev)
676{
677 unsigned char ae;
678 unsigned int max_en_ae_id = 0;
679 struct icp_qat_fw_loader_handle *handle;
680 struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
681 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
682 struct adf_bar *bar =
683 &pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)];
684
685 handle = kzalloc(sizeof(*handle), GFP_KERNEL);
686 if (!handle)
 687 return -ENOMEM;
688
689 handle->hal_cap_g_ctl_csr_addr_v = bar->virt_addr +
690 ICP_DH895XCC_CAP_OFFSET;
691 handle->hal_cap_ae_xfer_csr_addr_v = bar->virt_addr +
692 ICP_DH895XCC_AE_OFFSET;
693 handle->hal_ep_csr_addr_v = bar->virt_addr + ICP_DH895XCC_EP_OFFSET;
694 handle->hal_cap_ae_local_csr_addr_v =
695 handle->hal_cap_ae_xfer_csr_addr_v + LOCAL_TO_XFER_REG_OFFSET;
696
697 handle->hal_handle = kzalloc(sizeof(*handle->hal_handle), GFP_KERNEL);
698 if (!handle->hal_handle)
699 goto out_hal_handle;
700 handle->hal_handle->revision_id = accel_dev->accel_pci_dev.revid;
701 handle->hal_handle->ae_mask = hw_data->ae_mask;
702 handle->hal_handle->slice_mask = hw_data->accel_mask;
703 /* create AE objects */
704 handle->hal_handle->upc_mask = 0x1ffff;
705 handle->hal_handle->max_ustore = 0x4000;
706 for (ae = 0; ae < ICP_QAT_UCLO_MAX_AE; ae++) {
707 if (!(hw_data->ae_mask & (1 << ae)))
708 continue;
709 handle->hal_handle->aes[ae].free_addr = 0;
710 handle->hal_handle->aes[ae].free_size =
711 handle->hal_handle->max_ustore;
712 handle->hal_handle->aes[ae].ustore_size =
713 handle->hal_handle->max_ustore;
714 handle->hal_handle->aes[ae].live_ctx_mask =
715 ICP_QAT_UCLO_AE_ALL_CTX;
 716 max_en_ae_id = ae;
 717 }
 718 handle->hal_handle->ae_max_num = max_en_ae_id + 1;
719 /* take all AEs out of reset */
720 if (qat_hal_clr_reset(handle)) {
 721 dev_err(&GET_DEV(accel_dev), "qat_hal_clr_reset error\n");
722 goto out_err;
723 }
724 if (qat_hal_clear_gpr(handle))
725 goto out_err;
726 /* Set SIGNATURE_ENABLE[0] to 0x1 in order to enable ALU_OUT csr */
727 for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
728 unsigned int csr_val = 0;
729
730 if (!(hw_data->ae_mask & (1 << ae)))
731 continue;
732 qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE, &csr_val);
733 csr_val |= 0x1;
734 qat_hal_wr_ae_csr(handle, ae, SIGNATURE_ENABLE, csr_val);
735 }
736 accel_dev->fw_loader->fw_loader = handle;
737 return 0;
738
739out_err:
740 kfree(handle->hal_handle);
741out_hal_handle:
742 kfree(handle);
743 return -EFAULT;
744}
745
746void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle)
747{
748 if (!handle)
749 return;
750 kfree(handle->hal_handle);
751 kfree(handle);
752}
753
754void qat_hal_start(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
755 unsigned int ctx_mask)
756{
757 qat_hal_put_wakeup_event(handle, ae, (~ctx_mask) &
758 ICP_QAT_UCLO_AE_ALL_CTX, 0x10000);
759 qat_hal_enable_ctx(handle, ae, ctx_mask);
760}
761
762void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
763 unsigned int ctx_mask)
764{
765 qat_hal_disable_ctx(handle, ae, ctx_mask);
766}
767
768void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle,
769 unsigned char ae, unsigned int ctx_mask, unsigned int upc)
770{
771 qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
772 handle->hal_handle->upc_mask & upc);
773}
774
775static void qat_hal_get_uwords(struct icp_qat_fw_loader_handle *handle,
776 unsigned char ae, unsigned int uaddr,
777 unsigned int words_num, uint64_t *uword)
778{
779 unsigned int i, uwrd_lo, uwrd_hi;
780 unsigned int ustore_addr, misc_control;
781
782 qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &misc_control);
783 qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL,
784 misc_control & 0xfffffffb);
785 qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
786 uaddr |= UA_ECS;
787 for (i = 0; i < words_num; i++) {
788 qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
789 uaddr++;
790 qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_LOWER, &uwrd_lo);
791 qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_UPPER, &uwrd_hi);
792 uword[i] = uwrd_hi;
793 uword[i] = (uword[i] << 0x20) | uwrd_lo;
794 }
795 qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, misc_control);
796 qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
797}
798
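/*
 * qat_hal_wr_umem() below stores 32-bit data words into the umem area of the
 * ustore: each word is scattered across the USTORE_DATA_LOWER/UPPER
 * registers in the layout the hardware expects, with a parity bit for each
 * 16-bit half derived via hweight32().
 */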
799void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle,
800 unsigned char ae, unsigned int uaddr,
801 unsigned int words_num, unsigned int *data)
802{
803 unsigned int i, ustore_addr;
804
805 qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
806 uaddr |= UA_ECS;
807 qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
808 for (i = 0; i < words_num; i++) {
809 unsigned int uwrd_lo, uwrd_hi, tmp;
 810
811 uwrd_lo = ((data[i] & 0xfff0000) << 4) | (0x3 << 18) |
812 ((data[i] & 0xff00) << 2) |
813 (0x3 << 8) | (data[i] & 0xff);
814 uwrd_hi = (0xf << 4) | ((data[i] & 0xf0000000) >> 28);
 815 uwrd_hi |= (hweight32(data[i] & 0xffff) & 0x1) << 8;
 816 tmp = ((data[i] >> 0x10) & 0xffff);
 817 uwrd_hi |= (hweight32(tmp) & 0x1) << 9;
818 qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
819 qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
820 }
821 qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
822}
823
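/*
 * qat_hal_exec_micro_inst() runs a short microcode sequence on one context:
 * the relevant context state (LM addresses, PC, CC_ENABLE, wakeup/signal
 * events, arbiter control) and, when they fit, the first inst_num microwords
 * of ustore are saved; the supplied instructions are written at address 0
 * and executed until the AE goes idle or max_cycle elapses; the final PC is
 * optionally reported via endpc, and the saved state is then written back.
 */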
824#define MAX_EXEC_INST 100
825static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
826 unsigned char ae, unsigned char ctx,
827 uint64_t *micro_inst, unsigned int inst_num,
828 int code_off, unsigned int max_cycle,
829 unsigned int *endpc)
830{
831 uint64_t savuwords[MAX_EXEC_INST];
832 unsigned int ind_lm_addr0, ind_lm_addr1;
833 unsigned int ind_lm_addr_byte0, ind_lm_addr_byte1;
834 unsigned int ind_cnt_sig;
835 unsigned int ind_sig, act_sig;
836 unsigned int csr_val = 0, newcsr_val;
837 unsigned int savctx;
838 unsigned int savcc, wakeup_events, savpc;
839 unsigned int ctxarb_ctl, ctx_enables;
840
841 if ((inst_num > handle->hal_handle->max_ustore) || !micro_inst) {
 842 pr_err("QAT: invalid instruction num %d\n", inst_num);
843 return -EINVAL;
844 }
845 /* save current context */
846 qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_0_INDIRECT, &ind_lm_addr0);
847 qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_1_INDIRECT, &ind_lm_addr1);
848 qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_0_BYTE_INDEX,
849 &ind_lm_addr_byte0);
850 qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_1_BYTE_INDEX,
851 &ind_lm_addr_byte1);
852 if (inst_num <= MAX_EXEC_INST)
853 qat_hal_get_uwords(handle, ae, 0, inst_num, savuwords);
854 qat_hal_get_wakeup_event(handle, ae, ctx, &wakeup_events);
855 qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT, &savpc);
856 savpc = (savpc & handle->hal_handle->upc_mask) >> 0;
857 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
858 ctx_enables &= IGNORE_W1C_MASK;
859 qat_hal_rd_ae_csr(handle, ae, CC_ENABLE, &savcc);
860 qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
861 qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_ctl);
862 qat_hal_rd_indr_csr(handle, ae, ctx, FUTURE_COUNT_SIGNAL_INDIRECT,
863 &ind_cnt_sig);
864 qat_hal_rd_indr_csr(handle, ae, ctx, CTX_SIG_EVENTS_INDIRECT, &ind_sig);
865 qat_hal_rd_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, &act_sig);
866 /* execute micro codes */
867 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
868 qat_hal_wr_uwords(handle, ae, 0, inst_num, micro_inst);
869 qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT, 0);
870 qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, ctx & ACS_ACNO);
871 if (code_off)
872 qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc & 0xffffdfff);
873 qat_hal_put_wakeup_event(handle, ae, (1 << ctx), XCWE_VOLUNTARY);
874 qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_SIG_EVENTS_INDIRECT, 0);
875 qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
876 qat_hal_enable_ctx(handle, ae, (1 << ctx));
877 /* wait for micro codes to finish */
878 if (qat_hal_wait_cycles(handle, ae, max_cycle, 1) != 0)
879 return -EFAULT;
880 if (endpc) {
881 unsigned int ctx_status;
 882
883 qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT,
884 &ctx_status);
885 *endpc = ctx_status & handle->hal_handle->upc_mask;
886 }
 887 /* restore to saved context */
888 qat_hal_disable_ctx(handle, ae, (1 << ctx));
889 if (inst_num <= MAX_EXEC_INST)
890 qat_hal_wr_uwords(handle, ae, 0, inst_num, savuwords);
891 qat_hal_put_wakeup_event(handle, ae, (1 << ctx), wakeup_events);
892 qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT,
893 handle->hal_handle->upc_mask & savpc);
894 qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
895 newcsr_val = CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS);
896 qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val);
897 qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc);
898 qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, savctx & ACS_ACNO);
899 qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_ctl);
900 qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
901 LM_ADDR_0_INDIRECT, ind_lm_addr0);
902 qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
903 LM_ADDR_1_INDIRECT, ind_lm_addr1);
904 qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
905 INDIRECT_LM_ADDR_0_BYTE_INDEX, ind_lm_addr_byte0);
906 qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
907 INDIRECT_LM_ADDR_1_BYTE_INDEX, ind_lm_addr_byte1);
908 qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
909 FUTURE_COUNT_SIGNAL_INDIRECT, ind_cnt_sig);
910 qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
911 CTX_SIG_EVENTS_INDIRECT, ind_sig);
912 qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, act_sig);
913 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
914
915 return 0;
916}
917
918static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle,
919 unsigned char ae, unsigned char ctx,
920 enum icp_qat_uof_regtype reg_type,
921 unsigned short reg_num, unsigned int *data)
922{
923 unsigned int savctx, uaddr, uwrd_lo, uwrd_hi;
924 unsigned int ctxarb_cntl, ustore_addr, ctx_enables;
925 unsigned short reg_addr;
926 int status = 0;
927 uint64_t insts, savuword;
928
929 reg_addr = qat_hal_get_reg_addr(reg_type, reg_num);
930 if (reg_addr == BAD_REGADDR) {
931 pr_err("QAT: bad regaddr=0x%x\n", reg_addr);
932 return -EINVAL;
933 }
934 switch (reg_type) {
935 case ICP_GPA_REL:
936 insts = 0xA070000000ull | (reg_addr & 0x3ff);
937 break;
938 default:
939 insts = (uint64_t)0xA030000000ull | ((reg_addr & 0x3ff) << 10);
940 break;
941 }
942 qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
943 qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_cntl);
944 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
945 ctx_enables &= IGNORE_W1C_MASK;
946 if (ctx != (savctx & ACS_ACNO))
947 qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
948 ctx & ACS_ACNO);
949 qat_hal_get_uwords(handle, ae, 0, 1, &savuword);
950 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
951 qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
952 uaddr = UA_ECS;
953 qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
954 insts = qat_hal_set_uword_ecc(insts);
955 uwrd_lo = (unsigned int)(insts & 0xffffffff);
956 uwrd_hi = (unsigned int)(insts >> 0x20);
957 qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
958 qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
959 qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
960 /* delay for at least 8 cycles */
961 qat_hal_wait_cycles(handle, ae, 0x8, 0);
962 /*
963 * read ALU output
964 * the instruction should have been executed
965 * prior to clearing the ECS in putUwords
966 */
967 qat_hal_rd_ae_csr(handle, ae, ALU_OUT, data);
968 qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
969 qat_hal_wr_uwords(handle, ae, 0, 1, &savuword);
970 if (ctx != (savctx & ACS_ACNO))
971 qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
972 savctx & ACS_ACNO);
973 qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_cntl);
974 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
975
976 return status;
977}
978
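/*
 * qat_hal_wr_rel_reg() below writes a 32-bit value into a context-relative
 * register by patching the two 16-bit halves of the value into a small
 * imm_w1/imm_w0 microcode sequence and executing it on the target context
 * via qat_hal_exec_micro_inst().
 */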
979static int qat_hal_wr_rel_reg(struct icp_qat_fw_loader_handle *handle,
980 unsigned char ae, unsigned char ctx,
981 enum icp_qat_uof_regtype reg_type,
982 unsigned short reg_num, unsigned int data)
983{
984 unsigned short src_hiaddr, src_lowaddr, dest_addr, data16hi, data16lo;
985 uint64_t insts[] = {
986 0x0F440000000ull,
987 0x0F040000000ull,
988 0x0F0000C0300ull,
989 0x0E000010000ull
990 };
991 const int num_inst = ARRAY_SIZE(insts), code_off = 1;
992 const int imm_w1 = 0, imm_w0 = 1;
993
994 dest_addr = qat_hal_get_reg_addr(reg_type, reg_num);
995 if (dest_addr == BAD_REGADDR) {
996 pr_err("QAT: bad destAddr=0x%x\n", dest_addr);
997 return -EINVAL;
998 }
999
1000 data16lo = 0xffff & data;
1001 data16hi = 0xffff & (data >> 0x10);
1002 src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)
1003 (0xff & data16hi));
1004 src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)
1005 (0xff & data16lo));
1006 switch (reg_type) {
1007 case ICP_GPA_REL:
1008 insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) |
1009 ((src_hiaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);
1010 insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) |
1011 ((src_lowaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);
1012 break;
1013 default:
1014 insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) |
1015 ((dest_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff);
1016
1017 insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) |
1018 ((dest_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff);
1019 break;
1020 }
1021
1022 return qat_hal_exec_micro_inst(handle, ae, ctx, insts, num_inst,
1023 code_off, num_inst * 0x5, NULL);
1024}
1025
1026int qat_hal_get_ins_num(void)
1027{
1028 return ARRAY_SIZE(inst_4b);
1029}
1030
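/*
 * qat_hal_concat_micro_code() appends the inst_4b template to micro_inst[]
 * starting at inst_num and patches the target local-memory address and the
 * 32-bit value (low half, then high half) into the template's immediate
 * fields using the INSERT_IMMED_GPR* macros; it returns the number of
 * microwords added.  Only value[0] is consumed per call.
 */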
1031static int qat_hal_concat_micro_code(uint64_t *micro_inst,
1032 unsigned int inst_num, unsigned int size,
1033 unsigned int addr, unsigned int *value)
1034{
1035 int i, val_indx;
1036 unsigned int cur_value;
1037 const uint64_t *inst_arr;
1038 int fixup_offset;
1039 int usize = 0;
1040 int orig_num;
1041
1042 orig_num = inst_num;
1043 val_indx = 0;
1044 cur_value = value[val_indx++];
1045 inst_arr = inst_4b;
1046 usize = ARRAY_SIZE(inst_4b);
1047 fixup_offset = inst_num;
1048 for (i = 0; i < usize; i++)
1049 micro_inst[inst_num++] = inst_arr[i];
1050 INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], (addr));
1051 fixup_offset++;
1052 INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], 0);
1053 fixup_offset++;
1054 INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0));
1055 fixup_offset++;
1056 INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0x10));
1057
1058 return inst_num - orig_num;
1059}
1060
1061static int qat_hal_exec_micro_init_lm(struct icp_qat_fw_loader_handle *handle,
1062 unsigned char ae, unsigned char ctx,
1063 int *pfirst_exec, uint64_t *micro_inst,
1064 unsigned int inst_num)
1065{
1066 int stat = 0;
1067 unsigned int gpra0 = 0, gpra1 = 0, gpra2 = 0;
1068 unsigned int gprb0 = 0, gprb1 = 0;
1069
1070 if (*pfirst_exec) {
1071 qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, &gpra0);
1072 qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, &gpra1);
1073 qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, &gpra2);
1074 qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, &gprb0);
1075 qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, &gprb1);
1076 *pfirst_exec = 0;
1077 }
1078 stat = qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, inst_num, 1,
1079 inst_num * 0x5, NULL);
1080 if (stat != 0)
1081 return -EFAULT;
1082 qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, gpra0);
1083 qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, gpra1);
1084 qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, gpra2);
1085 qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, gprb0);
1086 qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, gprb1);
1087
1088 return 0;
1089}
1090
1091int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle,
1092 unsigned char ae,
1093 struct icp_qat_uof_batch_init *lm_init_header)
1094{
1095 struct icp_qat_uof_batch_init *plm_init;
1096 uint64_t *micro_inst_arry;
1097 int micro_inst_num;
1098 int alloc_inst_size;
1099 int first_exec = 1;
1100 int stat = 0;
1101
1102 plm_init = lm_init_header->next;
1103 alloc_inst_size = lm_init_header->size;
1104 if ((unsigned int)alloc_inst_size > handle->hal_handle->max_ustore)
1105 alloc_inst_size = handle->hal_handle->max_ustore;
1106 micro_inst_arry = kmalloc_array(alloc_inst_size, sizeof(uint64_t),
1107 GFP_KERNEL);
1108 if (!micro_inst_arry)
1109 return -ENOMEM;
1110 micro_inst_num = 0;
1111 while (plm_init) {
1112 unsigned int addr, *value, size;
1113
1114 ae = plm_init->ae;
1115 addr = plm_init->addr;
1116 value = plm_init->value;
1117 size = plm_init->size;
1118 micro_inst_num += qat_hal_concat_micro_code(micro_inst_arry,
1119 micro_inst_num,
1120 size, addr, value);
1121 plm_init = plm_init->next;
1122 }
1123 /* exec micro codes */
1124 if (micro_inst_arry && (micro_inst_num > 0)) {
1125 micro_inst_arry[micro_inst_num++] = 0x0E000010000ull;
1126 stat = qat_hal_exec_micro_init_lm(handle, ae, 0, &first_exec,
1127 micro_inst_arry,
1128 micro_inst_num);
1129 }
1130 kfree(micro_inst_arry);
1131 return stat;
1132}
1133
1134static int qat_hal_put_rel_rd_xfer(struct icp_qat_fw_loader_handle *handle,
1135 unsigned char ae, unsigned char ctx,
1136 enum icp_qat_uof_regtype reg_type,
1137 unsigned short reg_num, unsigned int val)
1138{
1139 int status = 0;
1140 unsigned int reg_addr;
1141 unsigned int ctx_enables;
1142 unsigned short mask;
1143 unsigned short dr_offset = 0x10;
1144
1145 status = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
1146 if (CE_INUSE_CONTEXTS & ctx_enables) {
1147 if (ctx & 0x1) {
1148 pr_err("QAT: bad 4-ctx mode,ctx=0x%x\n", ctx);
1149 return -EINVAL;
1150 }
1151 mask = 0x1f;
1152 dr_offset = 0x20;
1153 } else {
1154 mask = 0x0f;
1155 }
1156 if (reg_num & ~mask)
1157 return -EINVAL;
1158 reg_addr = reg_num + (ctx << 0x5);
1159 switch (reg_type) {
1160 case ICP_SR_RD_REL:
1161 case ICP_SR_REL:
1162 SET_AE_XFER(handle, ae, reg_addr, val);
1163 break;
1164 case ICP_DR_RD_REL:
1165 case ICP_DR_REL:
1166 SET_AE_XFER(handle, ae, (reg_addr + dr_offset), val);
1167 break;
1168 default:
1169 status = -EINVAL;
1170 break;
1171 }
1172 return status;
1173}
1174
1175static int qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle,
1176 unsigned char ae, unsigned char ctx,
1177 enum icp_qat_uof_regtype reg_type,
1178 unsigned short reg_num, unsigned int data)
1179{
1180 unsigned int gprval, ctx_enables;
1181 unsigned short src_hiaddr, src_lowaddr, gpr_addr, xfr_addr, data16hi,
1182 data16low;
1183 unsigned short reg_mask;
1184 int status = 0;
1185 uint64_t micro_inst[] = {
1186 0x0F440000000ull,
1187 0x0F040000000ull,
1188 0x0A000000000ull,
1189 0x0F0000C0300ull,
1190 0x0E000010000ull
1191 };
1192 const int num_inst = ARRAY_SIZE(micro_inst), code_off = 1;
1193 const unsigned short gprnum = 0, dly = num_inst * 0x5;
1194
1195 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
1196 if (CE_INUSE_CONTEXTS & ctx_enables) {
1197 if (ctx & 0x1) {
1198 pr_err("QAT: 4-ctx mode,ctx=0x%x\n", ctx);
1199 return -EINVAL;
1200 }
1201 reg_mask = (unsigned short)~0x1f;
1202 } else {
1203 reg_mask = (unsigned short)~0xf;
1204 }
1205 if (reg_num & reg_mask)
1206 return -EINVAL;
1207 xfr_addr = qat_hal_get_reg_addr(reg_type, reg_num);
1208 if (xfr_addr == BAD_REGADDR) {
1209 pr_err("QAT: bad xfrAddr=0x%x\n", xfr_addr);
1210 return -EINVAL;
1211 }
1212 qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, &gprval);
1213 gpr_addr = qat_hal_get_reg_addr(ICP_GPB_REL, gprnum);
1214 data16low = 0xffff & data;
1215 data16hi = 0xffff & (data >> 0x10);
1216 src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
1217 (unsigned short)(0xff & data16hi));
1218 src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
1219 (unsigned short)(0xff & data16low));
1220 micro_inst[0] = micro_inst[0x0] | ((data16hi >> 8) << 20) |
1221 ((gpr_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff);
1222 micro_inst[1] = micro_inst[0x1] | ((data16low >> 8) << 20) |
1223 ((gpr_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff);
1224 micro_inst[0x2] = micro_inst[0x2] |
1225 ((xfr_addr & 0x3ff) << 20) | ((gpr_addr & 0x3ff) << 10);
1226 status = qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, num_inst,
1227 code_off, dly, NULL);
1228 qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, gprval);
1229 return status;
1230}
1231
1232static int qat_hal_put_rel_nn(struct icp_qat_fw_loader_handle *handle,
1233 unsigned char ae, unsigned char ctx,
1234 unsigned short nn, unsigned int val)
1235{
1236 unsigned int ctx_enables;
1237 int stat = 0;
1238
1239 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
1240 ctx_enables &= IGNORE_W1C_MASK;
1241 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables | CE_NN_MODE);
1242
1243 stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, ICP_NEIGH_REL, nn, val);
1244 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
1245 return stat;
1246}
1247
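/*
 * Absolute-to-relative register mapping: in 4-context mode an absolute
 * register index selects relative register (absreg_num & 0x1F) owned by an
 * even-numbered context ((absreg_num >> 4) & 0x6); in 8-context mode it maps
 * to (absreg_num & 0x0F) on context ((absreg_num >> 4) & 0x7).
 */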
1248static int qat_hal_convert_abs_to_rel(struct icp_qat_fw_loader_handle
1249 *handle, unsigned char ae,
1250 unsigned short absreg_num,
1251 unsigned short *relreg,
1252 unsigned char *ctx)
1253{
1254 unsigned int ctx_enables;
1255
1256 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
1257 if (ctx_enables & CE_INUSE_CONTEXTS) {
1258 /* 4-ctx mode */
1259 *relreg = absreg_num & 0x1F;
1260 *ctx = (absreg_num >> 0x4) & 0x6;
1261 } else {
1262 /* 8-ctx mode */
1263 *relreg = absreg_num & 0x0F;
1264 *ctx = (absreg_num >> 0x4) & 0x7;
1265 }
1266 return 0;
1267}
1268
1269int qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle,
1270 unsigned char ae, unsigned char ctx_mask,
1271 enum icp_qat_uof_regtype reg_type,
1272 unsigned short reg_num, unsigned int regdata)
1273{
1274 int stat = 0;
1275 unsigned short reg;
1276 unsigned char ctx = 0;
1277 enum icp_qat_uof_regtype type;
1278
1279 if (reg_num >= ICP_QAT_UCLO_MAX_GPR_REG)
1280 return -EINVAL;
1281
1282 do {
1283 if (ctx_mask == 0) {
1284 qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
1285 &ctx);
1286 type = reg_type - 1;
1287 } else {
1288 reg = reg_num;
1289 type = reg_type;
1290 if (!test_bit(ctx, (unsigned long *)&ctx_mask))
1291 continue;
1292 }
1293 stat = qat_hal_wr_rel_reg(handle, ae, ctx, type, reg, regdata);
1294 if (stat) {
1295 pr_err("QAT: write gpr fail\n");
1296 return -EINVAL;
1297 }
1298 } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
1299
1300 return 0;
1301}
1302
1303int qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle,
1304 unsigned char ae, unsigned char ctx_mask,
1305 enum icp_qat_uof_regtype reg_type,
1306 unsigned short reg_num, unsigned int regdata)
1307{
1308 int stat = 0;
1309 unsigned short reg;
1310 unsigned char ctx = 0;
1311 enum icp_qat_uof_regtype type;
1312
1313 if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG)
1314 return -EINVAL;
1315
1316 do {
1317 if (ctx_mask == 0) {
1318 qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
1319 &ctx);
1320 type = reg_type - 3;
1321 } else {
1322 reg = reg_num;
1323 type = reg_type;
1324 if (!test_bit(ctx, (unsigned long *)&ctx_mask))
1325 continue;
1326 }
1327 stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, type, reg,
1328 regdata);
1329 if (stat) {
1330 pr_err("QAT: write wr xfer fail\n");
1331 return -EINVAL;
1332 }
1333 } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
1334
1335 return 0;
1336}
1337
1338int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle,
1339 unsigned char ae, unsigned char ctx_mask,
1340 enum icp_qat_uof_regtype reg_type,
1341 unsigned short reg_num, unsigned int regdata)
1342{
1343 int stat = 0;
1344 unsigned short reg;
1345 unsigned char ctx = 0;
1346 enum icp_qat_uof_regtype type;
1347
1348 if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG)
1349 return -EINVAL;
1350
1351 do {
1352 if (ctx_mask == 0) {
1353 qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
1354 &ctx);
1355 type = reg_type - 3;
1356 } else {
1357 reg = reg_num;
1358 type = reg_type;
1359 if (!test_bit(ctx, (unsigned long *)&ctx_mask))
1360 continue;
1361 }
1362 stat = qat_hal_put_rel_rd_xfer(handle, ae, ctx, type, reg,
1363 regdata);
1364 if (stat) {
1365 pr_err("QAT: write rd xfer fail\n");
1366 return -EINVAL;
1367 }
1368 } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
1369
1370 return 0;
1371}
1372
1373int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle,
1374 unsigned char ae, unsigned char ctx_mask,
1375 unsigned short reg_num, unsigned int regdata)
1376{
1377 int stat = 0;
1378 unsigned char ctx;
1379
1380 if (ctx_mask == 0)
1381 return -EINVAL;
1382
1383 for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
1384 if (!test_bit(ctx, (unsigned long *)&ctx_mask))
1385 continue;
1386 stat = qat_hal_put_rel_nn(handle, ae, ctx, reg_num, regdata);
1387 if (stat) {
1388 pr_err("QAT: write neigh error\n");
1389 return -EINVAL;
1390 }
1391 }
1392
1393 return 0;
1394}