crypto: qat - Updated Firmware Info Metadata
[deliverable/linux.git] / drivers/crypto/qat/qat_common/qat_hal.c
1/*
2 This file is provided under a dual BSD/GPLv2 license. When using or
3 redistributing this file, you may do so under either license.
4
5 GPL LICENSE SUMMARY
6 Copyright(c) 2014 Intel Corporation.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of version 2 of the GNU General Public License as
9 published by the Free Software Foundation.
10
11 This program is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
15
16 Contact Information:
17 qat-linux@intel.com
18
19 BSD LICENSE
20 Copyright(c) 2014 Intel Corporation.
21 Redistribution and use in source and binary forms, with or without
22 modification, are permitted provided that the following conditions
23 are met:
24
25 * Redistributions of source code must retain the above copyright
26 notice, this list of conditions and the following disclaimer.
27 * Redistributions in binary form must reproduce the above copyright
28 notice, this list of conditions and the following disclaimer in
29 the documentation and/or other materials provided with the
30 distribution.
31 * Neither the name of Intel Corporation nor the names of its
32 contributors may be used to endorse or promote products derived
33 from this software without specific prior written permission.
34
35 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
46*/
47#include <linux/slab.h>
48
49#include "adf_accel_devices.h"
50#include "adf_common_drv.h"
51#include "icp_qat_hal.h"
52#include "icp_qat_uclo.h"
53
54#define BAD_REGADDR 0xffff
55#define MAX_RETRY_TIMES 10000
56#define INIT_CTX_ARB_VALUE 0x0
57#define INIT_CTX_ENABLE_VALUE 0x0
58#define INIT_PC_VALUE 0x0
59#define INIT_WAKEUP_EVENTS_VALUE 0x1
60#define INIT_SIG_EVENTS_VALUE 0x1
61#define INIT_CCENABLE_VALUE 0x2000
62#define RST_CSR_QAT_LSB 20
63#define RST_CSR_AE_LSB 0
64#define MC_TIMESTAMP_ENABLE (0x1 << 7)
65
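/*
 * IGNORE_W1C_MASK masks off the write-1-to-clear status bits in
 * CTX_ENABLES (breakpoint and parity-error indications) so that
 * read-modify-write updates do not clear them by accident.  The
 * INSERT_IMMED_GPR[A|B]_CONST macros patch an immediate constant into
 * the canned microword templates used for local memory initialization.
 */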
66#define IGNORE_W1C_MASK ((~(1 << CE_BREAKPOINT_BITPOS)) & \
67 (~(1 << CE_CNTL_STORE_PARITY_ERROR_BITPOS)) & \
68 (~(1 << CE_REG_PAR_ERR_BITPOS)))
69#define INSERT_IMMED_GPRA_CONST(inst, const_val) \
70 (inst = ((inst & 0xFFFF00C03FFull) | \
71 ((((const_val) << 12) & 0x0FF00000ull) | \
72 (((const_val) << 10) & 0x0003FC00ull))))
73#define INSERT_IMMED_GPRB_CONST(inst, const_val) \
74 (inst = ((inst & 0xFFFF00FFF00ull) | \
75 ((((const_val) << 12) & 0x0FF00000ull) | \
76 (((const_val) << 0) & 0x000000FFull))))
77
78#define AE(handle, ae) handle->hal_handle->aes[ae]
79
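/*
 * Canned microcode: inst_4b is the template patched by
 * qat_hal_concat_micro_code() to store one 32-bit value at a local
 * memory address, and inst is executed by qat_hal_clear_gpr() to bring
 * the GPRs and transfer registers of an AE to a known state.
 */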
80static const uint64_t inst_4b[] = {
81 0x0F0400C0000ull, 0x0F4400C0000ull, 0x0F040000300ull, 0x0F440000300ull,
82 0x0FC066C0000ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
83 0x0A021000000ull
84};
85
86static const uint64_t inst[] = {
87 0x0F0000C0000ull, 0x0F000000380ull, 0x0D805000011ull, 0x0FC082C0300ull,
88 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
89 0x0A0643C0000ull, 0x0BAC0000301ull, 0x0D802000101ull, 0x0F0000C0001ull,
90 0x0FC066C0001ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
91 0x0F000400300ull, 0x0A0610C0000ull, 0x0BAC0000301ull, 0x0D804400101ull,
92 0x0A0580C0000ull, 0x0A0581C0000ull, 0x0A0582C0000ull, 0x0A0583C0000ull,
93 0x0A0584C0000ull, 0x0A0585C0000ull, 0x0A0586C0000ull, 0x0A0587C0000ull,
94 0x0A0588C0000ull, 0x0A0589C0000ull, 0x0A058AC0000ull, 0x0A058BC0000ull,
95 0x0A058CC0000ull, 0x0A058DC0000ull, 0x0A058EC0000ull, 0x0A058FC0000ull,
96 0x0A05C0C0000ull, 0x0A05C1C0000ull, 0x0A05C2C0000ull, 0x0A05C3C0000ull,
97 0x0A05C4C0000ull, 0x0A05C5C0000ull, 0x0A05C6C0000ull, 0x0A05C7C0000ull,
98 0x0A05C8C0000ull, 0x0A05C9C0000ull, 0x0A05CAC0000ull, 0x0A05CBC0000ull,
99 0x0A05CCC0000ull, 0x0A05CDC0000ull, 0x0A05CEC0000ull, 0x0A05CFC0000ull,
100 0x0A0400C0000ull, 0x0B0400C0000ull, 0x0A0401C0000ull, 0x0B0401C0000ull,
101 0x0A0402C0000ull, 0x0B0402C0000ull, 0x0A0403C0000ull, 0x0B0403C0000ull,
102 0x0A0404C0000ull, 0x0B0404C0000ull, 0x0A0405C0000ull, 0x0B0405C0000ull,
103 0x0A0406C0000ull, 0x0B0406C0000ull, 0x0A0407C0000ull, 0x0B0407C0000ull,
104 0x0A0408C0000ull, 0x0B0408C0000ull, 0x0A0409C0000ull, 0x0B0409C0000ull,
105 0x0A040AC0000ull, 0x0B040AC0000ull, 0x0A040BC0000ull, 0x0B040BC0000ull,
106 0x0A040CC0000ull, 0x0B040CC0000ull, 0x0A040DC0000ull, 0x0B040DC0000ull,
107 0x0A040EC0000ull, 0x0B040EC0000ull, 0x0A040FC0000ull, 0x0B040FC0000ull,
108 0x0D81581C010ull, 0x0E000010000ull, 0x0E000010000ull,
109};
110
111void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle,
112 unsigned char ae, unsigned int ctx_mask)
113{
114 AE(handle, ae).live_ctx_mask = ctx_mask;
115}
116
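/*
 * AE local CSR accesses are retried until the LCS_STATUS flag in
 * LOCAL_CSR_STATUS clears, up to CSR_RETRY_TIMES attempts.
 */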
117#define CSR_RETRY_TIMES 500
118static int qat_hal_rd_ae_csr(struct icp_qat_fw_loader_handle *handle,
119 unsigned char ae, unsigned int csr,
120 unsigned int *value)
121{
122 unsigned int iterations = CSR_RETRY_TIMES;
123
124 do {
125 *value = GET_AE_CSR(handle, ae, csr);
126 if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
127 return 0;
128 } while (iterations--);
129
130 pr_err("QAT: Read CSR timeout\n");
131 return -EFAULT;
132}
133
134static int qat_hal_wr_ae_csr(struct icp_qat_fw_loader_handle *handle,
135 unsigned char ae, unsigned int csr,
136 unsigned int value)
137{
138 unsigned int iterations = CSR_RETRY_TIMES;
139
140 do {
141 SET_AE_CSR(handle, ae, csr, value);
142 if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
143 return 0;
144 } while (iterations--);
145
146 pr_err("QAT: Write CSR timeout\n");
147 return -EFAULT;
148}
149
150static void qat_hal_get_wakeup_event(struct icp_qat_fw_loader_handle *handle,
151 unsigned char ae, unsigned char ctx,
152 unsigned int *events)
153{
154 unsigned int cur_ctx;
155
156 qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
157 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
158 qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT, events);
159 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
160}
161
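/*
 * Wait for at least 'cycles' AE cycles, measured with the 16-bit
 * PROFILE_COUNT counter.  When chk_inactive is set, the wait may end
 * early (after a minimum of eight cycles) once the context arbiter
 * reports that the AE is no longer executing (ACS_ABO clear).
 */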
162static int qat_hal_wait_cycles(struct icp_qat_fw_loader_handle *handle,
163 unsigned char ae, unsigned int cycles,
164 int chk_inactive)
165{
166 unsigned int base_cnt = 0, cur_cnt = 0;
167 unsigned int csr = (1 << ACS_ABO_BITPOS);
168 int times = MAX_RETRY_TIMES;
169 int elapsed_cycles = 0;
170
171 qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &base_cnt);
172 base_cnt &= 0xffff;
173 while ((int)cycles > elapsed_cycles && times--) {
174 if (chk_inactive)
175 qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &csr);
176
177 qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &cur_cnt);
178 cur_cnt &= 0xffff;
179 elapsed_cycles = cur_cnt - base_cnt;
180
181 if (elapsed_cycles < 0)
182 elapsed_cycles += 0x10000;
183
184 /* ensure at least 8 time cycles have elapsed in wait_cycles */
185 if (elapsed_cycles >= 8 && !(csr & (1 << ACS_ABO_BITPOS)))
186 return 0;
187 }
188 if (!times) {
189 pr_err("QAT: wait_num_cycles time out\n");
190 return -EFAULT;
191 }
192 return 0;
193}
194
195#define CLR_BIT(wrd, bit) (wrd & ~(1 << bit))
196#define SET_BIT(wrd, bit) (wrd | 1 << bit)
197
198int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle,
199 unsigned char ae, unsigned char mode)
200{
201 unsigned int csr, new_csr;
202
203 if ((mode != 4) && (mode != 8)) {
204 pr_err("QAT: bad ctx mode=%d\n", mode);
205 return -EINVAL;
206 }
207
208 /* Sets the acceleration engine context mode to either four or eight */
209 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
210 csr = IGNORE_W1C_MASK & csr;
211 new_csr = (mode == 4) ?
212 SET_BIT(csr, CE_INUSE_CONTEXTS_BITPOS) :
213 CLR_BIT(csr, CE_INUSE_CONTEXTS_BITPOS);
214 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
215 return 0;
216}
217
218int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle,
219 unsigned char ae, unsigned char mode)
220{
221 unsigned int csr, new_csr;
222
223 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
224 csr &= IGNORE_W1C_MASK;
225
226 new_csr = (mode) ?
227 SET_BIT(csr, CE_NN_MODE_BITPOS) :
228 CLR_BIT(csr, CE_NN_MODE_BITPOS);
229
230 if (new_csr != csr)
231 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
232
233 return 0;
234}
235
236int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle,
237 unsigned char ae, enum icp_qat_uof_regtype lm_type,
238 unsigned char mode)
239{
240 unsigned int csr, new_csr;
241
242 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
243 csr &= IGNORE_W1C_MASK;
244 switch (lm_type) {
245 case ICP_LMEM0:
246 new_csr = (mode) ?
247 SET_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS) :
248 CLR_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS);
249 break;
250 case ICP_LMEM1:
251 new_csr = (mode) ?
252 SET_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS) :
253 CLR_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS);
254 break;
255 default:
256 pr_err("QAT: bad lmType=0x%x\n", lm_type);
257 return -EINVAL;
258 }
259
260 if (new_csr != csr)
261 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
262 return 0;
263}
264
265static unsigned short qat_hal_get_reg_addr(unsigned int type,
266 unsigned short reg_num)
267{
268 unsigned short reg_addr;
269 switch (type) {
270 case ICP_GPA_ABS:
271 case ICP_GPB_ABS:
272 reg_addr = 0x80 | (reg_num & 0x7f);
273 break;
274 case ICP_GPA_REL:
275 case ICP_GPB_REL:
276 reg_addr = reg_num & 0x1f;
277 break;
278 case ICP_SR_RD_REL:
279 case ICP_SR_WR_REL:
280 case ICP_SR_REL:
281 reg_addr = 0x180 | (reg_num & 0x1f);
282 break;
283 case ICP_SR_ABS:
284 reg_addr = 0x140 | ((reg_num & 0x3) << 1);
285 break;
286 case ICP_DR_RD_REL:
287 case ICP_DR_WR_REL:
288 case ICP_DR_REL:
289 reg_addr = 0x1c0 | (reg_num & 0x1f);
290 break;
291 case ICP_DR_ABS:
292 reg_addr = 0x100 | ((reg_num & 0x3) << 1);
293 break;
294 case ICP_NEIGH_REL:
295 reg_addr = 0x280 | (reg_num & 0x1f);
296 break;
297 case ICP_LMEM0:
298 reg_addr = 0x200;
299 break;
300 case ICP_LMEM1:
301 reg_addr = 0x220;
302 break;
303 case ICP_NO_DEST:
304 reg_addr = 0x300 | (reg_num & 0xff);
305 break;
306 default:
307 reg_addr = BAD_REGADDR;
308 break;
309 }
310 return reg_addr;
311}
312
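/* Put all AEs and accelerator slices into reset via the ICP_RESET CSR. */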
313void qat_hal_reset(struct icp_qat_fw_loader_handle *handle)
314{
315 unsigned int ae_reset_csr;
316
317 ae_reset_csr = GET_GLB_CSR(handle, ICP_RESET);
318 ae_reset_csr |= handle->hal_handle->ae_mask << RST_CSR_AE_LSB;
319 ae_reset_csr |= handle->hal_handle->slice_mask << RST_CSR_QAT_LSB;
320 SET_GLB_CSR(handle, ICP_RESET, ae_reset_csr);
321}
322
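/*
 * Write an indirect (per-context) CSR for every context selected in
 * ctx_mask by temporarily steering CSR_CTX_POINTER to each context and
 * restoring the original context pointer afterwards.
 */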
323static void qat_hal_wr_indr_csr(struct icp_qat_fw_loader_handle *handle,
324 unsigned char ae, unsigned int ctx_mask,
325 unsigned int ae_csr, unsigned int csr_val)
326{
327 unsigned int ctx, cur_ctx;
328
329 qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
330
331 for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
332 if (!(ctx_mask & (1 << ctx)))
333 continue;
334 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
335 qat_hal_wr_ae_csr(handle, ae, ae_csr, csr_val);
336 }
337
338 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
339}
340
341static void qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle,
342 unsigned char ae, unsigned char ctx,
343 unsigned int ae_csr, unsigned int *csr_val)
344{
345 unsigned int cur_ctx;
346
347 qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
348 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
349 qat_hal_rd_ae_csr(handle, ae, ae_csr, csr_val);
350 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
351}
352
353static void qat_hal_put_sig_event(struct icp_qat_fw_loader_handle *handle,
354 unsigned char ae, unsigned int ctx_mask,
355 unsigned int events)
356{
357 unsigned int ctx, cur_ctx;
358
359 qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
360 for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
361 if (!(ctx_mask & (1 << ctx)))
362 continue;
363 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
364 qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_INDIRECT, events);
365 }
366 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
367}
368
369static void qat_hal_put_wakeup_event(struct icp_qat_fw_loader_handle *handle,
370 unsigned char ae, unsigned int ctx_mask,
371 unsigned int events)
372{
373 unsigned int ctx, cur_ctx;
374
375 qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
376 for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
377 if (!(ctx_mask & (1 << ctx)))
378 continue;
379 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
380 qat_hal_wr_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT,
381 events);
382 }
383 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
384}
385
386static int qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle *handle)
387{
388 unsigned int base_cnt, cur_cnt;
389 unsigned char ae;
390 unsigned int times = MAX_RETRY_TIMES;
391
392 for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) {
393 if (!(handle->hal_handle->ae_mask & (1 << ae)))
394 continue;
395
396 qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT,
397 (unsigned int *)&base_cnt);
398 base_cnt &= 0xffff;
399
400 do {
401 qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT,
402 (unsigned int *)&cur_cnt);
403 cur_cnt &= 0xffff;
404 } while (times-- && (cur_cnt == base_cnt));
405
406 if (!times) {
407 pr_err("QAT: AE%d is inactive!!\n", ae);
408 return -EFAULT;
409 }
410 }
411
412 return 0;
413}
414
415static void qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle)
416{
417 unsigned int misc_ctl;
418 unsigned char ae;
419
420 /* stop the timestamp timers */
421 misc_ctl = GET_GLB_CSR(handle, MISC_CONTROL);
422 if (misc_ctl & MC_TIMESTAMP_ENABLE)
423 SET_GLB_CSR(handle, MISC_CONTROL, misc_ctl &
424 (~MC_TIMESTAMP_ENABLE));
425
426 for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) {
427 if (!(handle->hal_handle->ae_mask & (1 << ae)))
428 continue;
429 qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_LOW, 0);
430 qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_HIGH, 0);
431 }
432 /* start timestamp timers */
433 SET_GLB_CSR(handle, MISC_CONTROL, misc_ctl | MC_TIMESTAMP_ENABLE);
434}
435
436#define ESRAM_AUTO_TINIT (1<<2)
437#define ESRAM_AUTO_TINIT_DONE (1<<3)
438#define ESRAM_AUTO_INIT_USED_CYCLES (1640)
439#define ESRAM_AUTO_INIT_CSR_OFFSET 0xC1C
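/*
 * Trigger the eSRAM auto-initialization (ESRAM_AUTO_TINIT) and poll
 * until the hardware reports completion (ESRAM_AUTO_TINIT_DONE).
 */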
440static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle)
441{
442 void __iomem *csr_addr = handle->hal_ep_csr_addr_v +
443 ESRAM_AUTO_INIT_CSR_OFFSET;
444 unsigned int csr_val, times = 30;
445
446 csr_val = ADF_CSR_RD(csr_addr, 0);
447 if ((csr_val & ESRAM_AUTO_TINIT) && (csr_val & ESRAM_AUTO_TINIT_DONE))
448 return 0;
449
450 csr_val = ADF_CSR_RD(csr_addr, 0);
451 csr_val |= ESRAM_AUTO_TINIT;
452 ADF_CSR_WR(csr_addr, 0, csr_val);
453
454 do {
455 qat_hal_wait_cycles(handle, 0, ESRAM_AUTO_INIT_USED_CYCLES, 0);
456 csr_val = ADF_CSR_RD(csr_addr, 0);
457 } while (!(csr_val & ESRAM_AUTO_TINIT_DONE) && times--);
458 if (!times) {
459 pr_err("QAT: Failed to init eSRAM!\n");
460 return -EFAULT;
461 }
462 return 0;
463}
464
465#define SHRAM_INIT_CYCLES 2060
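/*
 * Take the AEs and accelerator slices out of reset: clear the reset
 * bits, re-enable the clocks, program power-up defaults for every
 * enabled AE, initialize the eSRAM and restart the timestamp counters.
 */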
466int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle)
467{
468 unsigned int ae_reset_csr;
469 unsigned char ae;
470 unsigned int clk_csr;
471 unsigned int times = 100;
472 unsigned int csr;
473
474 /* write to the reset csr */
475 ae_reset_csr = GET_GLB_CSR(handle, ICP_RESET);
476 ae_reset_csr &= ~(handle->hal_handle->ae_mask << RST_CSR_AE_LSB);
477 ae_reset_csr &= ~(handle->hal_handle->slice_mask << RST_CSR_QAT_LSB);
478 do {
479 SET_GLB_CSR(handle, ICP_RESET, ae_reset_csr);
480 if (!(times--))
481 goto out_err;
482 csr = GET_GLB_CSR(handle, ICP_RESET);
483 } while ((handle->hal_handle->ae_mask |
484 (handle->hal_handle->slice_mask << RST_CSR_QAT_LSB)) & csr);
485 /* enable clock */
486 clk_csr = GET_GLB_CSR(handle, ICP_GLOBAL_CLK_ENABLE);
487 clk_csr |= handle->hal_handle->ae_mask << 0;
488 clk_csr |= handle->hal_handle->slice_mask << 20;
489 SET_GLB_CSR(handle, ICP_GLOBAL_CLK_ENABLE, clk_csr);
490 if (qat_hal_check_ae_alive(handle))
491 goto out_err;
492
493 /* Set undefined power-up/reset states to reasonable default values */
494 for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) {
495 if (!(handle->hal_handle->ae_mask & (1 << ae)))
496 continue;
497 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES,
498 INIT_CTX_ENABLE_VALUE);
499 qat_hal_wr_indr_csr(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX,
500 CTX_STS_INDIRECT,
501 handle->hal_handle->upc_mask &
502 INIT_PC_VALUE);
503 qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE);
504 qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE);
505 qat_hal_put_wakeup_event(handle, ae,
506 ICP_QAT_UCLO_AE_ALL_CTX,
507 INIT_WAKEUP_EVENTS_VALUE);
508 qat_hal_put_sig_event(handle, ae,
509 ICP_QAT_UCLO_AE_ALL_CTX,
510 INIT_SIG_EVENTS_VALUE);
511 }
512 if (qat_hal_init_esram(handle))
513 goto out_err;
514 if (qat_hal_wait_cycles(handle, 0, SHRAM_INIT_CYCLES, 0))
515 goto out_err;
516 qat_hal_reset_timestamp(handle);
517
518 return 0;
519out_err:
520 pr_err("QAT: failed to get device out of reset\n");
521 return -EFAULT;
522}
523
524static void qat_hal_disable_ctx(struct icp_qat_fw_loader_handle *handle,
525 unsigned char ae, unsigned int ctx_mask)
526{
527 unsigned int ctx;
528
529 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx);
530 ctx &= IGNORE_W1C_MASK &
531 (~((ctx_mask & ICP_QAT_UCLO_AE_ALL_CTX) << CE_ENABLE_BITPOS));
532 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
533}
534
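/*
 * Microstore words carry seven ECC bits in bits 44-50; each ECC bit is
 * the parity of the instruction bits selected by a fixed mask and must
 * be recomputed before a word is written to the ustore.
 */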
535static uint64_t qat_hal_parity_64bit(uint64_t word)
536{
537 word ^= word >> 1;
538 word ^= word >> 2;
539 word ^= word >> 4;
540 word ^= word >> 8;
541 word ^= word >> 16;
542 word ^= word >> 32;
543 return word & 1;
544}
545
546static uint64_t qat_hal_set_uword_ecc(uint64_t uword)
547{
548 uint64_t bit0_mask = 0xff800007fffULL, bit1_mask = 0x1f801ff801fULL,
549 bit2_mask = 0xe387e0781e1ULL, bit3_mask = 0x7cb8e388e22ULL,
550 bit4_mask = 0xaf5b2c93244ULL, bit5_mask = 0xf56d5525488ULL,
551 bit6_mask = 0xdaf69a46910ULL;
552
553 /* clear the ecc bits */
554 uword &= ~(0x7fULL << 0x2C);
555 uword |= qat_hal_parity_64bit(bit0_mask & uword) << 0x2C;
556 uword |= qat_hal_parity_64bit(bit1_mask & uword) << 0x2D;
557 uword |= qat_hal_parity_64bit(bit2_mask & uword) << 0x2E;
558 uword |= qat_hal_parity_64bit(bit3_mask & uword) << 0x2F;
559 uword |= qat_hal_parity_64bit(bit4_mask & uword) << 0x30;
560 uword |= qat_hal_parity_64bit(bit5_mask & uword) << 0x31;
561 uword |= qat_hal_parity_64bit(bit6_mask & uword) << 0x32;
562 return uword;
563}
564
565void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
566 unsigned char ae, unsigned int uaddr,
567 unsigned int words_num, uint64_t *uword)
568{
569 unsigned int ustore_addr;
570 unsigned int i;
571
572 qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
573 uaddr |= UA_ECS;
574 qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
575 for (i = 0; i < words_num; i++) {
576 unsigned int uwrd_lo, uwrd_hi;
577 uint64_t tmp;
578
579 tmp = qat_hal_set_uword_ecc(uword[i]);
580 uwrd_lo = (unsigned int)(tmp & 0xffffffff);
581 uwrd_hi = (unsigned int)(tmp >> 0x20);
582 qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
583 qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
584 }
585 qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
586}
587
588static void qat_hal_enable_ctx(struct icp_qat_fw_loader_handle *handle,
589 unsigned char ae, unsigned int ctx_mask)
590{
591 unsigned int ctx;
592
593 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx);
594 ctx &= IGNORE_W1C_MASK;
595 ctx_mask &= (ctx & CE_INUSE_CONTEXTS) ? 0x55 : 0xFF;
596 ctx |= (ctx_mask << CE_ENABLE_BITPOS);
597 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
598}
599
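/*
 * Zero the GPRs and transfer registers of every enabled AE by running
 * the canned 'inst' microcode on all contexts, then restore the
 * power-up default context state.
 */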
600static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
601{
602 unsigned char ae;
603 unsigned int ctx_mask = ICP_QAT_UCLO_AE_ALL_CTX;
604 int times = MAX_RETRY_TIMES;
605 unsigned int csr_val = 0;
606 unsigned short reg;
607 unsigned int savctx = 0;
608 int ret = 0;
609
610 for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) {
611 if (!(handle->hal_handle->ae_mask & (1 << ae)))
612 continue;
613 for (reg = 0; reg < ICP_QAT_UCLO_MAX_GPR_REG; reg++) {
614 qat_hal_init_rd_xfer(handle, ae, 0, ICP_SR_RD_ABS,
615 reg, 0);
616 qat_hal_init_rd_xfer(handle, ae, 0, ICP_DR_RD_ABS,
617 reg, 0);
618 }
619 qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
620 csr_val &= ~(1 << MMC_SHARE_CS_BITPOS);
621 qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val);
622 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr_val);
623 csr_val &= IGNORE_W1C_MASK;
624 csr_val |= CE_NN_MODE;
625 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, csr_val);
626 qat_hal_wr_uwords(handle, ae, 0, ARRAY_SIZE(inst),
627 (uint64_t *)inst);
628 qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
629 handle->hal_handle->upc_mask &
630 INIT_PC_VALUE);
631 qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
632 qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, 0);
633 qat_hal_put_wakeup_event(handle, ae, ctx_mask, XCWE_VOLUNTARY);
634 qat_hal_wr_indr_csr(handle, ae, ctx_mask,
635 CTX_SIG_EVENTS_INDIRECT, 0);
636 qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
637 qat_hal_enable_ctx(handle, ae, ctx_mask);
638 }
639 for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) {
640 if (!(handle->hal_handle->ae_mask & (1 << ae)))
641 continue;
642 /* wait for AE to finish */
643 do {
644 ret = qat_hal_wait_cycles(handle, ae, 20, 1);
645 } while (ret && times--);
646
647 if (!times) {
648 pr_err("QAT: clear GPR of AE %d failed\n", ae);
649 return -EINVAL;
650 }
651 qat_hal_disable_ctx(handle, ae, ctx_mask);
652 qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
653 savctx & ACS_ACNO);
654 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES,
655 INIT_CTX_ENABLE_VALUE);
656 qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
657 handle->hal_handle->upc_mask &
658 INIT_PC_VALUE);
659 qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE);
660 qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE);
661 qat_hal_put_wakeup_event(handle, ae, ctx_mask,
662 INIT_WAKEUP_EVENTS_VALUE);
663 qat_hal_put_sig_event(handle, ae, ctx_mask,
664 INIT_SIG_EVENTS_VALUE);
665 }
666 return 0;
667}
668
669#define ICP_DH895XCC_AE_OFFSET 0x20000
670#define ICP_DH895XCC_CAP_OFFSET (ICP_DH895XCC_AE_OFFSET + 0x10000)
671#define LOCAL_TO_XFER_REG_OFFSET 0x800
672#define ICP_DH895XCC_EP_OFFSET 0x3a000
673#define ICP_DH895XCC_PMISC_BAR 1
674int qat_hal_init(struct adf_accel_dev *accel_dev)
675{
676 unsigned char ae = 0;
677 unsigned int csr_val = 0;
678 unsigned int max_en_ae_num = 0;
679 struct icp_qat_fw_loader_handle *handle = NULL;
680 struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
681 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
682 struct adf_bar *bar = &pci_info->pci_bars[ICP_DH895XCC_PMISC_BAR];
683
684 handle = kzalloc(sizeof(*handle), GFP_KERNEL);
685 if (!handle)
686 goto out_handle;
687
688 handle->hal_cap_g_ctl_csr_addr_v = bar->virt_addr +
689 ICP_DH895XCC_CAP_OFFSET;
690 handle->hal_cap_ae_xfer_csr_addr_v = bar->virt_addr +
691 ICP_DH895XCC_AE_OFFSET;
692 handle->hal_ep_csr_addr_v = bar->virt_addr + ICP_DH895XCC_EP_OFFSET;
693 handle->hal_cap_ae_local_csr_addr_v =
694 handle->hal_cap_ae_xfer_csr_addr_v + LOCAL_TO_XFER_REG_OFFSET;
695
696 handle->hal_handle = kzalloc(sizeof(*handle->hal_handle), GFP_KERNEL);
697 if (!handle->hal_handle)
698 goto out_hal_handle;
699 handle->hal_handle->revision_id = accel_dev->accel_pci_dev.revid;
700 handle->hal_handle->ae_mask = hw_data->ae_mask;
701 handle->hal_handle->slice_mask = hw_data->accel_mask;
702 /* create AE objects */
703 handle->hal_handle->upc_mask = 0x1ffff;
704 handle->hal_handle->max_ustore = 0x4000;
705 for (ae = 0; ae < ICP_QAT_UCLO_MAX_AE; ae++) {
706 if (!(hw_data->ae_mask & (1 << ae)))
707 continue;
708 handle->hal_handle->aes[ae].free_addr = 0;
709 handle->hal_handle->aes[ae].free_size =
710 handle->hal_handle->max_ustore;
711 handle->hal_handle->aes[ae].ustore_size =
712 handle->hal_handle->max_ustore;
713 handle->hal_handle->aes[ae].live_ctx_mask =
714 ICP_QAT_UCLO_AE_ALL_CTX;
715 max_en_ae_num = ae;
716 }
717 handle->hal_handle->ae_max_num = max_en_ae_num;
718 /* take all AEs out of reset */
719 if (qat_hal_clr_reset(handle)) {
720 pr_err("QAT: qat_hal_clr_reset error\n");
721 goto out_err;
722 }
723 if (qat_hal_clear_gpr(handle))
724 goto out_err;
725 /* Set SIGNATURE_ENABLE[0] to 0x1 in order to enable ALU_OUT csr */
726 for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) {
727 if (!(hw_data->ae_mask & (1 << ae)))
728 continue;
729 qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE, &csr_val);
730 csr_val |= 0x1;
731 qat_hal_wr_ae_csr(handle, ae, SIGNATURE_ENABLE, csr_val);
732 }
733 accel_dev->fw_loader->fw_loader = handle;
734 return 0;
735
736out_err:
737 kfree(handle->hal_handle);
738out_hal_handle:
739 kfree(handle);
740out_handle:
741 return -EFAULT;
742}
743
744void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle)
745{
746 if (!handle)
747 return;
748 kfree(handle->hal_handle);
749 kfree(handle);
750}
751
752void qat_hal_start(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
753 unsigned int ctx_mask)
754{
755 qat_hal_put_wakeup_event(handle, ae, (~ctx_mask) &
756 ICP_QAT_UCLO_AE_ALL_CTX, 0x10000);
757 qat_hal_enable_ctx(handle, ae, ctx_mask);
758}
759
760void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
761 unsigned int ctx_mask)
762{
763 qat_hal_disable_ctx(handle, ae, ctx_mask);
764}
765
766void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle,
767 unsigned char ae, unsigned int ctx_mask, unsigned int upc)
768{
769 qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
770 handle->hal_handle->upc_mask & upc);
771}
772
773static void qat_hal_get_uwords(struct icp_qat_fw_loader_handle *handle,
774 unsigned char ae, unsigned int uaddr,
775 unsigned int words_num, uint64_t *uword)
776{
777 unsigned int i, uwrd_lo, uwrd_hi;
778 unsigned int ustore_addr, misc_control;
779
780 qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &misc_control);
781 qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL,
782 misc_control & 0xfffffffb);
783 qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
784 uaddr |= UA_ECS;
785 for (i = 0; i < words_num; i++) {
786 qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
787 uaddr++;
788 qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_LOWER, &uwrd_lo);
789 qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_UPPER, &uwrd_hi);
790 uword[i] = uwrd_hi;
791 uword[i] = (uword[i] << 0x20) | uwrd_lo;
792 }
793 qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, misc_control);
794 qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
795}
796
797static int qat_hal_count_bits(unsigned int word)
798{
799 int n = 0;
800
801 while (word) {
802 n++;
803 word &= word - 1;
804 }
805 return n;
806}
807
808void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle,
809 unsigned char ae, unsigned int uaddr,
810 unsigned int words_num, unsigned int *data)
811{
812 unsigned int i, ustore_addr;
813
814 qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
815 uaddr |= UA_ECS;
816 qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
817 for (i = 0; i < words_num; i++) {
818 unsigned int uwrd_lo, uwrd_hi, tmp;
819 uwrd_lo = ((data[i] & 0xfff0000) << 4) | (0x3 << 18) |
820 ((data[i] & 0xff00) << 2) |
821 (0x3 << 8) | (data[i] & 0xff);
822 uwrd_hi = (0xf << 4) | ((data[i] & 0xf0000000) >> 28);
823 uwrd_hi |= (qat_hal_count_bits(data[i] & 0xffff) & 0x1) << 8;
824 tmp = ((data[i] >> 0x10) & 0xffff);
825 uwrd_hi |= (qat_hal_count_bits(tmp) & 0x1) << 9;
826 qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
827 qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
828 }
829 qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
830}
831
832#define MAX_EXEC_INST 100
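/*
 * Temporarily load micro_inst into the ustore and execute it on the
 * given context: the current ustore contents (up to MAX_EXEC_INST
 * words) and all affected context CSRs are saved first and restored
 * once the microcode has finished.
 */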
833static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
834 unsigned char ae, unsigned char ctx,
835 uint64_t *micro_inst, unsigned int inst_num,
836 int code_off, unsigned int max_cycle,
837 unsigned int *endpc)
838{
839 uint64_t savuwords[MAX_EXEC_INST];
840 unsigned int ind_lm_addr0, ind_lm_addr1;
841 unsigned int ind_lm_addr_byte0, ind_lm_addr_byte1;
842 unsigned int ind_cnt_sig;
843 unsigned int ind_sig, act_sig;
844 unsigned int csr_val = 0, newcsr_val;
845 unsigned int savctx;
846 unsigned int savcc, wakeup_events, savpc;
847 unsigned int ctxarb_ctl, ctx_enables;
848
849 if ((inst_num > handle->hal_handle->max_ustore) || !micro_inst) {
850 pr_err("QAT: invalid instructions inst_num=%d, micro_inst=0x%p\n",
851 inst_num, (unsigned int *)micro_inst);
852 return -EINVAL;
853 }
854 /* save current context */
855 qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_0_INDIRECT, &ind_lm_addr0);
856 qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_1_INDIRECT, &ind_lm_addr1);
857 qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_0_BYTE_INDEX,
858 &ind_lm_addr_byte0);
859 qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_1_BYTE_INDEX,
860 &ind_lm_addr_byte1);
861 if (inst_num <= MAX_EXEC_INST)
862 qat_hal_get_uwords(handle, ae, 0, inst_num, savuwords);
863 qat_hal_get_wakeup_event(handle, ae, ctx, &wakeup_events);
864 qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT, &savpc);
865 savpc = (savpc & handle->hal_handle->upc_mask) >> 0;
866 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
867 ctx_enables &= IGNORE_W1C_MASK;
868 qat_hal_rd_ae_csr(handle, ae, CC_ENABLE, &savcc);
869 qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
870 qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_ctl);
871 qat_hal_rd_indr_csr(handle, ae, ctx, FUTURE_COUNT_SIGNAL_INDIRECT,
872 &ind_cnt_sig);
873 qat_hal_rd_indr_csr(handle, ae, ctx, CTX_SIG_EVENTS_INDIRECT, &ind_sig);
874 qat_hal_rd_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, &act_sig);
875 /* execute micro codes */
876 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
877 qat_hal_wr_uwords(handle, ae, 0, inst_num, micro_inst);
878 qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT, 0);
879 qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, ctx & ACS_ACNO);
880 if (code_off)
881 qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc & 0xffffdfff);
882 qat_hal_put_wakeup_event(handle, ae, (1 << ctx), XCWE_VOLUNTARY);
883 qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_SIG_EVENTS_INDIRECT, 0);
884 qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
885 qat_hal_enable_ctx(handle, ae, (1 << ctx));
886 /* wait for micro codes to finish */
887 if (qat_hal_wait_cycles(handle, ae, max_cycle, 1) != 0)
888 return -EFAULT;
889 if (endpc) {
890 unsigned int ctx_status;
891 qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT,
892 &ctx_status);
893 *endpc = ctx_status & handle->hal_handle->upc_mask;
894 }
895 /* restore to saved context */
896 qat_hal_disable_ctx(handle, ae, (1 << ctx));
897 if (inst_num <= MAX_EXEC_INST)
898 qat_hal_wr_uwords(handle, ae, 0, inst_num, savuwords);
899 qat_hal_put_wakeup_event(handle, ae, (1 << ctx), wakeup_events);
900 qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT,
901 handle->hal_handle->upc_mask & savpc);
902 qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
903 newcsr_val = CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS);
904 qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val);
905 qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc);
906 qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, savctx & ACS_ACNO);
907 qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_ctl);
908 qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
909 LM_ADDR_0_INDIRECT, ind_lm_addr0);
910 qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
911 LM_ADDR_1_INDIRECT, ind_lm_addr1);
912 qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
913 INDIRECT_LM_ADDR_0_BYTE_INDEX, ind_lm_addr_byte0);
914 qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
915 INDIRECT_LM_ADDR_1_BYTE_INDEX, ind_lm_addr_byte1);
916 qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
917 FUTURE_COUNT_SIGNAL_INDIRECT, ind_cnt_sig);
918 qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
919 CTX_SIG_EVENTS_INDIRECT, ind_sig);
920 qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, act_sig);
921 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
922
923 return 0;
924}
925
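/*
 * Read a context-relative register by executing a single ALU
 * instruction on the AE and sampling the result from the ALU_OUT CSR.
 */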
926static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle,
927 unsigned char ae, unsigned char ctx,
928 enum icp_qat_uof_regtype reg_type,
929 unsigned short reg_num, unsigned int *data)
930{
931 unsigned int savctx, uaddr, uwrd_lo, uwrd_hi;
932 unsigned int ctxarb_cntl, ustore_addr, ctx_enables;
933 unsigned short reg_addr;
934 int status = 0;
935 uint64_t insts, savuword;
936
937 reg_addr = qat_hal_get_reg_addr(reg_type, reg_num);
938 if (reg_addr == BAD_REGADDR) {
939 pr_err("QAT: bad regaddr=0x%x\n", reg_addr);
940 return -EINVAL;
941 }
942 switch (reg_type) {
943 case ICP_GPA_REL:
944 insts = 0xA070000000ull | (reg_addr & 0x3ff);
945 break;
946 default:
947 insts = (uint64_t)0xA030000000ull | ((reg_addr & 0x3ff) << 10);
948 break;
949 }
950 qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
951 qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_cntl);
952 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
953 ctx_enables &= IGNORE_W1C_MASK;
954 if (ctx != (savctx & ACS_ACNO))
955 qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
956 ctx & ACS_ACNO);
957 qat_hal_get_uwords(handle, ae, 0, 1, &savuword);
958 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
959 qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
960 uaddr = UA_ECS;
961 qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
962 insts = qat_hal_set_uword_ecc(insts);
963 uwrd_lo = (unsigned int)(insts & 0xffffffff);
964 uwrd_hi = (unsigned int)(insts >> 0x20);
965 qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
966 qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
967 qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
968 /* delay for at least 8 cycles */
969 qat_hal_wait_cycles(handle, ae, 0x8, 0);
970 /*
971 * read ALU output
972 * the instruction should have been executed
973 * prior to clearing the ECS in putUwords
974 */
975 qat_hal_rd_ae_csr(handle, ae, ALU_OUT, data);
976 qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
977 qat_hal_wr_uwords(handle, ae, 0, 1, &savuword);
978 if (ctx != (savctx & ACS_ACNO))
979 qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
980 savctx & ACS_ACNO);
981 qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_cntl);
982 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
983
984 return status;
985}
986
987static int qat_hal_wr_rel_reg(struct icp_qat_fw_loader_handle *handle,
988 unsigned char ae, unsigned char ctx,
989 enum icp_qat_uof_regtype reg_type,
990 unsigned short reg_num, unsigned int data)
991{
992 unsigned short src_hiaddr, src_lowaddr, dest_addr, data16hi, data16lo;
993 uint64_t insts[] = {
994 0x0F440000000ull,
995 0x0F040000000ull,
996 0x0F0000C0300ull,
997 0x0E000010000ull
998 };
999 const int num_inst = ARRAY_SIZE(insts), code_off = 1;
1000 const int imm_w1 = 0, imm_w0 = 1;
1001
1002 dest_addr = qat_hal_get_reg_addr(reg_type, reg_num);
1003 if (dest_addr == BAD_REGADDR) {
1004 pr_err("QAT: bad destAddr=0x%x\n", dest_addr);
1005 return -EINVAL;
1006 }
1007
1008 data16lo = 0xffff & data;
1009 data16hi = 0xffff & (data >> 0x10);
1010 src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)
1011 (0xff & data16hi));
1012 src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)
1013 (0xff & data16lo));
1014 switch (reg_type) {
1015 case ICP_GPA_REL:
1016 insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) |
1017 ((src_hiaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);
1018 insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) |
1019 ((src_lowaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);
1020 break;
1021 default:
1022 insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) |
1023 ((dest_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff);
1024
1025 insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) |
1026 ((dest_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff);
1027 break;
1028 }
1029
1030 return qat_hal_exec_micro_inst(handle, ae, ctx, insts, num_inst,
1031 code_off, num_inst * 0x5, NULL);
1032}
1033
1034int qat_hal_get_ins_num(void)
1035{
1036 return ARRAY_SIZE(inst_4b);
1037}
1038
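/*
 * Append the inst_4b template to micro_inst and patch in the local
 * memory address and the 32-bit value to store; returns the number of
 * microwords added.
 */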
1039static int qat_hal_concat_micro_code(uint64_t *micro_inst,
1040 unsigned int inst_num, unsigned int size,
1041 unsigned int addr, unsigned int *value)
1042{
1043 int i, val_indx;
1044 unsigned int cur_value;
1045 const uint64_t *inst_arr;
1046 int fixup_offset;
1047 int usize = 0;
1048 int orig_num;
1049
1050 orig_num = inst_num;
1051 val_indx = 0;
1052 cur_value = value[val_indx++];
1053 inst_arr = inst_4b;
1054 usize = ARRAY_SIZE(inst_4b);
1055 fixup_offset = inst_num;
1056 for (i = 0; i < usize; i++)
1057 micro_inst[inst_num++] = inst_arr[i];
1058 INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], (addr));
1059 fixup_offset++;
1060 INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], 0);
1061 fixup_offset++;
1062 INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0));
1063 fixup_offset++;
1064 INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0x10));
1065
1066 return inst_num - orig_num;
1067}
1068
1069static int qat_hal_exec_micro_init_lm(struct icp_qat_fw_loader_handle *handle,
1070 unsigned char ae, unsigned char ctx,
1071 int *pfirst_exec, uint64_t *micro_inst,
1072 unsigned int inst_num)
1073{
1074 int stat = 0;
1075 unsigned int gpra0 = 0, gpra1 = 0, gpra2 = 0;
1076 unsigned int gprb0 = 0, gprb1 = 0;
1077
1078 if (*pfirst_exec) {
1079 qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, &gpra0);
1080 qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, &gpra1);
1081 qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, &gpra2);
1082 qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, &gprb0);
1083 qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, &gprb1);
1084 *pfirst_exec = 0;
1085 }
1086 stat = qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, inst_num, 1,
1087 inst_num * 0x5, NULL);
1088 if (stat != 0)
1089 return -EFAULT;
1090 qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, gpra0);
1091 qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, gpra1);
1092 qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, gpra2);
1093 qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, gprb0);
1094 qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, gprb1);
1095
1096 return 0;
1097}
1098
1099int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle,
1100 unsigned char ae,
1101 struct icp_qat_uof_batch_init *lm_init_header)
1102{
1103 struct icp_qat_uof_batch_init *plm_init;
1104 uint64_t *micro_inst_arry;
1105 int micro_inst_num;
1106 int alloc_inst_size;
1107 int first_exec = 1;
1108 int stat = 0;
1109
1110 plm_init = lm_init_header->next;
1111 alloc_inst_size = lm_init_header->size;
1112 if ((unsigned int)alloc_inst_size > handle->hal_handle->max_ustore)
1113 alloc_inst_size = handle->hal_handle->max_ustore;
1114 micro_inst_arry = kmalloc(alloc_inst_size * sizeof(uint64_t),
1115 GFP_KERNEL);
1116 if (!micro_inst_arry)
1117 return -ENOMEM;
1118 micro_inst_num = 0;
1119 while (plm_init) {
1120 unsigned int addr, *value, size;
1121
1122 ae = plm_init->ae;
1123 addr = plm_init->addr;
1124 value = plm_init->value;
1125 size = plm_init->size;
1126 micro_inst_num += qat_hal_concat_micro_code(micro_inst_arry,
1127 micro_inst_num,
1128 size, addr, value);
1129 plm_init = plm_init->next;
1130 }
1131 /* exec micro codes */
1132 if (micro_inst_arry && (micro_inst_num > 0)) {
1133 micro_inst_arry[micro_inst_num++] = 0x0E000010000ull;
1134 stat = qat_hal_exec_micro_init_lm(handle, ae, 0, &first_exec,
1135 micro_inst_arry,
1136 micro_inst_num);
1137 }
1138 kfree(micro_inst_arry);
1139 return stat;
1140}
1141
1142static int qat_hal_put_rel_rd_xfer(struct icp_qat_fw_loader_handle *handle,
1143 unsigned char ae, unsigned char ctx,
1144 enum icp_qat_uof_regtype reg_type,
1145 unsigned short reg_num, unsigned int val)
1146{
1147 int status = 0;
1148 unsigned int reg_addr;
1149 unsigned int ctx_enables;
1150 unsigned short mask;
1151 unsigned short dr_offset = 0x10;
1152
1153 status = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
1154 if (CE_INUSE_CONTEXTS & ctx_enables) {
1155 if (ctx & 0x1) {
1156 pr_err("QAT: bad 4-ctx mode, ctx=0x%x\n", ctx);
1157 return -EINVAL;
1158 }
1159 mask = 0x1f;
1160 dr_offset = 0x20;
1161 } else {
1162 mask = 0x0f;
1163 }
1164 if (reg_num & ~mask)
1165 return -EINVAL;
1166 reg_addr = reg_num + (ctx << 0x5);
1167 switch (reg_type) {
1168 case ICP_SR_RD_REL:
1169 case ICP_SR_REL:
1170 SET_AE_XFER(handle, ae, reg_addr, val);
1171 break;
1172 case ICP_DR_RD_REL:
1173 case ICP_DR_REL:
1174 SET_AE_XFER(handle, ae, (reg_addr + dr_offset), val);
1175 break;
1176 default:
1177 status = -EINVAL;
1178 break;
1179 }
1180 return status;
1181}
1182
1183static int qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle,
1184 unsigned char ae, unsigned char ctx,
1185 enum icp_qat_uof_regtype reg_type,
1186 unsigned short reg_num, unsigned int data)
1187{
1188 unsigned int gprval, ctx_enables;
1189 unsigned short src_hiaddr, src_lowaddr, gpr_addr, xfr_addr, data16hi,
1190 data16low;
1191 unsigned short reg_mask;
1192 int status = 0;
1193 uint64_t micro_inst[] = {
1194 0x0F440000000ull,
1195 0x0F040000000ull,
1196 0x0A000000000ull,
1197 0x0F0000C0300ull,
1198 0x0E000010000ull
1199 };
1200 const int num_inst = ARRAY_SIZE(micro_inst), code_off = 1;
1201 const unsigned short gprnum = 0, dly = num_inst * 0x5;
1202
1203 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
1204 if (CE_INUSE_CONTEXTS & ctx_enables) {
1205 if (ctx & 0x1) {
1206 pr_err("QAT: bad 4-ctx mode, ctx=0x%x\n", ctx);
1207 return -EINVAL;
1208 }
1209 reg_mask = (unsigned short)~0x1f;
1210 } else {
1211 reg_mask = (unsigned short)~0xf;
1212 }
1213 if (reg_num & reg_mask)
1214 return -EINVAL;
1215 xfr_addr = qat_hal_get_reg_addr(reg_type, reg_num);
1216 if (xfr_addr == BAD_REGADDR) {
1217 pr_err("QAT: bad xfrAddr=0x%x\n", xfr_addr);
1218 return -EINVAL;
1219 }
1220 qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, &gprval);
1221 gpr_addr = qat_hal_get_reg_addr(ICP_GPB_REL, gprnum);
1222 data16low = 0xffff & data;
1223 data16hi = 0xffff & (data >> 0x10);
1224 src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
1225 (unsigned short)(0xff & data16hi));
1226 src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
1227 (unsigned short)(0xff & data16low));
1228 micro_inst[0] = micro_inst[0x0] | ((data16hi >> 8) << 20) |
1229 ((gpr_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff);
1230 micro_inst[1] = micro_inst[0x1] | ((data16low >> 8) << 20) |
1231 ((gpr_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff);
1232 micro_inst[0x2] = micro_inst[0x2] |
1233 ((xfr_addr & 0x3ff) << 20) | ((gpr_addr & 0x3ff) << 10);
1234 status = qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, num_inst,
1235 code_off, dly, NULL);
1236 qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, gprval);
1237 return status;
1238}
1239
1240static int qat_hal_put_rel_nn(struct icp_qat_fw_loader_handle *handle,
1241 unsigned char ae, unsigned char ctx,
1242 unsigned short nn, unsigned int val)
1243{
1244 unsigned int ctx_enables;
1245 int stat = 0;
1246
1247 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
1248 ctx_enables &= IGNORE_W1C_MASK;
1249 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables | CE_NN_MODE);
1250
1251 stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, ICP_NEIGH_REL, nn, val);
1252 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
1253 return stat;
1254}
1255
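/*
 * Convert an absolute register number into the equivalent
 * context-relative register number and owning context, taking the
 * 4-context vs 8-context mode of the AE into account.
 */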
1256static int qat_hal_convert_abs_to_rel(struct icp_qat_fw_loader_handle
1257 *handle, unsigned char ae,
1258 unsigned short absreg_num,
1259 unsigned short *relreg,
1260 unsigned char *ctx)
1261{
1262 unsigned int ctx_enables;
1263
1264 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
1265 if (ctx_enables & CE_INUSE_CONTEXTS) {
1266 /* 4-ctx mode */
1267 *relreg = absreg_num & 0x1F;
1268 *ctx = (absreg_num >> 0x4) & 0x6;
1269 } else {
1270 /* 8-ctx mode */
1271 *relreg = absreg_num & 0x0F;
1272 *ctx = (absreg_num >> 0x4) & 0x7;
1273 }
1274 return 0;
1275}
1276
1277int qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle,
1278 unsigned char ae, unsigned char ctx_mask,
1279 enum icp_qat_uof_regtype reg_type,
1280 unsigned short reg_num, unsigned int regdata)
1281{
1282 int stat = 0;
1283 unsigned short reg;
1284 unsigned char ctx = 0;
1285 enum icp_qat_uof_regtype type;
1286
1287 if (reg_num >= ICP_QAT_UCLO_MAX_GPR_REG)
1288 return -EINVAL;
1289
1290 do {
1291 if (ctx_mask == 0) {
1292 qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
1293 &ctx);
1294 type = reg_type - 1;
1295 } else {
1296 reg = reg_num;
1297 type = reg_type;
1298 if (!test_bit(ctx, (unsigned long *)&ctx_mask))
1299 continue;
1300 }
1301 stat = qat_hal_wr_rel_reg(handle, ae, ctx, type, reg, regdata);
1302 if (stat) {
1303 pr_err("QAT: write gpr fail\n");
1304 return -EINVAL;
1305 }
1306 } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
1307
1308 return 0;
1309}
1310
1311int qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle,
1312 unsigned char ae, unsigned char ctx_mask,
1313 enum icp_qat_uof_regtype reg_type,
1314 unsigned short reg_num, unsigned int regdata)
1315{
1316 int stat = 0;
1317 unsigned short reg;
1318 unsigned char ctx = 0;
1319 enum icp_qat_uof_regtype type;
1320
1321 if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG)
1322 return -EINVAL;
1323
1324 do {
1325 if (ctx_mask == 0) {
1326 qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
1327 &ctx);
1328 type = reg_type - 3;
1329 } else {
1330 reg = reg_num;
1331 type = reg_type;
1332 if (!test_bit(ctx, (unsigned long *)&ctx_mask))
1333 continue;
1334 }
1335 stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, type, reg,
1336 regdata);
1337 if (stat) {
1338 pr_err("QAT: write wr xfer fail\n");
1339 return -EINVAL;
1340 }
1341 } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
1342
1343 return 0;
1344}
1345
1346int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle,
1347 unsigned char ae, unsigned char ctx_mask,
1348 enum icp_qat_uof_regtype reg_type,
1349 unsigned short reg_num, unsigned int regdata)
1350{
1351 int stat = 0;
1352 unsigned short reg;
1353 unsigned char ctx = 0;
1354 enum icp_qat_uof_regtype type;
1355
1356 if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG)
1357 return -EINVAL;
1358
1359 do {
1360 if (ctx_mask == 0) {
1361 qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
1362 &ctx);
1363 type = reg_type - 3;
1364 } else {
1365 reg = reg_num;
1366 type = reg_type;
1367 if (!test_bit(ctx, (unsigned long *)&ctx_mask))
1368 continue;
1369 }
1370 stat = qat_hal_put_rel_rd_xfer(handle, ae, ctx, type, reg,
1371 regdata);
1372 if (stat) {
1373 pr_err("QAT: write rd xfer fail\n");
1374 return -EINVAL;
1375 }
1376 } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
1377
1378 return 0;
1379}
1380
1381int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle,
1382 unsigned char ae, unsigned char ctx_mask,
1383 unsigned short reg_num, unsigned int regdata)
1384{
1385 int stat = 0;
1386 unsigned char ctx;
1387
1388 if (ctx_mask == 0)
1389 return -EINVAL;
1390
1391 for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
1392 if (!test_bit(ctx, (unsigned long *)&ctx_mask))
1393 continue;
1394 stat = qat_hal_put_rel_nn(handle, ae, ctx, reg_num, regdata);
1395 if (stat) {
1396 pr_err("QAT: write neigh error\n");
1397 return -EINVAL;
1398 }
1399 }
1400
1401 return 0;
1402}