crypto: qat - move isr files to qat common so that they can be reused
drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <adf_accel_devices.h>
#include <adf_pf2vf_msg.h>
#include <adf_common_drv.h>
#include "adf_dh895xcc_hw_data.h"

/* Worker thread to service arbiter mappings based on dev SKUs */
static const uint32_t thrd_to_arb_map_sku4[] = {
	0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
	0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
	0x00000000, 0x00000000, 0x00000000, 0x00000000
};

static const uint32_t thrd_to_arb_map_sku6[] = {
	0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
	0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
	0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222
};

static struct adf_hw_device_class dh895xcc_class = {
	.name = ADF_DH895XCC_DEVICE_NAME,
	.type = DEV_DH895XCC,
	.instances = 0
};

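/*
 * The fuse word flags disabled units, so invert it and mask off the
 * accelerator / accel engine bits to obtain the enable masks.
 */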
static uint32_t get_accel_mask(uint32_t fuse)
{
	return (~fuse) >> ADF_DH895XCC_ACCELERATORS_REG_OFFSET &
			  ADF_DH895XCC_ACCELERATORS_MASK;
}

static uint32_t get_ae_mask(uint32_t fuse)
{
	return (~fuse) & ADF_DH895XCC_ACCELENGINES_MASK;
}

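/* Count the accelerators / accel engines enabled in the respective masks */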
static uint32_t get_num_accels(struct adf_hw_device_data *self)
{
	uint32_t i, ctr = 0;

	if (!self || !self->accel_mask)
		return 0;

	for (i = 0; i < ADF_DH895XCC_MAX_ACCELERATORS; i++) {
		if (self->accel_mask & (1 << i))
			ctr++;
	}
	return ctr;
}

static uint32_t get_num_aes(struct adf_hw_device_data *self)
{
	uint32_t i, ctr = 0;

	if (!self || !self->ae_mask)
		return 0;

	for (i = 0; i < ADF_DH895XCC_MAX_ACCELENGINES; i++) {
		if (self->ae_mask & (1 << i))
			ctr++;
	}
	return ctr;
}

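/* PCI BAR indices for the miscellaneous CSRs, the transport rings and SRAM */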
static uint32_t get_misc_bar_id(struct adf_hw_device_data *self)
{
	return ADF_DH895XCC_PMISC_BAR;
}

static uint32_t get_etr_bar_id(struct adf_hw_device_data *self)
{
	return ADF_DH895XCC_ETR_BAR;
}

static uint32_t get_sram_bar_id(struct adf_hw_device_data *self)
{
	return ADF_DH895XCC_SRAM_BAR;
}

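/* Derive the device SKU from the SKU bits of the fuse control word */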
static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
{
	int sku = (self->fuses & ADF_DH895XCC_FUSECTL_SKU_MASK)
	    >> ADF_DH895XCC_FUSECTL_SKU_SHIFT;

	switch (sku) {
	case ADF_DH895XCC_FUSECTL_SKU_1:
		return DEV_SKU_1;
	case ADF_DH895XCC_FUSECTL_SKU_2:
		return DEV_SKU_2;
	case ADF_DH895XCC_FUSECTL_SKU_3:
		return DEV_SKU_3;
	case ADF_DH895XCC_FUSECTL_SKU_4:
		return DEV_SKU_4;
	default:
		return DEV_SKU_UNKNOWN;
	}
}

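/* Select the thread-to-arbiter mapping table that matches the device SKU */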
static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
				    u32 const **arb_map_config)
{
	switch (accel_dev->accel_pci_dev.sku) {
	case DEV_SKU_1:
		*arb_map_config = thrd_to_arb_map_sku4;
		break;

	case DEV_SKU_2:
	case DEV_SKU_4:
		*arb_map_config = thrd_to_arb_map_sku6;
		break;
	default:
		dev_err(&GET_DEV(accel_dev),
			"The configuration doesn't match any SKU");
		*arb_map_config = NULL;
	}
}

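/* CSR offsets used for PF-to-VF messaging and for masking VF interrupts */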
static uint32_t get_pf2vf_offset(uint32_t i)
{
	return ADF_DH895XCC_PF2VF_OFFSET(i);
}

static uint32_t get_vintmsk_offset(uint32_t i)
{
	return ADF_DH895XCC_VINTMSK_OFFSET(i);
}

static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
	struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR];
	void __iomem *csr = misc_bar->virt_addr;
	unsigned int val, i;

	/* Enable Accel Engine error detection & correction */
	for (i = 0; i < hw_device->get_num_aes(hw_device); i++) {
		val = ADF_CSR_RD(csr, ADF_DH895XCC_AE_CTX_ENABLES(i));
		val |= ADF_DH895XCC_ENABLE_AE_ECC_ERR;
		ADF_CSR_WR(csr, ADF_DH895XCC_AE_CTX_ENABLES(i), val);
		val = ADF_CSR_RD(csr, ADF_DH895XCC_AE_MISC_CONTROL(i));
		val |= ADF_DH895XCC_ENABLE_AE_ECC_PARITY_CORR;
		ADF_CSR_WR(csr, ADF_DH895XCC_AE_MISC_CONTROL(i), val);
	}

	/* Enable shared memory error detection & correction */
	for (i = 0; i < hw_device->get_num_accels(hw_device); i++) {
		val = ADF_CSR_RD(csr, ADF_DH895XCC_UERRSSMSH(i));
		val |= ADF_DH895XCC_ERRSSMSH_EN;
		ADF_CSR_WR(csr, ADF_DH895XCC_UERRSSMSH(i), val);
		val = ADF_CSR_RD(csr, ADF_DH895XCC_CERRSSMSH(i));
		val |= ADF_DH895XCC_ERRSSMSH_EN;
		ADF_CSR_WR(csr, ADF_DH895XCC_CERRSSMSH(i), val);
	}
}

static void adf_enable_ints(struct adf_accel_dev *accel_dev)
{
	void __iomem *addr;

	addr = (&GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR])->virt_addr;

	/* Enable bundle and misc interrupts */
	ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF0_MASK_OFFSET,
		   accel_dev->pf.vf_info ? 0 :
			GENMASK_ULL(GET_MAX_BANKS(accel_dev) - 1, 0));
	ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF1_MASK_OFFSET,
		   ADF_DH895XCC_SMIA1_MASK);
}

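/* PF-side hook for enabling VF2PF comms; nothing to set up, so report success */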
static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
{
	return 0;
}

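/*
 * Populate the hw_data table with the DH895xCC capabilities and hook up the
 * device-specific callbacks together with the common routines (ISR, admin,
 * arbiter) shared through qat_common.
 */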
void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
{
	hw_data->dev_class = &dh895xcc_class;
	hw_data->instance_id = dh895xcc_class.instances++;
	hw_data->num_banks = ADF_DH895XCC_ETR_MAX_BANKS;
	hw_data->num_accel = ADF_DH895XCC_MAX_ACCELERATORS;
	hw_data->num_logical_accel = 1;
	hw_data->num_engines = ADF_DH895XCC_MAX_ACCELENGINES;
	hw_data->tx_rx_gap = ADF_DH895XCC_RX_RINGS_OFFSET;
	hw_data->tx_rings_mask = ADF_DH895XCC_TX_RINGS_MASK;
	hw_data->alloc_irq = adf_isr_resource_alloc;
	hw_data->free_irq = adf_isr_resource_free;
	hw_data->enable_error_correction = adf_enable_error_correction;
	hw_data->get_accel_mask = get_accel_mask;
	hw_data->get_ae_mask = get_ae_mask;
	hw_data->get_num_accels = get_num_accels;
	hw_data->get_num_aes = get_num_aes;
	hw_data->get_etr_bar_id = get_etr_bar_id;
	hw_data->get_misc_bar_id = get_misc_bar_id;
	hw_data->get_pf2vf_offset = get_pf2vf_offset;
	hw_data->get_vintmsk_offset = get_vintmsk_offset;
	hw_data->get_sram_bar_id = get_sram_bar_id;
	hw_data->get_sku = get_sku;
	hw_data->fw_name = ADF_DH895XCC_FW;
	hw_data->fw_mmp_name = ADF_DH895XCC_MMP;
	hw_data->init_admin_comms = adf_init_admin_comms;
	hw_data->exit_admin_comms = adf_exit_admin_comms;
	hw_data->disable_iov = adf_disable_sriov;
	hw_data->send_admin_init = adf_send_admin_init;
	hw_data->init_arb = adf_init_arb;
	hw_data->exit_arb = adf_exit_arb;
	hw_data->get_arb_mapping = adf_get_arbiter_mapping;
	hw_data->enable_ints = adf_enable_ints;
	hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
	hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
}

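/* Release this device's slot in the dh895xcc device class instance count */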
void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
{
	hw_data->dev_class->instances--;
}