drivers/crypto/nx/nx-sha256.c
/**
 * SHA-256 routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2011-2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <linux/module.h>
#include <asm/vio.h>
#include <asm/byteorder.h>

#include "nx_csbcpb.h"
#include "nx.h"

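/*
 * Per-transform setup: initialize the NX context for the SHA family,
 * select the SHA-256 property set and record the digest size in the
 * coprocessor parameter block (CPB).
 */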
static int nx_crypto_ctx_sha256_init(struct crypto_tfm *tfm)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
        int err;

        err = nx_crypto_ctx_sha_init(tfm);
        if (err)
                return err;

        nx_ctx_init(nx_ctx, HCOP_FC_SHA);

        nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256];

        NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256);

        return 0;
}

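/*
 * Seed the generic sha256_state with the standard SHA-256 initial hash
 * values, stored big-endian so the state can be copied byte-for-byte
 * into the CPB's message_digest field (see nx_sha256_update).
 */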
static int nx_sha256_init(struct shash_desc *desc)
{
        struct sha256_state *sctx = shash_desc_ctx(desc);

        memset(sctx, 0, sizeof(*sctx));

        sctx->state[0] = __cpu_to_be32(SHA256_H0);
        sctx->state[1] = __cpu_to_be32(SHA256_H1);
        sctx->state[2] = __cpu_to_be32(SHA256_H2);
        sctx->state[3] = __cpu_to_be32(SHA256_H3);
        sctx->state[4] = __cpu_to_be32(SHA256_H4);
        sctx->state[5] = __cpu_to_be32(SHA256_H5);
        sctx->state[6] = __cpu_to_be32(SHA256_H6);
        sctx->state[7] = __cpu_to_be32(SHA256_H7);
        sctx->count = 0;

        return 0;
}

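/*
 * Bytes are accumulated in sctx->buf until at least one full 64-byte
 * block (SHA256_BLOCK_SIZE) is available; whole blocks are then pushed
 * through the NX unit in sg-list-sized chunks, and any unaligned tail
 * is carried over for the next update or final call.
 */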
static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
                            unsigned int len)
{
        struct sha256_state *sctx = shash_desc_ctx(desc);
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
        struct nx_sg *out_sg;
        u64 to_process = 0, leftover, total;
        unsigned long irq_flags;
        int rc = 0;
        int data_len;
        u32 max_sg_len;
        u64 buf_len = (sctx->count % SHA256_BLOCK_SIZE);

        spin_lock_irqsave(&nx_ctx->lock, irq_flags);

        /* 2 cases for total data len:
         *  1: < SHA256_BLOCK_SIZE: copy into state, return 0
         *  2: >= SHA256_BLOCK_SIZE: process X blocks, copy in leftover
         */
        total = (sctx->count % SHA256_BLOCK_SIZE) + len;
        if (total < SHA256_BLOCK_SIZE) {
                memcpy(sctx->buf + buf_len, data, len);
                sctx->count += len;
                goto out;
        }

        memcpy(csbcpb->cpb.sha256.message_digest, sctx->state,
               SHA256_DIGEST_SIZE);
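        /*
         * NX_FDM_INTERMEDIATE: more data will follow, so the coprocessor
         * must not apply final SHA-256 padding yet.
         * NX_FDM_CONTINUATION: resume from the partial digest passed in
         * via input_partial_digest instead of the SHA-256 initial values.
         */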
        NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
        NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

        max_sg_len = min_t(u64, nx_ctx->ap->sglen,
                           nx_driver.of.max_sg_len/sizeof(struct nx_sg));
        max_sg_len = min_t(u64, max_sg_len,
                           nx_ctx->ap->databytelen/NX_PAGE_SIZE);

        data_len = SHA256_DIGEST_SIZE;
        out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
                                  &data_len, max_sg_len);
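        /*
         * Note: the (start - end) pointer subtraction used for op.outlen
         * below (and op.inlen later) is intentional. It produces a
         * negative length, which the PFO/H_COP hypervisor interface
         * appears to use to mark the operand as a scatter/gather list
         * rather than a direct buffer.
         */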
        nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

        if (data_len != SHA256_DIGEST_SIZE) {
                rc = -EINVAL;
                goto out;
        }

        do {
                int used_sgs = 0;
                struct nx_sg *in_sg = nx_ctx->in_sg;

                if (buf_len) {
                        data_len = buf_len;
                        in_sg = nx_build_sg_list(in_sg,
                                                 (u8 *) sctx->buf,
                                                 &data_len,
                                                 max_sg_len);

                        if (data_len != buf_len) {
                                rc = -EINVAL;
                                goto out;
                        }
                        used_sgs = in_sg - nx_ctx->in_sg;
                }

                /* to_process: SHA256_BLOCK_SIZE aligned chunk to be
                 * processed in this iteration. This value is restricted
                 * by the sg list limits and the number of sgs we already
                 * used for leftover data (see above).
                 * In the ideal case we could allow NX_PAGE_SIZE * max_sg_len,
                 * but because the data may not be aligned, we need to
                 * account for that too. */
                to_process = min_t(u64, total,
                                   (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
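                /* round down to whole blocks; e.g. a 200-byte total
                 * yields to_process = 192 (three 64-byte blocks) */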
                to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);

                data_len = to_process - buf_len;
                in_sg = nx_build_sg_list(in_sg, (u8 *) data,
                                         &data_len, max_sg_len);

                nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);

                to_process = data_len + buf_len;
                leftover = total - to_process;

                /*
                 * we've hit the nx chip previously and we're updating
                 * again, so copy over the partial digest.
                 */
                memcpy(csbcpb->cpb.sha256.input_partial_digest,
                       csbcpb->cpb.sha256.message_digest,
                       SHA256_DIGEST_SIZE);

                if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
                        rc = -EINVAL;
                        goto out;
                }

                rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
                                   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
                if (rc)
                        goto out;

                atomic_inc(&(nx_ctx->stats->sha256_ops));

                total -= to_process;
                data += to_process - buf_len;
                buf_len = 0;

        } while (leftover >= SHA256_BLOCK_SIZE);

        /* copy the leftover back into the state struct */
        if (leftover)
                memcpy(sctx->buf, data, leftover);

        sctx->count += len;
        memcpy(sctx->state, csbcpb->cpb.sha256.message_digest,
               SHA256_DIGEST_SIZE);
out:
        spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
        return rc;
}

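/*
 * Push any buffered tail through the NX unit with NX_FDM_INTERMEDIATE
 * cleared so the coprocessor applies the final SHA-256 padding, then
 * copy the finished digest out of the CPB.
 */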
static int nx_sha256_final(struct shash_desc *desc, u8 *out)
{
        struct sha256_state *sctx = shash_desc_ctx(desc);
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
        struct nx_sg *in_sg, *out_sg;
        unsigned long irq_flags;
        u32 max_sg_len;
        int rc = 0;
        int len;

        spin_lock_irqsave(&nx_ctx->lock, irq_flags);

        max_sg_len = min_t(u64, nx_ctx->ap->sglen,
                           nx_driver.of.max_sg_len/sizeof(struct nx_sg));
        max_sg_len = min_t(u64, max_sg_len,
                           nx_ctx->ap->databytelen/NX_PAGE_SIZE);

        /* final is represented by continuing the operation and indicating
         * that this is not an intermediate operation */
        if (sctx->count >= SHA256_BLOCK_SIZE) {
                /* we've hit the nx chip previously, now we're finalizing,
                 * so copy over the partial digest */
                memcpy(csbcpb->cpb.sha256.input_partial_digest, sctx->state,
                       SHA256_DIGEST_SIZE);
                NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
                NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
        } else {
                NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
                NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
        }

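        /* the SHA-256 length field counts bits, not bytes */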
        csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8);

        len = sctx->count & (SHA256_BLOCK_SIZE - 1);
        in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) sctx->buf,
                                 &len, max_sg_len);

        if (len != (sctx->count & (SHA256_BLOCK_SIZE - 1))) {
                rc = -EINVAL;
                goto out;
        }

        len = SHA256_DIGEST_SIZE;
        out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len, max_sg_len);

        if (len != SHA256_DIGEST_SIZE) {
                rc = -EINVAL;
                goto out;
        }

        nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
        nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
        if (!nx_ctx->op.outlen) {
                rc = -EINVAL;
                goto out;
        }

        rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
                           desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
        if (rc)
                goto out;

        atomic_inc(&(nx_ctx->stats->sha256_ops));

        atomic64_add(sctx->count, &(nx_ctx->stats->sha256_bytes));
        memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
out:
        spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
        return rc;
}

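/*
 * export/import copy the generic struct sha256_state verbatim, so an
 * in-progress hash can be saved and resumed later, potentially by a
 * different sha256 implementation that shares this state layout.
 */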
static int nx_sha256_export(struct shash_desc *desc, void *out)
{
        struct sha256_state *sctx = shash_desc_ctx(desc);

        memcpy(out, sctx, sizeof(*sctx));

        return 0;
}

static int nx_sha256_import(struct shash_desc *desc, const void *in)
{
        struct sha256_state *sctx = shash_desc_ctx(desc);

        memcpy(sctx, in, sizeof(*sctx));

        return 0;
}

struct shash_alg nx_shash_sha256_alg = {
        .digestsize = SHA256_DIGEST_SIZE,
        .init       = nx_sha256_init,
        .update     = nx_sha256_update,
        .final      = nx_sha256_final,
        .export     = nx_sha256_export,
        .import     = nx_sha256_import,
        .descsize   = sizeof(struct sha256_state),
        .statesize  = sizeof(struct sha256_state),
        .base       = {
                .cra_name        = "sha256",
                .cra_driver_name = "sha256-nx",
                .cra_priority    = 300,
                .cra_flags       = CRYPTO_ALG_TYPE_SHASH,
                .cra_blocksize   = SHA256_BLOCK_SIZE,
                .cra_module      = THIS_MODULE,
                .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
                .cra_init        = nx_crypto_ctx_sha256_init,
                .cra_exit        = nx_crypto_ctx_exit,
        }
};
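/*
 * This shash_alg is registered with the kernel crypto API elsewhere in
 * the driver (nx.c) once the NX device has been probed. A minimal sketch
 * of how a kernel caller would then reach it through the generic shash
 * interface (hypothetical usage, not part of this driver):
 *
 *      struct crypto_shash *tfm = crypto_alloc_shash("sha256", 0, 0);
 *      u8 digest[SHA256_DIGEST_SIZE];
 *
 *      if (!IS_ERR(tfm)) {
 *              SHASH_DESC_ON_STACK(desc, tfm);
 *
 *              desc->tfm = tfm;
 *              crypto_shash_digest(desc, data, data_len, digest);
 *              crypto_free_shash(tfm);
 *      }
 *
 * With .cra_priority = 300, this implementation takes precedence over
 * the generic C sha256 implementation (priority 100) when available.
 */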