/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/cpu.h>
#ifdef CONFIG_X86
#include <asm/cpu_device_id.h>
#endif
#include <linux/ccp.h>

#include "ccp-dev.h"

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor driver");

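/* Per-dequeue context for the completion tasklet: the queue kthread fills
 * in @cmd, schedules the tasklet and then sleeps on @completion until the
 * caller's callback has run (see ccp_cmd_queue_thread() below).
 */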
struct ccp_tasklet_data {
	struct completion completion;
	struct ccp_cmd *cmd;
};

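/* This version of the driver supports a single CCP device; all access to
 * it goes through this file-scope pointer and the accessors below.
 */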
static struct ccp_device *ccp_dev;
static inline struct ccp_device *ccp_get_device(void)
{
	return ccp_dev;
}

static inline void ccp_add_device(struct ccp_device *ccp)
{
	ccp_dev = ccp;
}

static inline void ccp_del_device(struct ccp_device *ccp)
{
	ccp_dev = NULL;
}

/**
 * ccp_present - check if a CCP device is present
 *
 * Returns zero if a CCP device is present, -ENODEV otherwise.
 */
int ccp_present(void)
{
	if (ccp_get_device())
		return 0;

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(ccp_present);

/**
 * ccp_enqueue_cmd - queue an operation for processing by the CCP
 *
 * @cmd: ccp_cmd struct to be processed
 *
 * Queue a cmd to be processed by the CCP. If queueing the cmd
 * would exceed the defined length of the cmd queue, the cmd is
 * placed on the backlog list only if the CCP_CMD_MAY_BACKLOG flag
 * is set, and the return code will be -EBUSY.
 *
 * The callback routine specified in the ccp_cmd struct will be
 * called to notify the caller of completion (if the cmd was not
 * backlogged) or advancement out of the backlog. If the cmd has
 * advanced out of the backlog the "err" value of the callback
 * will be -EINPROGRESS. Any other "err" value during callback is
 * the result of the operation.
 *
 * The cmd has been successfully queued if:
 *   the return code is -EINPROGRESS, or
 *   the return code is -EBUSY and the CCP_CMD_MAY_BACKLOG flag is set
 */
int ccp_enqueue_cmd(struct ccp_cmd *cmd)
{
	struct ccp_device *ccp = ccp_get_device();
	unsigned long flags;
	unsigned int i;
	int ret;

	if (!ccp)
		return -ENODEV;

	/* Caller must supply a callback routine */
	if (!cmd->callback)
		return -EINVAL;

	cmd->ccp = ccp;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

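	/* Default to "no idle queue found" so that no wakeup is attempted
	 * below when the cmd is backlogged or the device is suspending
	 */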
	i = ccp->cmd_q_count;

	if (ccp->cmd_count >= MAX_CMD_QLEN) {
		ret = -EBUSY;
		if (cmd->flags & CCP_CMD_MAY_BACKLOG)
			list_add_tail(&cmd->entry, &ccp->backlog);
	} else {
		ret = -EINPROGRESS;
		ccp->cmd_count++;
		list_add_tail(&cmd->entry, &ccp->cmd);

		/* Find an idle queue */
		if (!ccp->suspending) {
			for (i = 0; i < ccp->cmd_q_count; i++) {
				if (ccp->cmd_q[i].active)
					continue;

				break;
			}
		}
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* If we found an idle queue, wake it up */
	if (i < ccp->cmd_q_count)
		wake_up_process(ccp->cmd_q[i].kthread);

	return ret;
}
EXPORT_SYMBOL_GPL(ccp_enqueue_cmd);

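/*
 * Minimal usage sketch for ccp_enqueue_cmd() (hypothetical caller, not
 * part of this driver; setup of the cmd and of the "done" completion is
 * elided). The callback must treat -EINPROGRESS as "advanced out of the
 * backlog" and any other value as the final result:
 *
 *	static void my_callback(void *data, int err)
 *	{
 *		if (err == -EINPROGRESS)
 *			return;		// promoted from the backlog
 *		complete(data);		// err is the operation result
 *	}
 *
 *	cmd->callback = my_callback;
 *	cmd->data = &done;
 *	cmd->flags = CCP_CMD_MAY_BACKLOG;
 *	ret = ccp_enqueue_cmd(cmd);
 *	if ((ret != -EINPROGRESS) && (ret != -EBUSY))
 *		return ret;		// cmd was not queued
 *	wait_for_completion(&done);
 */

/* Work handler that promotes the oldest backlogged cmd onto the active
 * list once queue space has freed up; the submitter is notified with
 * -EINPROGRESS before the cmd is re-queued for processing.
 */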
static void ccp_do_cmd_backlog(struct work_struct *work)
{
	struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work);
	struct ccp_device *ccp = cmd->ccp;
	unsigned long flags;
	unsigned int i;

	cmd->callback(cmd->data, -EINPROGRESS);

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->cmd_count++;
	list_add_tail(&cmd->entry, &ccp->cmd);

	/* Find an idle queue */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		if (ccp->cmd_q[i].active)
			continue;

		break;
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* If we found an idle queue, wake it up */
	if (i < ccp->cmd_q_count)
		wake_up_process(ccp->cmd_q[i].kthread);
}

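/* Called by a queue kthread to pull the next cmd to process. The queue is
 * marked idle first; if the device is suspending, the queue parks itself
 * instead and the suspend waiter is notified. Any backlogged cmd is
 * promoted via ccp_do_cmd_backlog() now that a slot has opened up.
 */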
static struct ccp_cmd *ccp_dequeue_cmd(struct ccp_cmd_queue *cmd_q)
{
	struct ccp_device *ccp = cmd_q->ccp;
	struct ccp_cmd *cmd = NULL;
	struct ccp_cmd *backlog = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	cmd_q->active = 0;

	if (ccp->suspending) {
		cmd_q->suspended = 1;

		spin_unlock_irqrestore(&ccp->cmd_lock, flags);
		wake_up_interruptible(&ccp->suspend_queue);

		return NULL;
	}

	if (ccp->cmd_count) {
		cmd_q->active = 1;

		cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
		list_del(&cmd->entry);

		ccp->cmd_count--;
	}

	if (!list_empty(&ccp->backlog)) {
		backlog = list_first_entry(&ccp->backlog, struct ccp_cmd,
					   entry);
		list_del(&backlog->entry);
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	if (backlog) {
		INIT_WORK(&backlog->work, ccp_do_cmd_backlog);
		schedule_work(&backlog->work);
	}

	return cmd;
}

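/* Completion callbacks are invoked from tasklet (softirq) context rather
 * than directly from the queue kthread; the kthread waits on
 * tdata->completion so its on-stack ccp_tasklet_data remains valid until
 * the callback has finished.
 */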
static void ccp_do_cmd_complete(unsigned long data)
{
	struct ccp_tasklet_data *tdata = (struct ccp_tasklet_data *)data;
	struct ccp_cmd *cmd = tdata->cmd;

	cmd->callback(cmd->data, cmd->ret);
	complete(&tdata->completion);
}

static int ccp_cmd_queue_thread(void *data)
{
	struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data;
	struct ccp_cmd *cmd;
	struct ccp_tasklet_data tdata;
	struct tasklet_struct tasklet;

	tasklet_init(&tasklet, ccp_do_cmd_complete, (unsigned long)&tdata);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();

		set_current_state(TASK_INTERRUPTIBLE);

		cmd = ccp_dequeue_cmd(cmd_q);
		if (!cmd)
			continue;

		__set_current_state(TASK_RUNNING);

		/* Execute the command */
		cmd->ret = ccp_run_cmd(cmd_q, cmd);

		/* Schedule the completion callback */
		tdata.cmd = cmd;
		init_completion(&tdata.completion);
		tasklet_schedule(&tasklet);
		wait_for_completion(&tdata.completion);
	}

	__set_current_state(TASK_RUNNING);

	return 0;
}

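/* hwrng read callback: returns up to four bytes per call from the
 * hardware TRNG output register.
 */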
static int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
	u32 trng_value;
	int len = min_t(int, sizeof(trng_value), max);

	/*
	 * Locking is provided by the caller so we can update device
	 * hwrng-related fields safely
	 */
	trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
	if (!trng_value) {
		/* Zero is returned if no data is available or if a
		 * bad-entropy error is present. Assume an error if
		 * we exceed TRNG_RETRIES reads of zero.
		 */
		if (ccp->hwrng_retries++ > TRNG_RETRIES)
			return -EIO;

		return 0;
	}

	/* Reset the counter and save the rng value */
	ccp->hwrng_retries = 0;
	memcpy(data, &trng_value, len);

	return len;
}

/**
 * ccp_alloc_struct - allocate and initialize the ccp_device struct
 *
 * @dev: device struct of the CCP
 */
struct ccp_device *ccp_alloc_struct(struct device *dev)
{
	struct ccp_device *ccp;

	ccp = kzalloc(sizeof(*ccp), GFP_KERNEL);
	if (!ccp)
		return NULL;
	ccp->dev = dev;

	INIT_LIST_HEAD(&ccp->cmd);
	INIT_LIST_HEAD(&ccp->backlog);

	spin_lock_init(&ccp->cmd_lock);
	mutex_init(&ccp->req_mutex);
	mutex_init(&ccp->ksb_mutex);
	ccp->ksb_count = KSB_COUNT;
	ccp->ksb_start = 0;

	return ccp;
}

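/* Each enabled hardware queue gets its own DMA pool, two reserved KSB
 * entries (key and context), an int_ok/int_err interrupt bit pair and a
 * dedicated kthread to feed it; all of this is set up in ccp_init().
 */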
/**
 * ccp_init - initialize the CCP device
 *
 * @ccp: ccp_device struct
 */
int ccp_init(struct ccp_device *ccp)
{
	struct device *dev = ccp->dev;
	struct ccp_cmd_queue *cmd_q;
	struct dma_pool *dma_pool;
	char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
	unsigned int qmr, qim, i;
	int ret;

	/* Find available queues */
	qim = 0;
	qmr = ioread32(ccp->io_regs + Q_MASK_REG);
	for (i = 0; i < MAX_HW_QUEUES; i++) {
		if (!(qmr & (1 << i)))
			continue;

		/* Allocate a dma pool for this queue */
		snprintf(dma_pool_name, sizeof(dma_pool_name), "ccp_q%d", i);
		dma_pool = dma_pool_create(dma_pool_name, dev,
					   CCP_DMAPOOL_MAX_SIZE,
					   CCP_DMAPOOL_ALIGN, 0);
		if (!dma_pool) {
			dev_err(dev, "unable to allocate dma pool\n");
			ret = -ENOMEM;
			goto e_pool;
		}

		cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
		ccp->cmd_q_count++;

		cmd_q->ccp = ccp;
		cmd_q->id = i;
		cmd_q->dma_pool = dma_pool;

		/* Reserve 2 KSB regions for the queue */
		cmd_q->ksb_key = KSB_START + ccp->ksb_start++;
		cmd_q->ksb_ctx = KSB_START + ccp->ksb_start++;
		ccp->ksb_count -= 2;

		/* Preset some register values and masks that are queue
		 * number dependent
		 */
		cmd_q->reg_status = ccp->io_regs + CMD_Q_STATUS_BASE +
				    (CMD_Q_STATUS_INCR * i);
		cmd_q->reg_int_status = ccp->io_regs + CMD_Q_INT_STATUS_BASE +
					(CMD_Q_STATUS_INCR * i);
		cmd_q->int_ok = 1 << (i * 2);
		cmd_q->int_err = 1 << ((i * 2) + 1);

		cmd_q->free_slots = CMD_Q_DEPTH(ioread32(cmd_q->reg_status));

		init_waitqueue_head(&cmd_q->int_queue);

		/* Build queue interrupt mask (two interrupts per queue) */
		qim |= cmd_q->int_ok | cmd_q->int_err;

#ifdef CONFIG_ARM64
		/* For arm64 set the recommended queue cache settings */
		iowrite32(ccp->axcache, ccp->io_regs + CMD_Q_CACHE_BASE +
			  (CMD_Q_CACHE_INC * i));
#endif

		dev_dbg(dev, "queue #%u available\n", i);
	}
	if (ccp->cmd_q_count == 0) {
		dev_notice(dev, "no command queues available\n");
		ret = -EIO;
		goto e_pool;
	}
	dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);

	/* Disable and clear interrupts until ready */
	iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];

		ioread32(cmd_q->reg_int_status);
		ioread32(cmd_q->reg_status);
	}
	iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);

	/* Request an irq */
	ret = ccp->get_irq(ccp);
	if (ret) {
		dev_err(dev, "unable to allocate an IRQ\n");
		goto e_pool;
	}

	/* Initialize the queues used to wait for KSB space and suspend */
	init_waitqueue_head(&ccp->ksb_queue);
	init_waitqueue_head(&ccp->suspend_queue);

	/* Create a kthread for each queue */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		struct task_struct *kthread;

		cmd_q = &ccp->cmd_q[i];

		kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
					 "ccp-q%u", cmd_q->id);
		if (IS_ERR(kthread)) {
			dev_err(dev, "error creating queue thread (%ld)\n",
				PTR_ERR(kthread));
			ret = PTR_ERR(kthread);
			goto e_kthread;
		}

		cmd_q->kthread = kthread;
		wake_up_process(kthread);
	}

	/* Register the RNG */
	ccp->hwrng.name = "ccp-rng";
	ccp->hwrng.read = ccp_trng_read;
	ret = hwrng_register(&ccp->hwrng);
	if (ret) {
		dev_err(dev, "error registering hwrng (%d)\n", ret);
		goto e_kthread;
	}

	/* Make the device struct available before enabling interrupts */
	ccp_add_device(ccp);

	/* Enable interrupts */
	iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);

	return 0;

e_kthread:
	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].kthread)
			kthread_stop(ccp->cmd_q[i].kthread);

	ccp->free_irq(ccp);

e_pool:
	for (i = 0; i < ccp->cmd_q_count; i++)
		dma_pool_destroy(ccp->cmd_q[i].dma_pool);

	return ret;
}

/**
 * ccp_destroy - tear down the CCP device
 *
 * @ccp: ccp_device struct
 */
void ccp_destroy(struct ccp_device *ccp)
{
	struct ccp_cmd_queue *cmd_q;
	struct ccp_cmd *cmd;
	unsigned int qim, i;

	/* Remove general access to the device struct */
	ccp_del_device(ccp);

	/* Unregister the RNG */
	hwrng_unregister(&ccp->hwrng);

	/* Stop the queue kthreads */
	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].kthread)
			kthread_stop(ccp->cmd_q[i].kthread);

	/* Build queue interrupt mask (two interrupt masks per queue) */
	qim = 0;
	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];
		qim |= cmd_q->int_ok | cmd_q->int_err;
	}

	/* Disable and clear interrupts */
	iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];

		ioread32(cmd_q->reg_int_status);
		ioread32(cmd_q->reg_status);
	}
	iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);

	ccp->free_irq(ccp);

	for (i = 0; i < ccp->cmd_q_count; i++)
		dma_pool_destroy(ccp->cmd_q[i].dma_pool);

	/* Flush the cmd and backlog queue */
	while (!list_empty(&ccp->cmd)) {
		/* Invoke the callback directly with an error code */
		cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
		list_del(&cmd->entry);
		cmd->callback(cmd->data, -ENODEV);
	}
	while (!list_empty(&ccp->backlog)) {
		/* Invoke the callback directly with an error code */
		cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
		list_del(&cmd->entry);
		cmd->callback(cmd->data, -ENODEV);
	}
}

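/* The IRQ status register carries the per-queue int_ok/int_err bit pairs
 * built in ccp_init(). For each queue with a pending bit, the handler
 * latches the status registers for the queue, acknowledges the interrupt
 * by writing the bits back, and wakes any waiter on the queue's int_queue.
 */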
/**
 * ccp_irq_handler - handle interrupts generated by the CCP device
 *
 * @irq: the irq associated with the interrupt
 * @data: the data value supplied when the irq was created
 */
irqreturn_t ccp_irq_handler(int irq, void *data)
{
	struct device *dev = data;
	struct ccp_device *ccp = dev_get_drvdata(dev);
	struct ccp_cmd_queue *cmd_q;
	u32 q_int, status;
	unsigned int i;

	status = ioread32(ccp->io_regs + IRQ_STATUS_REG);

	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];

		q_int = status & (cmd_q->int_ok | cmd_q->int_err);
		if (q_int) {
			cmd_q->int_status = status;
			cmd_q->q_status = ioread32(cmd_q->reg_status);
			cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);

			/* On error, only save the first error value */
			if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
				cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);

			cmd_q->int_rcvd = 1;

			/* Acknowledge the interrupt and wake the kthread */
			iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
			wake_up_interruptible(&cmd_q->int_queue);
		}
	}

	return IRQ_HANDLED;
}

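/* Suspend support: once ccp->suspending is set, each queue kthread parks
 * itself in ccp_dequeue_cmd() (setting cmd_q->suspended), so callers can
 * use this check to wait until every queue has parked.
 */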
#ifdef CONFIG_PM
bool ccp_queues_suspended(struct ccp_device *ccp)
{
	unsigned int suspended = 0;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].suspended)
			suspended++;

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	return ccp->cmd_q_count == suspended;
}
#endif

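/* On x86, support is limited to AMD family 22 (0x16) parts;
 * ccp_mod_init() further restricts this to models 48-63 (0x30-0x3f).
 */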
#ifdef CONFIG_X86
static const struct x86_cpu_id ccp_support[] = {
	{ X86_VENDOR_AMD, 22, },
	{ },
};
#endif

static int __init ccp_mod_init(void)
{
#ifdef CONFIG_X86
	struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;
	int ret;

	if (!x86_match_cpu(ccp_support))
		return -ENODEV;

	switch (cpuinfo->x86) {
	case 22:
		if ((cpuinfo->x86_model < 48) || (cpuinfo->x86_model > 63))
			return -ENODEV;

		ret = ccp_pci_init();
		if (ret)
			return ret;

		/* Don't leave the driver loaded if init failed */
		if (!ccp_get_device()) {
			ccp_pci_exit();
			return -ENODEV;
		}

		return 0;
	}
#endif

#ifdef CONFIG_ARM64
	int ret;

	ret = ccp_platform_init();
	if (ret)
		return ret;

	/* Don't leave the driver loaded if init failed */
	if (!ccp_get_device()) {
		ccp_platform_exit();
		return -ENODEV;
	}

	return 0;
#endif

	return -ENODEV;
}

static void __exit ccp_mod_exit(void)
{
#ifdef CONFIG_X86
	struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;

	switch (cpuinfo->x86) {
	case 22:
		ccp_pci_exit();
		break;
	}
#endif

#ifdef CONFIG_ARM64
	ccp_platform_exit();
#endif
}

module_init(ccp_mod_init);
module_exit(ccp_mod_exit);