/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/cpu.h>
#include <asm/cpu_device_id.h>
#include <linux/ccp.h>

#include "ccp-dev.h"

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor driver");

struct ccp_tasklet_data {
	struct completion completion;
	struct ccp_cmd *cmd;
};

static struct ccp_device *ccp_dev;

static inline struct ccp_device *ccp_get_device(void)
{
	return ccp_dev;
}

static inline void ccp_add_device(struct ccp_device *ccp)
{
	ccp_dev = ccp;
}

static inline void ccp_del_device(struct ccp_device *ccp)
{
	ccp_dev = NULL;
}

/**
 * ccp_present - check if a CCP device is present
 *
 * Returns zero if a CCP device is present, -ENODEV otherwise.
 */
int ccp_present(void)
{
	return ccp_get_device() ? 0 : -ENODEV;
}
EXPORT_SYMBOL_GPL(ccp_present);
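
/*
 * Example (illustrative only, not part of the original file): a CCP
 * client can use ccp_present() to decide whether to register itself.
 * The ccp_register_my_algs() name below is hypothetical.
 *
 *	if (ccp_present() != 0)
 *		return -ENODEV;
 *	return ccp_register_my_algs();
 */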

/**
 * ccp_enqueue_cmd - queue an operation for processing by the CCP
 *
 * @cmd: ccp_cmd struct to be processed
 *
 * Queue a cmd to be processed by the CCP. If queueing the cmd
 * would exceed the defined length of the cmd queue the cmd will
 * only be queued if the CCP_CMD_MAY_BACKLOG flag is set and will
 * result in a return code of -EBUSY.
 *
 * The callback routine specified in the ccp_cmd struct will be
 * called to notify the caller of completion (if the cmd was not
 * backlogged) or advancement out of the backlog. If the cmd has
 * advanced out of the backlog the "err" value of the callback
 * will be -EINPROGRESS. Any other "err" value during callback is
 * the result of the operation.
 *
 * The cmd has been successfully queued if:
 *   the return code is -EINPROGRESS or
 *   the return code is -EBUSY and the CCP_CMD_MAY_BACKLOG flag is set
 */
int ccp_enqueue_cmd(struct ccp_cmd *cmd)
{
	struct ccp_device *ccp = ccp_get_device();
	unsigned long flags;
	unsigned int i;
	int ret;

	if (!ccp)
		return -ENODEV;

	/* Caller must supply a callback routine */
	if (!cmd->callback)
		return -EINVAL;

	cmd->ccp = ccp;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	i = ccp->cmd_q_count;

	if (ccp->cmd_count >= MAX_CMD_QLEN) {
		ret = -EBUSY;
		if (cmd->flags & CCP_CMD_MAY_BACKLOG)
			list_add_tail(&cmd->entry, &ccp->backlog);
	} else {
		ret = -EINPROGRESS;
		ccp->cmd_count++;
		list_add_tail(&cmd->entry, &ccp->cmd);

		/* Find an idle queue */
		if (!ccp->suspending) {
			for (i = 0; i < ccp->cmd_q_count; i++) {
				if (ccp->cmd_q[i].active)
					continue;

				break;
			}
		}
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* If we found an idle queue, wake it up */
	if (i < ccp->cmd_q_count)
		wake_up_process(ccp->cmd_q[i].kthread);

	return ret;
}
EXPORT_SYMBOL_GPL(ccp_enqueue_cmd);
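
/*
 * Example (illustrative sketch, not part of the original file): one way a
 * caller might submit a ccp_cmd and interpret the return codes described
 * above. The sample_done() callback and sample_ctx structure are
 * hypothetical names, not part of this driver.
 *
 *	static void sample_done(void *data, int err)
 *	{
 *		struct sample_ctx *ctx = data;
 *
 *		if (err == -EINPROGRESS)
 *			return;			(left the backlog, still running)
 *		ctx->err = err;			(final result of the operation)
 *		complete(&ctx->completion);
 *	}
 *
 *	cmd.flags = CCP_CMD_MAY_BACKLOG;
 *	cmd.callback = sample_done;
 *	cmd.data = &ctx;
 *	ret = ccp_enqueue_cmd(&cmd);
 *	if ((ret != -EINPROGRESS) && (ret != -EBUSY))
 *		(the cmd was not queued; ret is -ENODEV, -EINVAL, ...)
 */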

static void ccp_do_cmd_backlog(struct work_struct *work)
{
	struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work);
	struct ccp_device *ccp = cmd->ccp;
	unsigned long flags;
	unsigned int i;

	cmd->callback(cmd->data, -EINPROGRESS);

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->cmd_count++;
	list_add_tail(&cmd->entry, &ccp->cmd);

	/* Find an idle queue */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		if (ccp->cmd_q[i].active)
			continue;

		break;
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* If we found an idle queue, wake it up */
	if (i < ccp->cmd_q_count)
		wake_up_process(ccp->cmd_q[i].kthread);
}

static struct ccp_cmd *ccp_dequeue_cmd(struct ccp_cmd_queue *cmd_q)
{
	struct ccp_device *ccp = cmd_q->ccp;
	struct ccp_cmd *cmd = NULL;
	struct ccp_cmd *backlog = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	cmd_q->active = 0;

	if (ccp->suspending) {
		cmd_q->suspended = 1;

		spin_unlock_irqrestore(&ccp->cmd_lock, flags);
		wake_up_interruptible(&ccp->suspend_queue);

		return NULL;
	}

	if (ccp->cmd_count) {
		cmd_q->active = 1;

		cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
		list_del(&cmd->entry);

		ccp->cmd_count--;
	}

	if (!list_empty(&ccp->backlog)) {
		backlog = list_first_entry(&ccp->backlog, struct ccp_cmd,
					   entry);
		list_del(&backlog->entry);
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	if (backlog) {
		INIT_WORK(&backlog->work, ccp_do_cmd_backlog);
		schedule_work(&backlog->work);
	}

	return cmd;
}

static void ccp_do_cmd_complete(unsigned long data)
{
	struct ccp_tasklet_data *tdata = (struct ccp_tasklet_data *)data;
	struct ccp_cmd *cmd = tdata->cmd;

	cmd->callback(cmd->data, cmd->ret);
	complete(&tdata->completion);
}

static int ccp_cmd_queue_thread(void *data)
{
	struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data;
	struct ccp_cmd *cmd;
	struct ccp_tasklet_data tdata;
	struct tasklet_struct tasklet;

	tasklet_init(&tasklet, ccp_do_cmd_complete, (unsigned long)&tdata);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();

		set_current_state(TASK_INTERRUPTIBLE);

		cmd = ccp_dequeue_cmd(cmd_q);
		if (!cmd)
			continue;

		__set_current_state(TASK_RUNNING);

		/* Execute the command */
		cmd->ret = ccp_run_cmd(cmd_q, cmd);

		/* Schedule the completion callback */
		tdata.cmd = cmd;
		init_completion(&tdata.completion);
		tasklet_schedule(&tasklet);
		wait_for_completion(&tdata.completion);
	}

	__set_current_state(TASK_RUNNING);

	return 0;
}

static int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
	u32 trng_value;
	int len = min_t(int, sizeof(trng_value), max);

	/*
	 * Locking is provided by the caller so we can update device
	 * hwrng-related fields safely
	 */
	trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
	if (!trng_value) {
		/* Zero is returned if no data is available or if a
		 * bad-entropy error is present. Assume an error if
		 * we exceed TRNG_RETRIES reads of zero.
		 */
		if (ccp->hwrng_retries++ > TRNG_RETRIES)
			return -EIO;

		return 0;
	}

	/* Reset the counter and save the rng value */
	ccp->hwrng_retries = 0;
	memcpy(data, &trng_value, len);

	return len;
}
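
/*
 * Added note: ccp_trng_read() follows the hwrng read contract - it
 * returns the number of bytes copied (at most sizeof(u32) here), 0 when
 * no data is available yet, or -EIO once more than TRNG_RETRIES
 * consecutive reads of TRNG_OUT_REG have returned zero.
 */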

/**
 * ccp_alloc_struct - allocate and initialize the ccp_device struct
 *
 * @dev: device struct of the CCP
 */
struct ccp_device *ccp_alloc_struct(struct device *dev)
{
	struct ccp_device *ccp;

	ccp = kzalloc(sizeof(*ccp), GFP_KERNEL);
	if (ccp == NULL) {
		dev_err(dev, "unable to allocate device struct\n");
		return NULL;
	}
	ccp->dev = dev;

	INIT_LIST_HEAD(&ccp->cmd);
	INIT_LIST_HEAD(&ccp->backlog);

	spin_lock_init(&ccp->cmd_lock);
	mutex_init(&ccp->req_mutex);
	mutex_init(&ccp->ksb_mutex);
	ccp->ksb_count = KSB_COUNT;
	ccp->ksb_start = 0;

	return ccp;
}

/**
 * ccp_init - initialize the CCP device
 *
 * @ccp: ccp_device struct
 */
int ccp_init(struct ccp_device *ccp)
{
	struct device *dev = ccp->dev;
	struct ccp_cmd_queue *cmd_q;
	struct dma_pool *dma_pool;
	char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
	unsigned int qmr, qim, i;
	int ret;

	/* Find available queues */
	qim = 0;
	qmr = ioread32(ccp->io_regs + Q_MASK_REG);
	for (i = 0; i < MAX_HW_QUEUES; i++) {
		if (!(qmr & (1 << i)))
			continue;

		/* Allocate a dma pool for this queue */
		snprintf(dma_pool_name, sizeof(dma_pool_name), "ccp_q%d", i);
		dma_pool = dma_pool_create(dma_pool_name, dev,
					   CCP_DMAPOOL_MAX_SIZE,
					   CCP_DMAPOOL_ALIGN, 0);
		if (!dma_pool) {
			dev_err(dev, "unable to allocate dma pool\n");
			ret = -ENOMEM;
			goto e_pool;
		}

		cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
		ccp->cmd_q_count++;

		cmd_q->ccp = ccp;
		cmd_q->id = i;
		cmd_q->dma_pool = dma_pool;

		/* Reserve 2 KSB regions for the queue */
		cmd_q->ksb_key = KSB_START + ccp->ksb_start++;
		cmd_q->ksb_ctx = KSB_START + ccp->ksb_start++;
		ccp->ksb_count -= 2;

		/* Preset some register values and masks that are queue
		 * number dependent
		 */
		cmd_q->reg_status = ccp->io_regs + CMD_Q_STATUS_BASE +
				    (CMD_Q_STATUS_INCR * i);
		cmd_q->reg_int_status = ccp->io_regs + CMD_Q_INT_STATUS_BASE +
					(CMD_Q_STATUS_INCR * i);
		cmd_q->int_ok = 1 << (i * 2);
		cmd_q->int_err = 1 << ((i * 2) + 1);

		cmd_q->free_slots = CMD_Q_DEPTH(ioread32(cmd_q->reg_status));

		init_waitqueue_head(&cmd_q->int_queue);

		/* Build queue interrupt mask (two interrupts per queue) */
		qim |= cmd_q->int_ok | cmd_q->int_err;

#ifdef CONFIG_ARM64
		/* For arm64 set the recommended queue cache settings */
		iowrite32(ccp->axcache, ccp->io_regs + CMD_Q_CACHE_BASE +
			  (CMD_Q_CACHE_INC * i));
#endif

		dev_dbg(dev, "queue #%u available\n", i);
	}
	if (ccp->cmd_q_count == 0) {
		dev_notice(dev, "no command queues available\n");
		ret = -EIO;
		goto e_pool;
	}
	dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);

	/* Disable and clear interrupts until ready */
	iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];

		ioread32(cmd_q->reg_int_status);
		ioread32(cmd_q->reg_status);
	}
	iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);

	/* Request an irq */
	ret = ccp->get_irq(ccp);
	if (ret) {
		dev_err(dev, "unable to allocate an IRQ\n");
		goto e_pool;
	}

	/* Initialize the queues used to wait for KSB space and suspend */
	init_waitqueue_head(&ccp->ksb_queue);
	init_waitqueue_head(&ccp->suspend_queue);

	/* Create a kthread for each queue */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		struct task_struct *kthread;

		cmd_q = &ccp->cmd_q[i];

		kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
					 "ccp-q%u", cmd_q->id);
		if (IS_ERR(kthread)) {
			dev_err(dev, "error creating queue thread (%ld)\n",
				PTR_ERR(kthread));
			ret = PTR_ERR(kthread);
			goto e_kthread;
		}

		cmd_q->kthread = kthread;
		wake_up_process(kthread);
	}

	/* Register the RNG */
	ccp->hwrng.name = "ccp-rng";
	ccp->hwrng.read = ccp_trng_read;
	ret = hwrng_register(&ccp->hwrng);
	if (ret) {
		dev_err(dev, "error registering hwrng (%d)\n", ret);
		goto e_kthread;
	}

	/* Make the device struct available before enabling interrupts */
	ccp_add_device(ccp);

	/* Enable interrupts */
	iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);

	return 0;

e_kthread:
	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].kthread)
			kthread_stop(ccp->cmd_q[i].kthread);

	ccp->free_irq(ccp);

e_pool:
	for (i = 0; i < ccp->cmd_q_count; i++)
		dma_pool_destroy(ccp->cmd_q[i].dma_pool);

	return ret;
}
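
/*
 * Example (illustrative sketch, not part of the original file): the rough
 * order in which a bus front-end (the PCI or platform code) is expected
 * to drive the routines above. Everything other than ccp_alloc_struct(),
 * ccp_init() and the fields they use is an assumption here.
 *
 *	ccp = ccp_alloc_struct(dev);
 *	if (!ccp)
 *		return -ENOMEM;
 *	ccp->io_regs = ...;		(mapped device registers)
 *	ccp->get_irq = ...;		(bus-specific IRQ request hook)
 *	ccp->free_irq = ...;		(bus-specific IRQ release hook)
 *	dev_set_drvdata(dev, ccp);	(needed by ccp_irq_handler())
 *	ret = ccp_init(ccp);
 */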

/**
 * ccp_destroy - tear down the CCP device
 *
 * @ccp: ccp_device struct
 */
void ccp_destroy(struct ccp_device *ccp)
{
	struct ccp_cmd_queue *cmd_q;
	struct ccp_cmd *cmd;
	unsigned int qim, i;

	/* Remove general access to the device struct */
	ccp_del_device(ccp);

	/* Unregister the RNG */
	hwrng_unregister(&ccp->hwrng);

	/* Stop the queue kthreads */
	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].kthread)
			kthread_stop(ccp->cmd_q[i].kthread);

	/* Build queue interrupt mask (two interrupt masks per queue) */
	qim = 0;
	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];
		qim |= cmd_q->int_ok | cmd_q->int_err;
	}

	/* Disable and clear interrupts */
	iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];

		ioread32(cmd_q->reg_int_status);
		ioread32(cmd_q->reg_status);
	}
	iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);

	ccp->free_irq(ccp);

	for (i = 0; i < ccp->cmd_q_count; i++)
		dma_pool_destroy(ccp->cmd_q[i].dma_pool);

	/* Flush the cmd and backlog queue */
	while (!list_empty(&ccp->cmd)) {
		/* Invoke the callback directly with an error code */
		cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
		list_del(&cmd->entry);
		cmd->callback(cmd->data, -ENODEV);
	}
	while (!list_empty(&ccp->backlog)) {
		/* Invoke the callback directly with an error code */
		cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
		list_del(&cmd->entry);
		cmd->callback(cmd->data, -ENODEV);
	}
}

/**
 * ccp_irq_handler - handle interrupts generated by the CCP device
 *
 * @irq: the irq associated with the interrupt
 * @data: the data value supplied when the irq was created
 */
irqreturn_t ccp_irq_handler(int irq, void *data)
{
	struct device *dev = data;
	struct ccp_device *ccp = dev_get_drvdata(dev);
	struct ccp_cmd_queue *cmd_q;
	u32 q_int, status;
	unsigned int i;

	status = ioread32(ccp->io_regs + IRQ_STATUS_REG);

	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];

		q_int = status & (cmd_q->int_ok | cmd_q->int_err);
		if (q_int) {
			cmd_q->int_status = status;
			cmd_q->q_status = ioread32(cmd_q->reg_status);
			cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);

			/* On error, only save the first error value */
			if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
				cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);

			cmd_q->int_rcvd = 1;

			/* Acknowledge the interrupt and wake the kthread */
			iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
			wake_up_interruptible(&cmd_q->int_queue);
		}
	}

	return IRQ_HANDLED;
}
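
/*
 * Added note: the bus-specific code is expected to register this handler
 * (e.g. via request_irq()) with the struct device pointer as the dev_id
 * argument, since the handler recovers the ccp_device through
 * dev_get_drvdata(data).
 */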

bool ccp_queues_suspended(struct ccp_device *ccp)
{
	unsigned int suspended = 0;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].suspended)
			suspended++;

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	return ccp->cmd_q_count == suspended;
}
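
/*
 * Example (illustrative sketch, not part of the original file): suspend
 * handling is expected to set ccp->suspending, wake each queue kthread so
 * it parks itself in ccp_dequeue_cmd(), and then wait on suspend_queue
 * until every queue reports in:
 *
 *	ccp->suspending = 1;
 *	for (i = 0; i < ccp->cmd_q_count; i++)
 *		wake_up_process(ccp->cmd_q[i].kthread);
 *	wait_event_interruptible(ccp->suspend_queue,
 *				 ccp_queues_suspended(ccp));
 */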

static const struct x86_cpu_id ccp_support[] = {
	{ X86_VENDOR_AMD, 22, },
	{ },
};

static int __init ccp_mod_init(void)
{
#ifdef CONFIG_X86
	struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;
	int ret;

	if (!x86_match_cpu(ccp_support))
		return -ENODEV;

	switch (cpuinfo->x86) {
	case 22:
		if ((cpuinfo->x86_model < 48) || (cpuinfo->x86_model > 63))
			return -ENODEV;

		ret = ccp_pci_init();
		if (ret)
			return ret;

		/* Don't leave the driver loaded if init failed */
		if (!ccp_get_device()) {
			ccp_pci_exit();
			return -ENODEV;
		}

		return 0;
	}
#endif

#ifdef CONFIG_ARM64
	int ret;

	ret = ccp_platform_init();
	if (ret)
		return ret;

	/* Don't leave the driver loaded if init failed */
	if (!ccp_get_device()) {
		ccp_platform_exit();
		return -ENODEV;
	}

	return 0;
#endif

	return -ENODEV;
}

static void __exit ccp_mod_exit(void)
{
#ifdef CONFIG_X86
	struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;

	switch (cpuinfo->x86) {
	case 22:
		ccp_pci_exit();
		break;
	}
#endif

#ifdef CONFIG_ARM64
	ccp_platform_exit();
#endif
}

module_init(ccp_mod_init);
module_exit(ccp_mod_exit);