drivers/char/tpm/tpm_ibmvtpm.c
/*
 * Copyright (C) 2012 IBM Corporation
 *
 * Author: Ashley Lai <ashleydlai@gmail.com>
 *
 * Maintained by: <tpmdd-devel@lists.sourceforge.net>
 *
 * Device driver for TCG/TCPA TPM (trusted platform module).
 * Specifications at www.trustedcomputinggroup.org
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 *
 */

#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <asm/vio.h>
#include <asm/irq.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <asm/prom.h>

#include "tpm.h"
#include "tpm_ibmvtpm.h"

static const char tpm_ibmvtpm_driver_name[] = "tpm_ibmvtpm";

static struct vio_device_id tpm_ibmvtpm_device_table[] = {
	{ "IBM,vtpm", "IBM,vtpm"},
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, tpm_ibmvtpm_device_table);

/**
 * ibmvtpm_send_crq - Send a CRQ request
 * @vdev: vio device struct
 * @w1: first word
 * @w2: second word
 *
 * Return value:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvtpm_send_crq(struct vio_dev *vdev, u64 w1, u64 w2)
{
	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, w1, w2);
}

/**
 * ibmvtpm_get_data - Retrieve ibm vtpm data
 * @dev: device struct
 *
 * Return value:
 *	vtpm device struct
 */
static struct ibmvtpm_dev *ibmvtpm_get_data(const struct device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(dev);

	if (chip)
		return (struct ibmvtpm_dev *)TPM_VPRIV(chip);
	return NULL;
}

/**
 * tpm_ibmvtpm_recv - Receive data after send
 * @chip: tpm chip struct
 * @buf: buffer to read
 * @count: size of buffer
 *
 * Return value:
 *	Number of bytes read
 */
static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
	struct ibmvtpm_dev *ibmvtpm;
	u16 len;
	int sig;

	ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip);

	if (!ibmvtpm->rtce_buf) {
		dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
		return 0;
	}

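	/*
	 * The response is placed in the TCE-mapped rtce_buf by the vTPM;
	 * the interrupt path (ibmvtpm_crq_process) records its length in
	 * res_len and wakes this queue.
	 */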
	sig = wait_event_interruptible(ibmvtpm->wq, ibmvtpm->res_len != 0);
	if (sig)
		return -EINTR;

	len = ibmvtpm->res_len;

	if (count < len) {
		dev_err(ibmvtpm->dev,
			"Invalid size in recv: count=%zd, crq_size=%d\n",
			count, len);
		return -EIO;
	}

	spin_lock(&ibmvtpm->rtce_lock);
	memcpy((void *)buf, (void *)ibmvtpm->rtce_buf, len);
	memset(ibmvtpm->rtce_buf, 0, len);
	ibmvtpm->res_len = 0;
	spin_unlock(&ibmvtpm->rtce_lock);
	return len;
}

/**
 * tpm_ibmvtpm_send - Send tpm request
 * @chip: tpm chip struct
 * @buf: buffer contains data to send
 * @count: size of buffer
 *
 * Return value:
 *	Number of bytes sent
 */
static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
	struct ibmvtpm_dev *ibmvtpm;
	struct ibmvtpm_crq crq;
	u64 *word = (u64 *) &crq;
	int rc;

	ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip);

	if (!ibmvtpm->rtce_buf) {
		dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
		return 0;
	}

	if (count > ibmvtpm->rtce_size) {
		dev_err(ibmvtpm->dev,
			"Invalid size in send: count=%zd, rtce_size=%d\n",
			count, ibmvtpm->rtce_size);
		return -EIO;
	}

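	/*
	 * Stage the command in the TCE-mapped buffer shared with the
	 * hypervisor, then send a CRQ message carrying the command length
	 * and the buffer's DMA address.  The crq struct goes out as two
	 * big-endian 64-bit words.
	 */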
	spin_lock(&ibmvtpm->rtce_lock);
	memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count);
	crq.valid = (u8)IBMVTPM_VALID_CMD;
	crq.msg = (u8)VTPM_TPM_COMMAND;
	crq.len = (u16)count;
	crq.data = ibmvtpm->rtce_dma_handle;

	rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(word[0]),
			      cpu_to_be64(word[1]));
	if (rc != H_SUCCESS) {
		dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
		rc = 0;
	} else
		rc = count;

	spin_unlock(&ibmvtpm->rtce_lock);
	return rc;
}

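/*
 * This driver has no cancel operation and does not track a hardware
 * status: tpm_ibmvtpm_cancel() is a no-op and tpm_ibmvtpm_status()
 * always reports a zero status.
 */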
static void tpm_ibmvtpm_cancel(struct tpm_chip *chip)
{
	return;
}

static u8 tpm_ibmvtpm_status(struct tpm_chip *chip)
{
	return 0;
}

/**
 * ibmvtpm_crq_get_rtce_size - Send a CRQ request to get rtce size
 * @ibmvtpm: vtpm device struct
 *
 * Return value:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm)
{
	struct ibmvtpm_crq crq;
	u64 *buf = (u64 *) &crq;
	int rc;

	crq.valid = (u8)IBMVTPM_VALID_CMD;
	crq.msg = (u8)VTPM_GET_RTCE_BUFFER_SIZE;

	rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]),
			      cpu_to_be64(buf[1]));
	if (rc != H_SUCCESS)
		dev_err(ibmvtpm->dev,
			"ibmvtpm_crq_get_rtce_size failed rc=%d\n", rc);

	return rc;
}

/**
 * ibmvtpm_crq_get_version - Send a CRQ request to get vtpm version
 *			   - Note that this is vtpm version and not tpm version
 * @ibmvtpm: vtpm device struct
 *
 * Return value:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm)
{
	struct ibmvtpm_crq crq;
	u64 *buf = (u64 *) &crq;
	int rc;

	crq.valid = (u8)IBMVTPM_VALID_CMD;
	crq.msg = (u8)VTPM_GET_VERSION;

	rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]),
			      cpu_to_be64(buf[1]));
	if (rc != H_SUCCESS)
		dev_err(ibmvtpm->dev,
			"ibmvtpm_crq_get_version failed rc=%d\n", rc);

	return rc;
}

/**
 * ibmvtpm_crq_send_init_complete - Send a CRQ initialize complete message
 * @ibmvtpm: vtpm device struct
 *
 * Return value:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm)
{
	int rc;

	rc = ibmvtpm_send_crq(ibmvtpm->vdev, INIT_CRQ_COMP_CMD, 0);
	if (rc != H_SUCCESS)
		dev_err(ibmvtpm->dev,
			"ibmvtpm_crq_send_init_complete failed rc=%d\n", rc);

	return rc;
}

/**
 * ibmvtpm_crq_send_init - Send a CRQ initialize message
 * @ibmvtpm: vtpm device struct
 *
 * Return value:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
{
	int rc;

	rc = ibmvtpm_send_crq(ibmvtpm->vdev, INIT_CRQ_CMD, 0);
	if (rc != H_SUCCESS)
		dev_err(ibmvtpm->dev,
			"ibmvtpm_crq_send_init failed rc=%d\n", rc);

	return rc;
}

/**
 * tpm_ibmvtpm_remove - ibm vtpm remove entry point
 * @vdev: vio device struct
 *
 * Return value:
 *	0
 */
static int tpm_ibmvtpm_remove(struct vio_dev *vdev)
{
	struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(&vdev->dev);
	struct tpm_chip *chip = dev_get_drvdata(ibmvtpm->dev);
	int rc = 0;

	tpm_chip_unregister(chip);

	free_irq(vdev->irq, ibmvtpm);

	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(ibmvtpm->dev, ibmvtpm->crq_dma_handle,
			 CRQ_RES_BUF_SIZE, DMA_BIDIRECTIONAL);
	free_page((unsigned long)ibmvtpm->crq_queue.crq_addr);

	if (ibmvtpm->rtce_buf) {
		dma_unmap_single(ibmvtpm->dev, ibmvtpm->rtce_dma_handle,
				 ibmvtpm->rtce_size, DMA_BIDIRECTIONAL);
		kfree(ibmvtpm->rtce_buf);
	}

	kfree(ibmvtpm);

	return 0;
}

/**
 * tpm_ibmvtpm_get_desired_dma - Get DMA size needed by this driver
 * @vdev: vio device struct
 *
 * Return value:
 *	Number of bytes the driver needs to DMA map
 */
static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev)
{
	struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(&vdev->dev);

	/* ibmvtpm initializes at probe time, so the data we are
	 * asking for may not be set yet.  Estimate that 4K is required
	 * for the TCE-mapped buffer in addition to the CRQ.
	 */
	if (!ibmvtpm)
		return CRQ_RES_BUF_SIZE + PAGE_SIZE;

	return CRQ_RES_BUF_SIZE + ibmvtpm->rtce_size;
}

/**
 * tpm_ibmvtpm_suspend - Suspend
 * @dev: device struct
 *
 * Return value:
 *	0
 */
static int tpm_ibmvtpm_suspend(struct device *dev)
{
	struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(dev);
	struct ibmvtpm_crq crq;
	u64 *buf = (u64 *) &crq;
	int rc = 0;

	crq.valid = (u8)IBMVTPM_VALID_CMD;
	crq.msg = (u8)VTPM_PREPARE_TO_SUSPEND;

	rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]),
			      cpu_to_be64(buf[1]));
	if (rc != H_SUCCESS)
		dev_err(ibmvtpm->dev,
			"tpm_ibmvtpm_suspend failed rc=%d\n", rc);

	return rc;
}

/**
 * ibmvtpm_reset_crq - Reset CRQ
 * @ibmvtpm: ibm vtpm struct
 *
 * Return value:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm)
{
	int rc = 0;

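	/*
	 * H_FREE_CRQ may keep reporting busy while the hypervisor tears the
	 * queue down; back off and retry until it completes, then clear the
	 * queue and register it again.
	 */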
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ,
					ibmvtpm->vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	memset(ibmvtpm->crq_queue.crq_addr, 0, CRQ_RES_BUF_SIZE);
	ibmvtpm->crq_queue.index = 0;

	return plpar_hcall_norets(H_REG_CRQ, ibmvtpm->vdev->unit_address,
				  ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
}

/**
 * tpm_ibmvtpm_resume - Resume from suspend
 * @dev: device struct
 *
 * Return value:
 *	0
 */
static int tpm_ibmvtpm_resume(struct device *dev)
{
	struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(dev);
	int rc = 0;

	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_ENABLE_CRQ,
					ibmvtpm->vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc) {
		dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc);
		return rc;
	}

	rc = vio_enable_interrupts(ibmvtpm->vdev);
	if (rc) {
		dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc);
		return rc;
	}

	rc = ibmvtpm_crq_send_init(ibmvtpm);
	if (rc)
		dev_err(dev, "Error send_init rc=%d\n", rc);

	return rc;
}

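/*
 * tpm_ibmvtpm_status() always returns 0, so a zero status is reported to
 * the tpm core as a canceled request.
 */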
static bool tpm_ibmvtpm_req_canceled(struct tpm_chip *chip, u8 status)
{
	return (status == 0);
}

static const struct tpm_class_ops tpm_ibmvtpm = {
	.recv = tpm_ibmvtpm_recv,
	.send = tpm_ibmvtpm_send,
	.cancel = tpm_ibmvtpm_cancel,
	.status = tpm_ibmvtpm_status,
	.req_complete_mask = 0,
	.req_complete_val = 0,
	.req_canceled = tpm_ibmvtpm_req_canceled,
};

static const struct dev_pm_ops tpm_ibmvtpm_pm_ops = {
	.suspend = tpm_ibmvtpm_suspend,
	.resume = tpm_ibmvtpm_resume,
};

/**
 * ibmvtpm_crq_get_next - Get next responded crq
 * @ibmvtpm: vtpm device struct
 *
 * Return value:
 *	vtpm crq pointer
 */
static struct ibmvtpm_crq *ibmvtpm_crq_get_next(struct ibmvtpm_dev *ibmvtpm)
{
	struct ibmvtpm_crq_queue *crq_q = &ibmvtpm->crq_queue;
	struct ibmvtpm_crq *crq = &crq_q->crq_addr[crq_q->index];

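	/*
	 * The CRQ is a circular buffer shared with the hypervisor.  An
	 * entry whose valid field has VTPM_MSG_RES set has been filled in
	 * by the hypervisor; advance the index (wrapping at num_entry) and
	 * order the reads with smp_rmb() before the caller uses the entry.
	 */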
	if (crq->valid & VTPM_MSG_RES) {
		if (++crq_q->index == crq_q->num_entry)
			crq_q->index = 0;
		smp_rmb();
	} else
		crq = NULL;
	return crq;
}

/**
 * ibmvtpm_crq_process - Process responded crq
 * @crq: crq to be processed
 * @ibmvtpm: vtpm device struct
 *
 * Return value:
 *	Nothing
 */
static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
				struct ibmvtpm_dev *ibmvtpm)
{
	int rc = 0;

	switch (crq->valid) {
	case VALID_INIT_CRQ:
		switch (crq->msg) {
		case INIT_CRQ_RES:
			dev_info(ibmvtpm->dev, "CRQ initialized\n");
			rc = ibmvtpm_crq_send_init_complete(ibmvtpm);
			if (rc)
				dev_err(ibmvtpm->dev, "Unable to send CRQ init complete rc=%d\n", rc);
			return;
		case INIT_CRQ_COMP_RES:
			dev_info(ibmvtpm->dev,
				 "CRQ initialization completed\n");
			return;
		default:
			dev_err(ibmvtpm->dev, "Unknown crq message type: %d\n", crq->msg);
			return;
		}
	case IBMVTPM_VALID_CMD:
		switch (crq->msg) {
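		/*
		 * Responses to requests issued by this driver.  The
		 * buffer-size response is where the TCE-mapped rtce_buf
		 * used by send/recv is allocated and DMA-mapped.
		 */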
		case VTPM_GET_RTCE_BUFFER_SIZE_RES:
			if (be16_to_cpu(crq->len) <= 0) {
				dev_err(ibmvtpm->dev, "Invalid rtce size\n");
				return;
			}
			ibmvtpm->rtce_size = be16_to_cpu(crq->len);
			ibmvtpm->rtce_buf = kmalloc(ibmvtpm->rtce_size,
						    GFP_KERNEL);
			if (!ibmvtpm->rtce_buf) {
				dev_err(ibmvtpm->dev, "Failed to allocate memory for rtce buffer\n");
				return;
			}

			ibmvtpm->rtce_dma_handle = dma_map_single(ibmvtpm->dev,
				ibmvtpm->rtce_buf, ibmvtpm->rtce_size,
				DMA_BIDIRECTIONAL);

			if (dma_mapping_error(ibmvtpm->dev,
					      ibmvtpm->rtce_dma_handle)) {
				kfree(ibmvtpm->rtce_buf);
				ibmvtpm->rtce_buf = NULL;
				dev_err(ibmvtpm->dev, "Failed to dma map rtce buffer\n");
			}

			return;
		case VTPM_GET_VERSION_RES:
			ibmvtpm->vtpm_version = be32_to_cpu(crq->data);
			return;
		case VTPM_TPM_COMMAND_RES:
			/* len of the data in rtce buffer */
			ibmvtpm->res_len = be16_to_cpu(crq->len);
			wake_up_interruptible(&ibmvtpm->wq);
			return;
		default:
			return;
		}
	}
	return;
}

/**
 * ibmvtpm_interrupt - Interrupt handler
 * @irq: irq number to handle
 * @vtpm_instance: vtpm that received interrupt
 *
 * Returns:
 *	IRQ_HANDLED
 */
static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance)
{
	struct ibmvtpm_dev *ibmvtpm = (struct ibmvtpm_dev *) vtpm_instance;
	struct ibmvtpm_crq *crq;

	/* while loop is needed for initial setup (get version and
	 * get rtce_size). There should be only one tpm request at any
	 * given time.
	 */
	while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) {
		ibmvtpm_crq_process(crq, ibmvtpm);
		crq->valid = 0;
		smp_wmb();
	}

	return IRQ_HANDLED;
}

/**
 * tpm_ibmvtpm_probe - ibm vtpm initialize entry point
 * @vio_dev: vio device struct
 * @id: vio device id struct
 *
 * Return value:
 *	0 - Success
 *	Non-zero - Failure
 */
static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
			     const struct vio_device_id *id)
{
	struct ibmvtpm_dev *ibmvtpm;
	struct device *dev = &vio_dev->dev;
	struct ibmvtpm_crq_queue *crq_q;
	struct tpm_chip *chip;
	int rc = -ENOMEM, rc1;

	chip = tpmm_chip_alloc(dev, &tpm_ibmvtpm);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	ibmvtpm = kzalloc(sizeof(struct ibmvtpm_dev), GFP_KERNEL);
	if (!ibmvtpm) {
		dev_err(dev, "kzalloc for ibmvtpm failed\n");
		goto cleanup;
	}

	crq_q = &ibmvtpm->crq_queue;
	crq_q->crq_addr = (struct ibmvtpm_crq *)get_zeroed_page(GFP_KERNEL);
	if (!crq_q->crq_addr) {
		dev_err(dev, "Unable to allocate memory for crq_addr\n");
		goto cleanup;
	}

	crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr);
	ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr,
						 CRQ_RES_BUF_SIZE,
						 DMA_BIDIRECTIONAL);

	if (dma_mapping_error(dev, ibmvtpm->crq_dma_handle)) {
		dev_err(dev, "dma mapping failed\n");
		goto cleanup;
	}

	rc = plpar_hcall_norets(H_REG_CRQ, vio_dev->unit_address,
				ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
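	/*
	 * H_RESOURCE means a CRQ is already registered for this unit
	 * address, so reset it and register again.
	 */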
	if (rc == H_RESOURCE)
		rc = ibmvtpm_reset_crq(ibmvtpm);

	if (rc) {
		dev_err(dev, "Unable to register CRQ rc=%d\n", rc);
		goto reg_crq_cleanup;
	}

	rc = request_irq(vio_dev->irq, ibmvtpm_interrupt, 0,
			 tpm_ibmvtpm_driver_name, ibmvtpm);
	if (rc) {
		dev_err(dev, "Error %d register irq 0x%x\n", rc, vio_dev->irq);
		goto init_irq_cleanup;
	}

	rc = vio_enable_interrupts(vio_dev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto init_irq_cleanup;
	}

	init_waitqueue_head(&ibmvtpm->wq);

	crq_q->index = 0;

	ibmvtpm->dev = dev;
	ibmvtpm->vdev = vio_dev;
	TPM_VPRIV(chip) = (void *)ibmvtpm;

	spin_lock_init(&ibmvtpm->rtce_lock);

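	/*
	 * Kick off the CRQ handshake.  The INIT/INIT_COMPLETE exchange and
	 * the responses to the version and rtce-size queries are all
	 * handled asynchronously in ibmvtpm_crq_process().
	 */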
	rc = ibmvtpm_crq_send_init(ibmvtpm);
	if (rc)
		goto init_irq_cleanup;

	rc = ibmvtpm_crq_get_version(ibmvtpm);
	if (rc)
		goto init_irq_cleanup;

	rc = ibmvtpm_crq_get_rtce_size(ibmvtpm);
	if (rc)
		goto init_irq_cleanup;

	return tpm_chip_register(chip);
init_irq_cleanup:
	do {
		rc1 = plpar_hcall_norets(H_FREE_CRQ, vio_dev->unit_address);
	} while (rc1 == H_BUSY || H_IS_LONG_BUSY(rc1));
reg_crq_cleanup:
	dma_unmap_single(dev, ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE,
			 DMA_BIDIRECTIONAL);
cleanup:
	if (ibmvtpm) {
		if (crq_q->crq_addr)
			free_page((unsigned long)crq_q->crq_addr);
		kfree(ibmvtpm);
	}

	return rc;
}

static struct vio_driver ibmvtpm_driver = {
	.id_table = tpm_ibmvtpm_device_table,
	.probe = tpm_ibmvtpm_probe,
	.remove = tpm_ibmvtpm_remove,
	.get_desired_dma = tpm_ibmvtpm_get_desired_dma,
	.name = tpm_ibmvtpm_driver_name,
	.pm = &tpm_ibmvtpm_pm_ops,
};

/**
 * ibmvtpm_module_init - Initialize ibm vtpm module
 *
 * Return value:
 *	0 - Success
 *	Non-zero - Failure
 */
static int __init ibmvtpm_module_init(void)
{
	return vio_register_driver(&ibmvtpm_driver);
}

/**
 * ibmvtpm_module_exit - Teardown ibm vtpm module
 *
 * Return value:
 *	Nothing
 */
static void __exit ibmvtpm_module_exit(void)
{
	vio_unregister_driver(&ibmvtpm_driver);
}

module_init(ibmvtpm_module_init);
module_exit(ibmvtpm_module_exit);

MODULE_AUTHOR("adlai@us.ibm.com");
MODULE_DESCRIPTION("IBM vTPM Driver");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");