1/*
2 * Remote Processor Framework
3 *
4 * Copyright (C) 2011 Texas Instruments, Inc.
5 * Copyright (C) 2011 Google, Inc.
6 *
7 * Ohad Ben-Cohen <ohad@wizery.com>
8 * Brian Swetland <swetland@google.com>
9 * Mark Grosen <mgrosen@ti.com>
10 * Fernando Guzman Lugo <fernando.lugo@ti.com>
11 * Suman Anna <s-anna@ti.com>
12 * Robert Tivy <rtivy@ti.com>
13 * Armando Uribe De Leon <x0095078@ti.com>
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * version 2 as published by the Free Software Foundation.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 */
24
25#define pr_fmt(fmt) "%s: " fmt, __func__
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/device.h>
30#include <linux/slab.h>
31#include <linux/mutex.h>
32#include <linux/dma-mapping.h>
33#include <linux/firmware.h>
34#include <linux/string.h>
35#include <linux/debugfs.h>
36#include <linux/remoteproc.h>
37#include <linux/iommu.h>
38#include <linux/klist.h>
39#include <linux/elf.h>
40#include <linux/virtio_ids.h>
41#include <linux/virtio_ring.h>
42
43#include "remoteproc_internal.h"
44
45static void klist_rproc_get(struct klist_node *n);
46static void klist_rproc_put(struct klist_node *n);
47
48/*
49 * klist of the available remote processors.
50 *
51 * We need this in order to support name-based lookups (needed by the
52 * rproc_get_by_name()).
53 *
54 * That said, we don't use rproc_get_by_name() anymore within the rpmsg
55 * framework. The use cases that do require its existence should be
56 * scrutinized, and hopefully migrated to rproc_boot() using device-based
57 * binding.
58 *
59 * If/when this materializes, we could drop the klist (and the by_name
60 * API).
61 */
62static DEFINE_KLIST(rprocs, klist_rproc_get, klist_rproc_put);
63
64typedef int (*rproc_handle_resources_t)(struct rproc *rproc,
65 struct fw_resource *rsc, int len);
66
67/*
68 * This is the IOMMU fault handler we register with the IOMMU API
69 * (when relevant; not all remote processors access memory through
70 * an IOMMU).
71 *
72 * The IOMMU core will invoke this handler whenever the remote processor
73 * tries to access an unmapped device address.
74 *
75 * Currently this is mostly a stub, but it will be later used to trigger
76 * the recovery of the remote processor.
77 */
78static int rproc_iommu_fault(struct iommu_domain *domain, struct device *dev,
79 unsigned long iova, int flags)
80{
81 dev_err(dev, "iommu fault: da 0x%lx flags 0x%x\n", iova, flags);
82
83 /*
84 * Let the iommu core know we're not really handling this fault;
85 * we just plan to use this as a recovery trigger.
86 */
87 return -ENOSYS;
88}
89
90static int rproc_enable_iommu(struct rproc *rproc)
91{
92 struct iommu_domain *domain;
93 struct device *dev = rproc->dev;
94 int ret;
95
96 /*
97 * We currently use iommu_present() to decide if an IOMMU
98 * setup is needed.
99 *
100 * This works for simple cases, but will easily fail with
101 * platforms that do have an IOMMU, but not for this specific
102 * rproc.
103 *
104 * This will be easily solved by introducing hw capabilities
105 * that will be set by the remoteproc driver.
106 */
107 if (!iommu_present(dev->bus)) {
108 dev_dbg(dev, "iommu not found\n");
109 return 0;
110 }
111
112 domain = iommu_domain_alloc(dev->bus);
113 if (!domain) {
114 dev_err(dev, "can't alloc iommu domain\n");
115 return -ENOMEM;
116 }
117
118 iommu_set_fault_handler(domain, rproc_iommu_fault);
119
120 ret = iommu_attach_device(domain, dev);
121 if (ret) {
122 dev_err(dev, "can't attach iommu device: %d\n", ret);
123 goto free_domain;
124 }
125
126 rproc->domain = domain;
127
128 return 0;
129
130free_domain:
131 iommu_domain_free(domain);
132 return ret;
133}
134
135static void rproc_disable_iommu(struct rproc *rproc)
136{
137 struct iommu_domain *domain = rproc->domain;
138 struct device *dev = rproc->dev;
139
140 if (!domain)
141 return;
142
143 iommu_detach_device(domain, dev);
144 iommu_domain_free(domain);
145
146 return;
147}
148
149/*
150 * Some remote processors will ask us to allocate them physically contiguous
151 * memory regions (which we call "carveouts"), and map them to specific
152 * device addresses (which are hardcoded in the firmware).
153 *
154 * They may then ask us to copy objects into specific device addresses (e.g.
155 * code/data sections) or expose to us certain symbols at other device addresses
156 * (e.g. their trace buffer).
157 *
158 * This function is an internal helper with which we can go over the allocated
159 * carveouts and translate specific device address to kernel virtual addresses
160 * so we can access the referenced memory.
161 *
162 * Note: phys_to_virt(iommu_iova_to_phys(rproc->domain, da)) will work too,
163 * but only on kernel direct mapped RAM memory. Instead, we're just using
164 * the output of the DMA API here, which should be more correct.
165 */
166static void *rproc_da_to_va(struct rproc *rproc, u64 da, int len)
167{
168 struct rproc_mem_entry *carveout;
169 void *ptr = NULL;
170
171 list_for_each_entry(carveout, &rproc->carveouts, node) {
172 int offset = da - carveout->da;
173
174 /* try next carveout if da is too small */
175 if (offset < 0)
176 continue;
177
178 /* try next carveout if da is too large */
179 if (offset + len > carveout->len)
180 continue;
181
182 ptr = carveout->va + offset;
183
184 break;
185 }
186
187 return ptr;
188}
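/*
 * Illustrative example (hypothetical addresses, not part of the original
 * file): with a carveout allocated at device address 0x80000000, kernel
 * virtual address 'va' and length 0x100000, a lookup such as
 *
 *	ptr = rproc_da_to_va(rproc, 0x80001000, 0x200);
 *
 * returns 'va + 0x1000', because the requested window falls entirely inside
 * that carveout. Requests starting below the carveout, or running past its
 * end, simply fall through to the next carveout (or yield NULL).
 */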
189
190/**
191 * rproc_load_segments() - load firmware segments to memory
192 * @rproc: remote processor which will be booted using these fw segments
193 * @elf_data: the content of the ELF firmware image
194 * @len: firmware size (in bytes)
195 *
196 * This function loads the firmware segments to memory, where the remote
197 * processor expects them.
198 *
199 * Some remote processors will expect their code and data to be placed
200 * in specific device addresses, and can't have them dynamically assigned.
201 *
202 * We currently support only those kind of remote processors, and expect
203 * the program header's paddr member to contain those addresses. We then go
204 * through the physically contiguous "carveout" memory regions which we
205 * allocated (and mapped) earlier on behalf of the remote processor,
206 * and "translate" device address to kernel addresses, so we can copy the
207 * segments where they are expected.
208 *
209 * Currently we only support remote processors that require carveout
210 * allocations and have them mapped onto their iommus. Some processors
211 * might be different: they might not have iommus, and would prefer to
212 * directly allocate memory for every segment/resource. This is not yet
213 * supported, though.
214 */
215static int
216rproc_load_segments(struct rproc *rproc, const u8 *elf_data, size_t len)
217{
218 struct device *dev = rproc->dev;
219 struct elf32_hdr *ehdr;
220 struct elf32_phdr *phdr;
221 int i, ret = 0;
222
223 ehdr = (struct elf32_hdr *)elf_data;
224 phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff);
225
226 /* go through the available ELF segments */
227 for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
228 u32 da = phdr->p_paddr;
229 u32 memsz = phdr->p_memsz;
230 u32 filesz = phdr->p_filesz;
231 u32 offset = phdr->p_offset;
232 void *ptr;
233
234 if (phdr->p_type != PT_LOAD)
235 continue;
236
237 dev_dbg(dev, "phdr: type %d da 0x%x memsz 0x%x filesz 0x%x\n",
238 phdr->p_type, da, memsz, filesz);
239
240 if (filesz > memsz) {
241 dev_err(dev, "bad phdr filesz 0x%x memsz 0x%x\n",
242 filesz, memsz);
243 ret = -EINVAL;
244 break;
245 }
246
247 if (offset + filesz > len) {
248 dev_err(dev, "truncated fw: need 0x%x avail 0x%x\n",
249 offset + filesz, len);
250 ret = -EINVAL;
251 break;
252 }
253
254 /* grab the kernel address for this device address */
255 ptr = rproc_da_to_va(rproc, da, memsz);
256 if (!ptr) {
257 dev_err(dev, "bad phdr da 0x%x mem 0x%x\n", da, memsz);
258 ret = -EINVAL;
259 break;
260 }
261
262 /* put the segment where the remote processor expects it */
263 if (phdr->p_filesz)
264 memcpy(ptr, elf_data + phdr->p_offset, filesz);
265
266 /*
267 * Zero out remaining memory for this segment.
268 *
269 * This isn't strictly required since dma_alloc_coherent already
270 * did this for us. While harmless, we may consider removing
271 * this.
272 */
273 if (memsz > filesz)
274 memset(ptr + filesz, 0, memsz - filesz);
275 }
276
277 return ret;
278}
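/*
 * Illustrative example (hypothetical firmware image): the loader above takes
 * the device address from the PhysAddr (p_paddr) column of the program
 * headers, e.g. as printed by "readelf -l firmware.elf":
 *
 *	Type  Offset   VirtAddr   PhysAddr   FileSiz  MemSiz   Flg  Align
 *	LOAD  0x001000 0x00000000 0x80000000 0x10000  0x14000  RWE  0x1000
 *
 * Such a segment is copied to the kernel mapping of device address
 * 0x80000000 (obtained via rproc_da_to_va()), and the trailing
 * memsz - filesz = 0x4000 bytes are zeroed out.
 */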
279
280/**
281 * rproc_handle_virtio_hdr() - handle a virtio header resource
282 * @rproc: the remote processor
283 * @rsc: the resource descriptor
284 *
285 * The existence of this virtio hdr resource entry means that the firmware
286 * of this @rproc supports this virtio device.
287 *
288 * Currently we support only a single virtio device of type VIRTIO_ID_RPMSG,
289 * but the plan is to remove this limitation and support any number
290 * of virtio devices (and of any type). We'll also add support for dynamically
291 * adding (and removing) virtio devices over the rpmsg bus, but small
292 * firmwares that don't want to get involved with rpmsg will be able
293 * to simply use the resource table for this.
294 *
295 * At this point this virtio header entry is rather simple: it just
296 * announces the virtio device id and the supported virtio device features.
297 * The plan though is to extend this to include the vring information and
298 * the virtio config space, too (but first, some resource table overhaul
299 * is needed: move from fixed-sized to variable-length TLV entries).
300 *
301 * For now, the 'flags' member of the resource entry contains the virtio
302 * device id, the 'da' member contains the device features, and 'pa' is
303 * where we need to store the guest features once negotiation completes.
304 * As usual, the 'id' member of this resource contains the index of this
305 * resource type (i.e. is this the first virtio hdr entry, the 2nd, ...).
306 *
307 * Returns 0 on success, or an appropriate error code otherwise
308 */
309static int rproc_handle_virtio_hdr(struct rproc *rproc, struct fw_resource *rsc)
310{
311 struct rproc_vdev *rvdev;
312
313 /* we only support VIRTIO_ID_RPMSG devices for now */
314 if (rsc->flags != VIRTIO_ID_RPMSG) {
315 dev_warn(rproc->dev, "unsupported vdev: %d\n", rsc->flags);
316 return -EINVAL;
317 }
318
319 /* we only support a single vdev per rproc for now */
320 if (rsc->id || rproc->rvdev) {
321 dev_warn(rproc->dev, "redundant vdev entry: %s\n", rsc->name);
322 return -EINVAL;
323 }
324
325 rvdev = kzalloc(sizeof(struct rproc_vdev), GFP_KERNEL);
326 if (!rvdev)
327 return -ENOMEM;
328
329 /* remember the device features */
330 rvdev->dfeatures = rsc->da;
331
332 rproc->rvdev = rvdev;
333 rvdev->rproc = rproc;
334
335 return 0;
336}
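/*
 * Illustrative sketch (hypothetical values, not taken from a real firmware):
 * given the encoding described above, a firmware announcing one rpmsg virtio
 * device would carry a resource entry along these lines, with the virtio
 * device id in 'flags' and the device features bitmap in 'da':
 *
 *	struct fw_resource vdev_rsc = {
 *		.type	= RSC_VIRTIO_DEV,
 *		.id	= 0,
 *		.flags	= VIRTIO_ID_RPMSG,
 *		.da	= 1,
 *		.name	= "rpmsg-vdev",
 *	};
 */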
337
338/**
339 * rproc_handle_vring() - handle a vring fw resource
340 * @rproc: the remote processor
341 * @rsc: the vring resource descriptor
342 *
343 * This resource entry requires allocation of non-cacheable memory
344 * for a virtio vring. Currently we only support two vrings per remote
345 * processor, required for the virtio rpmsg device.
346 *
347 * The 'len' member of @rsc should contain the number of buffers this vring
348 * supports, and 'da' should either contain the device address where
349 * the remote processor is expecting the vring, or indicate that
350 * dynamic allocation of the vring's device address is supported.
351 *
352 * Note: 'da' is currently not handled. This will be revised when the generic
353 * iommu-based DMA API arrives, or a dynamic & non-iommu use case shows
354 * up. Meanwhile, statically-addressed iommu-based images should use
355 * RSC_DEVMEM resource entries to map their required 'da' to the physical
356 * address of their base CMA region.
357 *
358 * Returns 0 on success, or an appropriate error code otherwise
359 */
360static int rproc_handle_vring(struct rproc *rproc, struct fw_resource *rsc)
361{
362 struct device *dev = rproc->dev;
363 struct rproc_vdev *rvdev = rproc->rvdev;
364 dma_addr_t dma;
365 int size, id = rsc->id;
366 void *va;
367
368 /* no vdev is in place ? */
369 if (!rvdev) {
370 dev_err(dev, "vring requested without a virtio dev entry\n");
371 return -EINVAL;
372 }
373
374 /* the firmware must provide the expected queue size */
375 if (!rsc->len) {
376 dev_err(dev, "missing expected queue size\n");
377 return -EINVAL;
378 }
379
380 /* we currently support two vrings per rproc (for rx and tx) */
381 if (id >= ARRAY_SIZE(rvdev->vring)) {
382 dev_err(dev, "%s: invalid vring id %d\n", rsc->name, id);
383 return -EINVAL;
384 }
385
386 /* have we already allocated this vring id ? */
387 if (rvdev->vring[id].len) {
388 dev_err(dev, "%s: duplicated id %d\n", rsc->name, id);
389 return -EINVAL;
390 }
391
392 /* actual size of vring (in bytes) */
393 size = PAGE_ALIGN(vring_size(rsc->len, AMP_VRING_ALIGN));
394
395 /*
396 * Allocate non-cacheable memory for the vring. In the future
397 * this call will also configure the IOMMU for us
398 */
399 va = dma_alloc_coherent(dev, size, &dma, GFP_KERNEL);
400 if (!va) {
401 dev_err(dev, "dma_alloc_coherent failed\n");
402 return -ENOMEM;
403 }
404
405 dev_dbg(dev, "vring%d: va %p dma %x qsz %d ring size %x\n", id, va,
406 dma, rsc->len, size);
407
408 rvdev->vring[id].len = rsc->len;
409 rvdev->vring[id].va = va;
410 rvdev->vring[id].dma = dma;
411
412 return 0;
413}
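/*
 * Illustrative sketch (hypothetical values): the rpmsg vdev above is normally
 * accompanied by two vring entries, one per direction, where 'id' selects the
 * vring and 'len' is the number of buffers it supports:
 *
 *	{ .type = RSC_VRING, .id = 0, .len = 512, .name = "vring-tx" },
 *	{ .type = RSC_VRING, .id = 1, .len = 512, .name = "vring-rx" },
 *
 * For each entry, the handler above allocates
 * PAGE_ALIGN(vring_size(512, AMP_VRING_ALIGN)) bytes of coherent memory and
 * records the resulting va/dma pair in rvdev->vring[id].
 */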
414
415/**
416 * rproc_handle_trace() - handle a shared trace buffer resource
417 * @rproc: the remote processor
418 * @rsc: the trace resource descriptor
419 *
420 * In case the remote processor dumps trace logs into memory,
421 * export it via debugfs.
422 *
423 * Currently, the 'da' member of @rsc should contain the device address
424 * where the remote processor is dumping the traces. Later we could also
425 * support dynamically allocating this address using the generic
426 * DMA API (but currently there isn't a use case for that).
427 *
428 * Returns 0 on success, or an appropriate error code otherwise
429 */
430static int rproc_handle_trace(struct rproc *rproc, struct fw_resource *rsc)
431{
432 struct rproc_mem_entry *trace;
433 struct device *dev = rproc->dev;
434 void *ptr;
435 char name[15];
436
437 /* what's the kernel address of this resource ? */
438 ptr = rproc_da_to_va(rproc, rsc->da, rsc->len);
439 if (!ptr) {
440 dev_err(dev, "erroneous trace resource entry\n");
441 return -EINVAL;
442 }
443
444 trace = kzalloc(sizeof(*trace), GFP_KERNEL);
445 if (!trace) {
446 dev_err(dev, "kzalloc trace failed\n");
447 return -ENOMEM;
448 }
449
450 /* set the trace buffer dma properties */
451 trace->len = rsc->len;
452 trace->va = ptr;
453
454 /* make sure snprintf always null terminates, even if truncating */
455 snprintf(name, sizeof(name), "trace%d", rproc->num_traces);
456
457 /* create the debugfs entry */
458 trace->priv = rproc_create_trace_file(name, rproc, trace);
459 if (!trace->priv) {
460 trace->va = NULL;
461 kfree(trace);
462 return -EINVAL;
463 }
464
465 list_add_tail(&trace->node, &rproc->traces);
466
467 rproc->num_traces++;
468
469 dev_dbg(dev, "%s added: va %p, da 0x%llx, len 0x%x\n", name, ptr,
470 rsc->da, rsc->len);
471
472 return 0;
473}
474
475/**
476 * rproc_handle_devmem() - handle devmem resource entry
477 * @rproc: remote processor handle
478 * @rsc: the devmem resource entry
479 *
480 * Remote processors commonly need to access certain on-chip peripherals.
481 *
482 * Some of these remote processors access memory via an iommu device,
483 * and might require us to configure their iommu before they can access
484 * the on-chip peripherals they need.
485 *
486 * This resource entry is a request to map such a peripheral device.
487 *
488 * These devmem entries will contain the physical address of the device in
489 * the 'pa' member. If a specific device address is expected, then 'da' will
490 * contain it (currently this is the only use case supported). 'len' will
491 * contain the size of the physical region we need to map.
492 *
493 * Currently we just "trust" those devmem entries to contain valid physical
494 * addresses, but this is going to change: we want the implementations to
495 * tell us ranges of physical addresses the firmware is allowed to request,
496 * and not allow firmwares to request access to physical addresses that
497 * are outside those ranges.
498 */
499static int rproc_handle_devmem(struct rproc *rproc, struct fw_resource *rsc)
500{
501 struct rproc_mem_entry *mapping;
502 int ret;
503
504 /* no point in handling this resource without a valid iommu domain */
505 if (!rproc->domain)
506 return -EINVAL;
507
508 mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
509 if (!mapping) {
510 dev_err(rproc->dev, "kzalloc mapping failed\n");
511 return -ENOMEM;
512 }
513
514 ret = iommu_map(rproc->domain, rsc->da, rsc->pa, rsc->len, rsc->flags);
515 if (ret) {
516 dev_err(rproc->dev, "failed to map devmem: %d\n", ret);
517 goto out;
518 }
519
520 /*
521 * We'll need this info later when we'll want to unmap everything
522 * (e.g. on shutdown).
523 *
524 * We can't trust the remote processor not to change the resource
525 * table, so we must maintain this info independently.
526 */
527 mapping->da = rsc->da;
528 mapping->len = rsc->len;
529 list_add_tail(&mapping->node, &rproc->mappings);
530
531 dev_dbg(rproc->dev, "mapped devmem pa 0x%llx, da 0x%llx, len 0x%x\n",
532 rsc->pa, rsc->da, rsc->len);
533
534 return 0;
535
536out:
537 kfree(mapping);
538 return ret;
539}
540
541/**
542 * rproc_handle_carveout() - handle phys contig memory allocation requests
543 * @rproc: rproc handle
544 * @rsc: the resource entry
545 *
546 * This function will handle firmware requests for allocation of physically
547 * contiguous memory regions.
548 *
549 * These request entries should come first in the firmware's resource table,
550 * as other firmware entries might request placing other data objects inside
551 * these memory regions (e.g. data/code segments, trace resource entries, ...).
552 *
553 * Allocating memory this way helps utilizing the reserved physical memory
554 * (e.g. CMA) more efficiently, and also minimizes the number of TLB entries
555 * needed to map it (in case @rproc is using an IOMMU). Reducing the TLB
556 * pressure is important; it may have a substantial impact on performance.
557 */
558static int rproc_handle_carveout(struct rproc *rproc, struct fw_resource *rsc)
559{
560 struct rproc_mem_entry *carveout, *mapping;
561 struct device *dev = rproc->dev;
562 dma_addr_t dma;
563 void *va;
564 int ret;
565
566 mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
567 if (!mapping) {
568 dev_err(dev, "kzalloc mapping failed\n");
569 return -ENOMEM;
570 }
571
572 carveout = kzalloc(sizeof(*carveout), GFP_KERNEL);
573 if (!carveout) {
574 dev_err(dev, "kzalloc carveout failed\n");
575 ret = -ENOMEM;
576 goto free_mapping;
577 }
578
579 va = dma_alloc_coherent(dev, rsc->len, &dma, GFP_KERNEL);
580 if (!va) {
581 dev_err(dev, "failed to dma alloc carveout: %d\n", rsc->len);
582 ret = -ENOMEM;
583 goto free_carv;
584 }
585
586 dev_dbg(dev, "carveout va %p, dma %x, len 0x%x\n", va, dma, rsc->len);
587
588 /*
589 * Ok, this is non-standard.
590 *
591 * Sometimes we can't rely on the generic iommu-based DMA API
592 * to dynamically allocate the device address and then set the IOMMU
593 * tables accordingly, because some remote processors might
594 * _require_ us to use hard coded device addresses that their
595 * firmware was compiled with.
596 *
597 * In this case, we must use the IOMMU API directly and map
598 * the memory to the device address as expected by the remote
599 * processor.
600 *
601 * Obviously such remote processor devices should not be configured
602 * to use the iommu-based DMA API: we expect 'dma' to contain the
603 * physical address in this case.
604 */
605 if (rproc->domain) {
606 ret = iommu_map(rproc->domain, rsc->da, dma, rsc->len,
607 rsc->flags);
608 if (ret) {
609 dev_err(dev, "iommu_map failed: %d\n", ret);
610 goto dma_free;
611 }
612
613 /*
614 * We'll need this info later when we'll want to unmap
615 * everything (e.g. on shutdown).
616 *
617 * We can't trust the remote processor not to change the
618 * resource table, so we must maintain this info independently.
619 */
620 mapping->da = rsc->da;
621 mapping->len = rsc->len;
622 list_add_tail(&mapping->node, &rproc->mappings);
623
624 dev_dbg(dev, "carveout mapped 0x%llx to 0x%x\n", rsc->da, dma);
625
626 /*
627 * Some remote processors might need to know the pa
628 * even though they are behind an IOMMU. E.g., OMAP4's
629 * remote M3 processor needs this so it can control
630 * on-chip hardware accelerators that are not behind
631 * the IOMMU, and therefore must know the pa.
632 *
633 * Generally we don't want to expose physical addresses
634 * if we don't have to (remote processors are generally
635 * _not_ trusted), so we might want to do this only for
636 * remote processors that _must_ have this (e.g. OMAP4's
637 * dual M3 subsystem).
638 */
639 rsc->pa = dma;
640 }
641
642 carveout->va = va;
643 carveout->len = rsc->len;
644 carveout->dma = dma;
645 carveout->da = rsc->da;
646
647 list_add_tail(&carveout->node, &rproc->carveouts);
648
649 return 0;
650
651dma_free:
652 dma_free_coherent(dev, rsc->len, va, dma);
653free_carv:
654 kfree(carveout);
655free_mapping:
656 kfree(mapping);
657 return ret;
658}
659
660/* handle firmware resource entries before booting the remote processor */
661static int
662rproc_handle_boot_rsc(struct rproc *rproc, struct fw_resource *rsc, int len)
663{
664 struct device *dev = rproc->dev;
665 int ret = 0;
666
667 while (len >= sizeof(*rsc)) {
668 dev_dbg(dev, "rsc: type %d, da 0x%llx, pa 0x%llx, len 0x%x, "
669 "id %d, name %s, flags %x\n", rsc->type, rsc->da,
670 rsc->pa, rsc->len, rsc->id, rsc->name, rsc->flags);
671
672 switch (rsc->type) {
673 case RSC_CARVEOUT:
674 ret = rproc_handle_carveout(rproc, rsc);
675 break;
676 case RSC_DEVMEM:
677 ret = rproc_handle_devmem(rproc, rsc);
678 break;
679 case RSC_TRACE:
680 ret = rproc_handle_trace(rproc, rsc);
681 break;
682 case RSC_VRING:
683 ret = rproc_handle_vring(rproc, rsc);
684 break;
685 case RSC_VIRTIO_DEV:
686 /* this one is handled early upon registration */
687 break;
688 default:
689 dev_warn(dev, "unsupported resource %d\n", rsc->type);
690 break;
691 }
692
693 if (ret)
694 break;
695
696 rsc++;
697 len -= sizeof(*rsc);
698 }
699
700 return ret;
701}
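/*
 * Illustrative sketch (hypothetical addresses and sizes): a boot-time
 * resource table walked by the loop above could look roughly like this,
 * with the carveout listed first so that later entries (trace buffer,
 * code/data segments) can be placed inside it:
 *
 *	{ .type = RSC_CARVEOUT, .da = 0x80000000, .len = 0x100000 },
 *	{ .type = RSC_DEVMEM,   .da = 0x60000000, .pa = 0x4a000000, .len = 0x10000 },
 *	{ .type = RSC_TRACE,    .da = 0x800f0000, .len = 0x8000, .name = "trace0" },
 *	{ .type = RSC_VRING,    .id = 0, .len = 512 },
 *	{ .type = RSC_VRING,    .id = 1, .len = 512 },
 */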
702
703/* handle firmware resource entries while registering the remote processor */
704static int
705rproc_handle_virtio_rsc(struct rproc *rproc, struct fw_resource *rsc, int len)
706{
707 struct device *dev = rproc->dev;
708 int ret = -ENODEV;
709
710 for (; len >= sizeof(*rsc); rsc++, len -= sizeof(*rsc))
711 if (rsc->type == RSC_VIRTIO_DEV) {
712 dev_dbg(dev, "found vdev %d/%s features %llx\n",
713 rsc->flags, rsc->name, rsc->da);
714 ret = rproc_handle_virtio_hdr(rproc, rsc);
715 break;
716 }
717
718 return ret;
719}
720
721/**
722 * rproc_handle_resources() - find and handle the resource table
723 * @rproc: the rproc handle
724 * @elf_data: the content of the ELF firmware image
725 * @len: firmware size (in bytes)
726 * @handler: function that should be used to handle the resource table
727 *
728 * This function finds the resource table inside the remote processor's
729 * firmware, and invokes a user-supplied handler with it (we have two
730 * possible handlers: one is invoked upon registration of @rproc,
731 * in order to register the supported virtio devices, and the other is
732 * invoked when @rproc is actually booted).
733 *
734 * Currently this function fails if a resource table doesn't exist.
735 * This restriction will be removed when we start supporting remote
736 * processors that don't need a resource table.
737 */
738static int rproc_handle_resources(struct rproc *rproc, const u8 *elf_data,
739 size_t len, rproc_handle_resources_t handler)
740
741{
742 struct elf32_hdr *ehdr;
743 struct elf32_shdr *shdr;
744 const char *name_table;
745 int i, ret = -EINVAL;
746
747 ehdr = (struct elf32_hdr *)elf_data;
748 shdr = (struct elf32_shdr *)(elf_data + ehdr->e_shoff);
749 name_table = elf_data + shdr[ehdr->e_shstrndx].sh_offset;
750
751 /* look for the resource table and handle it */
752 for (i = 0; i < ehdr->e_shnum; i++, shdr++) {
753 if (!strcmp(name_table + shdr->sh_name, ".resource_table")) {
754 struct fw_resource *table = (struct fw_resource *)
755 (elf_data + shdr->sh_offset);
756
757 if (shdr->sh_offset + shdr->sh_size > len) {
758 dev_err(rproc->dev,
759 "truncated fw: need 0x%x avail 0x%x\n",
760 shdr->sh_offset + shdr->sh_size, len);
761 return -EINVAL;
762 }
763
764 ret = handler(rproc, table, shdr->sh_size);
765
766 break;
767 }
768 }
769
770 return ret;
771}
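/*
 * Illustrative sketch (firmware side, hypothetical): since the table is
 * located purely by ELF section name, firmware images typically just place
 * an array of resource entries in a dedicated section, e.g. with GCC:
 *
 *	struct fw_resource rsc_table[]
 *		__attribute__((section(".resource_table"))) = {
 *		... entries such as the ones sketched above ...
 *	};
 *
 * together with a linker script rule that keeps the ".resource_table"
 * section in the final image.
 */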
772
773/**
774 * rproc_resource_cleanup() - clean up and free all acquired resources
775 * @rproc: rproc handle
776 *
777 * This function will free all resources acquired for @rproc, and it
778 * is called when @rproc shuts down, or just failed booting.
779 */
780static void rproc_resource_cleanup(struct rproc *rproc)
781{
782 struct rproc_mem_entry *entry, *tmp;
783 struct device *dev = rproc->dev;
784 struct rproc_vdev *rvdev = rproc->rvdev;
785 int i;
786
787 /* clean up debugfs trace entries */
788 list_for_each_entry_safe(entry, tmp, &rproc->traces, node) {
789 rproc_remove_trace_file(entry->priv);
790 rproc->num_traces--;
791 list_del(&entry->node);
792 kfree(entry);
793 }
794
795 /* free the coherent memory allocated for the vrings */
796 for (i = 0; rvdev && i < ARRAY_SIZE(rvdev->vring); i++) {
797 int qsz = rvdev->vring[i].len;
798 void *va = rvdev->vring[i].va;
799 int dma = rvdev->vring[i].dma;
800
801 /* virtqueue size is expressed in number of buffers supported */
802 if (qsz) {
803 /* how many bytes does this vring really occupy ? */
804 int size = PAGE_ALIGN(vring_size(qsz, AMP_VRING_ALIGN));
805
806 dma_free_coherent(rproc->dev, size, va, dma);
807
808 rvdev->vring[i].len = 0;
809 }
810 }
811
812 /* clean up carveout allocations */
813 list_for_each_entry_safe(entry, tmp, &rproc->carveouts, node) {
814 dma_free_coherent(dev, entry->len, entry->va, entry->dma);
815 list_del(&entry->node);
816 kfree(entry);
817 }
818
819 /* clean up iommu mapping entries */
820 list_for_each_entry_safe(entry, tmp, &rproc->mappings, node) {
821 size_t unmapped;
822
823 unmapped = iommu_unmap(rproc->domain, entry->da, entry->len);
824 if (unmapped != entry->len) {
825 /* nothing much to do besides complaining */
826 dev_err(dev, "failed to unmap %u/%u\n", entry->len,
827 unmapped);
828 }
829
830 list_del(&entry->node);
831 kfree(entry);
832 }
833}
834
835/* make sure this fw image is sane */
836static int rproc_fw_sanity_check(struct rproc *rproc, const struct firmware *fw)
837{
838 const char *name = rproc->firmware;
839 struct device *dev = rproc->dev;
840 struct elf32_hdr *ehdr;
841
842 if (!fw) {
843 dev_err(dev, "failed to load %s\n", name);
844 return -EINVAL;
845 }
846
847 if (fw->size < sizeof(struct elf32_hdr)) {
848 dev_err(dev, "Image is too small\n");
849 return -EINVAL;
850 }
851
852 ehdr = (struct elf32_hdr *)fw->data;
853
854 if (fw->size < ehdr->e_shoff + sizeof(struct elf32_shdr)) {
855 dev_err(dev, "Image is too small\n");
856 return -EINVAL;
857 }
858
859 if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
860 dev_err(dev, "Image is corrupted (bad magic)\n");
861 return -EINVAL;
862 }
863
864 if (ehdr->e_phnum == 0) {
865 dev_err(dev, "No loadable segments\n");
866 return -EINVAL;
867 }
868
869 if (ehdr->e_phoff > fw->size) {
870 dev_err(dev, "Firmware size is too small\n");
871 return -EINVAL;
872 }
873
874 return 0;
875}
876
877/*
878 * take a firmware and boot a remote processor with it.
879 */
880static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
881{
882 struct device *dev = rproc->dev;
883 const char *name = rproc->firmware;
884 struct elf32_hdr *ehdr;
885 int ret;
886
887 ret = rproc_fw_sanity_check(rproc, fw);
888 if (ret)
889 return ret;
890
891 ehdr = (struct elf32_hdr *)fw->data;
892
893 dev_info(dev, "Booting fw image %s, size %zd\n", name, fw->size);
894
895 /*
896 * if enabling an IOMMU isn't relevant for this rproc, this is
897 * just a nop
898 */
899 ret = rproc_enable_iommu(rproc);
900 if (ret) {
901 dev_err(dev, "can't enable iommu: %d\n", ret);
902 return ret;
903 }
904
905 /*
906 * The ELF entry point is the rproc's boot addr (though this is not
907 * a configurable property of all remote processors: some will always
908 * boot at a specific hardcoded address).
909 */
910 rproc->bootaddr = ehdr->e_entry;
911
912 /* handle fw resources which are required to boot rproc */
913 ret = rproc_handle_resources(rproc, fw->data, fw->size,
914 rproc_handle_boot_rsc);
915 if (ret) {
916 dev_err(dev, "Failed to process resources: %d\n", ret);
917 goto clean_up;
918 }
919
920 /* load the ELF segments to memory */
921 ret = rproc_load_segments(rproc, fw->data, fw->size);
922 if (ret) {
923 dev_err(dev, "Failed to load program segments: %d\n", ret);
924 goto clean_up;
925 }
926
927 /* power up the remote processor */
928 ret = rproc->ops->start(rproc);
929 if (ret) {
930 dev_err(dev, "can't start rproc %s: %d\n", rproc->name, ret);
931 goto clean_up;
932 }
933
934 rproc->state = RPROC_RUNNING;
935
936 dev_info(dev, "remote processor %s is now up\n", rproc->name);
937
938 return 0;
939
940clean_up:
941 rproc_resource_cleanup(rproc);
942 rproc_disable_iommu(rproc);
943 return ret;
944}
945
946/*
947 * take a firmware and look for virtio devices to register.
948 *
949 * Note: this function is called asynchronously upon registration of the
950 * remote processor (so we must wait until it completes before we try
951 * to unregister the device; another option is just to use kref here,
952 * which might be cleaner).
953 */
954static void rproc_fw_config_virtio(const struct firmware *fw, void *context)
955{
956 struct rproc *rproc = context;
957 struct device *dev = rproc->dev;
958 int ret;
959
960 if (rproc_fw_sanity_check(rproc, fw) < 0)
961 goto out;
962
963 /* does the fw support any virtio devices ? */
964 ret = rproc_handle_resources(rproc, fw->data, fw->size,
965 rproc_handle_virtio_rsc);
966 if (ret) {
967 dev_info(dev, "No fw virtio device was found\n");
968 goto out;
969 }
970
971 /* add the virtio device (currently only rpmsg vdevs are supported) */
972 ret = rproc_add_rpmsg_vdev(rproc);
973 if (ret)
974 goto out;
975
976out:
977 if (fw)
978 release_firmware(fw);
979 /* allow rproc_unregister() contexts, if any, to proceed */
980 complete_all(&rproc->firmware_loading_complete);
981}
982
983/**
984 * rproc_boot() - boot a remote processor
985 * @rproc: handle of a remote processor
986 *
987 * Boot a remote processor (i.e. load its firmware, power it on, ...).
988 *
989 * If the remote processor is already powered on, this function immediately
990 * returns (successfully).
991 *
992 * Returns 0 on success, and an appropriate error value otherwise.
993 */
994int rproc_boot(struct rproc *rproc)
995{
996 const struct firmware *firmware_p;
997 struct device *dev;
998 int ret;
999
1000 if (!rproc) {
1001 pr_err("invalid rproc handle\n");
1002 return -EINVAL;
1003 }
1004
1005 dev = rproc->dev;
1006
1007 ret = mutex_lock_interruptible(&rproc->lock);
1008 if (ret) {
1009 dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
1010 return ret;
1011 }
1012
1013 /* loading a firmware is required */
1014 if (!rproc->firmware) {
1015 dev_err(dev, "%s: no firmware to load\n", __func__);
1016 ret = -EINVAL;
1017 goto unlock_mutex;
1018 }
1019
1020 /* prevent underlying implementation from being removed */
1021 if (!try_module_get(dev->driver->owner)) {
1022 dev_err(dev, "%s: can't get owner\n", __func__);
1023 ret = -EINVAL;
1024 goto unlock_mutex;
1025 }
1026
1027 /* skip the boot process if rproc is already powered up */
1028 if (atomic_inc_return(&rproc->power) > 1) {
1029 ret = 0;
1030 goto unlock_mutex;
1031 }
1032
1033 dev_info(dev, "powering up %s\n", rproc->name);
1034
1035 /* load firmware */
1036 ret = request_firmware(&firmware_p, rproc->firmware, dev);
1037 if (ret < 0) {
1038 dev_err(dev, "request_firmware failed: %d\n", ret);
1039 goto downref_rproc;
1040 }
1041
1042 ret = rproc_fw_boot(rproc, firmware_p);
1043
1044 release_firmware(firmware_p);
1045
1046downref_rproc:
1047 if (ret) {
1048 module_put(dev->driver->owner);
1049 atomic_dec(&rproc->power);
1050 }
1051unlock_mutex:
1052 mutex_unlock(&rproc->lock);
1053 return ret;
1054}
1055EXPORT_SYMBOL(rproc_boot);
1056
1057/**
1058 * rproc_shutdown() - power off the remote processor
1059 * @rproc: the remote processor
1060 *
1061 * Power off a remote processor (previously booted with rproc_boot()).
1062 *
1063 * In case @rproc is still being used by an additional user(s), then
1064 * this function will just decrement the power refcount and exit,
1065 * without really powering off the device.
1066 *
1067 * Every call to rproc_boot() must (eventually) be accompanied by a call
1068 * to rproc_shutdown(). Calling rproc_shutdown() redundantly is a bug.
1069 *
1070 * Notes:
1071 * - we're not decrementing the rproc's refcount, only the power refcount,
1072 * which means that the @rproc handle stays valid even after rproc_shutdown()
1073 * returns, and users can still use it with a subsequent rproc_boot(), if
1074 * needed.
1075 * - don't call rproc_shutdown() to unroll rproc_get_by_name(), exactly
1076 * because rproc_shutdown() _does not_ decrement the refcount of @rproc.
1077 * To decrement the refcount of @rproc, use rproc_put() (but _only_ if
1078 * you acquired @rproc using rproc_get_by_name()).
1079 */
1080void rproc_shutdown(struct rproc *rproc)
1081{
1082 struct device *dev = rproc->dev;
1083 int ret;
1084
1085 ret = mutex_lock_interruptible(&rproc->lock);
1086 if (ret) {
1087 dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
1088 return;
1089 }
1090
1091 /* if the remote proc is still needed, bail out */
1092 if (!atomic_dec_and_test(&rproc->power))
1093 goto out;
1094
1095 /* power off the remote processor */
1096 ret = rproc->ops->stop(rproc);
1097 if (ret) {
1098 atomic_inc(&rproc->power);
1099 dev_err(dev, "can't stop rproc: %d\n", ret);
1100 goto out;
1101 }
1102
1103 /* clean up all acquired resources */
1104 rproc_resource_cleanup(rproc);
1105
1106 rproc_disable_iommu(rproc);
1107
1108 rproc->state = RPROC_OFFLINE;
1109
1110 dev_info(dev, "stopped remote processor %s\n", rproc->name);
1111
1112out:
1113 mutex_unlock(&rproc->lock);
1114 if (!ret)
1115 module_put(dev->driver->owner);
1116}
1117EXPORT_SYMBOL(rproc_shutdown);
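/*
 * Illustrative usage sketch (hypothetical caller; 'my_rproc' is assumed to
 * be a valid handle): a driver that temporarily needs the remote processor
 * pairs the two calls above:
 *
 *	ret = rproc_boot(my_rproc);
 *	if (ret)
 *		return ret;
 *
 *	... communicate with the remote processor ...
 *
 *	rproc_shutdown(my_rproc);
 *
 * Both calls are refcounted, so nested users are fine as long as every
 * rproc_boot() is eventually matched by exactly one rproc_shutdown().
 */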
1118
1119/**
1120 * rproc_release() - completely deletes the existence of a remote processor
1121 * @kref: the rproc's kref
1122 *
1123 * This function should _never_ be called directly.
1124 *
1125 * The only reasonable location to use it is as an argument when kref_put'ing
1126 * @rproc's refcount.
1127 *
1128 * This way it will be called when no one holds a valid pointer to this @rproc
1129 * anymore (and obviously after it is removed from the rprocs klist).
1130 *
1131 * Note: this function is not static because rproc_vdev_release() needs it when
1132 * it decrements @rproc's refcount.
1133 */
1134void rproc_release(struct kref *kref)
1135{
1136 struct rproc *rproc = container_of(kref, struct rproc, refcount);
1137
1138 dev_info(rproc->dev, "removing %s\n", rproc->name);
1139
1140 rproc_delete_debug_dir(rproc);
1141
1142 /* at this point no one holds a reference to rproc anymore */
1143 kfree(rproc);
1144}
1145
1146/* will be called when an rproc is added to the rprocs klist */
1147static void klist_rproc_get(struct klist_node *n)
1148{
1149 struct rproc *rproc = container_of(n, struct rproc, node);
1150
1151 kref_get(&rproc->refcount);
1152}
1153
1154/* will be called when an rproc is removed from the rprocs klist */
1155static void klist_rproc_put(struct klist_node *n)
1156{
1157 struct rproc *rproc = container_of(n, struct rproc, node);
1158
1159 kref_put(&rproc->refcount, rproc_release);
1160}
1161
1162static struct rproc *next_rproc(struct klist_iter *i)
1163{
1164 struct klist_node *n;
1165
1166 n = klist_next(i);
1167 if (!n)
1168 return NULL;
1169
1170 return container_of(n, struct rproc, node);
1171}
1172
1173/**
1174 * rproc_get_by_name() - find a remote processor by name and boot it
1175 * @name: name of the remote processor
1176 *
1177 * Finds an rproc handle using the remote processor's name, and then
1178 * boot it. If it's already powered on, then just immediately return
1179 * (successfully).
1180 *
1181 * Returns the rproc handle on success, and NULL on failure.
1182 *
1183 * This function increments the remote processor's refcount, so always
1184 * use rproc_put() to decrement it back once rproc isn't needed anymore.
1185 *
1186 * Note: currently this function (and its counterpart rproc_put()) are not
1187 * used anymore by the rpmsg subsystem. We need to scrutinize the use cases
1188 * that still need them, and see if we can migrate them to use the non
1189 * name-based boot/shutdown interface.
1190 */
1191struct rproc *rproc_get_by_name(const char *name)
1192{
1193 struct rproc *rproc;
1194 struct klist_iter i;
1195 int ret;
1196
1197 /* find the remote processor, and upref its refcount */
1198 klist_iter_init(&rprocs, &i);
1199 while ((rproc = next_rproc(&i)) != NULL)
1200 if (!strcmp(rproc->name, name)) {
1201 kref_get(&rproc->refcount);
1202 break;
1203 }
1204 klist_iter_exit(&i);
1205
1206 /* can't find this rproc ? */
1207 if (!rproc) {
1208 pr_err("can't find remote processor %s\n", name);
1209 return NULL;
1210 }
1211
1212 ret = rproc_boot(rproc);
1213 if (ret < 0) {
1214 kref_put(&rproc->refcount, rproc_release);
1215 return NULL;
1216 }
1217
1218 return rproc;
1219}
1220EXPORT_SYMBOL(rproc_get_by_name);
1221
1222/**
1223 * rproc_put() - decrement the refcount of a remote processor, and shut it down
1224 * @rproc: the remote processor
1225 *
1226 * This function tries to shutdown @rproc, and it then decrements its
1227 * refcount.
1228 *
1229 * After this function returns, @rproc may _not_ be used anymore, and its
1230 * handle should be considered invalid.
1231 *
1232 * This function should be called _iff_ the @rproc handle was grabbed by
1233 * calling rproc_get_by_name().
1234 */
1235void rproc_put(struct rproc *rproc)
1236{
1237 /* try to power off the remote processor */
1238 rproc_shutdown(rproc);
1239
1240 /* downref rproc's refcount */
1241 kref_put(&rproc->refcount, rproc_release);
1242}
1243EXPORT_SYMBOL(rproc_put);
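/*
 * Illustrative usage sketch (hypothetical caller; the name "ipu" is made
 * up): the name-based interface is always used as a get/put pair:
 *
 *	struct rproc *rproc = rproc_get_by_name("ipu");
 *	if (!rproc)
 *		return -ENODEV;
 *
 *	... use the (already booted) remote processor ...
 *
 *	rproc_put(rproc);
 *
 * rproc_get_by_name() both takes a reference and boots the processor;
 * rproc_put() undoes both, so the two must always be balanced.
 */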
1244
1245/**
1246 * rproc_register() - register a remote processor
1247 * @rproc: the remote processor handle to register
1248 *
1249 * Registers @rproc with the remoteproc framework, after it has been
1250 * allocated with rproc_alloc().
1251 *
1252 * This is called by the platform-specific rproc implementation, whenever
1253 * a new remote processor device is probed.
1254 *
1255 * Returns 0 on success and an appropriate error code otherwise.
1256 *
1257 * Note: this function initiates an asynchronous firmware loading
1258 * context, which will look for virtio devices supported by the rproc's
1259 * firmware.
1260 *
1261 * If found, those virtio devices will be created and added, so as a result
1262 * of registering this remote processor, additional virtio drivers will be
1263 * probed.
1264 *
1265 * Currently, though, we only support a single RPMSG virtio vdev per remote
1266 * processor.
1267 */
1268int rproc_register(struct rproc *rproc)
1269{
1270 struct device *dev = rproc->dev;
1271 int ret = 0;
1272
1273 /* expose to rproc_get_by_name users */
1274 klist_add_tail(&rproc->node, &rprocs);
1275
1276 dev_info(rproc->dev, "%s is available\n", rproc->name);
1277
1278 /* create debugfs entries */
1279 rproc_create_debug_dir(rproc);
1280
1281 /* rproc_unregister() calls must wait until async loader completes */
1282 init_completion(&rproc->firmware_loading_complete);
1283
1284 /*
1285 * We must retrieve early virtio configuration info from
1286 * the firmware (e.g. whether to register a virtio rpmsg device,
1287 * what virtio features it supports, ...).
1288 *
1289 * We're initiating an asynchronous firmware loading, so we can
1290 * be built-in kernel code, without hanging the boot process.
1291 */
1292 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
1293 rproc->firmware, dev, GFP_KERNEL,
1294 rproc, rproc_fw_config_virtio);
1295 if (ret < 0) {
1296 dev_err(dev, "request_firmware_nowait failed: %d\n", ret);
1297 complete_all(&rproc->firmware_loading_complete);
1298 klist_remove(&rproc->node);
1299 }
1300
1301 return ret;
1302}
1303EXPORT_SYMBOL(rproc_register);
1304
1305/**
1306 * rproc_alloc() - allocate a remote processor handle
1307 * @dev: the underlying device
1308 * @name: name of this remote processor
1309 * @ops: platform-specific handlers (mainly start/stop)
1310 * @firmware: name of firmware file to load
1311 * @len: length of private data needed by the rproc driver (in bytes)
1312 *
1313 * Allocates a new remote processor handle, but does not register
1314 * it yet.
1315 *
1316 * This function should be used by rproc implementations during initialization
1317 * of the remote processor.
1318 *
1319 * After creating an rproc handle using this function, and when ready,
1320 * implementations should then call rproc_register() to complete
1321 * the registration of the remote processor.
1322 *
1323 * On success the new rproc is returned, and on failure, NULL.
1324 *
1325 * Note: _never_ directly deallocate @rproc, even if it was not registered
1326 * yet. Instead, if you just need to unroll rproc_alloc(), use rproc_free().
1327 */
1328struct rproc *rproc_alloc(struct device *dev, const char *name,
1329 const struct rproc_ops *ops,
1330 const char *firmware, int len)
1331{
1332 struct rproc *rproc;
1333
1334 if (!dev || !name || !ops)
1335 return NULL;
1336
1337 rproc = kzalloc(sizeof(struct rproc) + len, GFP_KERNEL);
1338 if (!rproc) {
1339 dev_err(dev, "%s: kzalloc failed\n", __func__);
1340 return NULL;
1341 }
1342
1343 rproc->dev = dev;
1344 rproc->name = name;
1345 rproc->ops = ops;
1346 rproc->firmware = firmware;
1347 rproc->priv = &rproc[1];
1348
1349 atomic_set(&rproc->power, 0);
1350
1351 kref_init(&rproc->refcount);
1352
1353 mutex_init(&rproc->lock);
1354
1355 INIT_LIST_HEAD(&rproc->carveouts);
1356 INIT_LIST_HEAD(&rproc->mappings);
1357 INIT_LIST_HEAD(&rproc->traces);
1358
1359 rproc->state = RPROC_OFFLINE;
1360
1361 return rproc;
1362}
1363EXPORT_SYMBOL(rproc_alloc);
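/*
 * Illustrative sketch (hypothetical platform driver; my_rproc_ops,
 * "my-firmware.elf" and struct my_priv are made-up names): a typical probe
 * path allocates and registers the rproc, and unrolls with rproc_free()
 * only if registration never happened:
 *
 *	rproc = rproc_alloc(&pdev->dev, "my-rproc", &my_rproc_ops,
 *			    "my-firmware.elf", sizeof(struct my_priv));
 *	if (!rproc)
 *		return -ENOMEM;
 *
 *	ret = rproc_register(rproc);
 *	if (ret) {
 *		rproc_free(rproc);
 *		return ret;
 *	}
 *
 * The matching remove path calls rproc_unregister() and nothing else.
 */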
1364
1365/**
1366 * rproc_free() - free an rproc handle that was allocated by rproc_alloc
1367 * @rproc: the remote processor handle
1368 *
1369 * This function should _only_ be used if @rproc was only allocated,
1370 * but not registered yet.
1371 *
1372 * If @rproc was already successfully registered (by calling rproc_register()),
1373 * then use rproc_unregister() instead.
1374 */
1375void rproc_free(struct rproc *rproc)
1376{
1377 kfree(rproc);
1378}
1379EXPORT_SYMBOL(rproc_free);
1380
1381/**
1382 * rproc_unregister() - unregister a remote processor
1383 * @rproc: rproc handle to unregister
1384 *
1385 * Unregisters a remote processor, and decrements its refcount.
1386 * If its refcount drops to zero, then @rproc will be freed. If not,
1387 * it will be freed later once the last reference is dropped.
1388 *
1389 * This function should be called when the platform specific rproc
1390 * implementation decides to remove the rproc device. It should
1391 * _only_ be called if a previous invocation of rproc_register()
1392 * has completed successfully.
1393 *
1394 * After rproc_unregister() returns, @rproc is _not_ valid anymore and
1395 * it shouldn't be used. More specifically, don't call rproc_free()
1396 * or try to directly free @rproc after rproc_unregister() returns;
1397 * none of these are needed, and calling them is a bug.
1398 *
1399 * Returns 0 on success and -EINVAL if @rproc isn't valid.
1400 */
1401int rproc_unregister(struct rproc *rproc)
1402{
1403 if (!rproc)
1404 return -EINVAL;
1405
1406 /* if rproc is just being registered, wait */
1407 wait_for_completion(&rproc->firmware_loading_complete);
1408
1409 /* was an rpmsg vdev created ? */
1410 if (rproc->rvdev)
1411 rproc_remove_rpmsg_vdev(rproc);
1412
1413 klist_remove(&rproc->node);
1414
1415 kref_put(&rproc->refcount, rproc_release);
1416
1417 return 0;
1418}
1419EXPORT_SYMBOL(rproc_unregister);
1420
1421static int __init remoteproc_init(void)
1422{
1423 rproc_init_debugfs();
1424 return 0;
1425}
1426module_init(remoteproc_init);
1427
1428static void __exit remoteproc_exit(void)
1429{
1430 rproc_exit_debugfs();
1431}
1432module_exit(remoteproc_exit);
1433
1434MODULE_LICENSE("GPL v2");
1435MODULE_DESCRIPTION("Generic Remote Processor Framework");