Commit | Line | Data |
---|---|---|
6afc0dc3 GL |
1 | /* |
2 | * Support for dynamic device trees. | |
3 | * | |
4 | * On some platforms, the device tree can be manipulated at runtime. | |
5 | * The routines in this section support adding, removing and changing | |
6 | * device tree nodes. | |
7 | */ | |
8 | ||
9 | #include <linux/of.h> | |
10 | #include <linux/spinlock.h> | |
11 | #include <linux/slab.h> | |
12 | #include <linux/string.h> | |
13 | #include <linux/proc_fs.h> | |
14 | ||
15 | #include "of_private.h" | |
16 | ||
17 | /** | |
18 | * of_node_get() - Increment refcount of a node | |
19 | * @node: Node to inc refcount, NULL is supported to simplify writing of | |
20 | * callers | |
21 | * | |
22 | * Returns node. | |
23 | */ | |
24 | struct device_node *of_node_get(struct device_node *node) | |
25 | { | |
26 | if (node) | |
27 | kobject_get(&node->kobj); | |
28 | return node; | |
29 | } | |
30 | EXPORT_SYMBOL(of_node_get); | |
31 | ||
32 | /** | |
33 | * of_node_put() - Decrement refcount of a node | |
34 | * @node: Node to dec refcount, NULL is supported to simplify writing of | |
35 | * callers | |
36 | */ | |
37 | void of_node_put(struct device_node *node) | |
38 | { | |
39 | if (node) | |
40 | kobject_put(&node->kobj); | |
41 | } | |
42 | EXPORT_SYMBOL(of_node_put); | |
43 | ||
44 | static void of_node_remove(struct device_node *np) | |
45 | { | |
46 | struct property *pp; | |
47 | ||
48 | BUG_ON(!of_node_is_initialized(np)); | |
49 | ||
50 | /* only remove properties if on sysfs */ | |
51 | if (of_node_is_attached(np)) { | |
52 | for_each_property_of_node(np, pp) | |
53 | sysfs_remove_bin_file(&np->kobj, &pp->attr); | |
54 | kobject_del(&np->kobj); | |
55 | } | |
56 | ||
57 | /* finally remove the kobj_init ref */ | |
58 | of_node_put(np); | |
59 | } | |
60 | ||
61 | static BLOCKING_NOTIFIER_HEAD(of_reconfig_chain); | |
62 | ||
/**
 * of_reconfig_notifier_register() - Register a device tree reconfig notifier
 * @nb: notifier block to add to the OF reconfig chain
 *
 * The registered block is called for OF_RECONFIG_* events (node attach/
 * detach, property changes) raised through of_reconfig_notify().
 *
 * Returns the result of blocking_notifier_chain_register().
 */
int of_reconfig_notifier_register(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&of_reconfig_chain, nb);
}
EXPORT_SYMBOL_GPL(of_reconfig_notifier_register);
68 | ||
/**
 * of_reconfig_notifier_unregister() - Unregister a device tree reconfig notifier
 * @nb: notifier block previously registered with
 *	of_reconfig_notifier_register()
 *
 * Returns the result of blocking_notifier_chain_unregister().
 */
int of_reconfig_notifier_unregister(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&of_reconfig_chain, nb);
}
EXPORT_SYMBOL_GPL(of_reconfig_notifier_unregister);
74 | ||
75 | int of_reconfig_notify(unsigned long action, void *p) | |
76 | { | |
77 | int rc; | |
78 | ||
79 | rc = blocking_notifier_call_chain(&of_reconfig_chain, action, p); | |
80 | return notifier_to_errno(rc); | |
81 | } | |
82 | ||
83 | int of_property_notify(int action, struct device_node *np, | |
84 | struct property *prop) | |
85 | { | |
86 | struct of_prop_reconfig pr; | |
87 | ||
88 | /* only call notifiers if the node is attached */ | |
89 | if (!of_node_is_attached(np)) | |
90 | return 0; | |
91 | ||
92 | pr.dn = np; | |
93 | pr.prop = prop; | |
94 | return of_reconfig_notify(action, &pr); | |
95 | } | |
96 | ||
/**
 * of_attach_node() - Plug a device node into the tree and global list.
 *
 * Links @np under its (already assigned) parent, both in the parent's
 * child list and in the flat allnext chain, then clears OF_DETACHED.
 *
 * Returns 0 on success, or a negative error if an OF_RECONFIG_ATTACH_NODE
 * notifier vetoes the attach.
 */
int of_attach_node(struct device_node *np)
{
	unsigned long flags;
	int rc;

	/* Let reconfig listeners veto the attach before touching any list */
	rc = of_reconfig_notify(OF_RECONFIG_ATTACH_NODE, np);
	if (rc)
		return rc;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	/* Push np onto the head of the parent's child list ... */
	np->sibling = np->parent->child;
	/* ... and splice it into the global allnext chain right after parent */
	np->allnext = np->parent->allnext;
	np->parent->allnext = np;
	np->parent->child = np;
	of_node_clear_flag(np, OF_DETACHED);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	/* NOTE(review): of_node_add() (of_private.h) presumably registers the
	 * node's kobject in sysfs — implementation not visible here */
	of_node_add(np);
	return 0;
}
120 | ||
/**
 * of_detach_node() - "Unplug" a node from the device tree.
 *
 * The caller must hold a reference to the node.  The memory associated with
 * the node is not freed until its refcount goes to zero.
 *
 * Unlinks @np from both the global allnext chain and its parent's child
 * list, marks it OF_DETACHED, and removes its sysfs presence.  Detaching
 * an already-detached node, or a node without a parent (the root), is a
 * silent no-op returning 0.
 */
int of_detach_node(struct device_node *np)
{
	struct device_node *parent;
	unsigned long flags;
	int rc = 0;

	/* Let reconfig listeners veto the detach before touching any list */
	rc = of_reconfig_notify(OF_RECONFIG_DETACH_NODE, np);
	if (rc)
		return rc;

	raw_spin_lock_irqsave(&devtree_lock, flags);

	if (of_node_check_flag(np, OF_DETACHED)) {
		/* someone already detached it */
		raw_spin_unlock_irqrestore(&devtree_lock, flags);
		return rc;
	}

	parent = np->parent;
	if (!parent) {
		/* root node (or never attached) — nothing to unlink */
		raw_spin_unlock_irqrestore(&devtree_lock, flags);
		return rc;
	}

	/* Unlink np from the global allnext chain (singly linked, so walk
	 * to the predecessor unless np is the head) */
	if (of_allnodes == np)
		of_allnodes = np->allnext;
	else {
		struct device_node *prev;
		for (prev = of_allnodes;
		     prev->allnext != np;
		     prev = prev->allnext)
			;
		prev->allnext = np->allnext;
	}

	/* Unlink np from its parent's child list, same head/predecessor
	 * pattern as above */
	if (parent->child == np)
		parent->child = np->sibling;
	else {
		struct device_node *prevsib;
		for (prevsib = np->parent->child;
		     prevsib->sibling != np;
		     prevsib = prevsib->sibling)
			;
		prevsib->sibling = np->sibling;
	}

	/* Mark detached while still holding the lock so concurrent callers
	 * hit the early-out above */
	of_node_set_flag(np, OF_DETACHED);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	/* Drop sysfs presence and the kobject-init reference */
	of_node_remove(np);
	return rc;
}
179 | ||
/**
 * of_node_release() - release a dynamically allocated node
 * @kobj: kobject embedded in the node to be released
 *
 * This is the kobject release callback invoked when the node's refcount
 * drops to zero (via of_node_put()).  Only OF_DYNAMIC nodes are actually
 * freed; nodes built from the flattened device tree blob are left alone.
 */
void of_node_release(struct kobject *kobj)
{
	struct device_node *node = kobj_to_device_node(kobj);
	struct property *prop = node->properties;

	/* We should never be releasing nodes that haven't been detached. */
	if (!of_node_check_flag(node, OF_DETACHED)) {
		pr_err("ERROR: Bad of_node_put() on %s\n", node->full_name);
		dump_stack();
		return;
	}

	/* Non-dynamic nodes live in FDT/firmware memory; never kfree them */
	if (!of_node_check_flag(node, OF_DYNAMIC))
		return;

	/* Free the live property list first, then the deadprops list
	 * (properties removed earlier are parked on deadprops so stale
	 * pointers stay valid until the node dies) */
	while (prop) {
		struct property *next = prop->next;
		kfree(prop->name);
		kfree(prop->value);
		kfree(prop);
		prop = next;

		if (!prop) {
			/* switch over to the deadprops list exactly once */
			prop = node->deadprops;
			node->deadprops = NULL;
		}
	}
	kfree(node->full_name);
	kfree(node->data);
	kfree(node);
}
69843396 PA |
217 | |
218 | /** | |
219 | * __of_prop_dup - Copy a property dynamically. | |
220 | * @prop: Property to copy | |
221 | * @allocflags: Allocation flags (typically pass GFP_KERNEL) | |
222 | * | |
223 | * Copy a property by dynamically allocating the memory of both the | |
224 | * property stucture and the property name & contents. The property's | |
225 | * flags have the OF_DYNAMIC bit set so that we can differentiate between | |
226 | * dynamically allocated properties and not. | |
227 | * Returns the newly allocated property or NULL on out of memory error. | |
228 | */ | |
229 | struct property *__of_prop_dup(const struct property *prop, gfp_t allocflags) | |
230 | { | |
231 | struct property *new; | |
232 | ||
233 | new = kzalloc(sizeof(*new), allocflags); | |
234 | if (!new) | |
235 | return NULL; | |
236 | ||
237 | /* | |
238 | * NOTE: There is no check for zero length value. | |
239 | * In case of a boolean property This will allocate a value | |
240 | * of zero bytes. We do this to work around the use | |
241 | * of of_get_property() calls on boolean values. | |
242 | */ | |
243 | new->name = kstrdup(prop->name, allocflags); | |
244 | new->value = kmemdup(prop->value, prop->length, allocflags); | |
245 | new->length = prop->length; | |
246 | if (!new->name || !new->value) | |
247 | goto err_free; | |
248 | ||
249 | /* mark the property as dynamic */ | |
250 | of_property_set_flag(new, OF_DYNAMIC); | |
251 | ||
252 | return new; | |
253 | ||
254 | err_free: | |
255 | kfree(new->name); | |
256 | kfree(new->value); | |
257 | kfree(new); | |
258 | return NULL; | |
259 | } | |
260 | ||
261 | /** | |
262 | * __of_node_alloc() - Create an empty device node dynamically. | |
263 | * @full_name: Full name of the new device node | |
264 | * @allocflags: Allocation flags (typically pass GFP_KERNEL) | |
265 | * | |
266 | * Create an empty device tree node, suitable for further modification. | |
267 | * The node data are dynamically allocated and all the node flags | |
268 | * have the OF_DYNAMIC & OF_DETACHED bits set. | |
269 | * Returns the newly allocated node or NULL on out of memory error. | |
270 | */ | |
271 | struct device_node *__of_node_alloc(const char *full_name, gfp_t allocflags) | |
272 | { | |
273 | struct device_node *node; | |
274 | ||
275 | node = kzalloc(sizeof(*node), allocflags); | |
276 | if (!node) | |
277 | return NULL; | |
278 | ||
279 | node->full_name = kstrdup(full_name, allocflags); | |
280 | of_node_set_flag(node, OF_DYNAMIC); | |
281 | of_node_set_flag(node, OF_DETACHED); | |
282 | if (!node->full_name) | |
283 | goto err_free; | |
284 | ||
285 | of_node_init(node); | |
286 | ||
287 | return node; | |
288 | ||
289 | err_free: | |
290 | kfree(node->full_name); | |
291 | kfree(node); | |
292 | return NULL; | |
293 | } |