/* drivers/dca/dca-core.c — Direct Cache Access (DCA) core service. */
/*
 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This driver supports an interface for DCA clients and providers to meet.
 */
25
26 #include <linux/kernel.h>
27 #include <linux/notifier.h>
28 #include <linux/device.h>
29 #include <linux/dca.h>
30 #include <linux/slab.h>
31
32 #define DCA_VERSION "1.12.1"
33
34 MODULE_VERSION(DCA_VERSION);
35 MODULE_LICENSE("GPL");
36 MODULE_AUTHOR("Intel Corporation");
37
38 static DEFINE_SPINLOCK(dca_lock);
39
40 static LIST_HEAD(dca_domains);
41
42 static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);
43
44 static int dca_providers_blocked;
45
46 static struct pci_bus *dca_pci_rc_from_dev(struct device *dev)
47 {
48 struct pci_dev *pdev = to_pci_dev(dev);
49 struct pci_bus *bus = pdev->bus;
50
51 while (bus->parent)
52 bus = bus->parent;
53
54 return bus;
55 }
56
57 static struct dca_domain *dca_allocate_domain(struct pci_bus *rc)
58 {
59 struct dca_domain *domain;
60
61 domain = kzalloc(sizeof(*domain), GFP_NOWAIT);
62 if (!domain)
63 return NULL;
64
65 INIT_LIST_HEAD(&domain->dca_providers);
66 domain->pci_rc = rc;
67
68 return domain;
69 }
70
/* Unlink @domain from dca_domains and release its memory (caller holds dca_lock). */
static void dca_free_domain(struct dca_domain *domain)
{
	list_del(&domain->node);
	kfree(domain);
}
76
77 static int dca_provider_ioat_ver_3_0(struct device *dev)
78 {
79 struct pci_dev *pdev = to_pci_dev(dev);
80
81 return ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
82 ((pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG0) ||
83 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG1) ||
84 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG2) ||
85 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG3) ||
86 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG4) ||
87 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG5) ||
88 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG6) ||
89 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG7)));
90 }
91
92 static void unregister_dca_providers(void)
93 {
94 struct dca_provider *dca, *_dca;
95 struct list_head unregistered_providers;
96 struct dca_domain *domain;
97 unsigned long flags;
98
99 blocking_notifier_call_chain(&dca_provider_chain,
100 DCA_PROVIDER_REMOVE, NULL);
101
102 INIT_LIST_HEAD(&unregistered_providers);
103
104 spin_lock_irqsave(&dca_lock, flags);
105
106 if (list_empty(&dca_domains)) {
107 spin_unlock_irqrestore(&dca_lock, flags);
108 return;
109 }
110
111 /* at this point only one domain in the list is expected */
112 domain = list_first_entry(&dca_domains, struct dca_domain, node);
113 if (!domain)
114 return;
115
116 list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node) {
117 list_del(&dca->node);
118 list_add(&dca->node, &unregistered_providers);
119 }
120
121 dca_free_domain(domain);
122
123 spin_unlock_irqrestore(&dca_lock, flags);
124
125 list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
126 dca_sysfs_remove_provider(dca);
127 list_del(&dca->node);
128 }
129 }
130
131 static struct dca_domain *dca_find_domain(struct pci_bus *rc)
132 {
133 struct dca_domain *domain;
134
135 list_for_each_entry(domain, &dca_domains, node)
136 if (domain->pci_rc == rc)
137 return domain;
138
139 return NULL;
140 }
141
142 static struct dca_domain *dca_get_domain(struct device *dev)
143 {
144 struct pci_bus *rc;
145 struct dca_domain *domain;
146
147 rc = dca_pci_rc_from_dev(dev);
148 domain = dca_find_domain(rc);
149
150 if (!domain) {
151 if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains)) {
152 dca_providers_blocked = 1;
153 } else {
154 domain = dca_allocate_domain(rc);
155 if (domain)
156 list_add(&domain->node, &dca_domains);
157 }
158 }
159
160 return domain;
161 }
162
163 static struct dca_provider *dca_find_provider_by_dev(struct device *dev)
164 {
165 struct dca_provider *dca;
166 struct pci_bus *rc;
167 struct dca_domain *domain;
168
169 if (dev) {
170 rc = dca_pci_rc_from_dev(dev);
171 domain = dca_find_domain(rc);
172 if (!domain)
173 return NULL;
174 } else {
175 if (!list_empty(&dca_domains))
176 domain = list_first_entry(&dca_domains,
177 struct dca_domain,
178 node);
179 else
180 return NULL;
181 }
182
183 list_for_each_entry(dca, &domain->dca_providers, node)
184 if ((!dev) || (dca->ops->dev_managed(dca, dev)))
185 return dca;
186
187 return NULL;
188 }
189
/**
 * dca_add_requester - add a dca client to the list
 * @dev - the device that wants dca service
 *
 * Offers @dev to each provider in its root-complex domain; the first one
 * to accept wins.  Returns 0 on success, -EFAULT for a NULL @dev, -EEXIST
 * if @dev is already served, -ENODEV if no domain/provider accepts it, or
 * a sysfs error code.
 */
int dca_add_requester(struct device *dev)
{
	struct dca_provider *dca;
	int err, slot = -ENODEV;
	unsigned long flags;
	struct pci_bus *pci_rc;
	struct dca_domain *domain;

	if (!dev)
		return -EFAULT;

	spin_lock_irqsave(&dca_lock, flags);

	/* check if the requester has not been added already */
	dca = dca_find_provider_by_dev(dev);
	if (dca) {
		spin_unlock_irqrestore(&dca_lock, flags);
		return -EEXIST;
	}

	pci_rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(pci_rc);
	if (!domain) {
		spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}

	/* first provider in the domain to accept the device wins */
	list_for_each_entry(dca, &domain->dca_providers, node) {
		slot = dca->ops->add_requester(dca, dev);
		if (slot >= 0)
			break;
	}

	spin_unlock_irqrestore(&dca_lock, flags);

	if (slot < 0)
		return slot;

	/* sysfs registration can sleep, so it must run outside dca_lock */
	err = dca_sysfs_add_req(dca, dev, slot);
	if (err) {
		spin_lock_irqsave(&dca_lock, flags);
		/* undo add_requester only if the same provider still owns us */
		if (dca == dca_find_provider_by_dev(dev))
			dca->ops->remove_requester(dca, dev);
		spin_unlock_irqrestore(&dca_lock, flags);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dca_add_requester);
244
245 /**
246 * dca_remove_requester - remove a dca client from the list
247 * @dev - the device that wants dca service
248 */
249 int dca_remove_requester(struct device *dev)
250 {
251 struct dca_provider *dca;
252 int slot;
253 unsigned long flags;
254
255 if (!dev)
256 return -EFAULT;
257
258 spin_lock_irqsave(&dca_lock, flags);
259 dca = dca_find_provider_by_dev(dev);
260 if (!dca) {
261 spin_unlock_irqrestore(&dca_lock, flags);
262 return -ENODEV;
263 }
264 slot = dca->ops->remove_requester(dca, dev);
265 spin_unlock_irqrestore(&dca_lock, flags);
266
267 if (slot < 0)
268 return slot;
269
270 dca_sysfs_remove_req(dca, slot);
271
272 return 0;
273 }
274 EXPORT_SYMBOL_GPL(dca_remove_requester);
275
/**
 * dca_common_get_tag - return the dca tag (serves both new and old api)
 * @dev - the device that wants dca service
 * @cpu - the cpuid as returned by get_cpu()
 *
 * NOTE(review): the return type is u8, so -ENODEV is truncated to an
 * 8-bit value that callers cannot distinguish from a valid tag — confirm
 * callers tolerate the no-provider case.
 */
u8 dca_common_get_tag(struct device *dev, int cpu)
{
	struct dca_provider *dca;
	u8 tag;
	unsigned long flags;

	spin_lock_irqsave(&dca_lock, flags);

	dca = dca_find_provider_by_dev(dev);
	if (!dca) {
		spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	tag = dca->ops->get_tag(dca, dev, cpu);

	spin_unlock_irqrestore(&dca_lock, flags);
	return tag;
}
299
300 /**
301 * dca3_get_tag - return the dca tag to the requester device
302 * for the given cpu (new api)
303 * @dev - the device that wants dca service
304 * @cpu - the cpuid as returned by get_cpu()
305 */
306 u8 dca3_get_tag(struct device *dev, int cpu)
307 {
308 if (!dev)
309 return -EFAULT;
310
311 return dca_common_get_tag(dev, cpu);
312 }
313 EXPORT_SYMBOL_GPL(dca3_get_tag);
314
315 /**
316 * dca_get_tag - return the dca tag for the given cpu (old api)
317 * @cpu - the cpuid as returned by get_cpu()
318 */
319 u8 dca_get_tag(int cpu)
320 {
321 struct device *dev = NULL;
322
323 return dca_common_get_tag(dev, cpu);
324 }
325 EXPORT_SYMBOL_GPL(dca_get_tag);
326
327 /**
328 * alloc_dca_provider - get data struct for describing a dca provider
329 * @ops - pointer to struct of dca operation function pointers
330 * @priv_size - size of extra mem to be added for provider's needs
331 */
332 struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size)
333 {
334 struct dca_provider *dca;
335 int alloc_size;
336
337 alloc_size = (sizeof(*dca) + priv_size);
338 dca = kzalloc(alloc_size, GFP_KERNEL);
339 if (!dca)
340 return NULL;
341 dca->ops = ops;
342
343 return dca;
344 }
345 EXPORT_SYMBOL_GPL(alloc_dca_provider);
346
/**
 * free_dca_provider - release the dca provider data struct
 * @dca - struct created by alloc_dca_provider()
 *
 * Must only be called after the provider has been unregistered.
 */
void free_dca_provider(struct dca_provider *dca)
{
	kfree(dca);
}
EXPORT_SYMBOL_GPL(free_dca_provider);
357
/**
 * register_dca_provider - register a dca provider
 * @dca - struct created by alloc_dca_provider()
 * @dev - device providing dca services
 *
 * Returns 0 on success, -ENODEV if providers are blocked or no domain
 * could be obtained, or a sysfs error code.
 */
int register_dca_provider(struct dca_provider *dca, struct device *dev)
{
	int err;
	unsigned long flags;
	struct dca_domain *domain;

	/* bail out early if a mixed topology already blocked providers */
	spin_lock_irqsave(&dca_lock, flags);
	if (dca_providers_blocked) {
		spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	spin_unlock_irqrestore(&dca_lock, flags);

	/* sysfs registration can sleep, so do it before retaking the lock */
	err = dca_sysfs_add_provider(dca, dev);
	if (err)
		return err;

	spin_lock_irqsave(&dca_lock, flags);
	domain = dca_get_domain(dev);
	if (!domain) {
		/* dca_get_domain() may have just set dca_providers_blocked */
		if (dca_providers_blocked) {
			spin_unlock_irqrestore(&dca_lock, flags);
			dca_sysfs_remove_provider(dca);
			/* tear down everything registered so far */
			unregister_dca_providers();
		} else {
			spin_unlock_irqrestore(&dca_lock, flags);
		}
		return -ENODEV;
	}
	list_add(&dca->node, &domain->dca_providers);
	spin_unlock_irqrestore(&dca_lock, flags);

	/* announce the new provider to interested clients */
	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_ADD, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(register_dca_provider);
400
401 /**
402 * unregister_dca_provider - remove a dca provider
403 * @dca - struct created by alloc_dca_provider()
404 */
405 void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
406 {
407 unsigned long flags;
408 struct pci_bus *pci_rc;
409 struct dca_domain *domain;
410
411 blocking_notifier_call_chain(&dca_provider_chain,
412 DCA_PROVIDER_REMOVE, NULL);
413
414 spin_lock_irqsave(&dca_lock, flags);
415
416 list_del(&dca->node);
417
418 pci_rc = dca_pci_rc_from_dev(dev);
419 domain = dca_find_domain(pci_rc);
420 if (list_empty(&domain->dca_providers))
421 dca_free_domain(domain);
422
423 spin_unlock_irqrestore(&dca_lock, flags);
424
425 dca_sysfs_remove_provider(dca);
426 }
427 EXPORT_SYMBOL_GPL(unregister_dca_provider);
428
/**
 * dca_register_notify - register a client's notifier callback
 * @nb - notifier block invoked on DCA_PROVIDER_ADD/REMOVE events
 */
void dca_register_notify(struct notifier_block *nb)
{
	blocking_notifier_chain_register(&dca_provider_chain, nb);
}
EXPORT_SYMBOL_GPL(dca_register_notify);
437
/**
 * dca_unregister_notify - remove a client's notifier callback
 * @nb - notifier block previously added via dca_register_notify()
 */
void dca_unregister_notify(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&dca_provider_chain, nb);
}
EXPORT_SYMBOL_GPL(dca_unregister_notify);
446
/* Module init: announce the service and create the dca sysfs class. */
static int __init dca_init(void)
{
	pr_info("dca service started, version %s\n", DCA_VERSION);
	return dca_sysfs_init();
}
452
/* Module exit: tear down the dca sysfs class. */
static void __exit dca_exit(void)
{
	dca_sysfs_exit();
}

/* arch_initcall so the service is up before dependent drivers probe. */
arch_initcall(dca_init);
module_exit(dca_exit);
460