/*
 * arch/s390/appldata/appldata_base.c
 *
 * Base infrastructure for Linux-z/VM Monitor Stream, Stage 1.
 * Exports appldata_register_ops() and appldata_unregister_ops() for the
 * data gathering modules.
 *
 * Copyright (C) 2003,2006 IBM Corporation, IBM Deutschland Entwicklung GmbH.
 *
 * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/workqueue.h>
#include <asm/appldata.h>
#include <asm/timer.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/smp.h>

#include "appldata.h"

#define MY_PRINT_NAME	"appldata"		/* for debug messages, etc. */
#define APPLDATA_CPU_INTERVAL	10000		/* default (CPU) time for
						   sampling interval in
						   milliseconds */

#define TOD_MICRO	0x01000			/* nr. of TOD clock units
						   for 1 microsecond */
/*
 * /proc entries (sysctl)
 */
static const char appldata_proc_name[APPLDATA_PROC_NAME_LENGTH] = "appldata";
static int appldata_timer_handler(ctl_table *ctl, int write, struct file *filp,
				  void __user *buffer, size_t *lenp, loff_t *ppos);
static int appldata_interval_handler(ctl_table *ctl, int write,
				     struct file *filp,
				     void __user *buffer,
				     size_t *lenp, loff_t *ppos);

static struct ctl_table_header *appldata_sysctl_header;
static struct ctl_table appldata_table[] = {
	{
		.ctl_name	= CTL_APPLDATA_TIMER,
		.procname	= "timer",
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= &appldata_timer_handler,
	},
	{
		.ctl_name	= CTL_APPLDATA_INTERVAL,
		.procname	= "interval",
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= &appldata_interval_handler,
	},
	{ .ctl_name = 0 }
};

static struct ctl_table appldata_dir_table[] = {
	{
		.ctl_name	= CTL_APPLDATA,
		.procname	= appldata_proc_name,
		.maxlen		= 0,
		.mode		= S_IRUGO | S_IXUGO,
		.child		= appldata_table,
	},
	{ .ctl_name = 0 }
};
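/*
 * The tables above result in two control files, /proc/sys/appldata/timer
 * and /proc/sys/appldata/interval.  Illustrative shell usage (example
 * values, not mandated by this file):
 *
 *	echo 1     > /proc/sys/appldata/timer	  # start the per-CPU timers
 *	echo 30000 > /proc/sys/appldata/interval  # sample every 30000 ms
 *	cat /proc/sys/appldata/timer		  # 1 = active, 0 = not active
 */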

/*
 * Timer
 */
DEFINE_PER_CPU(struct vtimer_list, appldata_timer);
static atomic_t appldata_expire_count = ATOMIC_INIT(0);

static DEFINE_SPINLOCK(appldata_timer_lock);
static int appldata_interval = APPLDATA_CPU_INTERVAL;
static int appldata_timer_active;

/*
 * Work queue
 */
static struct workqueue_struct *appldata_wq;
static void appldata_work_fn(void *data);
static DECLARE_WORK(appldata_work, appldata_work_fn, NULL);


/*
 * Ops list
 */
static DEFINE_SPINLOCK(appldata_ops_lock);
static LIST_HEAD(appldata_ops_list);


/*************************** timer, work, DIAG *******************************/
/*
 * appldata_timer_function()
 *
 * schedule work and reschedule timer
 */
static void appldata_timer_function(unsigned long data, struct pt_regs *regs)
{
	P_DEBUG(" -= Timer =-\n");
	P_DEBUG("CPU: %i, expire_count: %i\n", smp_processor_id(),
		atomic_read(&appldata_expire_count));
	if (atomic_dec_and_test(&appldata_expire_count)) {
		atomic_set(&appldata_expire_count, num_online_cpus());
		queue_work(appldata_wq, (struct work_struct *) data);
	}
}
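/*
 * Note: appldata_timer is a per-CPU virtual timer, driven by CPU time
 * rather than wall time.  appldata_expire_count is primed with the number
 * of online CPUs; each expiring timer decrements it, and the gathering work
 * is queued only when the count reaches zero, i.e. once every online CPU's
 * timer has fired.  The count is then re-armed for the next round.
 */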

/*
 * appldata_work_fn()
 *
 * call data gathering function for each (active) module
 */
static void appldata_work_fn(void *data)
{
	struct list_head *lh;
	struct appldata_ops *ops;
	int i;

	P_DEBUG(" -= Work Queue =-\n");
	i = 0;
	spin_lock(&appldata_ops_lock);
	list_for_each(lh, &appldata_ops_list) {
		ops = list_entry(lh, struct appldata_ops, list);
		P_DEBUG("list_for_each loop: %i) active = %u, name = %s\n",
			++i, ops->active, ops->name);
		if (ops->active == 1) {
			ops->callback(ops->data);
		}
	}
	spin_unlock(&appldata_ops_lock);
}

/*
 * appldata_diag()
 *
 * prepare parameter list, issue DIAG 0xDC
 */
int appldata_diag(char record_nr, u16 function, unsigned long buffer,
		  u16 length, char *mod_lvl)
{
	struct appldata_product_id id = {
		.prod_nr    = {0xD3, 0xC9, 0xD5, 0xE4,
			       0xE7, 0xD2, 0xD9},	/* "LINUXKR" */
		.prod_fn    = 0xD5D3,			/* "NL" */
		.version_nr = 0xF2F6,			/* "26" */
		.release_nr = 0xF0F1,			/* "01" */
	};

	id.record_nr = record_nr;
	id.mod_lvl = (mod_lvl[0]) << 8 | mod_lvl[1];
	return appldata_asm(&id, function, (void *) buffer, length);
}
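/*
 * Note: the product id fields above are EBCDIC encoded, as the inline
 * comments indicate.  The two-character modification level is packed into a
 * single u16 with mod_lvl[0] in the high byte; for example, a hypothetical
 * mod_lvl of {0xF0, 0xF1} (EBCDIC "01") is passed on as 0xF0F1.
 */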
/************************ timer, work, DIAG <END> ****************************/


/****************************** /proc stuff **********************************/

/*
 * __appldata_mod_vtimer_wrap()
 *
 * wrapper function for mod_virt_timer(), because smp_call_function_on()
 * accepts only one parameter.
 */
static void __appldata_mod_vtimer_wrap(void *p) {
	struct {
		struct vtimer_list *timer;
		u64 expires;
	} *args = p;
	mod_virt_timer(args->timer, args->expires);
}

#define APPLDATA_ADD_TIMER	0
#define APPLDATA_DEL_TIMER	1
#define APPLDATA_MOD_TIMER	2

/*
 * __appldata_vtimer_setup()
 *
 * Add, delete or modify virtual timers on all online cpus.
 * The caller needs to get the appldata_timer_lock spinlock.
 */
static void
__appldata_vtimer_setup(int cmd)
{
	u64 per_cpu_interval;
	int i;

	switch (cmd) {
	case APPLDATA_ADD_TIMER:
		if (appldata_timer_active)
			break;
		per_cpu_interval = (u64) (appldata_interval*1000 /
					  num_online_cpus()) * TOD_MICRO;
		for_each_online_cpu(i) {
			per_cpu(appldata_timer, i).expires = per_cpu_interval;
			smp_call_function_on(add_virt_timer_periodic,
					     &per_cpu(appldata_timer, i),
					     0, 1, i);
		}
		appldata_timer_active = 1;
		P_INFO("Monitoring timer started.\n");
		break;
	case APPLDATA_DEL_TIMER:
		for_each_online_cpu(i)
			del_virt_timer(&per_cpu(appldata_timer, i));
		if (!appldata_timer_active)
			break;
		appldata_timer_active = 0;
		atomic_set(&appldata_expire_count, num_online_cpus());
		P_INFO("Monitoring timer stopped.\n");
		break;
	case APPLDATA_MOD_TIMER:
		per_cpu_interval = (u64) (appldata_interval*1000 /
					  num_online_cpus()) * TOD_MICRO;
		if (!appldata_timer_active)
			break;
		for_each_online_cpu(i) {
			struct {
				struct vtimer_list *timer;
				u64 expires;
			} args;
			args.timer = &per_cpu(appldata_timer, i);
			args.expires = per_cpu_interval;
			smp_call_function_on(__appldata_mod_vtimer_wrap,
					     &args, 0, 1, i);
		}
	}
}
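/*
 * Worked example: with the default appldata_interval of 10000 ms and, say,
 * four online CPUs, per_cpu_interval is (10000 * 1000 / 4) * 0x1000, i.e.
 * 2,500,000 microseconds expressed in TOD clock units (TOD_MICRO = 0x1000 =
 * 4096 units per microsecond), which comes to 10,240,000,000 TOD units per
 * timer expiration.
 */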

/*
 * appldata_timer_handler()
 *
 * Start/Stop timer, show status of timer (0 = not active, 1 = active)
 */
static int
appldata_timer_handler(ctl_table *ctl, int write, struct file *filp,
		       void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int len;
	char buf[2];

	if (!*lenp || *ppos) {
		*lenp = 0;
		return 0;
	}
	if (!write) {
		len = sprintf(buf, appldata_timer_active ? "1\n" : "0\n");
		if (len > *lenp)
			len = *lenp;
		if (copy_to_user(buffer, buf, len))
			return -EFAULT;
		goto out;
	}
	len = *lenp;
	if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
		return -EFAULT;
	spin_lock(&appldata_timer_lock);
	if (buf[0] == '1')
		__appldata_vtimer_setup(APPLDATA_ADD_TIMER);
	else if (buf[0] == '0')
		__appldata_vtimer_setup(APPLDATA_DEL_TIMER);
	spin_unlock(&appldata_timer_lock);
out:
	*lenp = len;
	*ppos += len;
	return 0;
}

/*
 * appldata_interval_handler()
 *
 * Set (CPU) timer interval for collection of data (in milliseconds), show
 * current timer interval.
 */
static int
appldata_interval_handler(ctl_table *ctl, int write, struct file *filp,
			  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int len, interval;
	char buf[16];

	if (!*lenp || *ppos) {
		*lenp = 0;
		return 0;
	}
	if (!write) {
		len = sprintf(buf, "%i\n", appldata_interval);
		if (len > *lenp)
			len = *lenp;
		if (copy_to_user(buffer, buf, len))
			return -EFAULT;
		goto out;
	}
	len = *lenp;
	if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len)) {
		return -EFAULT;
	}
	sscanf(buf, "%i", &interval);
	if (interval <= 0) {
		P_ERROR("Timer CPU interval has to be > 0!\n");
		return -EINVAL;
	}

	spin_lock(&appldata_timer_lock);
	appldata_interval = interval;
	__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
	spin_unlock(&appldata_timer_lock);

	P_INFO("Monitoring CPU interval set to %u milliseconds.\n",
		interval);
out:
	*lenp = len;
	*ppos += len;
	return 0;
}

/*
 * appldata_generic_handler()
 *
 * Generic start/stop monitoring and DIAG, show status of
 * monitoring (0 = not in process, 1 = in process)
 */
static int
appldata_generic_handler(ctl_table *ctl, int write, struct file *filp,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct appldata_ops *ops = NULL, *tmp_ops;
	int rc, len, found;
	char buf[2];
	struct list_head *lh;

	found = 0;
	spin_lock(&appldata_ops_lock);
	list_for_each(lh, &appldata_ops_list) {
		tmp_ops = list_entry(lh, struct appldata_ops, list);
		if (&tmp_ops->ctl_table[2] == ctl) {
			found = 1;
		}
	}
	if (!found) {
		spin_unlock(&appldata_ops_lock);
		return -ENODEV;
	}
	ops = ctl->data;
	if (!try_module_get(ops->owner)) {	// protect this function
		spin_unlock(&appldata_ops_lock);
		return -ENODEV;
	}
	spin_unlock(&appldata_ops_lock);

	if (!*lenp || *ppos) {
		*lenp = 0;
		module_put(ops->owner);
		return 0;
	}
	if (!write) {
		len = sprintf(buf, ops->active ? "1\n" : "0\n");
		if (len > *lenp)
			len = *lenp;
		if (copy_to_user(buffer, buf, len)) {
			module_put(ops->owner);
			return -EFAULT;
		}
		goto out;
	}
	len = *lenp;
	if (copy_from_user(buf, buffer,
			   len > sizeof(buf) ? sizeof(buf) : len)) {
		module_put(ops->owner);
		return -EFAULT;
	}

	spin_lock(&appldata_ops_lock);
	if ((buf[0] == '1') && (ops->active == 0)) {
		// protect work queue callback
		if (!try_module_get(ops->owner)) {
			spin_unlock(&appldata_ops_lock);
			module_put(ops->owner);
			return -ENODEV;
		}
		ops->callback(ops->data);	// init record
		rc = appldata_diag(ops->record_nr,
				   APPLDATA_START_INTERVAL_REC,
				   (unsigned long) ops->data, ops->size,
				   ops->mod_lvl);
		if (rc != 0) {
			P_ERROR("START DIAG 0xDC for %s failed, "
				"return code: %d\n", ops->name, rc);
			module_put(ops->owner);
		} else {
			P_INFO("Monitoring %s data enabled, "
			       "DIAG 0xDC started.\n", ops->name);
			ops->active = 1;
		}
	} else if ((buf[0] == '0') && (ops->active == 1)) {
		ops->active = 0;
		rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC,
				   (unsigned long) ops->data, ops->size,
				   ops->mod_lvl);
		if (rc != 0) {
			P_ERROR("STOP DIAG 0xDC for %s failed, "
				"return code: %d\n", ops->name, rc);
		} else {
			P_INFO("Monitoring %s data disabled, "
			       "DIAG 0xDC stopped.\n", ops->name);
		}
		module_put(ops->owner);
	}
	spin_unlock(&appldata_ops_lock);
out:
	*lenp = len;
	*ppos += len;
	module_put(ops->owner);
	return 0;
}
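/*
 * Usage note: each registered module gets its own control file under
 * /proc/sys/appldata, named after ops->name (set up in
 * appldata_register_ops() below).  For a module registered under the
 * hypothetical name "foo":
 *
 *	echo 1 > /proc/sys/appldata/foo		# start DIAG 0xDC records
 *	echo 0 > /proc/sys/appldata/foo		# stop them again
 */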

/*************************** /proc stuff <END> *******************************/


/************************* module-ops management *****************************/
/*
 * appldata_register_ops()
 *
 * update ops list, register /proc/sys entries
 */
int appldata_register_ops(struct appldata_ops *ops)
{
	struct list_head *lh;
	struct appldata_ops *tmp_ops;
	int i;

	i = 0;

	if ((ops->size > APPLDATA_MAX_REC_SIZE) ||
	    (ops->size < 0)) {
		P_ERROR("Invalid size of %s record = %i, maximum = %i!\n",
			ops->name, ops->size, APPLDATA_MAX_REC_SIZE);
		return -ENOMEM;
	}
	if ((ops->ctl_nr == CTL_APPLDATA) ||
	    (ops->ctl_nr == CTL_APPLDATA_TIMER) ||
	    (ops->ctl_nr == CTL_APPLDATA_INTERVAL)) {
		P_ERROR("ctl_nr %i already in use!\n", ops->ctl_nr);
		return -EBUSY;
	}
	ops->ctl_table = kzalloc(4*sizeof(struct ctl_table), GFP_KERNEL);
	if (ops->ctl_table == NULL) {
		P_ERROR("Not enough memory for %s ctl_table!\n", ops->name);
		return -ENOMEM;
	}

	spin_lock(&appldata_ops_lock);
	list_for_each(lh, &appldata_ops_list) {
		tmp_ops = list_entry(lh, struct appldata_ops, list);
		P_DEBUG("register_ops loop: %i) name = %s, ctl = %i\n",
			++i, tmp_ops->name, tmp_ops->ctl_nr);
		P_DEBUG("Comparing %s (ctl %i) with %s (ctl %i)\n",
			tmp_ops->name, tmp_ops->ctl_nr, ops->name,
			ops->ctl_nr);
		if (strncmp(tmp_ops->name, ops->name,
			    APPLDATA_PROC_NAME_LENGTH) == 0) {
			P_ERROR("Name \"%s\" already registered!\n", ops->name);
			kfree(ops->ctl_table);
			spin_unlock(&appldata_ops_lock);
			return -EBUSY;
		}
		if (tmp_ops->ctl_nr == ops->ctl_nr) {
			P_ERROR("ctl_nr %i already registered!\n", ops->ctl_nr);
			kfree(ops->ctl_table);
			spin_unlock(&appldata_ops_lock);
			return -EBUSY;
		}
	}
	list_add(&ops->list, &appldata_ops_list);
	spin_unlock(&appldata_ops_lock);

	ops->ctl_table[0].ctl_name = CTL_APPLDATA;
	ops->ctl_table[0].procname = appldata_proc_name;
	ops->ctl_table[0].maxlen = 0;
	ops->ctl_table[0].mode = S_IRUGO | S_IXUGO;
	ops->ctl_table[0].child = &ops->ctl_table[2];

	ops->ctl_table[1].ctl_name = 0;

	ops->ctl_table[2].ctl_name = ops->ctl_nr;
	ops->ctl_table[2].procname = ops->name;
	ops->ctl_table[2].mode = S_IRUGO | S_IWUSR;
	ops->ctl_table[2].proc_handler = appldata_generic_handler;
	ops->ctl_table[2].data = ops;

	ops->ctl_table[3].ctl_name = 0;

	ops->sysctl_header = register_sysctl_table(ops->ctl_table, 1);

	P_INFO("%s-ops registered!\n", ops->name);
	return 0;
}
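/*
 * Illustrative sketch (names starting with "foo" are hypothetical; the
 * authoritative field list of struct appldata_ops lives in appldata.h and
 * asm/appldata.h): a minimal data gathering module provides a record
 * buffer and a callback, fills in a struct appldata_ops and registers it
 * here.
 *
 *	static struct appldata_foo_data foo_data;	// record sent via DIAG
 *
 *	static void appldata_get_foo_data(void *data)
 *	{
 *		struct appldata_foo_data *foo = data;
 *		// refresh the record contents here
 *	}
 *
 *	static struct appldata_ops foo_ops = {
 *		.ctl_nr    = CTL_APPLDATA_FOO,	// unique sysctl binary id
 *		.name      = "foo",		// -> /proc/sys/appldata/foo
 *		.record_nr = APPLDATA_RECORD_FOO_ID,
 *		.size      = sizeof(struct appldata_foo_data),
 *		.callback  = &appldata_get_foo_data,
 *		.data      = &foo_data,
 *		.owner     = THIS_MODULE,
 *		.mod_lvl   = {0xF0, 0xF0},	// EBCDIC "00", for example
 *	};
 *
 *	// module init:  rc = appldata_register_ops(&foo_ops);
 *	// module exit:  appldata_unregister_ops(&foo_ops);
 */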

/*
 * appldata_unregister_ops()
 *
 * update ops list, unregister /proc entries, stop DIAG if necessary
 */
void appldata_unregister_ops(struct appldata_ops *ops)
{
	void *table;
	spin_lock(&appldata_ops_lock);
	list_del(&ops->list);
	/* at that point any incoming access will fail */
	table = ops->ctl_table;
	ops->ctl_table = NULL;
	spin_unlock(&appldata_ops_lock);
	unregister_sysctl_table(ops->sysctl_header);
	kfree(table);
	P_INFO("%s-ops unregistered!\n", ops->name);
}
/********************** module-ops management <END> **************************/


/******************************* init / exit *********************************/

static void
appldata_online_cpu(int cpu)
{
	init_virt_timer(&per_cpu(appldata_timer, cpu));
	per_cpu(appldata_timer, cpu).function = appldata_timer_function;
	per_cpu(appldata_timer, cpu).data = (unsigned long)
		&appldata_work;
	atomic_inc(&appldata_expire_count);
	spin_lock(&appldata_timer_lock);
	__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
	spin_unlock(&appldata_timer_lock);
}

static void
appldata_offline_cpu(int cpu)
{
	del_virt_timer(&per_cpu(appldata_timer, cpu));
	if (atomic_dec_and_test(&appldata_expire_count)) {
		atomic_set(&appldata_expire_count, num_online_cpus());
		queue_work(appldata_wq, &appldata_work);
	}
	spin_lock(&appldata_timer_lock);
	__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
	spin_unlock(&appldata_timer_lock);
}

#ifdef CONFIG_HOTPLUG_CPU
static int __cpuinit
appldata_cpu_notify(struct notifier_block *self,
		    unsigned long action, void *hcpu)
{
	switch (action) {
	case CPU_ONLINE:
		appldata_online_cpu((long) hcpu);
		break;
	case CPU_DEAD:
		appldata_offline_cpu((long) hcpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block appldata_nb = {
	.notifier_call = appldata_cpu_notify,
};
#endif

/*
 * appldata_init()
 *
 * init timer, register /proc entries
 */
static int __init appldata_init(void)
{
	int i;

	P_DEBUG("sizeof(parameter_list) = %lu\n",
		sizeof(struct appldata_parameter_list));

	appldata_wq = create_singlethread_workqueue("appldata");
	if (!appldata_wq) {
		P_ERROR("Could not create work queue\n");
		return -ENOMEM;
	}

	for_each_online_cpu(i)
		appldata_online_cpu(i);

	/* Register cpu hotplug notifier */
	register_hotcpu_notifier(&appldata_nb);

	appldata_sysctl_header = register_sysctl_table(appldata_dir_table, 1);
#ifdef MODULE
	appldata_dir_table[0].de->owner = THIS_MODULE;
	appldata_table[0].de->owner = THIS_MODULE;
	appldata_table[1].de->owner = THIS_MODULE;
#endif

	P_DEBUG("Base interface initialized.\n");
	return 0;
}

/*
 * appldata_exit()
 *
 * stop timer, unregister /proc entries
 */
static void __exit appldata_exit(void)
{
	struct list_head *lh;
	struct appldata_ops *ops;
	int rc, i;

	P_DEBUG("Unloading module ...\n");
	/*
	 * ops list should be empty, but just in case something went wrong...
	 */
	spin_lock(&appldata_ops_lock);
	list_for_each(lh, &appldata_ops_list) {
		ops = list_entry(lh, struct appldata_ops, list);
		rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC,
				   (unsigned long) ops->data, ops->size,
				   ops->mod_lvl);
		if (rc != 0) {
			P_ERROR("STOP DIAG 0xDC for %s failed, "
				"return code: %d\n", ops->name, rc);
		}
	}
	spin_unlock(&appldata_ops_lock);

	for_each_online_cpu(i)
		appldata_offline_cpu(i);

	appldata_timer_active = 0;

	unregister_sysctl_table(appldata_sysctl_header);

	destroy_workqueue(appldata_wq);
	P_DEBUG("... module unloaded!\n");
}
/**************************** init / exit <END> ******************************/


module_init(appldata_init);
module_exit(appldata_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Gerald Schaefer");
MODULE_DESCRIPTION("Linux-VM Monitor Stream, base infrastructure");

EXPORT_SYMBOL_GPL(appldata_register_ops);
EXPORT_SYMBOL_GPL(appldata_unregister_ops);
EXPORT_SYMBOL_GPL(appldata_diag);

#ifdef MODULE
/*
 * Kernel symbols needed by appldata_mem and appldata_os modules.
 * However, if this file is compiled as a module (for testing only), these
 * symbols are not exported. In this case, we define them locally and export
 * those.
 */
void si_swapinfo(struct sysinfo *val)
{
	val->freeswap = -1ul;
	val->totalswap = -1ul;
}

unsigned long avenrun[3] = {-1 - FIXED_1/200, -1 - FIXED_1/200,
			    -1 - FIXED_1/200};
int nr_threads = -1;

void get_full_page_state(struct page_state *ps)
{
	memset(ps, -1, sizeof(struct page_state));
}

unsigned long nr_running(void)
{
	return -1;
}

unsigned long nr_iowait(void)
{
	return -1;
}

/*unsigned long nr_context_switches(void)
{
	return -1;
}*/
#endif /* MODULE */
EXPORT_SYMBOL_GPL(si_swapinfo);
EXPORT_SYMBOL_GPL(nr_threads);
EXPORT_SYMBOL_GPL(nr_running);
EXPORT_SYMBOL_GPL(nr_iowait);
//EXPORT_SYMBOL_GPL(nr_context_switches);