/* From the Linux kernel staging tree:
 * drivers/staging/unisys/visorutil/periodic_work.c
 */
1 /* periodic_work.c
2 *
3 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
4 * All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14 * NON INFRINGEMENT. See the GNU General Public License for more
15 * details.
16 */
17
18 /*
19 * Helper functions to schedule periodic work in Linux kernel mode.
20 */
21 #include <linux/sched.h>
22
23 #include "timskmod.h"
24 #include "periodic_work.h"
25
26 #define MYDRVNAME "periodic_work"
27
/* Private state for one periodic-work instance.  Opaque to clients; they
 * only ever hold a struct periodic_work pointer.
 */
struct periodic_work {
	rwlock_t lock;			/* protects all fields below */
	struct delayed_work work;	/* the kernel delayed-work item we queue */
	void (*workfunc)(void *);	/* client callback run each period */
	void *workfuncarg;		/* opaque argument passed to workfunc */
	bool is_scheduled;		/* true while a work item is queued/running */
	bool want_to_stop;		/* set by _stop(); tells worker not to requeue */
	ulong jiffy_interval;		/* delay between invocations, in jiffies */
	struct workqueue_struct *workqueue;	/* queue the work runs on */
	const char *devnam;		/* device name, for identification only */
};
39
40 static void periodic_work_func(struct work_struct *work)
41 {
42 struct periodic_work *pw;
43
44 pw = container_of(work, struct periodic_work, work.work);
45 (*pw->workfunc)(pw->workfuncarg);
46 }
47
48 struct periodic_work *visor_periodic_work_create(ulong jiffy_interval,
49 struct workqueue_struct *workqueue,
50 void (*workfunc)(void *),
51 void *workfuncarg,
52 const char *devnam)
53 {
54 struct periodic_work *pw;
55
56 pw = kzalloc(sizeof(*pw), GFP_KERNEL | __GFP_NORETRY);
57 if (!pw)
58 return NULL;
59
60 rwlock_init(&pw->lock);
61 pw->jiffy_interval = jiffy_interval;
62 pw->workqueue = workqueue;
63 pw->workfunc = workfunc;
64 pw->workfuncarg = workfuncarg;
65 pw->devnam = devnam;
66 return pw;
67 }
68 EXPORT_SYMBOL_GPL(visor_periodic_work_create);
69
/** Free a descriptor allocated by visor_periodic_work_create().
 *  The caller must stop the work first (visor_periodic_work_stop());
 *  the delayed_work item is embedded in *pw, so freeing while still
 *  scheduled would leave a queued work item pointing at freed memory.
 */
void visor_periodic_work_destroy(struct periodic_work *pw)
{
	kfree(pw);
}
EXPORT_SYMBOL_GPL(visor_periodic_work_destroy);
75
76 /** Call this from your periodic work worker function to schedule the next
77 * call.
78 * If this function returns false, there was a failure and the
79 * periodic work is no longer scheduled
80 */
81 bool visor_periodic_work_nextperiod(struct periodic_work *pw)
82 {
83 bool rc = false;
84
85 write_lock(&pw->lock);
86 if (pw->want_to_stop) {
87 pw->is_scheduled = false;
88 pw->want_to_stop = false;
89 rc = true; /* yes, true; see visor_periodic_work_stop() */
90 goto unlock;
91 } else if (queue_delayed_work(pw->workqueue, &pw->work,
92 pw->jiffy_interval) < 0) {
93 pw->is_scheduled = false;
94 rc = false;
95 goto unlock;
96 }
97 rc = true;
98 unlock:
99 write_unlock(&pw->lock);
100 return rc;
101 }
102 EXPORT_SYMBOL_GPL(visor_periodic_work_nextperiod);
103
104 /** This function returns true iff new periodic work was actually started.
105 * If this function returns false, then no work was started
106 * (either because it was already started, or because of a failure).
107 */
108 bool visor_periodic_work_start(struct periodic_work *pw)
109 {
110 bool rc = false;
111
112 write_lock(&pw->lock);
113 if (pw->is_scheduled) {
114 rc = false;
115 goto unlock;
116 }
117 if (pw->want_to_stop) {
118 rc = false;
119 goto unlock;
120 }
121 INIT_DELAYED_WORK(&pw->work, &periodic_work_func);
122 if (queue_delayed_work(pw->workqueue, &pw->work,
123 pw->jiffy_interval) < 0) {
124 rc = false;
125 goto unlock;
126 }
127 pw->is_scheduled = true;
128 rc = true;
129 unlock:
130 write_unlock(&pw->lock);
131 return rc;
132 }
133 EXPORT_SYMBOL_GPL(visor_periodic_work_start);
134
/** This function returns true iff your call actually stopped the periodic
 * work.
 *
 * -- PAY ATTENTION... this is important --
 *
 * NO NO #1
 *
 * Do NOT call this function from some function that is running on the
 * same workqueue as the work you are trying to stop might be running
 * on! If you violate this rule, visor_periodic_work_stop() MIGHT work,
 * but it also MIGHT get hung up in an infinite loop saying
 * "waiting for delayed work...". This will happen if the delayed work
 * you are trying to cancel has been put in the workqueue list, but can't
 * run yet because we are running that same workqueue thread right now.
 *
 * Bottom line: If you need to call visor_periodic_work_stop() from a
 * workitem, be sure the workitem is on a DIFFERENT workqueue than the
 * workitem that you are trying to cancel.
 *
 * If I could figure out some way to check for this "no no" condition in
 * the code, I would. It would have saved me the trouble of writing this
 * long comment. And also, don't think this is some "theoretical" race
 * condition. It is REAL, as I have spent the day chasing it.
 *
 * NO NO #2
 *
 * Take close note of the locks that you own when you call this function.
 * You must NOT own any locks that are needed by the periodic work
 * function that is currently installed. If you DO, a deadlock may result,
 * because stopping the periodic work often involves waiting for the last
 * iteration of the periodic work function to complete. Again, if you hit
 * this deadlock, you will get hung up in an infinite loop saying
 * "waiting for delayed work...".
 */
bool visor_periodic_work_stop(struct periodic_work *pw)
{
	bool stopped_something = false;

	write_lock(&pw->lock);
	/* Report true only to the first caller that finds the work live. */
	stopped_something = pw->is_scheduled && (!pw->want_to_stop);
	while (pw->is_scheduled) {
		pw->want_to_stop = true;
		if (cancel_delayed_work(&pw->work)) {
			/* We get here if the delayed work was pending as
			 * delayed work, but was NOT run.
			 */
			WARN_ON(!pw->is_scheduled);
			pw->is_scheduled = false;
		} else {
			/* If we get here, either the delayed work:
			 * - was run, OR,
			 * - is running RIGHT NOW on another processor, OR,
			 * - wasn't even scheduled (there is a miniscule
			 *   timing window where this could be the case)
			 * flush_workqueue() would make sure it is finished
			 * executing, but that still isn't very useful, which
			 * explains the loop...
			 */
		}
		if (pw->is_scheduled) {
			/* Drop the lock so the worker can run (it needs the
			 * write lock to clear is_scheduled), nap briefly,
			 * then re-check under the lock.
			 */
			write_unlock(&pw->lock);
			__set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(10);
			write_lock(&pw->lock);
		} else {
			pw->want_to_stop = false;
		}
	}
	write_unlock(&pw->lock);
	return stopped_something;
}
EXPORT_SYMBOL_GPL(visor_periodic_work_stop);