Staging: hv: remove wrapper functions for bit operations
drivers/staging/hv/osd.c
/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/time.h>

#include <asm/io.h>
#include <asm/bitops.h>
#include <asm/kmap_types.h>
#include <asm/atomic.h>

#include "include/osd.h"


/* Data types */

struct osd_callback_struct {
	struct work_struct work;
	void (*callback)(void *);
	void *data;
};

int InterlockedIncrement(int *val)
{
	return atomic_inc_return((atomic_t *)val);
}

int InterlockedDecrement(int *val)
{
	return atomic_dec_return((atomic_t *)val);
}

#ifndef atomic_cmpxchg
#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
#endif
int InterlockedCompareExchange(int *val, int new, int curr)
{
	/* return ((int)cmpxchg(((atomic_t*)val), curr, new)); */
	return atomic_cmpxchg((atomic_t *)val, curr, new);
}

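/*
 * Illustrative use of the Interlocked* wrappers (a sketch only; "ref" is a
 * hypothetical caller-side counter, not defined in this file).  The int must
 * only be touched through these helpers so it behaves like an atomic_t:
 *
 *	static int ref;
 *
 *	InterlockedIncrement(&ref);			// ref becomes 1
 *	if (InterlockedCompareExchange(&ref, 2, 1) == 1)
 *		;					// swapped 1 -> 2
 *	InterlockedDecrement(&ref);			// back to 1
 */
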
void *VirtualAllocExec(unsigned int size)
{
#ifdef __x86_64__
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL_EXEC);
#else
	return __vmalloc(size, GFP_KERNEL, __pgprot(__PAGE_KERNEL & (~_PAGE_NX)));
#endif
}

void VirtualFree(void *VirtAddr)
{
	return vfree(VirtAddr);
}

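/*
 * Illustrative pairing of the executable-allocation wrappers (sketch only;
 * "exec_page" is a hypothetical caller-side pointer):
 *
 *	void *exec_page = VirtualAllocExec(PAGE_SIZE);
 *
 *	if (!exec_page)
 *		return -ENOMEM;
 *	// ...populate the page with executable code...
 *	VirtualFree(exec_page);
 */
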
void *PageAlloc(unsigned int count)
{
	void *p;

	p = (void *)__get_free_pages(GFP_KERNEL, get_order(count * PAGE_SIZE));
	if (p)
		memset(p, 0, count * PAGE_SIZE);
	return p;

	/* struct page* page = alloc_page(GFP_KERNEL|__GFP_ZERO); */
	/* void *p; */

	/* BUGBUG: We need to use kmap in case we are in HIMEM region */
	/* p = page_address(page); */
	/* if (p) memset(p, 0, PAGE_SIZE); */
	/* return p; */
}

void PageFree(void *page, unsigned int count)
{
	free_pages((unsigned long)page, get_order(count * PAGE_SIZE));
	/* struct page *p = virt_to_page(page); */
	/* __free_page(p); */
}

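/*
 * Illustrative PageAlloc()/PageFree() lifecycle (sketch; "buf" and the page
 * count of 4 are hypothetical):
 *
 *	void *buf = PageAlloc(4);	// 4 zeroed, physically contiguous pages
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	// ...use the buffer...
 *	PageFree(buf, 4);		// count must match the allocation
 */
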
void *PageMapVirtualAddress(unsigned long Pfn)
{
	return kmap_atomic(pfn_to_page(Pfn), KM_IRQ0);
}

void PageUnmapVirtualAddress(void *VirtAddr)
{
	kunmap_atomic(VirtAddr, KM_IRQ0);
}

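/*
 * Illustrative use of the atomic PFN mapping helpers (sketch; "pfn", "dst"
 * and "len" are hypothetical caller-side values).  The mapping is atomic
 * (KM_IRQ0), so the unmap must happen before leaving the same context:
 *
 *	void *va = PageMapVirtualAddress(pfn);
 *
 *	memcpy(dst, va, len);
 *	PageUnmapVirtualAddress(va);
 */
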
void *MemMapIO(unsigned long phys, unsigned long size)
{
	return (void *)GetVirtualAddress(phys);
	/* return ioremap_nocache(phys, size); */
}

void MemUnmapIO(void *virt)
{
	/* iounmap(virt); */
}

static void TimerCallback(unsigned long data)
{
	struct osd_timer *t = (struct osd_timer *)data;

	t->callback(t->context);
}

struct osd_timer *TimerCreate(PFN_TIMER_CALLBACK pfnTimerCB, void *context)
{
	struct osd_timer *t = kmalloc(sizeof(struct osd_timer), GFP_KERNEL);

	if (!t)
		return NULL;

	t->callback = pfnTimerCB;
	t->context = context;

	init_timer(&t->timer);
	t->timer.data = (unsigned long)t;
	t->timer.function = TimerCallback;

	return t;
}

void TimerStart(struct osd_timer *t, u32 expirationInUs)
{
	t->timer.expires = jiffies + usecs_to_jiffies(expirationInUs);
	add_timer(&t->timer);
}

int TimerStop(struct osd_timer *t)
{
	return del_timer(&t->timer);
}

void TimerClose(struct osd_timer *t)
{
	del_timer(&t->timer);
	kfree(t);
}

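/*
 * Illustrative timer lifecycle (sketch; "my_timeout" and "dev" are
 * hypothetical caller-side names).  The callback receives the context
 * pointer that was passed to TimerCreate():
 *
 *	static void my_timeout(void *context) { ... }
 *
 *	struct osd_timer *t = TimerCreate(my_timeout, dev);
 *
 *	TimerStart(t, 10 * 1000);	// fires in ~10 ms (argument is in us)
 *	...
 *	TimerStop(t);			// nonzero if the timer was still pending
 *	TimerClose(t);			// also frees the osd_timer
 */
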
struct osd_waitevent *WaitEventCreate(void)
{
	struct osd_waitevent *wait = kmalloc(sizeof(struct osd_waitevent), GFP_KERNEL);

	if (!wait)
		return NULL;

	wait->condition = 0;
	init_waitqueue_head(&wait->event);
	return wait;
}

void WaitEventSet(struct osd_waitevent *waitEvent)
{
	waitEvent->condition = 1;
	wake_up_interruptible(&waitEvent->event);
}

int WaitEventWait(struct osd_waitevent *waitEvent)
{
	int ret = 0;

	ret = wait_event_interruptible(waitEvent->event, waitEvent->condition);
	waitEvent->condition = 0;
	return ret;
}

int WaitEventWaitEx(struct osd_waitevent *waitEvent, u32 TimeoutInMs)
{
	int ret = 0;

	ret = wait_event_interruptible_timeout(waitEvent->event,
			waitEvent->condition, msecs_to_jiffies(TimeoutInMs));
	waitEvent->condition = 0;
	return ret;
}

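/*
 * Illustrative wait-event handshake (sketch; the request/completion split
 * across two contexts and send_request() are hypothetical).  The wait is
 * interruptible, so callers should also handle a -ERESTARTSYS-style return:
 *
 *	// requesting side
 *	struct osd_waitevent *ev = WaitEventCreate();
 *
 *	send_request();
 *	WaitEventWait(ev);		// blocks until the event is set
 *
 *	// completion side (e.g. interrupt/DPC path)
 *	WaitEventSet(ev);		// wakes the waiter
 */
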
void *Physical2LogicalAddr(unsigned long PhysAddr)
{
	void *logicalAddr = phys_to_virt(PhysAddr);

	BUG_ON(!virt_addr_valid(logicalAddr));
	return logicalAddr;
}

unsigned long Logical2PhysicalAddr(void *LogicalAddr)
{
	BUG_ON(!virt_addr_valid(LogicalAddr));
	return virt_to_phys(LogicalAddr);
}

unsigned long Virtual2Physical(void *VirtAddr)
{
	unsigned long pfn = vmalloc_to_pfn(VirtAddr);

	return pfn << PAGE_SHIFT;
}

static void osd_callback_work(struct work_struct *work)
{
	struct osd_callback_struct *cb = container_of(work,
					struct osd_callback_struct, work);

	(cb->callback)(cb->data);
	kfree(cb);
}

int osd_schedule_callback(struct workqueue_struct *wq,
			void (*func)(void *), void *data)
{
	struct osd_callback_struct *cb;

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb) {
		printk(KERN_ERR "unable to allocate memory in osd_schedule_callback\n");
		return -1;
	}

	cb->callback = func;
	cb->data = data;
	INIT_WORK(&cb->work, osd_callback_work);
	return queue_work(wq, &cb->work);
}

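/*
 * Illustrative deferral through osd_schedule_callback() (sketch; "wq",
 * "process_offer" and "channel" are hypothetical caller-side names).  The
 * callback runs once in workqueue context and the osd_callback_struct is
 * freed automatically after it returns:
 *
 *	static void process_offer(void *context) { ... }
 *
 *	osd_schedule_callback(wq, process_offer, channel);
 */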