[PATCH] mspec driver
drivers/char/mspec.c
/*
 * Copyright (C) 2001-2006 Silicon Graphics, Inc.  All rights
 * reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

/*
 * SN Platform Special Memory (mspec) Support
 *
 * This driver exports the SN special memory (mspec) facility to user
 * processes.
 * There are three types of memory made available through this driver:
 * fetchops, uncached and cached.
 *
 * Fetchops are atomic memory operations that are implemented in the
 * memory controller on SGI SN hardware.
 *
 * Uncached regions are used for the memory write combining feature of
 * the ia64 cpu.
 *
 * Cached regions are used for areas of memory that are used as cached
 * addresses on our partition and as uncached addresses from other
 * partitions.  Due to a design constraint of the SN2 Shub, processors
 * on the same FSB cannot perform both a cached and an uncached
 * reference to the same cache line.  These special memory cached
 * regions prevent the kernel from ever dropping in a TLB entry and
 * therefore prevent the processor from ever speculating a cache line
 * from this page.
 */
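
/*
 * Illustrative user-space sketch (not part of the driver): one way a
 * process might open and map the fetchop device exported below.  The
 * node path /dev/sgi_fetchop is an assumption based on the misc device
 * name registered in this file; the MAP_SHARED, writable, offset-0
 * requirements are the ones enforced by mspec_mmap().  Which offsets
 * within a mapped fetchop variable trigger which atomic operation is
 * platform-defined (the FETCHOP_* constants used by this file) and is
 * not shown here; the access below is only a placeholder.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		size_t len = getpagesize();
 *		int fd = open("/dev/sgi_fetchop", O_RDWR);	// assumed node path
 *		if (fd < 0)
 *			return 1;
 *		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			       MAP_SHARED, fd, 0);		// offset must be 0
 *		if (p == MAP_FAILED)
 *			return 1;
 *		volatile uint64_t *var = (volatile uint64_t *)p;
 *		uint64_t v = *var;	// placeholder access; real fetch-ops
 *					// use the platform-defined offsets
 *		(void)v;
 *		munmap(p, len);
 *		close(fd);
 *		return 0;
 *	}
 */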

#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/miscdevice.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/numa.h>
#include <asm/page.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/atomic.h>
#include <asm/tlbflush.h>
#include <asm/uncached.h>
#include <asm/sn/addrs.h>
#include <asm/sn/arch.h>
#include <asm/sn/mspec.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/io.h>
#include <asm/sn/bte.h>
#include <asm/sn/shubio.h>


#define FETCHOP_ID      "SGI Fetchop,"
#define CACHED_ID       "Cached,"
#define UNCACHED_ID     "Uncached"
#define REVISION        "4.0"
#define MSPEC_BASENAME  "mspec"

/*
 * Page types allocated by the device.
 */
enum {
        MSPEC_FETCHOP = 1,
        MSPEC_CACHED,
        MSPEC_UNCACHED
};

static int is_sn2;

/*
 * One of these structures is allocated when an mspec region is mmapped. The
 * structure is pointed to by the vma->vm_private_data field in the vma struct.
 * This structure is used to record the addresses of the mspec pages.
 */
struct vma_data {
        atomic_t refcnt;        /* Number of vmas sharing the data. */
        spinlock_t lock;        /* Serialize access to the vma. */
        int count;              /* Number of pages allocated. */
        int type;               /* Type of pages allocated. */
        unsigned long maddr[0]; /* Array of MSPEC addresses. */
};

/* used on shub2 to clear FOP cache in the HUB */
static unsigned long scratch_page[MAX_NUMNODES];
#define SH2_AMO_CACHE_ENTRIES   4

/*
 * mspec_zero_block
 *
 * Zero an mspec block.  On SN2 the block is cleared with a BTE
 * zero-fill transfer; on SHUB2 the hub's FOP (AMO) cache is flushed
 * first by issuing dummy fetchop loads from the node's scratch page.
 * On other platforms a plain memset() suffices.
 */
static inline int
mspec_zero_block(unsigned long addr, int len)
{
        int status;

        if (is_sn2) {
                if (is_shub2()) {
                        int nid;
                        void *p;
                        int i;

                        nid = nasid_to_cnodeid(get_node_number(__pa(addr)));
                        p = (void *)TO_AMO(scratch_page[nid]);

                        for (i = 0; i < SH2_AMO_CACHE_ENTRIES; i++) {
                                FETCHOP_LOAD_OP(p, FETCHOP_LOAD);
                                p += FETCHOP_VAR_SIZE;
                        }
                }

                status = bte_copy(0, addr & ~__IA64_UNCACHED_OFFSET, len,
                                  BTE_WACQUIRE | BTE_ZERO_FILL, NULL);
        } else {
                memset((char *) addr, 0, len);
                status = 0;
        }
        return status;
}

/*
 * mspec_open
 *
 * Called when a device mapping is created by a means other than mmap
 * (via fork, etc.).  Increments the reference count on the underlying
 * mspec data so it is not freed prematurely.
 */
static void
mspec_open(struct vm_area_struct *vma)
{
        struct vma_data *vdata;

        vdata = vma->vm_private_data;
        atomic_inc(&vdata->refcnt);
}
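
/*
 * Hedged sketch (user space, not part of the driver) of the case
 * mspec_open() exists for: a process maps one of the mspec devices and
 * then forks.  The child's copy of the vma triggers mspec_open(), so
 * vdata->refcnt keeps the vma_data (and its mspec pages) allocated
 * until the last mapping is closed.  The node path /dev/mspec_cached
 * is an assumption based on the misc device name registered below.
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *	#include <sys/wait.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		size_t len = getpagesize();
 *		int fd = open("/dev/mspec_cached", O_RDWR);	// assumed node path
 *		if (fd < 0)
 *			return 1;
 *		char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			       MAP_SHARED, fd, 0);
 *		if (p == MAP_FAILED)
 *			return 1;
 *		if (fork() == 0) {	// child: vma duplicated, mspec_open() runs
 *			p[0] = 1;	// same underlying mspec page as the parent
 *			_exit(0);	// child exit drops its reference
 *		}
 *		wait(NULL);
 *		munmap(p, len);		// last unmap: mspec_close() frees the pages
 *		close(fd);
 *		return 0;
 *	}
 */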

/*
 * mspec_close
 *
 * Called when unmapping a device mapping.  Frees all mspec pages
 * belonging to the vma.
 */
static void
mspec_close(struct vm_area_struct *vma)
{
        struct vma_data *vdata;
        int i, pages, result, vdata_size;

        vdata = vma->vm_private_data;
        if (!atomic_dec_and_test(&vdata->refcnt))
                return;

        pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
        vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
        for (i = 0; i < pages; i++) {
                if (vdata->maddr[i] == 0)
                        continue;
                /*
                 * Clear the page before sticking it back
                 * into the pool.
                 */
                result = mspec_zero_block(vdata->maddr[i], PAGE_SIZE);
                if (!result)
                        uncached_free_page(vdata->maddr[i]);
                else
                        printk(KERN_WARNING "mspec_close(): "
                               "failed to zero page, error %i\n", result);
        }

        if (vdata_size <= PAGE_SIZE)
                kfree(vdata);
        else
                vfree(vdata);
}


/*
 * mspec_nopfn
 *
 * Creates an mspec page and maps it to user space.
 */
static unsigned long
mspec_nopfn(struct vm_area_struct *vma, unsigned long address)
{
        unsigned long paddr, maddr;
        unsigned long pfn;
        int index;
        struct vma_data *vdata = vma->vm_private_data;

        index = (address - vma->vm_start) >> PAGE_SHIFT;
        maddr = (volatile unsigned long) vdata->maddr[index];
        if (maddr == 0) {
                maddr = uncached_alloc_page(numa_node_id());
                if (maddr == 0)
                        return NOPFN_OOM;

                /*
                 * The page was allocated without holding vdata->lock;
                 * if another task installed a page for this index in
                 * the meantime, free ours and use the existing one.
                 */
                spin_lock(&vdata->lock);
                if (vdata->maddr[index] == 0) {
                        vdata->count++;
                        vdata->maddr[index] = maddr;
                } else {
                        uncached_free_page(maddr);
                        maddr = vdata->maddr[index];
                }
                spin_unlock(&vdata->lock);
        }

        if (vdata->type == MSPEC_FETCHOP)
                paddr = TO_AMO(maddr);
        else
                paddr = __pa(TO_CAC(maddr));

        pfn = paddr >> PAGE_SHIFT;

        return pfn;
}

static struct vm_operations_struct mspec_vm_ops = {
        .open = mspec_open,
        .close = mspec_close,
        .nopfn = mspec_nopfn
};

/*
 * mspec_mmap
 *
 * Called when mmapping the device.  Initializes the vma with a fault
 * handler and a private data structure necessary to allocate, track,
 * and free the underlying pages.
 */
static int
mspec_mmap(struct file *file, struct vm_area_struct *vma, int type)
{
        struct vma_data *vdata;
        int pages, vdata_size;

        if (vma->vm_pgoff != 0)
                return -EINVAL;

        if ((vma->vm_flags & VM_SHARED) == 0)
                return -EINVAL;

        if ((vma->vm_flags & VM_WRITE) == 0)
                return -EPERM;

        pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
        vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
        if (vdata_size <= PAGE_SIZE)
                vdata = kmalloc(vdata_size, GFP_KERNEL);
        else
                vdata = vmalloc(vdata_size);
        if (!vdata)
                return -ENOMEM;
        memset(vdata, 0, vdata_size);

        vdata->type = type;
        spin_lock_init(&vdata->lock);
        vdata->refcnt = ATOMIC_INIT(1);
        vma->vm_private_data = vdata;

        vma->vm_flags |= (VM_IO | VM_LOCKED | VM_RESERVED | VM_PFNMAP);
        if (vdata->type == MSPEC_FETCHOP || vdata->type == MSPEC_UNCACHED)
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        vma->vm_ops = &mspec_vm_ops;

        return 0;
}

static int
fetchop_mmap(struct file *file, struct vm_area_struct *vma)
{
        return mspec_mmap(file, vma, MSPEC_FETCHOP);
}

static int
cached_mmap(struct file *file, struct vm_area_struct *vma)
{
        return mspec_mmap(file, vma, MSPEC_CACHED);
}

static int
uncached_mmap(struct file *file, struct vm_area_struct *vma)
{
        return mspec_mmap(file, vma, MSPEC_UNCACHED);
}

static struct file_operations fetchop_fops = {
        .owner = THIS_MODULE,
        .mmap = fetchop_mmap
};

static struct miscdevice fetchop_miscdev = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "sgi_fetchop",
        .fops = &fetchop_fops
};

static struct file_operations cached_fops = {
        .owner = THIS_MODULE,
        .mmap = cached_mmap
};

static struct miscdevice cached_miscdev = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "mspec_cached",
        .fops = &cached_fops
};

static struct file_operations uncached_fops = {
        .owner = THIS_MODULE,
        .mmap = uncached_mmap
};

static struct miscdevice uncached_miscdev = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "mspec_uncached",
        .fops = &uncached_fops
};

/*
 * mspec_init
 *
 * Called at boot time to initialize the mspec facility.
 */
static int __init
mspec_init(void)
{
        int ret;
        int nid;

        /*
         * The fetchop device only works on SN2 hardware; the uncached
         * and cached memory drivers should both be valid on all ia64
         * hardware.
         */
        if (ia64_platform_is("sn2")) {
                is_sn2 = 1;
                if (is_shub2()) {
                        ret = -ENOMEM;
                        for_each_online_node(nid) {
                                int actual_nid;
                                int nasid;
                                unsigned long phys;

                                scratch_page[nid] = uncached_alloc_page(nid);
                                if (scratch_page[nid] == 0)
                                        goto free_scratch_pages;
                                phys = __pa(scratch_page[nid]);
                                nasid = get_node_number(phys);
                                actual_nid = nasid_to_cnodeid(nasid);
                                if (actual_nid != nid)
                                        goto free_scratch_pages;
                        }
                }

                ret = misc_register(&fetchop_miscdev);
                if (ret) {
                        printk(KERN_ERR
                               "%s: failed to register device %i\n",
                               FETCHOP_ID, ret);
                        goto free_scratch_pages;
                }
        }
        ret = misc_register(&cached_miscdev);
        if (ret) {
                printk(KERN_ERR "%s: failed to register device %i\n",
                       CACHED_ID, ret);
                if (is_sn2)
                        misc_deregister(&fetchop_miscdev);
                goto free_scratch_pages;
        }
        ret = misc_register(&uncached_miscdev);
        if (ret) {
                printk(KERN_ERR "%s: failed to register device %i\n",
                       UNCACHED_ID, ret);
                misc_deregister(&cached_miscdev);
                if (is_sn2)
                        misc_deregister(&fetchop_miscdev);
                goto free_scratch_pages;
        }

        printk(KERN_INFO "%s %s initialized devices: %s %s %s\n",
               MSPEC_BASENAME, REVISION, is_sn2 ? FETCHOP_ID : "",
               CACHED_ID, UNCACHED_ID);

        return 0;

 free_scratch_pages:
        for_each_node(nid) {
                if (scratch_page[nid] != 0)
                        uncached_free_page(scratch_page[nid]);
        }
        return ret;
}

static void __exit
mspec_exit(void)
{
        int nid;

        misc_deregister(&uncached_miscdev);
        misc_deregister(&cached_miscdev);
        if (is_sn2) {
                misc_deregister(&fetchop_miscdev);

                for_each_node(nid) {
                        if (scratch_page[nid] != 0)
                                uncached_free_page(scratch_page[nid]);
                }
        }
}

module_init(mspec_init);
module_exit(mspec_exit);

MODULE_AUTHOR("Silicon Graphics, Inc. <linux-altix@sgi.com>");
MODULE_DESCRIPTION("Driver for SGI SN special memory operations");
MODULE_LICENSE("GPL");