if (addr > end - size)
goto out;
}
+ if ((size + addr) < addr)
+ goto out;
+ if (addr > end - size)
+ goto out;
found:
area->next = *p;
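
The two added lines guard against unsigned wraparound before the existing range test: if size + addr overflows, the sum is numerically smaller than addr. A standalone sketch of that signature, using a fixed 32-bit type so the wrap reproduces on any host; the values are illustrative:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t addr = 0xfffff000u;	/* illustrative: near the top of a 32-bit space */
	uint32_t size = 0x2000u;	/* illustrative 8 KiB request */

	/* size + addr wraps to 0x1000 in 32-bit arithmetic, which is
	 * smaller than addr itself -- exactly what the new check tests. */
	if ((size + addr) < addr)
		printf("wraparound detected\n");
	return 0;
}
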
int i;
for (i = 0; i < area->nr_pages; i++) {
- BUG_ON(!area->pages[i]);
- __free_page(area->pages[i]);
+ struct page *page = area->pages[i];
+
+ BUG_ON(!page);
+ __free_page(page);
}
if (area->flags & VM_VPAGES)
vfree(area->pages);
else
kfree(area->pages);
}
EXPORT_SYMBOL(vmap);
-void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
- pgprot_t prot, int node)
+static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
+ pgprot_t prot, int node)
{
struct page **pages;
unsigned int nr_pages, array_size, i;
}
for (i = 0; i < area->nr_pages; i++) {
+ struct page *page;
+
if (node < 0)
- area->pages[i] = alloc_page(gfp_mask);
+ page = alloc_page(gfp_mask);
else
- area->pages[i] = alloc_pages_node(node, gfp_mask, 0);
- if (unlikely(!area->pages[i])) {
+ page = alloc_pages_node(node, gfp_mask, 0);
+
+ if (unlikely(!page)) {
/* Successfully allocated i pages, free them in __vunmap() */
area->nr_pages = i;
goto fail;
}
+ area->pages[i] = page;
}
if (map_vm_area(area, prot, &pages))
goto fail;
return area->addr;
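
The hunk keeps the existing unwind contract: on the first failed allocation, area->nr_pages is clamped to i so the shared fail path frees exactly the pages handed out so far. A minimal userspace sketch of the same partial-unwind pattern; the names are hypothetical, not kernel API:

#include <stdlib.h>

/* Hypothetical helper: allocate 'want' buffers, recording in *got how
 * many succeeded so the caller's single error path frees only those. */
static int alloc_many(void **slots, int want, int *got)
{
	int i;

	for (i = 0; i < want; i++) {
		void *p = malloc(64);

		if (!p) {
			*got = i;	/* slots[0..i-1] are live, slots[i..] untouched */
			return -1;
		}
		slots[i] = p;
	}
	*got = want;
	return 0;
}
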
* @vma: vma to cover (map full range of vma)
* @addr: vmalloc memory
* @pgoff: number of pages into addr before first page to map
- * @returns: 0 for success, -Exxx on failure
+ *
+ * Returns: 0 for success, -Exxx on failure
*
* This function checks that addr is a valid vmalloc'ed area, and
* that it is big enough to cover the vma. Will return failure if
* that criteria isn't met.
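
For reference, the usual caller of remap_vmalloc_range() is a driver mmap handler exposing a buffer obtained from vmalloc_user(); a sketch, where my_buf is a hypothetical driver-owned allocation:

/* Sketch: my_buf is a hypothetical buffer returned by vmalloc_user(). */
static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Cover the whole vma, starting at the buffer's first page. */
	return remap_vmalloc_range(vma, my_buf, 0);
}
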
}
-static int f(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
+static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
{
/* apply_to_page_range() does all the hard work. */
return 0;
}
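
The callback can be empty because alloc_vm_area() wants only the side effect of apply_to_page_range(): page tables get allocated as the walk descends to each pte. A condensed sketch of the caller, hedged against the exact source of this era:

struct vm_struct *alloc_vm_area(size_t size)
{
	struct vm_struct *area;

	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;

	/* Walking the range forces page-table allocation; f() itself
	 * has nothing left to do. */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				size, f, NULL)) {
		free_vm_area(area);
		return NULL;
	}

	return area;
}
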
/**
* alloc_vm_area - allocate a range of kernel address space
* @size: size of the area
- * @returns: NULL on failure, vm_struct on success
+ *
+ * Returns: NULL on failure, vm_struct on success
*
* This function reserves a range of kernel address space, and
* allocates pagetables to map that range.  No actual mappings
* are created.
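
A typical use, sketched; the Xen grant-table code was the original consumer, and error handling is elided:

struct vm_struct *area;

area = alloc_vm_area(PAGE_SIZE);	/* reserve one page of KVA */
if (!area)
	return -ENOMEM;
/* area->addr is now backed by page tables but carries no mappings;
 * the caller installs the real ptes itself (e.g. via a hypercall). */
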